[yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Sun Jun 3 09:57:31 PDT 2012


2 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/f2c8241051f9/
changeset:   f2c8241051f9
branch:      yt
user:        chummels
date:        2012-06-03 18:56:48
summary:     Adding documentation to cut_region function.
affected #:  1 file

diff -r 90c24c4198fbd5e3542c8333784215eecf8b8b00 -r f2c8241051f96b7f3ab388ec3f2038f187266320 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2543,7 +2543,18 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.
+        fly with a set of field_cuts.  This is useful for applying
+        conditions to the fields in your data object.
+        
+        Examples
+        --------
+        To find the total mass of gas above 10^6 K in your volume:
+
+        >>> pf = load("RedshiftOutput0005")
+        >>> ad = pf.h.all_data()
+        >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+        >>> print cr.quantities["TotalQuantity"]("CellMassMsun")
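+
+        Multiple cuts are combined with a logical "and"; for example, with
+        a second (hypothetical) cut on density:
+
+        >>> cr = ad.cut_region(["grid['Temperature'] > 1e6",
+        ...                     "grid['Density'] < 1e-28"])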
+
         """
         return InLineExtractedRegionBase(self, field_cuts)
 



https://bitbucket.org/yt_analysis/yt/changeset/2f074d314956/
changeset:   2f074d314956
branch:      yt
user:        chummels
date:        2012-06-03 18:57:09
summary:     Merging.
affected #:  9 files

diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
 include distribute_setup.py
-recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
-recursive-include yt/ *.pyx *.pxd *.hh *.h README* 
+recursive-include yt/gui/reason/html *.html *.png *.ico *.js
+recursive-include yt *.pyx *.pxd *.hh *.h README* 


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -353,7 +353,7 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
 echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
@@ -391,7 +391,7 @@
 get_enzotools mercurial-2.2.tar.gz
 get_enzotools ipython-0.12.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
-get_enzotools Cython-0.15.1.tar.gz
+get_enzotools Cython-0.16.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -631,7 +631,7 @@
 
 do_setup_py ipython-0.12
 do_setup_py h5py-2.0.1
-do_setup_py Cython-0.15.1
+do_setup_py Cython-0.16
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,4 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    get_halo_sphere, \
     standard_fields


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -43,14 +43,14 @@
 @add_function("Min_Dark_Matter_Density")
 def find_minimum_dm_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Dark_Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Dark_Matter_Density")
 def find_maximum_dm_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Dark_Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -58,7 +58,7 @@
 def find_CoM_dm_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=False, 
                                                       use_particles=True,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -67,14 +67,14 @@
 @add_function("Min_Gas_Density")
 def find_minimum_gas_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Gas_Density")
 def find_maximum_gas_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -82,7 +82,7 @@
 def find_CoM_gas_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
                                                       use_particles=False,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -91,14 +91,14 @@
 @add_function("Min_Total_Density")
 def find_minimum_total_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Total_Density")
 def find_maximum_total_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -106,7 +106,7 @@
 def find_CoM_total_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
                                                       use_particles=True,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -115,14 +115,14 @@
 @add_function("Min_Temperature")
 def find_minimum_temperature(data):
     ma, mini, mx, my, mz, mg = data.quantities['MinLocation']('Temperature',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Temperature")
 def find_maximum_temperature(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Temperature',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -64,7 +64,7 @@
                                          dm_only=False, resize=True, 
                                          fancy_padding=True, rearrange=True),
                  halo_radius=None, radius_units='1', n_profile_bins=50,
-                 recenter = None,
+                 recenter=None,
                  profile_output_dir='radial_profiles', projection_output_dir='projections',
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
                  velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'], 
@@ -111,8 +111,32 @@
             Args given with call to halo finder function.  Default: None.
         halo_finder_kwargs : dict
             kwargs given with call to halo finder function. Default: None.
-        recenter : {string, function
-            The name of a function that recenters the halo for analysis.
+        recenter : {string, function}
+            The exact location of the sphere center can significantly affect 
+            radial profiles.  The halo center loaded by the HaloProfiler will 
+            typically be the dark matter center of mass calculated by a halo 
+            finder.  However, this may not be the best location for centering 
+            profiles of baryon quantities.  For example, one may want to center 
+            on the maximum density.
+            If recenter is given as a string, one of the existing recentering 
+            functions will be used:
+                Min_Dark_Matter_Density : location of minimum dark matter density
+                Max_Dark_Matter_Density : location of maximum dark matter density
+                CoM_Dark_Matter_Density : dark matter center of mass
+                Min_Gas_Density : location of minimum gas density
+                Max_Gas_Density : location of maximum gas density
+                CoM_Gas_Density : gas center of mass
+                Min_Total_Density : location of minimum total density
+                Max_Total_Density : location of maximum total density
+                CoM_Total_Density : total center of mass
+                Min_Temperature : location of minimum temperature
+                Max_Temperature : location of maximum temperature
+            Alternately, a function can be supplied for custom recentering.
+            The function should take only one argument, a sphere object.
+                Example function:
+                    def my_center_of_mass(data):
+                       my_x, my_y, my_z = data.quantities['CenterOfMass']()
+                       return (my_x, my_y, my_z)
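+            The function can then be passed in directly, or one of the
+            names above given as a string; e.g. (a sketch using the
+            dataset from the Examples section below):
+                hp = HaloProfiler("RedshiftOutput0005/RD0005",
+                                  recenter='Max_Gas_Density')
+                hp = HaloProfiler("RedshiftOutput0005/RD0005",
+                                  recenter=my_center_of_mass)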
             Default: None.
         halo_radius : float
             If no halo radii are provided in the halo list file, this
@@ -148,8 +172,7 @@
                 * ["bulk", "sphere"]: the bulk velocity of the sphere
                   centered on the halo center.
     	        * ["max", field]: the velocity of the cell that is the
-    	          location of the maximum of the field 
-                  specified (used only when halos set to single).
+    	          location of the maximum of the field specified.
         filter_quantities : array_like
             Quantities from the original halo list file to be written out in the 
             filtered list file.  Default: ['id','center'].
@@ -161,8 +184,8 @@
         
         Examples
         --------
-        >>> import yt.analysis_modules.halo_profiler.api as HP
-        >>> hp = HP.halo_profiler("DD0242/DD0242")
+        >>> from yt.analysis_modules.halo_profiler.api import *
+        >>> hp = HaloProfiler("RedshiftOutput0005/RD0005")
         
         """
         ParallelAnalysisInterface.__init__(self)
@@ -226,13 +249,9 @@
         # Option to recenter sphere someplace else.
         self.recenter = recenter
 
-        # Look for any field that might need to have the bulk velocity set.
+        # Flag for whether calculating halo bulk velocity is necessary.
         self._need_bulk_velocity = False
-        for field in [hp['field'] for hp in self.profile_fields]:
-            if 'Velocity' in field or 'Mach' in field:
-                self._need_bulk_velocity = True
-                break
-
+        
         # Check validity for VelocityCenter parameter which toggles how the 
         # velocity is zeroed out for radial velocity profiles.
         self.velocity_center = velocity_center[:]
@@ -250,9 +269,7 @@
                 mylog.error("Second value of VelocityCenter must be either 'halo' or 'sphere' if first value is 'bulk'.")
                 return None
         elif self.velocity_center[0] == 'max':
-            if self.halos is 'multiple':
-                mylog.error("Getting velocity center from a max field value only works with halos='single'.")
-                return None
+            mylog.info('Using position of max %s for velocity center.' % self.velocity_center[1])
         else:
             mylog.error("First value of parameter, VelocityCenter, must be either 'bulk' or 'max'.")
             return None
@@ -284,7 +301,7 @@
                 mylog.error("No halos loaded, there will be nothing to do.")
                 return None
         else:
-            mylog.error("I don't know whether to get halos from hop or from density maximum.  This should not have happened.")
+            mylog.error("Keyword, halos, must be either 'single' or 'multiple'.")
             return None
 
     def add_halo_filter(self, function, *args, **kwargs):
@@ -351,6 +368,10 @@
             
         """
 
+        # Check for any field that might need to have the bulk velocity set.
+        if 'Velocity' in field or 'Mach' in field:
+            self._need_bulk_velocity = True
+
         self.profile_fields.append({'field':field, 'weight_field':weight_field, 
                                     'accumulation':accumulation})
 
@@ -379,11 +400,15 @@
 
         """
 
+        # Check for any field that might need to have the bulk velocity set.
+        if 'Velocity' in field or 'Mach' in field:
+            self._need_bulk_velocity = True
+
         self.projection_fields.append({'field':field, 'weight_field':weight_field, 
                                        'cmap': cmap})
 
     @parallel_blocking_call
-    def make_profiles(self, filename=None, prefilters=None, **kwargs):
+    def make_profiles(self, filename=None, prefilters=None, njobs=-1):
         r"""Make radial profiles for all halos in the list.
         
         After all the calls to `add_profile`, this will trigger the actual
@@ -394,7 +419,7 @@
         filename : string
             If set, a file will be written with all of the filtered halos
             and the quantities returned by the filter functions.
-            Default=None.
+            Default: None.
         prefilters : array_like
             A single dataset can contain thousands or tens of thousands of
             halos. Significant time can be saved by not profiling halos
@@ -402,6 +427,11 @@
             Simple filters based on quantities provided in the initial
             halo list can be used to filter out unwanted halos using this
             parameter.
+            Default: None.
+        njobs : int
+            The number of jobs over which to split the profiling.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
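+            For example, to split the halos over four parallel jobs (a
+            sketch; the filename here is hypothetical):
+                hp.make_profiles(filename="FilteredQuantities.out", njobs=4)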
         
         Examples
         --------
@@ -454,7 +484,7 @@
 
         # Profile all halos.
         updated_halos = []
-        for halo in parallel_objects(self.all_halos, -1):
+        for halo in parallel_objects(self.all_halos, njobs=njobs):
             # Apply prefilters to avoid profiling unwanted halos.
             filter_result = True
             haloQuantities = {}
@@ -468,7 +498,8 @@
 
                 profile_filename = "%s/Halo_%04d_profile.dat" % (my_output_dir, halo['id'])
 
-                profiledHalo = self._get_halo_profile(halo, profile_filename, virial_filter=virial_filter)
+                profiledHalo = self._get_halo_profile(halo, profile_filename,
+                                                      virial_filter=virial_filter)
 
                 if profiledHalo is None:
                     continue
@@ -487,26 +518,26 @@
                 for quantity in self.filter_quantities:
                     if halo.has_key(quantity): haloQuantities[quantity] = halo[quantity]
 
-                self.filtered_halos.append(haloQuantities)
+                only_on_root(self.filtered_halos.append, haloQuantities)
 
             # If we've gotten this far down, this halo is good and we want
             # to keep it. But we need to communicate the recentering changes
             # to all processors (the root one in particular) without having
             # one task clobber the other.
-            updated_halos.append(halo)
-        
+            only_on_root(updated_halos.append, halo)
+
         # And here is where we bring it all together.
         updated_halos = self.comm.par_combine_object(updated_halos,
                             datatype="list", op="cat")
-        updated_halos.sort(key = lambda a:a['id'])
+        updated_halos.sort(key=lambda a:a['id'])
         self.all_halos = updated_halos
 
         self.filtered_halos = self.comm.par_combine_object(self.filtered_halos,
                             datatype="list", op="cat")
-        self.filtered_halos.sort(key = lambda a:a['id'])
+        self.filtered_halos.sort(key=lambda a:a['id'])
 
         if filename is not None:
-            self._write_filtered_halo_list(filename, **kwargs)
+            self._write_filtered_halo_list(filename)
 
     def _get_halo_profile(self, halo, filename, virial_filter=True,
             force_write=False):
@@ -529,31 +560,13 @@
                 return None
 
             # get a sphere object to profile
-            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            sphere = self._get_halo_sphere(halo)
             if sphere is None: return None
 
-            if self._need_bulk_velocity:
-                # Set bulk velocity to zero out radial velocity profiles.
-                if self.velocity_center[0] == 'bulk':
-                    if self.velocity_center[1] == 'halo':
-                        sphere.set_field_parameter('bulk_velocity', halo['velocity'])
-                    elif self.velocity_center[1] == 'sphere':
-                        sphere.set_field_parameter('bulk_velocity', 
-                                                   sphere.quantities['BulkVelocity'](lazy_reader=False, 
-                                                                                     preload=False))
-                    else:
-                        mylog.error("Invalid parameter: VelocityCenter.")
-                elif self.velocity_center[0] == 'max':
-                    max_grid, max_cell, max_value, max_location = \
-                        self.pf.h.find_max_cell_location(self.velocity_center[1])
-                    sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
-                                                                 max_grid['y-velocity'][max_cell],
-                                                                 max_grid['z-velocity'][max_cell]])
-
             try:
                 profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
                                                 r_min, halo['r_max'],
-                                                log_space=True, lazy_reader=False,
+                                                log_space=True, lazy_reader=True,
                                                 end_collect=True)
             except EmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
@@ -586,9 +599,75 @@
 
         return profile
 
+    def _get_halo_sphere(self, halo):
+        """
+        Returns a sphere object for a given halo, performs the recentering,
+        and calculates bulk velocities.
+        """
+
+        sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+        if len(sphere._grids) == 0: return None
+        new_sphere = False
+
+        if self.recenter:
+            old = halo['center']
+            if self.recenter in centering_registry:
+                new_x, new_y, new_z = \
+                    centering_registry[self.recenter](sphere)
+            else:
+                # user supplied function
+                new_x, new_y, new_z = self.recenter(sphere)
+            if new_x < self.pf.domain_left_edge[0] or \
+                    new_y < self.pf.domain_left_edge[1] or \
+                    new_z < self.pf.domain_left_edge[2]:
+                mylog.info("Recentering rejected, skipping halo %d" % \
+                    halo['id'])
+                return None
+            halo['center'] = [new_x, new_y, new_z]
+            d = self.pf['kpc'] * periodic_dist(old, halo['center'],
+                self.pf.domain_right_edge - self.pf.domain_left_edge)
+            mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+            # Expand the halo to account for recentering. 
+            halo['r_max'] += d / 1000. # d is in kpc -> want mpc
+            new_sphere = True
+
+        if new_sphere:
+            # Temporary solution to memory leak.
+            for g in self.pf.h.grids:
+                g.clear_data()
+            sphere.clear_data()
+            del sphere
+            sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+
+        if self._need_bulk_velocity:
+            # Set bulk velocity to zero out radial velocity profiles.
+            if self.velocity_center[0] == 'bulk':
+                if self.velocity_center[1] == 'halo':
+                    sphere.set_field_parameter('bulk_velocity', halo['velocity'])
+                elif self.velocity_center[1] == 'sphere':
+                    mylog.info('Calculating sphere bulk velocity.')
+                    sphere.set_field_parameter('bulk_velocity', 
+                                               sphere.quantities['BulkVelocity'](lazy_reader=True, 
+                                                                                 preload=False))
+                else:
+                    mylog.error("Invalid parameter: velocity_center.")
+                    return None
+            elif self.velocity_center[0] == 'max':
+                mylog.info('Setting bulk velocity with value at max %s.' % self.velocity_center[1])
+                max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
+                                                                                 lazy_reader=True)
+                max_grid = self.pf.h.grids[mg]
+                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
+                                                             max_grid['y-velocity'][max_cell],
+                                                             max_grid['z-velocity'][max_cell]])
+            mylog.info('Bulk velocity set.')
+
+        return sphere
+
     @parallel_blocking_call
     def make_projections(self, axes=[0, 1, 2], halo_list='filtered',
-            save_images=False, save_cube=True):
+                         save_images=False, save_cube=True, njobs=-1):
         r"""Make projections of all halos using specified fields.
         
         After adding fields using `add_projection`, this starts the actual
@@ -608,6 +687,10 @@
         save_cube : bool
             Whether or not to save the HDF5 files of the halo projections.
             Default=True.
+        njobs : int
+            The number of jobs over which to split the projections.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
         
         Examples
         --------
@@ -656,7 +739,7 @@
                          self.pf.parameters['DomainRightEdge'][w])
                   for w in range(self.pf.parameters['TopGridRank'])]
 
-        for halo in parallel_objects(halo_projection_list, -1):
+        for halo in parallel_objects(halo_projection_list, njobs=njobs):
             if halo is None:
                 continue
             # Check if region will overlap domain edge.
@@ -745,7 +828,7 @@
 
     @parallel_blocking_call
     def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
-                             analysis_output_dir=None):
+                             analysis_output_dir=None, njobs=-1):
         r"""Perform custom analysis on all halos.
         
         This will loop through all halo on the HaloProfiler's list, 
@@ -768,6 +851,10 @@
         analysis_output_dir : string, optional
             If specified, this directory will be created within the dataset to 
             contain any output from the analysis function.  Default: None.
+        njobs : int
+            The number of jobs over which to split the analysis.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
 
         Examples
         --------
@@ -803,11 +890,11 @@
                 my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
             self.__check_directory(my_output_dir)
 
-        for halo in parallel_objects(halo_analysis_list, -1):
+        for halo in parallel_objects(halo_analysis_list, njobs=njobs):
             if halo is None: continue
 
             # Get a sphere object to analyze.
-            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            sphere = self._get_halo_sphere(halo)
             if sphere is None: continue
 
             # Call the given analysis function.
@@ -1042,94 +1129,6 @@
         else:
             os.mkdir(my_output_dir)
 
-def get_halo_sphere(halo, pf, recenter=None):
-    r"""Returns a sphere object for a given halo.
-        
-    With a dictionary containing halo properties, such as center 
-    and r_max, this creates a sphere object and optionally 
-    recenters and recreates the sphere using a recentering function.
-    This is to be used primarily to make spheres for a set of halos 
-    loaded by the HaloProfiler.
-    
-    Parameters
-    ----------
-    halo : dict, required
-        The dictionary containing halo properties used to make the sphere.
-        Required entries:
-            center : list with center coordinates.
-            r_max : sphere radius in Mpc.
-    pf : parameter file object, required
-        The parameter file from which the sphere will be made.
-    recenter : {None, string or function}
-        The exact location of the sphere center can significantly affect 
-        radial profiles.  The halo center loaded by the HaloProfiler will 
-        typically be the dark matter center of mass calculated by a halo 
-        finder.  However, this may not be the best location for centering 
-        profiles of baryon quantities.  For example, one may want to center 
-        on the maximum density.
-        If recenter is given as a string, one of the existing recentering 
-        functions will be used:
-            Min_Dark_Matter_Density : location of minimum dark matter density
-            Max_Dark_Matter_Density : location of maximum dark matter density
-            CoM_Dark_Matter_Density : dark matter center of mass
-            Min_Gas_Density : location of minimum gas density
-            Max_Gas_Density : location of maximum gas density
-            CoM_Gas_Density : gas center of mass
-            Min_Total_Density : location of minimum total density
-            Max_Total_Density : location of maximum total density
-            CoM_Total_Density : total center of mass
-            Min_Temperature : location of minimum temperature
-            Max_Temperature : location of maximum temperature
-        Alternately, a function can be supplied for custom recentering.
-        The function should take only one argument, a sphere object.
-            Example function:
-                def my_center_of_mass(data):
-                   my_x, my_y, my_z = data.quantities['CenterOfMass']()
-                   return (my_x, my_y, my_z)
-
-        Examples: this should primarily be used with the halo list of the HaloProfiler.
-        This is an example with an abstract halo asssuming a pre-defined pf.
-        >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
-        >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
-        >>> # Assuming the above example function has been defined.
-        >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
-    """
-        
-    sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
-    if len(sphere._grids) == 0: return None
-    new_sphere = False
-
-    if recenter:
-        old = halo['center']
-        if recenter in centering_registry:
-            new_x, new_y, new_z = \
-                centering_registry[recenter](sphere)
-        else:
-            # user supplied function
-            new_x, new_y, new_z = recenter(sphere)
-        if new_x < pf.domain_left_edge[0] or \
-                new_y < pf.domain_left_edge[1] or \
-                new_z < pf.domain_left_edge[2]:
-            mylog.info("Recentering rejected, skipping halo %d" % \
-                halo['id'])
-            return None
-        halo['center'] = [new_x, new_y, new_z]
-        d = pf['kpc'] * periodic_dist(old, halo['center'],
-            pf.domain_right_edge - pf.domain_left_edge)
-        mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
-        # Expand the halo to account for recentering. 
-        halo['r_max'] += d / 1000 # d is in kpc -> want mpc
-        new_sphere = True
-
-    if new_sphere:
-        # Temporary solution to memory leak.
-        for g in pf.h.grids:
-            g.clear_data()
-        sphere.clear_data()
-        del sphere
-        sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
-    return sphere
-
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """
     Shift projection data around.


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -495,7 +495,7 @@
         self._sorted = {}
 
     def get_data(self, fields=None, in_grids=False):
-        if self._grids == None:
+        if self._grids is None:
             self._get_list_of_grids()
         points = []
         if not fields:
@@ -3303,6 +3303,40 @@
         pointI = na.where(k == True)
         return pointI
 
+class AMRMaxLevelCollection(AMR3DData):
+    _type_name = "grid_collection_max_level"
+    _con_args = ("center", "max_level")
+    def __init__(self, center, max_level, fields = None,
+                 pf = None, **kwargs):
+        """
+        By selecting an arbitrary *max_level*, we can act on all grids at
+        or below that level.  Child cells are masked when the level of the
+        grid is below the max level.
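+
+        Examples
+        --------
+        A minimal sketch, assuming this container is exposed through the
+        hierarchy under its _type_name like other yt data objects:
+
+        >>> mc = pf.h.grid_collection_max_level([0.5, 0.5, 0.5], 4)
+        >>> print mc["Density"].size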
+        """
+        AMR3DData.__init__(self, center, fields, pf, **kwargs)
+        self.max_level = max_level
+        self._refresh_data()
+
+    def _get_list_of_grids(self):
+        if self._grids is not None: return
+        gi = (self.pf.h.grid_levels <= self.max_level)[:,0]
+        self._grids = self.pf.h.grids[gi]
+
+    def _is_fully_enclosed(self, grid):
+        return True
+
+    @cache_mask
+    def _get_cut_mask(self, grid):
+        return na.ones(grid.ActiveDimensions, dtype='bool')
+
+    def _get_point_indices(self, grid, use_child_mask=True):
+        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        if use_child_mask and grid.Level < self.max_level:
+            k[grid.child_indices] = False
+        pointI = na.where(k == True)
+        return pointI
+
+
 class AMRSphereBase(AMR3DData):
     """
     A sphere of points


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -38,6 +38,9 @@
       EnzoStaticOutput, \
       EnzoStaticOutputInMemory
 
+from .simulation_handling import \
+    EnzoSimulation
+
 from .fields import \
       EnzoFieldInfo, \
       Enzo2DFieldInfo, \


diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/frontends/enzo/simulation_handling.py
--- /dev/null
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -0,0 +1,692 @@
+"""
+EnzoSimulation class and member functions.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.funcs import *
+
+import numpy as na
+import glob
+import os
+
+from yt.data_objects.time_series import \
+    TimeSeriesData
+from yt.utilities.cosmology import \
+    Cosmology, \
+    EnzoCosmology
+from yt.utilities.exceptions import \
+    YTException
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+
+from yt.convenience import \
+    load
+
+class EnzoSimulation(TimeSeriesData):
+    r"""Super class for performing the same operation over all data outputs in 
+    a simulation from one redshift to another.
+    """
+    def __init__(self, parameter_filename):
+        r"""Initialize an Enzo Simulation object.
+
+        Upon creation, the parameter file is parsed, and the times and
+        redshifts of all potential outputs are calculated and stored in
+        all_outputs.  A time units dictionary is instantiated to allow
+        time outputs to be requested in physical time units.  The
+        get_time_series method can be used to generate a TimeSeriesData
+        object.
+
+        parameter_filename : str
+            The simulation parameter file.
+        
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> es = EnzoSimulation("my_simulation.par")
+        >>> print es.all_outputs
+
+        """
+        self.parameter_filename = parameter_filename
+        self.parameters = {}
+
+        # Set some parameter defaults.
+        self._set_parameter_defaults()
+        # Read the simulation parameter file.
+        self._parse_parameter_file()
+        # Set up time units dictionary.
+        self._set_time_units()
+
+        # Figure out the starting and stopping times and redshift.
+        self._calculate_simulation_bounds()
+        self.print_key_parameters()
+        
+        # Get all possible datasets.
+        self._get_all_outputs()
+
+    def get_time_series(self, time_data=True, redshift_data=True,
+                        initial_time=None, final_time=None, time_units='1',
+                        initial_redshift=None, final_redshift=None,
+                        initial_cycle=None, final_cycle=None,
+                        times=None, redshifts=None, tolerance=None,
+                        find_outputs=False, parallel=True):
+
+        """
+        Instantiate a TimeSeriesData object for a set of outputs.
+
+        If no additional keywords given, a TimeSeriesData object will be
+        created with all potential datasets created by the simulation.
+
+        Outputs can be gathered by specifying a time or redshift range
+        (or a combination of time and redshift), with a specific list of
+        times or redshifts, a range of cycle numbers (for cycle-based
+        output), or by simply searching all subdirectories within the
+        simulation directory.
+
+        time_data : bool
+            Whether or not to include time outputs when gathering
+            datasets for time series.
+            Default: True.
+        redshift_data : bool
+            Whether or not to include redshift outputs when gathering
+            datasets for time series.
+            Default: True.
+        initial_time : float
+            The earliest time for outputs to be included.  If None,
+            the initial time of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_time : float
+            The latest time for outputs to be included.  If None,
+            the final time of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        times : array_like
+            A list of times for which outputs will be found.
+            Default: None.
+        time_units : str
+            The time units used for requesting outputs by time.
+            Default: '1' (code units).
+        initial_redshift : float
+            The earliest redshift for outputs to be included.  If None,
+            the initial redshift of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_redshift : float
+            The latest redshift for outputs to be included.  If None,
+            the final redshift of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        redshifts : array_like
+            A list of redshifts for which outputs will be found.
+            Default: None.
+        initial_cycle : float
+            The earliest cycle for outputs to be included.  If None,
+            the initial cycle of the simulation is used.  This can
+            only be used with final_cycle.
+            Default: None.
+        final_cycle : float
+            The latest cycle for outputs to be included.  If None,
+            the final cycle of the simulation is used.  This can
+            only be used in combination with initial_cycle.
+            Default: None.
+        tolerance : float
+            Used in combination with "times" or "redshifts" keywords,
+            this is the tolerance within which outputs are accepted
+            given the requested times or redshifts.  If None, the
+            nearest output is always taken.
+            Default: None.
+        find_outputs : bool
+            If True, subdirectories within the GlobalDir directory are
+            searched one by one for datasets.  Time and redshift
+            information are gathered by temporarily instantiating each
+            dataset.  This can be used when simulation data was created
+            in a non-standard way, making it difficult to guess the
+            corresponding time and redshift information.
+            Default: False.
+        parallel : bool/int
+            If True, the generated TimeSeriesData will divide the work
+            such that a single processor works on each dataset.  If an
+            integer is supplied, the work will be divided into that
+            number of jobs.
+            Default: True.
+
+        Examples
+        --------
+        >>> es.get_time_series(initial_redshift=10, final_time=13.7,
+                               time_units='Gyr', redshift_data=False)
+
+        >>> es.get_time_series(redshifts=[3, 2, 1, 0])
+
+        >>> es.get_time_series(final_cycle=100000)
+
+        >>> es.get_time_series(find_outputs=True)
+
+        >>> # after calling get_time_series
+        >>> for pf in es.piter():
+        >>>     pc = PlotCollection(pf, 'c')
+        >>>     pc.add_projection('Density', 0)
+        >>>     pc.save()
+
+        """
+
+        if (initial_redshift is not None or \
+            final_redshift is not None) and \
+            not self.cosmological_simulation:
+            mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
+            return
+
+        if find_outputs:
+            my_outputs = self._find_outputs()
+
+        else:
+            if time_data and redshift_data:
+                my_all_outputs = self.all_outputs
+            elif time_data:
+                my_all_outputs = self.all_time_outputs
+            elif redshift_data:
+                my_all_outputs = self.all_redshift_outputs
+            else:
+                mylog.error('Both time_data and redshift_data are False.')
+                return
+
+            if times is not None:
+                my_outputs = self._get_outputs_by_time(times, tolerance=tolerance,
+                                                       outputs=my_all_outputs,
+                                                       time_units=time_units)
+
+            elif redshifts is not None:
+                my_outputs = self._get_outputs_by_redshift(redshifts, tolerance=tolerance,
+                                                           outputs=my_all_outputs)
+
+            elif initial_cycle is not None or final_cycle is not None:
+                if initial_cycle is None:
+                    initial_cycle = 0
+                else:
+                    initial_cycle = max(initial_cycle, 0)
+                if final_cycle is None:
+                    final_cycle = self.parameters['StopCycle']
+                else:
+                    final_cycle = min(final_cycle, self.parameters['StopCycle'])
+                my_outputs = my_all_outputs[int(ceil(float(initial_cycle) /
+                                                     self.parameters['CycleSkipDataDump'])):
+                                            (final_cycle /  self.parameters['CycleSkipDataDump'])+1]
+
+            else:
+                if initial_time is not None:
+                    my_initial_time = initial_time / self.time_units[time_units]
+                elif initial_redshift is not None:
+                    my_initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(initial_redshift) / \
+                        self.enzo_cosmology.TimeUnits
+                else:
+                    my_initial_time = self.initial_time
+
+                if final_time is not None:
+                    my_final_time = final_time / self.time_units[time_units]
+                elif final_redshift is not None:
+                    my_final_time = self.enzo_cosmology.ComputeTimeFromRedshift(final_redshift) / \
+                        self.enzo_cosmology.TimeUnits
+                else:
+                    my_final_time = self.final_time
+                    
+                my_times = na.array(map(lambda a:a['time'], my_all_outputs))
+                my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+                if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
+                my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
+
+        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
+                                parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        """
+        Print out some key parameters for the simulation.
+        """
+        for a in ["domain_dimensions", "domain_left_edge",
+                  "domain_right_edge", "initial_time", "final_time",
+                  "stop_cycle", "cosmological_simulation"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+        if hasattr(self, "cosmological_simulation") and \
+           getattr(self, "cosmological_simulation"):
+            for a in ["omega_lambda", "omega_matter",
+                      "hubble_constant", "initial_redshift",
+                      "final_redshift"]:
+                if not hasattr(self, a):
+                    mylog.error("Missing %s in parameter file definition!", a)
+                    continue
+                v = getattr(self, a)
+                mylog.info("Parameters: %-25s = %s", a, v)
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+
+        self.conversion_factors = {}
+        redshift_outputs = []
+
+        # Let's read the file
+        lines = open(self.parameter_filename).readlines()
+        for line in (l.strip() for l in lines):
+            if '#' in line: line = line[0:line.find('#')]
+            if '//' in line: line = line[0:line.find('//')]
+            if len(line) < 2: continue
+            param, vals = (i.strip() for i in line.split("="))
+            # First we try to decipher what type of value it is.
+            vals = vals.split()
+            # Special case approaching.
+            if "(do" in vals: vals = vals[:1]
+            if len(vals) == 0:
+                pcast = str # Assume NULL output
+            else:
+                v = vals[0]
+                # Figure out if it's castable to floating point:
+                try:
+                    float(v)
+                except ValueError:
+                    pcast = str
+                else:
+                    if any("." in v or "e+" in v or "e-" in v for v in vals):
+                        pcast = float
+                    elif v == "inf":
+                        pcast = str
+                    else:
+                        pcast = int
+            # Now we figure out what to do with it.
+            if param.endswith("Units") and not param.startswith("Temperature"):
+                dataType = param[:-5]
+                # This one better be a float.
+                self.conversion_factors[dataType] = float(vals[0])
+            if param.startswith("CosmologyOutputRedshift["):
+                index = param[param.find("[")+1:param.find("]")]
+                redshift_outputs.append({'index':int(index), 'redshift':float(vals[0])})
+            elif len(vals) == 0:
+                vals = ""
+            elif len(vals) == 1:
+                vals = pcast(vals[0])
+            else:
+                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+            self.parameters[param] = vals
+        self.refine_by = self.parameters["RefineBy"]
+        self.dimensionality = self.parameters["TopGridRank"]
+        if self.dimensionality > 1:
+            self.domain_dimensions = self.parameters["TopGridDimensions"]
+            if len(self.domain_dimensions) < 3:
+                tmp = self.domain_dimensions.tolist()
+                tmp.append(1)
+                self.domain_dimensions = na.array(tmp)
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                                             "float64").copy()
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+                                             "float64").copy()
+        else:
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                                             "float64")
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+                                             "float64")
+            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+
+        if self.parameters["ComovingCoordinates"]:
+            cosmo_attr = {'omega_lambda': 'CosmologyOmegaLambdaNow',
+                          'omega_matter': 'CosmologyOmegaMatterNow',
+                          'hubble_constant': 'CosmologyHubbleConstantNow',
+                          'initial_redshift': 'CosmologyInitialRedshift',
+                          'final_redshift': 'CosmologyFinalRedshift'}
+            self.cosmological_simulation = 1
+            for a, v in cosmo_attr.items():
+                if not v in self.parameters:
+                    raise MissingParameter(self.parameter_filename, v)
+                setattr(self, a, self.parameters[v])
+        else:
+            self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+        # make list of redshift outputs
+        self.all_redshift_outputs = []
+        if not self.cosmological_simulation: return
+        for output in redshift_outputs:
+            output['filename'] = os.path.join(self.parameters['GlobalDir'],
+                                              "%s%04d" % (self.parameters['RedshiftDumpDir'],
+                                                          output['index']),
+                                              "%s%04d" % (self.parameters['RedshiftDumpName'],
+                                                          output['index']))
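+            # With the defaults this yields paths like "./RD0005/RedshiftOutput0005".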
+            del output['index']
+        self.all_redshift_outputs = redshift_outputs
+
+    def _calculate_redshift_dump_times(self):
+        "Calculates time from redshift of redshift outputs."
+
+        if not self.cosmological_simulation: return
+        for output in self.all_redshift_outputs:
+            output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
+                self.enzo_cosmology.TimeUnits
+
+    def _calculate_time_outputs(self):
+        "Calculate time outputs and their redshifts if cosmological."
+
+        if self.final_time is None or \
+            not 'dtDataDump' in self.parameters or \
+            self.parameters['dtDataDump'] <= 0.0: return []
+
+        self.all_time_outputs = []
+        index = 0
+        current_time = self.initial_time
+        while current_time <= self.final_time + self.parameters['dtDataDump']:
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%04d" % (self.parameters['DataDumpDir'], index),
+                                    "%s%04d" % (self.parameters['DataDumpName'], index))
+
+            output = {'index': index, 'filename': filename, 'time': current_time}
+            output['time'] = min(output['time'], self.final_time)
+            if self.cosmological_simulation:
+                output['redshift'] = self.enzo_cosmology.ComputeRedshiftFromTime(
+                    current_time * self.enzo_cosmology.TimeUnits)
+
+            self.all_time_outputs.append(output)
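+            # Stop once we are within 0.01% of the final time so that
+            # floating-point drift does not produce a spurious extra output.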
+            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            current_time += self.parameters['dtDataDump']
+            index += 1
+
+    def _calculate_cycle_outputs(self):
+        "Calculate cycle outputs."
+
+        mylog.warn('Calculating cycle outputs.  Dataset times will be unavailable.')
+
+        if self.stop_cycle is None or \
+            not 'CycleSkipDataDump' in self.parameters or \
+            self.parameters['CycleSkipDataDump'] <= 0.0: return []
+
+        self.all_time_outputs = []
+        index = 0
+        for cycle in range(0, self.stop_cycle+1, self.parameters['CycleSkipDataDump']):
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%04d" % (self.parameters['DataDumpDir'], index),
+                                    "%s%04d" % (self.parameters['DataDumpName'], index))
+
+            output = {'index': index, 'filename': filename, 'cycle': cycle}
+            self.all_time_outputs.append(output)
+            index += 1
+
+    def _get_all_outputs(self):
+        "Get all potential datasets and combine into a time-sorted list."
+
+        if self.parameters['dtDataDump'] > 0 and \
+            self.parameters['CycleSkipDataDump'] > 0:
+            raise AmbiguousOutputs(self.parameter_filename)
+
+        # Get all time or cycle outputs.
+        if self.parameters['CycleSkipDataDump'] > 0:
+            self._calculate_cycle_outputs()
+        else:
+            self._calculate_time_outputs()
+
+        # Calculate times for redshift outputs.
+        self._calculate_redshift_dump_times()
+
+        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+        if self.parameters['CycleSkipDataDump'] <= 0:
+            self.all_outputs.sort(key=lambda obj:obj['time'])
+
+        mylog.info("Total datasets: %d." % len(self.all_outputs))
+
+    def _calculate_simulation_bounds(self):
+        """
+        Figure out the starting and stopping time and redshift for the simulation.
+        """
+
+        if 'StopCycle' in self.parameters:
+            self.stop_cycle = self.parameters['StopCycle']
+
+        # Convert initial/final redshifts to times.
+        if self.cosmological_simulation:
+            # Instantiate EnzoCosmology object for units and time conversions.
+            self.enzo_cosmology = EnzoCosmology(HubbleConstantNow=
+                                                (100.0 * self.parameters['CosmologyHubbleConstantNow']),
+                                                OmegaMatterNow=self.parameters['CosmologyOmegaMatterNow'],
+                                                OmegaLambdaNow=self.parameters['CosmologyOmegaLambdaNow'],
+                                                InitialRedshift=self.parameters['CosmologyInitialRedshift'])
+            self.initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.initial_redshift) / \
+                self.enzo_cosmology.TimeUnits
+            self.final_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.final_redshift) / \
+                self.enzo_cosmology.TimeUnits
+
+        # If not a cosmology simulation, figure out the stopping criteria.
+        else:
+            if 'InitialTime' in self.parameters:
+                self.initial_time = self.parameters['InitialTime']
+            else:
+                self.initial_time = 0.
+
+            if 'StopTime' in self.parameters:
+                self.final_time = self.parameters['StopTime']
+            else:
+                self.final_time = None
+            if not ('StopTime' in self.parameters or
+                    'StopCycle' in self.parameters):
+                raise NoStoppingCondition(self.parameter_filename)
+            if self.final_time is None:
+                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+                           self.parameter_filename)
+
+    def _set_parameter_defaults(self):
+        "Set some default parameters to avoid problems if they are not in the parameter file."
+
+        self.parameters['GlobalDir'] = "."
+        self.parameters['DataDumpName'] = "data"
+        self.parameters['DataDumpDir'] = "DD"
+        self.parameters['RedshiftDumpName'] = "RedshiftOutput"
+        self.parameters['RedshiftDumpDir'] = "RD"
+        self.parameters['ComovingCoordinates'] = 0
+        self.parameters['TopGridRank'] = 3
+        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['RefineBy'] = 2 # technically not the Enzo default
+        self.parameters['StopCycle'] = 100000
+        self.parameters['dtDataDump'] = 0.
+        self.parameters['CycleSkipDataDump'] = 0
+        self.parameters['TimeUnits'] = 1.
+
+    def _set_time_units(self):
+        """
+        Set up a dictionary of time unit conversions.
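+
+        Examples
+        --------
+        A sketch of using the resulting dictionary (t_code is a
+        hypothetical time in code units):
+
+        >>> es._set_time_units()
+        >>> t_myr = t_code * es.time_units['Myr']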
+        """
+
+        self.time_units = {}
+        if self.cosmological_simulation:
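+            # Enzo's cosmological code-time unit, in seconds:
+            # 2.52e17 / sqrt(Omega_m) / h / (1 + z_initial)**1.5.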
+            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+                / self.hubble_constant / (1 + self.initial_redshift)**1.5
+        self.time_units['1'] = 1.
+        self.time_units['seconds'] = self.parameters['TimeUnits']
+        self.time_units['years'] = self.time_units['seconds'] / (365*3600*24.0)
+        self.time_units['days']  = self.time_units['seconds'] / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+
+    def _find_outputs(self):
+        """
+        Search for directories matching the data dump keywords.
+        If found, get dataset times by opening the pf.
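+
+        Examples
+        --------
+        A sketch of the expected return value (the paths shown are
+        illustrative):
+
+        >>> es._find_outputs()
+        [{'filename': './DD0000/data0000', 'time': 0.0}, ...]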
+        """
+
+        # look for time outputs.
+        potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                                   "%s*" % self.parameters['DataDumpDir'])) + \
+                            glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                                   "%s*" % self.parameters['RedshiftDumpDir']))
+        time_outputs = []
+        mylog.info("Checking %d potential time outputs." % 
+                   len(potential_outputs))
+
+        for output in potential_outputs:
+            if self.parameters['DataDumpDir'] in output:
+                dir_key = self.parameters['DataDumpDir']
+                output_key = self.parameters['DataDumpName']
+            else:
+                dir_key = self.parameters['RedshiftDumpDir']
+                output_key = self.parameters['RedshiftDumpName']
+            index = output[output.find(dir_key) + len(dir_key):]
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%s" % (dir_key, index),
+                                    "%s%s" % (output_key, index))
+            if os.path.exists(filename):
+                pf = load(filename)
+                if pf is not None:
+                    time_outputs.append({'filename': filename, 'time': pf.current_time})
+                    if pf.cosmological_simulation:
+                        time_outputs[-1]['redshift'] = pf.current_redshift
+                del pf
+        mylog.info("Located %d time outputs." % len(time_outputs))
+        time_outputs.sort(key=lambda obj: obj['time'])
+        return time_outputs
+
+    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
+        r"""Get datasets at or near to given values.
+        
+        Parameters
+        ----------
+        key : str
+            The key by which to retrieve outputs, usually 'time' or
+            'redshift'.
+        values : array_like
+            A list of values, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
+        
+        """
+
+        values = ensure_list(values)
+        if outputs is None:
+            outputs = self.all_outputs
+        my_outputs = []
+        for value in values:
+            outputs.sort(key=lambda obj: na.fabs(value - obj[key]))
+            if (tolerance is None or na.fabs(value - outputs[0][key]) <= tolerance) \
+                    and outputs[0] not in my_outputs:
+                my_outputs.append(outputs[0])
+            else:
+                mylog.error("No dataset added for %s = %f." % (key, value))
+
+        outputs.sort(key=lambda obj: obj['time'])
+        return my_outputs
+
+    def _get_outputs_by_redshift(self, redshifts, tolerance=None, outputs=None):
+        r"""Get datasets at or near to given redshifts.
+        
+        Parameters
+        ----------
+        redshifts : array_like
+            A list of redshifts, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_redshift([0, 1, 2], tolerance=0.1)
+        
+        """
+
+        return self._get_outputs_by_key('redshift', redshifts, tolerance=tolerance,
+                                        outputs=outputs)
+
+    def _get_outputs_by_time(self, times, tolerance=None, outputs=None,
+                             time_units='1'):
+        r"""Get datasets at or near to given times.
+        
+        Parameters
+        ----------
+        times : array_like
+            A list of times, given in code units as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the time is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        time_units : str
+            The units of the list of times.
+            Default: '1' (code units).
+        
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_time([600, 500, 400], tolerance=10.)
+        
+        """
+
+        times = na.array(times) / self.time_units[time_units]
+        return self._get_outputs_by_key('time', times, tolerance=tolerance,
+                                        outputs=outputs)
+
+class MissingParameter(YTException):
+    def __init__(self, pf, parameter):
+        YTException.__init__(self, pf)
+        self.parameter = parameter
+
+    def __str__(self):
+        return "Parameter file %s is missing %s parameter." % \
+            (self.pf, self.parameter)
+
+class NoStoppingCondition(YTException):
+    def __init__(self, pf):
+        YTException.__init__(self, pf)
+
+    def __str__(self):
+        return "Simulation %s has no stopping condition.  StopTime or StopCycle should be set." % \
+            self.pf
+
+class AmbiguousOutputs(YTException):
+    def __init__(self, pf):
+        YTException.__init__(self, pf)
+
+    def __str__(self):
+        return "Simulation %s has both dtDataDump and CycleSkipDataDump set.  Unable to calculate datasets." % \
+            self.pf
+


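The new exception classes give misconfigured simulations a clear failure
mode; a minimal sketch of guarding against one of them, assuming the
output list is assembled when EnzoSimulation is instantiated and that
the constructor takes the simulation parameter file path
("my_simulation.par" is hypothetical):

>>> try:
...     es = EnzoSimulation("my_simulation.par")
... except AmbiguousOutputs:
...     print "Disable either dtDataDump or CycleSkipDataDump."
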
diff -r f2c8241051f96b7f3ab388ec3f2038f187266320 -r 2f074d314956f5b004e73dbae6abc3ea1edfd0e1 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -67,7 +67,8 @@
     add_quantity, quantity_info
 
 from yt.frontends.enzo.api import \
-    EnzoStaticOutput, EnzoStaticOutputInMemory, EnzoFieldInfo, \
+    EnzoStaticOutput, EnzoStaticOutputInMemory, \
+    EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
 from yt.frontends.castro.api import \
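
With EnzoSimulation now exported through yt.mods, the simulation handler
is available from the standard import; a minimal sketch, again assuming
the constructor takes the parameter file path ("my_simulation.par" is
hypothetical):

>>> from yt.mods import *
>>> es = EnzoSimulation("my_simulation.par")
>>> datasets = es._get_outputs_by_redshift([0, 1, 2], tolerance=0.1)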

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


