[yt-svn] commit/yt: 6 new changesets
Bitbucket
commits-noreply at bitbucket.org
Wed Jun 27 10:36:27 PDT 2012
6 new commits in yt:
https://bitbucket.org/yt_analysis/yt/changeset/80d3c224557c/
changeset: 80d3c224557c
branch: yt
user: jwise77
date: 2012-06-04 21:27:04
summary: Adding a missing factor of Time in radiation pressure conversion
affected #: 1 file
diff -r 1d422a9bc9e3bc0c7af24ae9f97b2a1c75cf64a1 -r 80d3c224557cf42dea9a8f79691f809231758aed yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -259,7 +259,7 @@
f.take_log=True
def _convertRadiationAccel(data):
- return data.convert("cm") / data.convert("Time")
+ return data.convert("cm") / data.convert("Time")**2
for dim in range(1,4):
f = KnownEnzoFields["RadAccel%d" % dim]
f._convert_function = _convertRadiationAccel
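[Note: the fix is dimensional. Acceleration carries units of length/time^2, so converting the RadAccel fields from code units requires two factors of the code time unit; the old expression yielded cm per code time, i.e. a velocity conversion. A minimal check with purely illustrative conversion factors (not values from any dataset):

    >>> length_cgs = 3.0857e24                # cm per code length (illustrative)
    >>> time_cgs = 3.1557e13                  # s per code time (illustrative)
    >>> old_factor = length_cgs / time_cgs        # cm/s -- a velocity, wrong
    >>> new_factor = length_cgs / time_cgs**2     # cm/s^2 -- an acceleration
]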
https://bitbucket.org/yt_analysis/yt/changeset/165ac54e7602/
changeset: 165ac54e7602
branch: yt
user: jwise77
date: 2012-06-04 21:27:12
summary: Merging.
affected #: 30 files
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
include distribute_setup.py
-recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
-recursive-include yt/ *.pyx *.pxd *.hh *.h README*
+recursive-include yt/gui/reason/html *.html *.png *.ico *.js
+recursive-include yt *.pyx *.pxd *.hh *.h README*
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -353,7 +353,7 @@
# Now we dump all our SHA512 files out.
-echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051 Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478 PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
@@ -391,7 +391,7 @@
get_enzotools mercurial-2.2.tar.gz
get_enzotools ipython-0.12.tar.gz
get_enzotools h5py-2.0.1.tar.gz
-get_enzotools Cython-0.15.1.tar.gz
+get_enzotools Cython-0.16.tar.gz
get_enzotools ext-3.3.2.zip
get_enzotools ext-slate-110328.zip
get_enzotools PhiloGL-1.4.2.zip
@@ -631,7 +631,7 @@
do_setup_py ipython-0.12
do_setup_py h5py-2.0.1
-do_setup_py Cython-0.15.1
+do_setup_py Cython-0.16
[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,5 +1,6 @@
#!python
import os, re
+from distutils import version
from yt.mods import *
from yt.data_objects.data_containers import AMRData
namespace = locals().copy()
@@ -22,6 +23,11 @@
code.interact(doc, None, namespace)
sys.exit()
+if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+ api_version = '0.10'
+else:
+ api_version = '0.11'
+
if IPython.__version__.startswith("0.10"):
api_version = '0.10'
elif IPython.__version__.startswith("0.11") or \
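[Note: the new check compares versions numerically with distutils' LooseVersion rather than by string prefix; the merge leaves the old startswith branches in place immediately below it, which re-assign api_version. The ordering semantics the change relies on:

    >>> from distutils import version
    >>> version.LooseVersion('0.9.1') <= version.LooseVersion('0.10')
    True
    >>> '0.9.1' <= '0.10'    # plain string comparison gets this wrong
    False
    >>> version.LooseVersion('0.11.1') <= version.LooseVersion('0.10')
    False
]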
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,4 @@
from .multi_halo_profiler import \
HaloProfiler, \
FakeProfile, \
- get_halo_sphere, \
standard_fields
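[Note: get_halo_sphere leaves the public API here; sphere creation, recentering, and bulk-velocity setup move into the private HaloProfiler._get_halo_sphere method (see the multi_halo_profiler.py hunk below). Scripts that imported the function should configure recentering on the profiler instead. A sketch of the migration, with the dataset name taken from the updated docstring:

    >>> from yt.analysis_modules.halo_profiler.api import *
    >>> # before: sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
    >>> # after: recentering is a HaloProfiler option; spheres are built internally
    >>> hp = HaloProfiler("RedshiftOutput0005/RD0005", recenter='Max_Gas_Density')
]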
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -43,14 +43,14 @@
@add_function("Min_Dark_Matter_Density")
def find_minimum_dm_density(data):
ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Dark_Matter_Density',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@add_function("Max_Dark_Matter_Density")
def find_maximum_dm_density(data):
ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Dark_Matter_Density',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@@ -58,7 +58,7 @@
def find_CoM_dm_density(data):
dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=False,
use_particles=True,
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (dc_x, dc_y, dc_z)
@@ -67,14 +67,14 @@
@add_function("Min_Gas_Density")
def find_minimum_gas_density(data):
ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Density',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@add_function("Max_Gas_Density")
def find_maximum_gas_density(data):
ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Density',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@@ -82,7 +82,7 @@
def find_CoM_gas_density(data):
dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True,
use_particles=False,
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (dc_x, dc_y, dc_z)
@@ -91,14 +91,14 @@
@add_function("Min_Total_Density")
def find_minimum_total_density(data):
ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Matter_Density',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@add_function("Max_Total_Density")
def find_maximum_total_density(data):
ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Matter_Density',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@@ -106,7 +106,7 @@
def find_CoM_total_density(data):
dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True,
use_particles=True,
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (dc_x, dc_y, dc_z)
@@ -115,14 +115,14 @@
@add_function("Min_Temperature")
def find_minimum_temperature(data):
ma, mini, mx, my, mz, mg = data.quantities['MinLocation']('Temperature',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
@add_function("Max_Temperature")
def find_maximum_temperature(data):
ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Temperature',
- lazy_reader=False,
+ lazy_reader=True,
preload=False)
return (mx, my, mz)
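[Note: every centering function here flips lazy_reader to True, which evaluates the derived quantity grid by grid instead of staging the whole sphere in memory at once; the same switch is made for profiling in multi_halo_profiler.py below. The call pattern, assuming a pre-built sphere object:

    >>> ma, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](
    ...     'Density', lazy_reader=True, preload=False)
    >>> center = (mx, my, mz)
]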
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -64,7 +64,7 @@
dm_only=False, resize=True,
fancy_padding=True, rearrange=True),
halo_radius=None, radius_units='1', n_profile_bins=50,
- recenter = None,
+ recenter=None,
profile_output_dir='radial_profiles', projection_output_dir='projections',
projection_width=8.0, projection_width_units='mpc', project_at_level='max',
velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'],
@@ -111,8 +111,32 @@
Args given with call to halo finder function. Default: None.
halo_finder_kwargs : dict
kwargs given with call to halo finder function. Default: None.
- recenter : {string, function
- The name of a function that recenters the halo for analysis.
+ recenter : {string, function}
+ The exact location of the sphere center can significantly affect
+ radial profiles. The halo center loaded by the HaloProfiler will
+ typically be the dark matter center of mass calculated by a halo
+ finder. However, this may not be the best location for centering
+ profiles of baryon quantities. For example, one may want to center
+ on the maximum density.
+ If recenter is given as a string, one of the existing recentering
+ functions will be used:
+ Min_Dark_Matter_Density : location of minimum dark matter density
+ Max_Dark_Matter_Density : location of maximum dark matter density
+ CoM_Dark_Matter_Density : dark matter center of mass
+ Min_Gas_Density : location of minimum gas density
+ Max_Gas_Density : location of maximum gas density
+ CoM_Gas_Density : gas center of mass
+ Min_Total_Density : location of minimum total density
+ Max_Total_Density : location of maximum total density
+ CoM_Total_Density : total center of mass
+ Min_Temperature : location of minimum temperature
+ Max_Temperature : location of maximum temperature
+ Alternately, a function can be supplied for custom recentering.
+ The function should take only one argument, a sphere object.
+ Example function:
+ def my_center_of_mass(data):
+ my_x, my_y, my_z = data.quantities['CenterOfMass']()
+ return (my_x, my_y, my_z)
Default: None.
halo_radius : float
If no halo radii are provided in the halo list file, this
@@ -148,8 +172,7 @@
* ["bulk", "sphere"]: the bulk velocity of the sphere
centered on the halo center.
* ["max", field]: the velocity of the cell that is the
- location of the maximum of the field
- specified (used only when halos set to single).
+ location of the maximum of the field specified.
filter_quantities : array_like
Quantities from the original halo list file to be written out in the
filtered list file. Default: ['id','center'].
@@ -161,8 +184,8 @@
Examples
--------
- >>> import yt.analysis_modules.halo_profiler.api as HP
- >>> hp = HP.halo_profiler("DD0242/DD0242")
+ >>> from yt.analysis_modules.halo_profiler.api import *
+ >>> hp = HaloProfiler("RedshiftOutput0005/RD0005")
"""
ParallelAnalysisInterface.__init__(self)
@@ -226,13 +249,9 @@
# Option to recenter sphere someplace else.
self.recenter = recenter
- # Look for any field that might need to have the bulk velocity set.
+ # Flag for whether calculating halo bulk velocity is necessary.
self._need_bulk_velocity = False
- for field in [hp['field'] for hp in self.profile_fields]:
- if 'Velocity' in field or 'Mach' in field:
- self._need_bulk_velocity = True
- break
-
+
# Check validity for VelocityCenter parameter which toggles how the
# velocity is zeroed out for radial velocity profiles.
self.velocity_center = velocity_center[:]
@@ -250,9 +269,7 @@
mylog.error("Second value of VelocityCenter must be either 'halo' or 'sphere' if first value is 'bulk'.")
return None
elif self.velocity_center[0] == 'max':
- if self.halos is 'multiple':
- mylog.error("Getting velocity center from a max field value only works with halos='single'.")
- return None
+ mylog.info('Using position of max %s for velocity center.' % self.velocity_center[1])
else:
mylog.error("First value of parameter, VelocityCenter, must be either 'bulk' or 'max'.")
return None
@@ -284,7 +301,7 @@
mylog.error("No halos loaded, there will be nothing to do.")
return None
else:
- mylog.error("I don't know whether to get halos from hop or from density maximum. This should not have happened.")
+ mylog.error("Keyword, halos, must be either 'single' or 'multiple'.")
return None
def add_halo_filter(self, function, *args, **kwargs):
@@ -351,6 +368,10 @@
"""
+ # Check for any field that might need to have the bulk velocity set.
+ if 'Velocity' in field or 'Mach' in field:
+ self._need_bulk_velocity = True
+
self.profile_fields.append({'field':field, 'weight_field':weight_field,
'accumulation':accumulation})
@@ -379,11 +400,15 @@
"""
+ # Check for any field that might need to have the bulk velocity set.
+ if 'Velocity' in field or 'Mach' in field:
+ self._need_bulk_velocity = True
+
self.projection_fields.append({'field':field, 'weight_field':weight_field,
'cmap': cmap})
@parallel_blocking_call
- def make_profiles(self, filename=None, prefilters=None, **kwargs):
+ def make_profiles(self, filename=None, prefilters=None, njobs=-1):
r"""Make radial profiles for all halos in the list.
After all the calls to `add_profile`, this will trigger the actual
@@ -394,7 +419,7 @@
filename : string
If set, a file will be written with all of the filtered halos
and the quantities returned by the filter functions.
- Default=None.
+ Default: None.
prefilters : array_like
A single dataset can contain thousands or tens of thousands of
halos. Significant time can be saved by not profiling halos
@@ -402,6 +427,11 @@
Simple filters based on quantities provided in the initial
halo list can be used to filter out unwanted halos using this
parameter.
+ Default: None.
+ njobs : int
+ The number of jobs over which to split the profiling. Set
+ to -1 so that each halo is done by a single processor.
+ Default: -1.
Examples
--------
@@ -454,7 +484,7 @@
# Profile all halos.
updated_halos = []
- for halo in parallel_objects(self.all_halos, -1):
+ for halo in parallel_objects(self.all_halos, njobs=njobs):
# Apply prefilters to avoid profiling unwanted halos.
filter_result = True
haloQuantities = {}
@@ -468,7 +498,8 @@
profile_filename = "%s/Halo_%04d_profile.dat" % (my_output_dir, halo['id'])
- profiledHalo = self._get_halo_profile(halo, profile_filename, virial_filter=virial_filter)
+ profiledHalo = self._get_halo_profile(halo, profile_filename,
+ virial_filter=virial_filter)
if profiledHalo is None:
continue
@@ -487,26 +518,26 @@
for quantity in self.filter_quantities:
if halo.has_key(quantity): haloQuantities[quantity] = halo[quantity]
- self.filtered_halos.append(haloQuantities)
+ only_on_root(self.filtered_halos.append, haloQuantities)
# If we've gotten this far down, this halo is good and we want
# to keep it. But we need to communicate the recentering changes
# to all processors (the root one in particular) without having
# one task clobber the other.
- updated_halos.append(halo)
-
+ only_on_root(updated_halos.append, halo)
+
# And here is where we bring it all together.
updated_halos = self.comm.par_combine_object(updated_halos,
datatype="list", op="cat")
- updated_halos.sort(key = lambda a:a['id'])
+ updated_halos.sort(key=lambda a:a['id'])
self.all_halos = updated_halos
self.filtered_halos = self.comm.par_combine_object(self.filtered_halos,
datatype="list", op="cat")
- self.filtered_halos.sort(key = lambda a:a['id'])
+ self.filtered_halos.sort(key=lambda a:a['id'])
if filename is not None:
- self._write_filtered_halo_list(filename, **kwargs)
+ self._write_filtered_halo_list(filename)
def _get_halo_profile(self, halo, filename, virial_filter=True,
force_write=False):
@@ -529,31 +560,13 @@
return None
# get a sphere object to profile
- sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+ sphere = self._get_halo_sphere(halo)
if sphere is None: return None
- if self._need_bulk_velocity:
- # Set bulk velocity to zero out radial velocity profiles.
- if self.velocity_center[0] == 'bulk':
- if self.velocity_center[1] == 'halo':
- sphere.set_field_parameter('bulk_velocity', halo['velocity'])
- elif self.velocity_center[1] == 'sphere':
- sphere.set_field_parameter('bulk_velocity',
- sphere.quantities['BulkVelocity'](lazy_reader=False,
- preload=False))
- else:
- mylog.error("Invalid parameter: VelocityCenter.")
- elif self.velocity_center[0] == 'max':
- max_grid, max_cell, max_value, max_location = \
- self.pf.h.find_max_cell_location(self.velocity_center[1])
- sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
- max_grid['y-velocity'][max_cell],
- max_grid['z-velocity'][max_cell]])
-
try:
profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
r_min, halo['r_max'],
- log_space=True, lazy_reader=False,
+ log_space=True, lazy_reader=True,
end_collect=True)
except EmptyProfileData:
mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
@@ -586,9 +599,75 @@
return profile
+ def _get_halo_sphere(self, halo):
+ """
+ Returns a sphere object for a given halo, performs the recentering,
+ and calculates bulk velocities.
+ """
+
+ sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+ if len(sphere._grids) == 0: return None
+ new_sphere = False
+
+ if self.recenter:
+ old = halo['center']
+ if self.recenter in centering_registry:
+ new_x, new_y, new_z = \
+ centering_registry[self.recenter](sphere)
+ else:
+ # user supplied function
+ new_x, new_y, new_z = self.recenter(sphere)
+ if new_x < self.pf.domain_left_edge[0] or \
+ new_y < self.pf.domain_left_edge[1] or \
+ new_z < self.pf.domain_left_edge[2]:
+ mylog.info("Recentering rejected, skipping halo %d" % \
+ halo['id'])
+ return None
+ halo['center'] = [new_x, new_y, new_z]
+ d = self.pf['kpc'] * periodic_dist(old, halo['center'],
+ self.pf.domain_right_edge - self.pf.domain_left_edge)
+ mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+ # Expand the halo to account for recentering.
+ halo['r_max'] += d / 1000. # d is in kpc -> want mpc
+ new_sphere = True
+
+ if new_sphere:
+ # Temporary solution to memory leak.
+ for g in self.pf.h.grids:
+ g.clear_data()
+ sphere.clear_data()
+ del sphere
+ sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+
+ if self._need_bulk_velocity:
+ # Set bulk velocity to zero out radial velocity profiles.
+ if self.velocity_center[0] == 'bulk':
+ if self.velocity_center[1] == 'halo':
+ sphere.set_field_parameter('bulk_velocity', halo['velocity'])
+ elif self.velocity_center[1] == 'sphere':
+ mylog.info('Calculating sphere bulk velocity.')
+ sphere.set_field_parameter('bulk_velocity',
+ sphere.quantities['BulkVelocity'](lazy_reader=True,
+ preload=False))
+ else:
+ mylog.error("Invalid parameter: velocity_center.")
+ return None
+ elif self.velocity_center[0] == 'max':
+ mylog.info('Setting bulk velocity with value at max %s.' % self.velocity_center[1])
+ max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
+ lazy_reader=True)
+ max_grid = self.pf.h.grids[mg]
+ max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+ sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
+ max_grid['y-velocity'][max_cell],
+ max_grid['z-velocity'][max_cell]])
+ mylog.info('Bulk velocity set.')
+
+ return sphere
+
@parallel_blocking_call
def make_projections(self, axes=[0, 1, 2], halo_list='filtered',
- save_images=False, save_cube=True):
+ save_images=False, save_cube=True, njobs=-1):
r"""Make projections of all halos using specified fields.
After adding fields using `add_projection`, this starts the actual
@@ -608,6 +687,10 @@
save_cube : bool
Whether or not to save the HDF5 files of the halo projections.
Default=True.
+ njobs : int
+ The number of jobs over which to split the projections. Set
+ to -1 so that each halo is done by a single processor.
+ Default: -1.
Examples
--------
@@ -656,7 +739,7 @@
self.pf.parameters['DomainRightEdge'][w])
for w in range(self.pf.parameters['TopGridRank'])]
- for halo in parallel_objects(halo_projection_list, -1):
+ for halo in parallel_objects(halo_projection_list, njobs=njobs):
if halo is None:
continue
# Check if region will overlap domain edge.
@@ -745,7 +828,7 @@
@parallel_blocking_call
def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
- analysis_output_dir=None):
+ analysis_output_dir=None, njobs=-1):
r"""Perform custom analysis on all halos.
This will loop through all halo on the HaloProfiler's list,
@@ -768,6 +851,10 @@
analysis_output_dir : string, optional
If specified, this directory will be created within the dataset to
contain any output from the analysis function. Default: None.
+ njobs : int
+ The number of jobs over which to split the analysis. Set
+ to -1 so that each halo is done by a single processor.
+ Default: -1.
Examples
--------
@@ -803,11 +890,11 @@
my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
self.__check_directory(my_output_dir)
- for halo in parallel_objects(halo_analysis_list, -1):
+ for halo in parallel_objects(halo_analysis_list, njobs=njobs):
if halo is None: continue
# Get a sphere object to analyze.
- sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+ sphere = self._get_halo_sphere(halo)
if sphere is None: continue
# Call the given analysis function.
@@ -1042,94 +1129,6 @@
else:
os.mkdir(my_output_dir)
-def get_halo_sphere(halo, pf, recenter=None):
- r"""Returns a sphere object for a given halo.
-
- With a dictionary containing halo properties, such as center
- and r_max, this creates a sphere object and optionally
- recenters and recreates the sphere using a recentering function.
- This is to be used primarily to make spheres for a set of halos
- loaded by the HaloProfiler.
-
- Parameters
- ----------
- halo : dict, required
- The dictionary containing halo properties used to make the sphere.
- Required entries:
- center : list with center coordinates.
- r_max : sphere radius in Mpc.
- pf : parameter file object, required
- The parameter file from which the sphere will be made.
- recenter : {None, string or function}
- The exact location of the sphere center can significantly affect
- radial profiles. The halo center loaded by the HaloProfiler will
- typically be the dark matter center of mass calculated by a halo
- finder. However, this may not be the best location for centering
- profiles of baryon quantities. For example, one may want to center
- on the maximum density.
- If recenter is given as a string, one of the existing recentering
- functions will be used:
- Min_Dark_Matter_Density : location of minimum dark matter density
- Max_Dark_Matter_Density : location of maximum dark matter density
- CoM_Dark_Matter_Density : dark matter center of mass
- Min_Gas_Density : location of minimum gas density
- Max_Gas_Density : location of maximum gas density
- CoM_Gas_Density : gas center of mass
- Min_Total_Density : location of minimum total density
- Max_Total_Density : location of maximum total density
- CoM_Total_Density : total center of mass
- Min_Temperature : location of minimum temperature
- Max_Temperature : location of maximum temperature
- Alternately, a function can be supplied for custom recentering.
- The function should take only one argument, a sphere object.
- Example function:
- def my_center_of_mass(data):
- my_x, my_y, my_z = data.quantities['CenterOfMass']()
- return (my_x, my_y, my_z)
-
- Examples: this should primarily be used with the halo list of the HaloProfiler.
- This is an example with an abstract halo asssuming a pre-defined pf.
- >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
- >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
- >>> # Assuming the above example function has been defined.
- >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
- """
-
- sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
- if len(sphere._grids) == 0: return None
- new_sphere = False
-
- if recenter:
- old = halo['center']
- if recenter in centering_registry:
- new_x, new_y, new_z = \
- centering_registry[recenter](sphere)
- else:
- # user supplied function
- new_x, new_y, new_z = recenter(sphere)
- if new_x < pf.domain_left_edge[0] or \
- new_y < pf.domain_left_edge[1] or \
- new_z < pf.domain_left_edge[2]:
- mylog.info("Recentering rejected, skipping halo %d" % \
- halo['id'])
- return None
- halo['center'] = [new_x, new_y, new_z]
- d = pf['kpc'] * periodic_dist(old, halo['center'],
- pf.domain_right_edge - pf.domain_left_edge)
- mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
- # Expand the halo to account for recentering.
- halo['r_max'] += d / 1000 # d is in kpc -> want mpc
- new_sphere = True
-
- if new_sphere:
- # Temporary solution to memory leak.
- for g in pf.h.grids:
- g.clear_data()
- sphere.clear_data()
- del sphere
- sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
- return sphere
-
def _shift_projections(pf, projections, oldCenter, newCenter, axis):
"""
Shift projection data around.
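[Note: taken together, this hunk moves the bulk-velocity check from __init__ into add_profile and add_projection (so fields added after construction are caught), exposes an njobs keyword on the parallel loops, and re-enables velocity_center=['max', field] for multiple halos. A usage sketch assembled from the docstrings above; the field choices are illustrative, and the add_profile keyword names are assumed to match the dictionary keys shown in the hunk:

    >>> from yt.analysis_modules.halo_profiler.api import *
    >>> hp = HaloProfiler("RedshiftOutput0005/RD0005",
    ...                   recenter='Max_Gas_Density',
    ...                   velocity_center=['bulk', 'sphere'])
    >>> hp.add_profile('CellMassMsun', weight_field=None, accumulation=True)
    >>> hp.add_profile('RadialVelocity', weight_field='CellMassMsun',
    ...                accumulation=False)  # 'Velocity' in name sets _need_bulk_velocity
    >>> hp.make_profiles(filename="FilteredQuantities.out", njobs=-1)
]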
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -190,7 +190,7 @@
elif (child._isValid()):
these_children.append(child)
else:
- print "Eliminating invalid, childless clump with %d cells." % len(child.data["CellMassMsun"])
+ print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
if (len(these_children) > 1):
print "%d of %d children survived." % (len(these_children),len(clump.children))
clump.children = these_children
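[Note: switching the cell count from CellMassMsun to Ones avoids deriving a mass field just to measure a clump's size: Ones is unity in every cell of any data object, so its array length is the cell count. The Contours change in universal_fields.py below leans on the same field for the same reason.

    >>> # "Ones" is 1.0 in every cell, so len() gives the cell count
    >>> # without requiring a mass field to be derivable.
    >>> n_cells = len(child.data["Ones"])
]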
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -240,6 +240,8 @@
pass
elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
center = na.array(center)
+ elif center in ("c", "center"):
+ center = self.pf.domain_center
elif center == ("max"): # is this dangerous for race conditions?
center = self.pf.h.find_max("Density")[1]
elif center.startswith("max_"):
@@ -493,7 +495,7 @@
self._sorted = {}
def get_data(self, fields=None, in_grids=False):
- if self._grids == None:
+ if self._grids is None:
self._get_list_of_grids()
points = []
if not fields:
@@ -1152,6 +1154,9 @@
def _mrep(self):
return MinimalSliceData(self)
+ def hub_upload(self):
+ self._mrep.upload()
+
class AMRCuttingPlaneBase(AMR2DData):
_plane = None
_top_node = "/CuttingPlanes"
@@ -1673,6 +1678,9 @@
def _mrep(self):
return MinimalProjectionData(self)
+ def hub_upload(self):
+ self._mrep.upload()
+
def _convert_field_name(self, field):
if field == "weight_field": return "weight_field_%s" % self._weight
if field in self._key_fields: return field
@@ -2535,7 +2543,18 @@
def cut_region(self, field_cuts):
"""
Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts.
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object.
+
+ Examples
+ --------
+ To find the total mass of gas above 10^6 K in your volume:
+
+ >>> pf = load("RedshiftOutput0005")
+ >>> ad = pf.h.all_data()
+ >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> print cr.quantities["TotalQuantity"]("CellMassMsun")
+
"""
return InLineExtractedRegionBase(self, field_cuts)
@@ -3284,6 +3303,40 @@
pointI = na.where(k == True)
return pointI
+class AMRMaxLevelCollection(AMR3DData):
+ _type_name = "grid_collection_max_level"
+ _con_args = ("center", "max_level")
+ def __init__(self, center, max_level, fields = None,
+ pf = None, **kwargs):
+ """
+ By selecting an arbitrary *max_level*, we can act on those grids.
+ Child cells are masked when the level of the grid is below the max
+ level.
+ """
+ AMR3DData.__init__(self, center, fields, pf, **kwargs)
+ self.max_level = max_level
+ self._refresh_data()
+
+ def _get_list_of_grids(self):
+ if self._grids is not None: return
+ gi = (self.pf.h.grid_levels <= self.max_level)[:,0]
+ self._grids = self.pf.h.grids[gi]
+
+ def _is_fully_enclosed(self, grid):
+ return True
+
+ @cache_mask
+ def _get_cut_mask(self, grid):
+ return na.ones(grid.ActiveDimensions, dtype='bool')
+
+ def _get_point_indices(self, grid, use_child_mask=True):
+ k = na.ones(grid.ActiveDimensions, dtype='bool')
+ if use_child_mask and grid.Level < self.max_level:
+ k[grid.child_indices] = False
+ pointI = na.where(k == True)
+ return pointI
+
+
class AMRSphereBase(AMR3DData):
"""
A sphere of points
@@ -3359,9 +3412,18 @@
The resolution level data is uniformly gridded at
left_edge : array_like
The left edge of the region to be extracted
- right_edge : array_like
+ dims : array_like
+ Number of cells along each axis of resulting covering_grid
+ right_edge : array_like, optional
The right edge of the region to be extracted
-
+ fields : array_like, optional
+ A list of fields that you'd like pre-generated for your object
+
+ Example
+ -------
+ cube = pf.h.covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+ right_edge=[1.0, 1.0, 1.0],
+ dims=[128, 128, 128])
"""
AMR3DData.__init__(self, center=kwargs.pop("center", None),
fields=fields, pf=pf, **kwargs)
@@ -3497,7 +3559,8 @@
@wraps(AMRCoveringGridBase.__init__)
def __init__(self, *args, **kwargs):
"""A 3D region with all data extracted and interpolated to a
- single, specified resolution.
+ single, specified resolution. (Identical to covering_grid,
+ except that it interpolates.)
Smoothed covering grids start at level 0, interpolating to
fill the region to level 1, replacing any cells actually
@@ -3510,9 +3573,18 @@
The resolution level data is uniformly gridded at
left_edge : array_like
The left edge of the region to be extracted
- right_edge : array_like
+ dims : array_like
+ Number of cells along each axis of resulting covering_grid
+ right_edge : array_like, optional
The right edge of the region to be extracted
-
+ fields : array_like, optional
+ A list of fields that you'd like pre-generated for your object
+
+ Example
+ -------
+ cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+ right_edge=[1.0, 1.0, 1.0],
+ dims=[128, 128, 128])
"""
self._base_dx = (
(self.pf.domain_right_edge - self.pf.domain_left_edge) /
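[Note: besides the docstring repairs, data_containers.py picks up center="c"/"center" shortcuts, hub_upload() on slices and projections, a documented cut_region, and the new grid_collection_max_level object. A sketch of the two data-object additions; the hierarchy attribute for the new object is assumed from its _type_name, as with yt's other registered data objects:

    >>> pf = load("RedshiftOutput0005")
    >>> ad = pf.h.all_data()
    >>> hot = ad.cut_region(["grid['Temperature'] > 1e6"])
    >>> print hot.quantities["TotalQuantity"]("CellMassMsun")
    >>> # act on all grids at or below level 4, masking covered cells
    >>> gc = pf.h.grid_collection_max_level([0.5, 0.5, 0.5], 4)
]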
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -128,6 +128,9 @@
def _mrep(self):
return MinimalStaticOutput(self)
+ def hub_upload(self):
+ self._mrep.upload()
+
@classmethod
def _is_valid(cls, *args, **kwargs):
return False
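[Note: with this, hub_upload() exists on static outputs as well as on slices and projections (hunks above); each simply pushes the object's minimal representation (_mrep) to the yt Hub. A sketch, assuming the yt 2.x projection call signature:

    >>> pf = load("RedshiftOutput0005")
    >>> pf.hub_upload()                # uploads MinimalStaticOutput via _mrep.upload()
    >>> proj = pf.h.proj(0, "Density")
    >>> proj.hub_upload()              # MinimalProjectionData, same pattern
]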
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -467,7 +467,7 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return na.ones(data["Density"].shape)*-1
+ return -na.ones_like(data["Ones"])
add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
display_field=False, function=_Contours)
add_field("tempContours", function=_Contours,
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -54,9 +54,8 @@
#Add the fields, then later we'll individually defined units and names
for f in known_art_fields:
- if f not in ARTFieldInfo:
- add_field(f, function=lambda a,b: None, take_log=True,
- validators = [ValidateDataField(f)])
+ add_art_field(f, function=NullFunc, take_log=True,
+ validators = [ValidateDataField(f)])
#Hydro Fields that are verified to be OK unit-wise:
#Density
@@ -91,81 +90,81 @@
def _convertDensity(data):
return data.convert("Density")
-ARTFieldInfo["Density"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["Density"]._convert_function=_convertDensity
+KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["Density"]._convert_function=_convertDensity
def _convertTotalEnergy(data):
return data.convert("GasEnergy")
-ARTFieldInfo["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["TotalEnergy"]._projected_units = r"\rm{K}"
-ARTFieldInfo["TotalEnergy"]._convert_function=_convertTotalEnergy
+KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
+KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
def _convertXMomentumDensity(data):
tr = data.convert("Mass")*data.convert("Velocity")
tr *= (data.convert("Density")/data.convert("Mass"))
return tr
-ARTFieldInfo["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-ARTFieldInfo["XMomentumDensity"]._projected_units = r"\rm{K}"
-ARTFieldInfo["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+KnownARTFields["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
def _convertYMomentumDensity(data):
tr = data.convert("Mass")*data.convert("Velocity")
tr *= (data.convert("Density")/data.convert("Mass"))
return tr
-ARTFieldInfo["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-ARTFieldInfo["YMomentumDensity"]._projected_units = r"\rm{K}"
-ARTFieldInfo["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+KnownARTFields["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
def _convertZMomentumDensity(data):
tr = data.convert("Mass")*data.convert("Velocity")
tr *= (data.convert("Density")/data.convert("Mass"))
return tr
-ARTFieldInfo["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-ARTFieldInfo["ZMomentumDensity"]._projected_units = r"\rm{K}"
-ARTFieldInfo["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+KnownARTFields["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
def _convertPressure(data):
return data.convert("Pressure")
-ARTFieldInfo["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
-ARTFieldInfo["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
-ARTFieldInfo["Pressure"]._convert_function=_convertPressure
+KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
+KnownARTFields["Pressure"]._convert_function=_convertPressure
def _convertGamma(data):
return 1.0
-ARTFieldInfo["Gamma"]._units = r""
-ARTFieldInfo["Gamma"]._projected_units = r""
-ARTFieldInfo["Gamma"]._convert_function=_convertGamma
+KnownARTFields["Gamma"]._units = r""
+KnownARTFields["Gamma"]._projected_units = r""
+KnownARTFields["Gamma"]._convert_function=_convertGamma
def _convertGasEnergy(data):
return data.convert("GasEnergy")
-ARTFieldInfo["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-ARTFieldInfo["GasEnergy"]._projected_units = r""
-ARTFieldInfo["GasEnergy"]._convert_function=_convertGasEnergy
+KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
+KnownARTFields["GasEnergy"]._projected_units = r""
+KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
def _convertMetalDensitySNII(data):
return data.convert("Density")
-ARTFieldInfo["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
def _convertMetalDensitySNIa(data):
return data.convert("Density")
-ARTFieldInfo["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
def _convertPotentialNew(data):
return data.convert("Potential")
-ARTFieldInfo["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["PotentialNew"]._convert_function=_convertPotentialNew
+KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
def _convertPotentialOld(data):
return data.convert("Potential")
-ARTFieldInfo["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["PotentialOld"]._convert_function=_convertPotentialOld
+KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
####### Derived fields
@@ -181,24 +180,24 @@
x /= data.pf.conversion_factors["GasEnergy"]
x *= data.pf.conversion_factors["Temperature"]
return x
-add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+add_art_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Temperature"]._units = r"\mathrm{K}"
+KnownARTFields["Temperature"]._projected_units = r"\mathrm{K}"
+KnownARTFields["Temperature"]._convert_function=_converttemperature
def _metallicity_snII(field, data):
tr = data["MetalDensitySNII"] / data["Density"]
return tr
-add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Metallicity_SNII"]._units = r""
-ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
+add_art_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metallicity_SNII"]._units = r""
+KnownARTFields["Metallicity_SNII"]._projected_units = r""
def _metallicity_snIa(field, data):
tr = data["MetalDensitySNIa"] / data["Density"]
return tr
-add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Metallicity_SNIa"]._units = r""
-ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
+add_art_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metallicity_SNIa"]._units = r""
+KnownARTFields["Metallicity_SNIa"]._projected_units = r""
def _x_velocity(data):
tr = data["XMomentumDensity"]/data["Density"]
@@ -226,9 +225,9 @@
tr = data["MetalDensitySNIa"]
tr += data["MetalDensitySNII"]
return tr
-add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Metal_Density"]._units = r""
-ARTFieldInfo["Metal_Density"]._projected_units = r""
+add_art_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metal_Density"]._units = r""
+KnownARTFields["Metal_Density"]._projected_units = r""
#Particle fields
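[Note: the whole hunk applies one rename pattern: code-native ART fields move from ARTFieldInfo into the KnownARTFields registry and are registered with add_art_field and a NullFunc placeholder, after which units and conversion functions are attached. The pattern, using names from the hunk itself:

    >>> add_art_field("Density", function=NullFunc, take_log=True,
    ...               validators=[ValidateDataField("Density")])
    >>> KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
    >>> KnownARTFields["Density"]._convert_function = _convertDensity
]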
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -38,6 +38,9 @@
EnzoStaticOutput, \
EnzoStaticOutputInMemory
+from .simulation_handling import \
+ EnzoSimulation
+
from .fields import \
EnzoFieldInfo, \
Enzo2DFieldInfo, \
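[Note: the newly exported EnzoSimulation (defined in simulation_handling.py, added below) parses an Enzo parameter file, enumerates every possible output, and builds a TimeSeriesData over a requested subset. A usage sketch assembled from its docstrings; the parameter file name is illustrative, and the explicit import is included in case yt.mods does not re-export the class:

    >>> from yt.mods import *
    >>> from yt.frontends.enzo.api import EnzoSimulation
    >>> es = EnzoSimulation("my_simulation.par")
    >>> es.get_time_series(initial_redshift=10, final_time=13.7,
    ...                    time_units='Gyr', redshift_data=False)
    >>> for pf in es.piter():
    ...     pc = PlotCollection(pf, 'c')
    ...     pc.add_projection('Density', 0)
    ...     pc.save()
]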
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/frontends/enzo/simulation_handling.py
--- /dev/null
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -0,0 +1,692 @@
+"""
+EnzoSimulation class and member functions.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2008-2012 Britton Smith. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.funcs import *
+
+import numpy as na
+import glob
+import os
+
+from yt.data_objects.time_series import \
+ TimeSeriesData
+from yt.utilities.cosmology import \
+ Cosmology, \
+ EnzoCosmology
+from yt.utilities.exceptions import \
+ YTException
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ parallel_root_only
+
+from yt.convenience import \
+ load
+
+class EnzoSimulation(TimeSeriesData):
+ r"""Super class for performing the same operation over all data outputs in
+ a simulation from one redshift to another.
+ """
+ def __init__(self, parameter_filename):
+ r"""Initialize an Enzo Simulation object.
+
+ Upon creation, the parameter file is parsed and the time and redshift
+ are calculated and stored in all_outputs. A time units dictionary is
+ instantiated to allow for time outputs to be requested with physical
+ time units. The get_time_series method can be used to generate a
+ TimeSeriesData object.
+
+ parameter_filename : str
+ The simulation parameter file.
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> es = EnzoSimulation("my_simulation.par")
+ >>> print es.all_outputs
+
+ """
+ self.parameter_filename = parameter_filename
+ self.parameters = {}
+
+ # Set some parameter defaults.
+ self._set_parameter_defaults()
+ # Read the simulation parameter file.
+ self._parse_parameter_file()
+ # Set up time units dictionary.
+ self._set_time_units()
+
+ # Figure out the starting and stopping times and redshift.
+ self._calculate_simulation_bounds()
+ self.print_key_parameters()
+
+ # Get all possible datasets.
+ self._get_all_outputs()
+
+ def get_time_series(self, time_data=True, redshift_data=True,
+ initial_time=None, final_time=None, time_units='1',
+ initial_redshift=None, final_redshift=None,
+ initial_cycle=None, final_cycle=None,
+ times=None, redshifts=None, tolerance=None,
+ find_outputs=False, parallel=True):
+
+ """
+ Instantiate a TimeSeriesData object for a set of outputs.
+
+ If no additional keywords given, a TimeSeriesData object will be
+ created with all potential datasets created by the simulation.
+
+ Outputs can be gathered by specifying a time or redshift range
+ (or combination of time and redshift), with a specific list of
+ times or redshifts, a range of cycle numbers (for cycle based
+ output), or by simply searching all subdirectories within the
+ simulation directory.
+
+ time_data : bool
+ Whether or not to include time outputs when gathering
+ datasets for time series.
+ Default: True.
+ redshift_data : bool
+ Whether or not to include redshift outputs when gathering
+ datasets for time series.
+ Default: True.
+ initial_time : float
+ The earliest time for outputs to be included. If None,
+ the initial time of the simulation is used. This can be
+ used in combination with either final_time or
+ final_redshift.
+ Default: None.
+ final_time : float
+ The latest time for outputs to be included. If None,
+ the final time of the simulation is used. This can be
+ used in combination with either initial_time or
+ initial_redshift.
+ Default: None.
+ times : array_like
+ A list of times for which outputs will be found.
+ Default: None.
+ time_units : str
+ The time units used for requesting outputs by time.
+ Default: '1' (code units).
+ initial_redshift : float
+ The earliest redshift for outputs to be included. If None,
+ the initial redshift of the simulation is used. This can be
+ used in combination with either final_time or
+ final_redshift.
+ Default: None.
+ final_redshift : float
+ The latest redshift for outputs to be included. If None,
+ the final redshift of the simulation is used. This can be
+ used in combination with either initial_time or
+ initial_redshift.
+ Default: None.
+ redshifts : array_like
+ A list of redshifts for which outputs will be found.
+ Default: None.
+ initial_cycle : float
+ The earliest cycle for outputs to be included. If None,
+ the initial cycle of the simulation is used. This can
+ only be used with final_cycle.
+ Default: None.
+ final_cycle : float
+ The latest cycle for outputs to be included. If None,
+ the final cycle of the simulation is used. This can
+ only be used in combination with initial_cycle.
+ Default: None.
+ tolerance : float
+ Used in combination with "times" or "redshifts" keywords,
+ this is the tolerance within which outputs are accepted
+ given the requested times or redshifts. If None, the
+ nearest output is always taken.
+ Default: None.
+ find_outputs : bool
+ If True, subdirectories within the GlobalDir directory are
+ searched one by one for datasets. Time and redshift
+ information are gathered by temporarily instantiating each
+ dataset. This can be used when simulation data was created
+ in a non-standard way, making it difficult to guess the
+ corresponding time and redshift information.
+ Default: False.
+ parallel : bool/int
+ If True, the generated TimeSeriesData will divide the work
+ such that a single processor works on each dataset. If an
+ integer is supplied, the work will be divided into that
+ number of jobs.
+ Default: True.
+
+ Examples
+ --------
+ >>> es.get_time_series(initial_redshift=10, final_time=13.7,
+ time_units='Gyr', redshift_data=False)
+
+ >>> es.get_time_series(redshifts=[3, 2, 1, 0])
+
+ >>> es.get_time_series(final_cycle=100000)
+
+ >>> es.get_time_series(find_outputs=True)
+
+ >>> # after calling get_time_series
+ >>> for pf in es.piter():
+ >>> pc = PlotCollection(pf, 'c')
+ >>> pc.add_projection('Density', 0)
+ >>> pc.save()
+
+ """
+
+ if (initial_redshift is not None or \
+ final_redshift is not None) and \
+ not self.cosmological_simulation:
+ mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
+ return
+
+ if find_outputs:
+ my_outputs = self._find_outputs()
+
+ else:
+ if time_data and redshift_data:
+ my_all_outputs = self.all_outputs
+ elif time_data:
+ my_all_outputs = self.all_time_outputs
+ elif redshift_data:
+ my_all_outputs = self.all_redshift_outputs
+ else:
+ mylog.error('Both time_data and redshift_data are False.')
+ return
+
+ if times is not None:
+ my_outputs = self._get_outputs_by_time(times, tolerance=tolerance,
+ outputs=my_all_outputs,
+ time_units=time_units)
+
+ elif redshifts is not None:
+ my_outputs = self._get_outputs_by_redshift(redshifts, tolerance=tolerance,
+ outputs=my_all_outputs)
+
+ elif initial_cycle is not None or final_cycle is not None:
+ if initial_cycle is None:
+ initial_cycle = 0
+ else:
+ initial_cycle = max(initial_cycle, 0)
+ if final_cycle is None:
+ final_cycle = self.parameters['StopCycle']
+ else:
+ final_cycle = min(final_cycle, self.parameters['StopCycle'])
+ my_outputs = my_all_outputs[int(ceil(float(initial_cycle) /
+ self.parameters['CycleSkipDataDump'])):
+ (final_cycle / self.parameters['CycleSkipDataDump'])+1]
+
+ else:
+ if initial_time is not None:
+ my_initial_time = initial_time / self.time_units[time_units]
+ elif initial_redshift is not None:
+ my_initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(initial_redshift) / \
+ self.enzo_cosmology.TimeUnits
+ else:
+ my_initial_time = self.initial_time
+
+ if final_time is not None:
+ my_final_time = final_time / self.time_units[time_units]
+ elif final_redshift is not None:
+ my_final_time = self.enzo_cosmology.ComputeTimeFromRedshift(final_redshift) / \
+ self.enzo_cosmology.TimeUnits
+ else:
+ my_final_time = self.final_time
+
+ my_times = na.array(map(lambda a:a['time'], my_all_outputs))
+ my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+ if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
+ my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
+
+ TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
+ parallel=parallel)
+ mylog.info("%d outputs loaded into time series." % len(my_outputs))
+
+ @parallel_root_only
+ def print_key_parameters(self):
+ """
+ Print out some key parameters for the simulation.
+ """
+ for a in ["domain_dimensions", "domain_left_edge",
+ "domain_right_edge", "initial_time", "final_time",
+ "stop_cycle", "cosmological_simulation"]:
+ if not hasattr(self, a):
+ mylog.error("Missing %s in parameter file definition!", a)
+ continue
+ v = getattr(self, a)
+ mylog.info("Parameters: %-25s = %s", a, v)
+ if hasattr(self, "cosmological_simulation") and \
+ getattr(self, "cosmological_simulation"):
+ for a in ["omega_lambda", "omega_matter",
+ "hubble_constant", "initial_redshift",
+ "final_redshift"]:
+ if not hasattr(self, a):
+ mylog.error("Missing %s in parameter file definition!", a)
+ continue
+ v = getattr(self, a)
+ mylog.info("Parameters: %-25s = %s", a, v)
+
+ def _parse_parameter_file(self):
+ """
+ Parses the parameter file and establishes the various
+ dictionaries.
+ """
+
+ self.conversion_factors = {}
+ redshift_outputs = []
+
+ # Let's read the file
+ lines = open(self.parameter_filename).readlines()
+ for line in (l.strip() for l in lines):
+ if '#' in line: line = line[0:line.find('#')]
+ if '//' in line: line = line[0:line.find('//')]
+ if len(line) < 2: continue
+ param, vals = (i.strip() for i in line.split("="))
+ # First we try to decipher what type of value it is.
+ vals = vals.split()
+ # Special case approaching.
+ if "(do" in vals: vals = vals[:1]
+ if len(vals) == 0:
+ pcast = str # Assume NULL output
+ else:
+ v = vals[0]
+ # Figure out if it's castable to floating point:
+ try:
+ float(v)
+ except ValueError:
+ pcast = str
+ else:
+ if any("." in v or "e+" in v or "e-" in v for v in vals):
+ pcast = float
+ elif v == "inf":
+ pcast = str
+ else:
+ pcast = int
+ # Now we figure out what to do with it.
+ if param.endswith("Units") and not param.startswith("Temperature"):
+ dataType = param[:-5]
+ # This one better be a float.
+ self.conversion_factors[dataType] = float(vals[0])
+ if param.startswith("CosmologyOutputRedshift["):
+ index = param[param.find("[")+1:param.find("]")]
+ redshift_outputs.append({'index':int(index), 'redshift':float(vals[0])})
+ elif len(vals) == 0:
+ vals = ""
+ elif len(vals) == 1:
+ vals = pcast(vals[0])
+ else:
+ vals = na.array([pcast(i) for i in vals if i != "-99999"])
+ self.parameters[param] = vals
+ self.refine_by = self.parameters["RefineBy"]
+ self.dimensionality = self.parameters["TopGridRank"]
+ if self.dimensionality > 1:
+ self.domain_dimensions = self.parameters["TopGridDimensions"]
+ if len(self.domain_dimensions) < 3:
+ tmp = self.domain_dimensions.tolist()
+ tmp.append(1)
+ self.domain_dimensions = na.array(tmp)
+ self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+ "float64").copy()
+ self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+ "float64").copy()
+ else:
+ self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+ "float64")
+ self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+ "float64")
+ self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+
+ if self.parameters["ComovingCoordinates"]:
+ cosmo_attr = {'omega_lambda': 'CosmologyOmegaLambdaNow',
+ 'omega_matter': 'CosmologyOmegaMatterNow',
+ 'hubble_constant': 'CosmologyHubbleConstantNow',
+ 'initial_redshift': 'CosmologyInitialRedshift',
+ 'final_redshift': 'CosmologyFinalRedshift'}
+ self.cosmological_simulation = 1
+ for a, v in cosmo_attr.items():
+ if not v in self.parameters:
+ raise MissingParameter(self.parameter_filename, v)
+ setattr(self, a, self.parameters[v])
+ else:
+ self.omega_lambda = self.omega_matter = \
+ self.hubble_constant = self.cosmological_simulation = 0.0
+
+ # make list of redshift outputs
+ self.all_redshift_outputs = []
+ if not self.cosmological_simulation: return
+ for output in redshift_outputs:
+ output['filename'] = os.path.join(self.parameters['GlobalDir'],
+ "%s%04d" % (self.parameters['RedshiftDumpDir'],
+ output['index']),
+ "%s%04d" % (self.parameters['RedshiftDumpName'],
+ output['index']))
+ del output['index']
+ self.all_redshift_outputs = redshift_outputs
+
+ def _calculate_redshift_dump_times(self):
+ "Calculates time from redshift of redshift outputs."
+
+ if not self.cosmological_simulation: return
+ for output in self.all_redshift_outputs:
+ output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
+ self.enzo_cosmology.TimeUnits
+
+ def _calculate_time_outputs(self):
+ "Calculate time outputs and their redshifts if cosmological."
+
+ if self.final_time is None or \
+ not 'dtDataDump' in self.parameters or \
+ self.parameters['dtDataDump'] <= 0.0: return []
+
+ self.all_time_outputs = []
+ index = 0
+ current_time = self.initial_time
+ while current_time <= self.final_time + self.parameters['dtDataDump']:
+ filename = os.path.join(self.parameters['GlobalDir'],
+ "%s%04d" % (self.parameters['DataDumpDir'], index),
+ "%s%04d" % (self.parameters['DataDumpName'], index))
+
+ output = {'index': index, 'filename': filename, 'time': current_time}
+ output['time'] = min(output['time'], self.final_time)
+ if self.cosmological_simulation:
+ output['redshift'] = self.enzo_cosmology.ComputeRedshiftFromTime(
+ current_time * self.enzo_cosmology.TimeUnits)
+
+ self.all_time_outputs.append(output)
+ if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+ current_time += self.parameters['dtDataDump']
+ index += 1
+
+ def _calculate_cycle_outputs(self):
+ "Calculate cycle outputs."
+
+ mylog.warn('Calculating cycle outputs. Dataset times will be unavailable.')
+
+ if self.stop_cycle is None or \
+ not 'CycleSkipDataDump' in self.parameters or \
+ self.parameters['CycleSkipDataDump'] <= 0.0: return []
+
+ self.all_time_outputs = []
+ index = 0
+ for cycle in range(0, self.stop_cycle+1, self.parameters['CycleSkipDataDump']):
+ filename = os.path.join(self.parameters['GlobalDir'],
+ "%s%04d" % (self.parameters['DataDumpDir'], index),
+ "%s%04d" % (self.parameters['DataDumpName'], index))
+
+ output = {'index': index, 'filename': filename, 'cycle': cycle}
+ self.all_time_outputs.append(output)
+ index += 1
+
+ def _get_all_outputs(self):
+ "Get all potential datasets and combine into a time-sorted list."
+
+ if self.parameters['dtDataDump'] > 0 and \
+ self.parameters['CycleSkipDataDump'] > 0:
+ raise AmbiguousOutputs(self.parameter_filename)
+
+ # Get all time or cycle outputs.
+ if self.parameters['CycleSkipDataDump'] > 0:
+ self._calculate_cycle_outputs()
+ else:
+ self._calculate_time_outputs()
+
+ # Calculate times for redshift outputs.
+ self._calculate_redshift_dump_times()
+
+ self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+ if self.parameters['CycleSkipDataDump'] <= 0:
+ self.all_outputs.sort(key=lambda obj:obj['time'])
+
+ mylog.info("Total datasets: %d." % len(self.all_outputs))
+
+ def _calculate_simulation_bounds(self):
+ """
+ Figure out the starting and stopping time and redshift for the simulation.
+ """
+
+ if 'StopCycle' in self.parameters:
+ self.stop_cycle = self.parameters['StopCycle']
+
+ # Convert initial/final redshifts to times.
+ if self.cosmological_simulation:
+ # Instantiate EnzoCosmology object for units and time conversions.
+ self.enzo_cosmology = EnzoCosmology(HubbleConstantNow=
+ (100.0 * self.parameters['CosmologyHubbleConstantNow']),
+ OmegaMatterNow=self.parameters['CosmologyOmegaMatterNow'],
+ OmegaLambdaNow=self.parameters['CosmologyOmegaLambdaNow'],
+ InitialRedshift=self.parameters['CosmologyInitialRedshift'])
+ self.initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.initial_redshift) / \
+ self.enzo_cosmology.TimeUnits
+ self.final_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.final_redshift) / \
+ self.enzo_cosmology.TimeUnits
+
+ # If not a cosmology simulation, figure out the stopping criteria.
+ else:
+ if 'InitialTime' in self.parameters:
+ self.initial_time = self.parameters['InitialTime']
+ else:
+ self.initial_time = 0.
+
+ if 'StopTime' in self.parameters:
+ self.final_time = self.parameters['StopTime']
+ else:
+ self.final_time = None
+ if not ('StopTime' in self.parameters or
+ 'StopCycle' in self.parameters):
+ raise NoStoppingCondition(self.parameter_filename)
+ if self.final_time is None:
+ mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+ self.parameter_filename)
+
+ def _set_parameter_defaults(self):
+ "Set some default parameters to avoid problems if they are not in the parameter file."
+
+ self.parameters['GlobalDir'] = "."
+ self.parameters['DataDumpName'] = "data"
+ self.parameters['DataDumpDir'] = "DD"
+ self.parameters['RedshiftDumpName'] = "RedshiftOutput"
+ self.parameters['RedshiftDumpDir'] = "RD"
+ self.parameters['ComovingCoordinates'] = 0
+ self.parameters['TopGridRank'] = 3
+ self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
+ self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+ self.parameters['Refineby'] = 2 # technically not the enzo default
+ self.parameters['StopCycle'] = 100000
+ self.parameters['dtDataDump'] = 0.
+ self.parameters['CycleSkipDataDump'] = 0.
+ self.parameters['TimeUnits'] = 1.
+
+ def _set_time_units(self):
+ """
+ Set up a dictionary of time units conversions.
+ """
+
+ self.time_units = {}
+ if self.cosmological_simulation:
+ self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+ / self.hubble_constant / (1 + self.initial_redshift)**1.5
+ self.time_units['1'] = 1.
+ self.time_units['seconds'] = self.parameters['TimeUnits']
+ self.time_units['years'] = self.time_units['seconds'] / (365*3600*24.0)
+ self.time_units['days'] = self.time_units['seconds'] / (3600*24.0)
+ self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+ self.time_units['Gyr'] = self.time_units['years'] / 1.0e9
+
+ def _find_outputs(self):
+ """
+ Search for directories matching the data dump keywords.
+ If found, get dataset times by opening the pf.
+ """
+
+ # look for time outputs.
+ potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['DataDumpDir'])) + \
+ glob.glob(os.path.join(self.parameters['GlobalDir'],
+ "%s*" % self.parameters['RedshiftDumpDir']))
+ time_outputs = []
+ mylog.info("Checking %d potential time outputs." %
+ len(potential_outputs))
+
+ for output in potential_outputs:
+ if self.parameters['DataDumpDir'] in output:
+ dir_key = self.parameters['DataDumpDir']
+ output_key = self.parameters['DataDumpName']
+ else:
+ dir_key = self.parameters['RedshiftDumpDir']
+ output_key = self.parameters['RedshiftDumpName']
+ index = output[output.find(dir_key) + len(dir_key):]
+ filename = os.path.join(self.parameters['GlobalDir'],
+ "%s%s" % (dir_key, index),
+ "%s%s" % (output_key, index))
+ if os.path.exists(filename):
+ pf = load(filename)
+ if pf is not None:
+ time_outputs.append({'filename': filename, 'time': pf.current_time})
+ if pf.cosmological_simulation:
+ time_outputs[-1]['redshift'] = pf.current_redshift
+ del pf
+ mylog.info("Located %d time outputs." % len(time_outputs))
+ time_outputs.sort(key=lambda obj: obj['time'])
+ return time_outputs
+
+ def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
+ r"""Get datasets at or near to given values.
+
+ Parameters
+ ----------
+ key : str
+ The key by which to retrieve outputs, usually 'time' or
+ 'redshift'.
+ values : array_like
+ A list of values, given as floats.
+ tolerance : float
+ If not None, do not return a dataset unless the value is
+ within the tolerance value. If None, simply return the
+ nearest dataset.
+ Default: None.
+ outputs : list
+ The list of outputs from which to choose. If None,
+ self.all_outputs is used.
+ Default: None.
+
+ Examples
+ --------
+ >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
+
+ """
+
+ values = ensure_list(values)
+ if outputs is None:
+ outputs = self.all_outputs
+ my_outputs = []
+ for value in values:
+ outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
+ if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+ and outputs[0] not in my_outputs:
+ my_outputs.append(outputs[0])
+ else:
+ mylog.error("No dataset added for %s = %f." % (key, value))
+
+ outputs.sort(key=lambda obj: obj['time'])
+ return my_outputs
+
+ def _get_outputs_by_redshift(self, redshifts, tolerance=None, outputs=None):
+ r"""Get datasets at or near to given redshifts.
+
+ Parameters
+ ----------
+ redshifts: array_like
+ A list of redshifts, given as floats.
+ tolerance : float
+ If not None, do not return a dataset unless the value is
+ within the tolerance value. If None, simply return the
+ nearest dataset.
+ Default: None.
+ outputs : list
+ The list of outputs from which to choose. If None,
+ self.all_outputs is used.
+ Default: None.
+
+ Examples
+ --------
+ >>> datasets = es.get_outputs_by_redshift([0, 1, 2], tolerance=0.1)
+
+ """
+
+ return self._get_outputs_by_key('redshift', redshifts, tolerance=tolerance,
+ outputs=outputs)
+
+ def _get_outputs_by_time(self, times, tolerance=None, outputs=None,
+ time_units='1'):
+ r"""Get datasets at or near to given times.
+
+ Parameters
+ ----------
+ times: array_like
+ A list of times, given in code units as floats.
+ tolerance : float
+ If not None, do not return a dataset unless the time is
+ within the tolerance value. If None, simply return the
+ nearest dataset.
+ Default: None.
+ outputs : list
+ The list of outputs from which to choose. If None,
+ self.all_outputs is used.
+ Default: None.
+ time_units : str
+ The units of the list of times.
+ Default: '1' (code units).
+
+ Examples
+ --------
+ >>> datasets = es.get_outputs_by_time([600, 500, 400], tolerance=10.)
+
+ """
+
+ times = na.array(times) / self.time_units[time_units]
+ return self._get_outputs_by_key('time', times, tolerance=tolerance,
+ outputs=outputs)
+
+class MissingParameter(YTException):
+ def __init__(self, pf, parameter):
+ YTException.__init__(self, pf)
+ self.parameter = parameter
+
+ def __str__(self):
+ return "Parameter file %s is missing %s parameter." % \
+ (self.pf, self.parameter)
+
+class NoStoppingCondition(YTException):
+ def __init__(self, pf):
+ YTException.__init__(self, pf)
+
+ def __str__(self):
+ return "Simulation %s has no stopping condition. StopTime or StopCycle should be set." % \
+ self.pf
+
+class AmbiguousOutputs(YTException):
+ def __init__(self, pf):
+ YTException.__init__(self, pf)
+
+ def __str__(self):
+ return "Simulation %s has both dtDataDump and CycleSkipDataDump set. Unable to calculate datasets." % \
+ self.pf
+
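For reference, the EnzoSimulation machinery added above is driven roughly as follows; a minimal sketch, assuming the underscored _get_outputs_by_* helpers are exposed under the public names their docstring examples use, and with a hypothetical parameter file name:

    from yt.mods import *
    es = EnzoSimulation("my_simulation.par")   # hypothetical Enzo parameter file
    # Nearest dataset to each of z = 0, 1, 2, but only within dz = 0.1:
    for output in es.get_outputs_by_redshift([0, 1, 2], tolerance=0.1):
        print output['filename'], output['redshift']

The yt/mods.py hunk later in this changeset adds EnzoSimulation to the top-level namespace, so the import above works once this merge lands.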
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -94,22 +94,6 @@
except ImportError:
pass
-def __memory_fallback(pid):
- """
- Get process memory from a system call.
- """
- value = os.popen('ps -o rss= -p %d' % pid).read().strip().split('\n')
- if len(value) == 1: return float(value[0])
- value.pop(0)
- for line in value:
- online = line.split()
- if online[0] != pid: continue
- try:
- return float(online[2])
- except:
- return 0.0
- return 0.0
-
def get_memory_usage():
"""
Return resident size in megabytes.
@@ -118,10 +102,10 @@
try:
pagesize = resource.getpagesize()
except NameError:
- return __memory_fallback(pid) / 1024
+ return -1024
status_file = "/proc/%s/statm" % (pid)
if not os.path.isfile(status_file):
- return __memory_fallback(pid) / 1024
+ return -1024
line = open(status_file).read()
size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
return resident * pagesize / (1024 * 1024) # return in megs
@@ -568,10 +552,11 @@
def parallel_profile(prefix):
import cProfile
from yt.config import ytcfg
- fn = "%s_%04i.cprof" % (prefix,
+ fn = "%s_%04i_%04i.cprof" % (prefix,
+ ytcfg.getint("yt", "__topcomm_parallel_size"),
ytcfg.getint("yt", "__topcomm_parallel_rank"))
p = cProfile.Profile()
p.enable()
- yield
+ yield fn
p.disable()
p.dump_stats(fn)
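The parallel_profile change above threads the communicator size into the profile filename and yields that name to the caller. Assuming the generator is wrapped with contextlib.contextmanager, as its yield-based body implies, usage looks like this sketch (the workload is a hypothetical stand-in):

    from yt.funcs import parallel_profile

    with parallel_profile("projection") as fn:
        sum(i * i for i in xrange(10 ** 6))   # stand-in for real work
    # On a 4-rank run, rank 2 now has "projection_0004_0002.cprof",
    # ready for inspection with pstats; stats are written on block exit.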
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -26,6 +26,7 @@
import json
import os
+import stat
import cStringIO
import logging
import uuid
@@ -276,7 +277,7 @@
for i in range(30):
# Check for stop
if self.stopped: return {'type':'shutdown'} # No race condition
- if self.payload_handler.event.wait(1): # One second timeout
+ if self.payload_handler.event.wait(0.01): # 10 millisecond timeout
return self.payload_handler.deliver_payloads()
if self.debug: print "### Heartbeat ... finished: %s" % (time.ctime())
return []
@@ -459,6 +460,36 @@
return command
@lockit
+ def load(self, base_dir, filename):
+ pp = os.path.join(base_dir, filename)
+ funccall = "pfs.append(load('%s'))" % pp
+ self.execute(funccall)
+ return []
+
+ def file_listing(self, base_dir, sub_dir):
+ if base_dir == "":
+ cur_dir = os.getcwd()
+ elif sub_dir == "":
+ cur_dir = base_dir
+ else:
+ cur_dir = os.path.join(base_dir, sub_dir)
+ cur_dir = os.path.abspath(cur_dir)
+ if not os.path.isdir(cur_dir):
+ return {'change':False}
+ fns = os.listdir(cur_dir)
+ results = [("..", 0, "directory")]
+ for fn in sorted((os.path.join(cur_dir, f) for f in fns)):
+ if not os.access(fn, os.R_OK): continue
+ if os.path.isfile(fn):
+ size = os.path.getsize(fn)
+ t = "file"
+ else:
+ size = 0
+ t = "directory"
+ results.append((os.path.basename(fn), size, t))
+ return dict(objs = results, cur_dir=cur_dir)
+
+ @lockit
def create_phase(self, objname, field_x, field_y, field_z, weight):
if weight == "None": weight = None
else: weight = "'%s'" % (weight)
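The new file_listing handler either reports {'change': False} for an unreadable or nonexistent directory, or returns the readable entries as (name, size, type) tuples along with the resolved directory, which the file dialog below consumes. A hypothetical response:

    response = {
        'cur_dir': '/data/DD0010',
        'objs': [('..', 0, 'directory'),
                 ('data0010', 4096, 'file'),
                 ('data0010.hierarchy', 12288, 'file')],
    }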
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/gui/reason/html/images/file_dialog_directory.png
Binary file yt/gui/reason/html/images/file_dialog_directory.png has changed
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/gui/reason/html/images/file_dialog_file.png
Binary file yt/gui/reason/html/images/file_dialog_file.png has changed
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/gui/reason/html/index.html
--- a/yt/gui/reason/html/index.html
+++ b/yt/gui/reason/html/index.html
@@ -78,7 +78,8 @@
<!-- FONTS -->
<!-- These will get pulled from Google, but Google might not be accessible.
In that case, it will default to whatever is in the family. -->
- <link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css?family=Inconsolata">
+ <!--<link rel="stylesheet" type="text/css"
+ href="http://fonts.googleapis.com/css?family=Inconsolata">-->
<!-- LEAFLET STUFF -->
<script type="text/javascript" src="leaflet/leaflet.js"></script>
@@ -103,6 +104,9 @@
<script type="text/javascript" src="js/menu_items.js"></script><!-- THE PLOT WINDOW FUNCTIONS -->
+ <script type="text/javascript" src="js/file_open.js"></script>
+
+ <!-- THE PLOT WINDOW FUNCTIONS -->
<script type="text/javascript" src="js/widget_plotwindow.js"></script>
<!-- THE GRID VIEWER FUNCTIONS -->
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/gui/reason/html/js/file_open.js
--- /dev/null
+++ b/yt/gui/reason/html/js/file_open.js
@@ -0,0 +1,146 @@
+/**********************************************************************
+A file opener
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2012 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+
+function open_file() {
+ var filestore = new Ext.data.ArrayStore({
+ fields: ['filename',
+ {name:'size', type:'float'},
+ 'type'
+ ]
+ });
+ var cur_dir;
+ function fillStore(f, a){
+ if(a.status == false){
+ Ext.Msg.alert("Error", "Something has gone wrong.");
+ return;
+ }
+ if(a.result['change'] == false) {
+ win.get("current_file").setValue(cur_dir);
+ return;
+ }
+ filestore.removeAll();
+ var rec = [];
+ filestore.loadData(a.result['objs']);
+ cur_dir = a.result['cur_dir'];
+ win.get("current_file").setValue(cur_dir);
+ }
+
+ var win = new Ext.Window({
+ layout:'vbox',
+ layoutConfig: {
+ align: 'stretch',
+ pack: 'start',
+ defaultMargins: "5px 5px 5px 5px",
+ },
+ width:540,
+ height:480,
+ modal:true,
+ resizable:true,
+ draggable:true,
+ title:'Open File',
+ items: [
+ { xtype: 'textfield',
+ id: 'current_file',
+ listeners: {
+ specialkey: function(f, e) {
+ if (e.getKey() != e.ENTER) { return; }
+ yt_rpc.ExtDirectREPL.file_listing(
+ {base_dir:f.getValue(), sub_dir:''}, fillStore);
+ }
+ }
+ }, {
+ xtype:'listview',
+ id: 'file_listing',
+ store: filestore ,
+ singleSelect:true,
emptyText: 'No files to display',
+ flex: 1.0,
+ columns: [
+ {
+ header: 'Type',
+ width: 0.1,
+ tpl: '<img src="images/file_dialog_{type}.png" width=16 height=16>',
+ dataIndex: 'type'
+ },{
+ header: 'Filename',
+ width: .75,
+ dataIndex: 'filename'
+ },{
+ header: 'Size',
+ dataIndex: 'size',
+ tpl: '{size:fileSize}',
+ align: 'right',
+ cls: 'listview-filesize'
+ }],
+ listeners: {
+ dblclick: function(view, index, node, e) {
+ var fileRecord = filestore.getAt(index).data;
+ if (fileRecord.type == 'directory') {
+ yt_rpc.ExtDirectREPL.file_listing(
+ {base_dir:cur_dir, sub_dir:fileRecord.filename},
+ fillStore);
+ } else {
+ yt_rpc.ExtDirectREPL.load(
+ {base_dir:cur_dir, filename:fileRecord.filename},
+ handle_result);
+ win.destroy();
+ }
+ },
+ selectionchange: function(view, index, node, e) {
+ },
+ },
+ }, {
+ xtype: 'panel',
+ height: 40,
+ layout: 'hbox',
+ layoutConfig: {
+ align: 'stretch',
+ pack: 'start',
+ defaultMargins: "5px 5px 5px 5px",
+ },
+ items: [
+ { flex: 1.0, xtype: 'button', text: 'Cancel',
+ handler: function(b, e) { win.destroy(); } },
+ { flex: 1.0, xtype: 'button', text: 'Load',
+ handler: function(b, e) {
+ filename = "";
+ var fl = win.get("file_listing");
+ if (fl.getSelectionCount() == 1) {
+ filename = fl.getSelectedRecords()[0].data.filename;
+ }
+ yt_rpc.ExtDirectREPL.load(
+ {base_dir:cur_dir, filename:filename},
+ handle_result);
+ win.destroy();
+ }
+ },
+ ],
+ },
+ ],
+ });
+ yt_rpc.ExtDirectREPL.file_listing(
+ {base_dir:"", sub_dir:""}, fillStore);
+ win.show(this);
+}
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/gui/reason/html/js/menu_items.js
--- a/yt/gui/reason/html/js/menu_items.js
+++ b/yt/gui/reason/html/js/menu_items.js
@@ -33,7 +33,11 @@
text: 'Menu',
id: 'main_menu',
menu: [
- {xtype:'menuitem', text: 'Open', disabled: true},
+ {xtype:'menuitem', text: 'Open File',
+ handler: function(b,e) {
+ open_file()
+ },
+ },
{xtype:'menuitem', text: 'Open Directory', disabled: true},
{xtype: 'menuseparator'},
{xtype:'menuitem', text: 'Save Script',
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -67,7 +67,8 @@
add_quantity, quantity_info
from yt.frontends.enzo.api import \
- EnzoStaticOutput, EnzoStaticOutputInMemory, EnzoFieldInfo, \
+ EnzoStaticOutput, EnzoStaticOutputInMemory, \
+ EnzoSimulation, EnzoFieldInfo, \
add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
from yt.frontends.castro.api import \
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -124,6 +124,9 @@
#sys.argv = [a for a in unparsed_args]
if opts.parallel:
parallel_capable = turn_on_parallelism()
+ subparsers = parser.add_subparsers(title="subcommands",
+ dest='subcommands',
+ description="Valid subcommands",)
else:
subparsers = parser.add_subparsers(title="subcommands",
dest='subcommands',
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -287,6 +287,7 @@
uniquedims[i] = <np.float64_t *> \
alloca(2*n_grids * sizeof(np.float64_t))
my_max = 0
+ best_dim = -1
for dim in range(3):
n_unique = 0
uniques = uniquedims[dim]
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1012,7 +1012,7 @@
# This node belongs to someone else, move along
current_node, previous_node = self.step_depth(current_node, previous_node)
continue
-
+
# If we are down to one grid, we are either in it or the parent grid
if len(current_node.grids) == 1:
thisgrid = current_node.grids[0]
@@ -1031,25 +1031,27 @@
if len(children) > 0:
current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
current_node.parent_grid = thisgrid
- # print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
+ #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
del children
continue
# Else make a leaf node (brick container)
+ #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
- # print 'My single grid covers the rest of the volume, and I have no children'
current_node, previous_node = self.step_depth(current_node, previous_node)
continue
# If we don't have any grids, this volume belongs to the parent
if len(current_node.grids) == 0:
+ #print 'This volume does not have a child grid, so it belongs to my parent!'
set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
- # print 'This volume does not have a child grid, so it belongs to my parent!'
current_node, previous_node = self.step_depth(current_node, previous_node)
continue
# If we've made it this far, time to build a dividing node
- self._build_dividing_node(current_node)
+ # print 'Building dividing node'
+ # Continue if building failed
+ if self._build_dividing_node(current_node): continue
# Step to the next node in a depth-first traversal.
current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1058,10 +1060,10 @@
'''
Given a node, finds all the choices for the next dividing plane.
'''
- data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
# For some reason doing dim 0 separately is slightly faster.
# This could be rewritten to all be in the loop below.
+ data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
best_dim, split, less_ids, greater_ids = \
kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1071,8 +1073,19 @@
Makes the current node a dividing node, and initializes the
left and right children.
'''
-
- data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
+
+ data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+ best_dim, split, less_ids, greater_ids = \
+ kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+
+ del data
+
+ # Here we break out if no unique grids were found. In this case, there
+ # are likely overlapping grids, and we assume that the first grid takes
+ # precedence. This is fragile.
+ if best_dim == -1:
+ current_node.grids = [current_node.grids[0]]
+ return 1
current_node.split_ax = best_dim
current_node.split_pos = split
@@ -1080,7 +1093,7 @@
#greater_ids0 = (split < data[:,1])
#assert(na.all(less_ids0 == less_ids))
#assert(na.all(greater_ids0 == greater_ids))
-
+
current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
parent=current_node,
parent_grid=current_node.parent_grid,
@@ -1099,7 +1112,9 @@
# build to work. The other deletions are just to save memory.
del current_node.grids, current_node.parent_grid, current_node.brick,\
current_node.li, current_node.ri, current_node.dims
-
+
+ return 0
+
def traverse(self, back_center, front_center, image):
r"""Traverses the kd-Tree, casting the partitioned grids from back to
front.
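The kd-tree changes above pair the best_dim = -1 initialization in the Cython helper with a status code from _build_dividing_node: when overlapping grids leave no unique edge to split on, the node collapses to its first grid and the build loop skips descending. A simplified sketch with hypothetical helper names:

    def choose_split(node):
        # Hypothetical stand-in for kdtree_get_choices: returns (-1, None)
        # when no unique grid boundary exists along any dimension.
        return -1, None

    def build_dividing_node(node):
        best_dim, split = choose_split(node)
        if best_dim == -1:
            node.grids = [node.grids[0]]   # overlapping grids: keep the first
            return 1                       # non-zero status: do not descend
        # ... otherwise create left/right children along best_dim at split ...
        return 0

    # In the build loop, a failed split just moves on:
    #     if self._build_dividing_node(current_node): continue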
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -82,11 +82,15 @@
if cls.npfs > 1:
self(args)
else:
- if len(getattr(args, "pf", [])) > 1:
+ pf_args = getattr(args, "pf", [])
+ if len(pf_args) > 1:
pfs = args.pf
for pf in pfs:
args.pf = pf
self(args)
+ elif len(pf_args) == 0:
+ pfs = []
+ self(args)
else:
args.pf = getattr(args, 'pf', [None])[0]
self(args)
@@ -105,6 +109,8 @@
_common_options = dict(
pf = dict(short="pf", action=GetParameterFiles,
nargs="+", help="Parameter files to run on"),
+ opf = dict(action=GetParameterFiles, dest="pf",
+ nargs="*", help="(Optional) Parameter files to run on"),
axis = dict(short="-a", long="--axis",
action="store", type=int,
dest="axis", default=4,
@@ -1269,7 +1275,8 @@
help="At startup, find all *.hierarchy files in the CWD"),
dict(short="-d", long="--debug", action="store_true",
default = False, dest="debug",
- help="Add a debugging mode for cell execution")
+ help="Add a debugging mode for cell execution"),
+ "opf"
)
description = \
"""
@@ -1315,12 +1322,12 @@
from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
hr = ExtDirectREPL(base_extjs_path)
hr.debug = PayloadHandler.debug = args.debug
+ command_line = ["pfs = []"]
if args.find:
# We just have to find them and store references to them.
- command_line = ["pfs = []"]
for fn in sorted(glob.glob("*/*.hierarchy")):
command_line.append("pfs.append(load('%s'))" % fn[:-10])
- hr.execute("\n".join(command_line))
+ hr.execute("\n".join(command_line))
bottle.debug()
uuid_serve_functions(open_browser=args.open_browser,
port=int(args.port), repl=hr)
@@ -1430,7 +1437,7 @@
if 'upload' in rv and 'links' in rv['upload']:
print
print "Image successfully uploaded! You can find it at:"
- print " %s" % (rv['upload']['links']['imgur_page'])
+ print " %s" % (rv['upload']['links']['original'])
print
print "If you'd like to delete it, visit this page:"
print " %s" % (rv['upload']['links']['delete_page'])
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -58,6 +58,7 @@
setattr(self, attr, getattr(obj, attr, None))
if hasattr(obj, "pf"):
self.output_hash = obj.pf._hash()
+ self._pf_mrep = obj.pf._mrep
def __init__(self, obj):
self._update_attrs(obj, self._attr_list)
@@ -93,6 +94,8 @@
api_key = ytcfg.get("yt","hub_api_key")
url = ytcfg.get("yt","hub_url")
metadata, (final_name, chunks) = self._generate_post()
+ if hasattr(self, "_pf_mrep"):
+ self._pf_mrep.upload()
for i in metadata:
if isinstance(metadata[i], na.ndarray):
metadata[i] = metadata[i].tolist()
@@ -110,7 +113,15 @@
'api_key' : api_key})
request = urllib2.Request(url, datagen, headers)
# Actually do the request, and get the response
- rv = urllib2.urlopen(request).read()
+ try:
+ rv = urllib2.urlopen(request).read()
+ except urllib2.HTTPError as ex:
+ if ex.code == 401:
+ mylog.error("You must create an API key before uploading.")
+ mylog.error("https://data.yt-project.org/getting_started.html")
+ return
+ else:
+ raise ex
uploader_info = json.loads(rv)
new_url = url + "/handler/%s" % uploader_info['handler_uuid']
for i, (cn, cv) in enumerate(chunks):
@@ -125,8 +136,9 @@
datagen, headers = multipart_encode({'status' : 'FINAL'})
request = urllib2.Request(new_url, datagen, headers)
- rv = urllib2.urlopen(request).read()
- return json.loads(rv)
+ rv = json.loads(urllib2.urlopen(request).read())
+ mylog.info("Upload succeeded! View here: %s", rv['url'])
+ return rv
class FilteredRepresentation(MinimalRepresentation):
def _generate_post(self):
@@ -180,3 +192,25 @@
chunks = [(fn, d) for fn, d in self.images]
return (metadata, ('images', chunks))
+_hub_categories = ("News", "Documents", "Simulation Management",
+ "Data Management", "Analysis and Visualization",
+ "Paper Repositories", "Astrophysical Utilities",
+ "yt Scripts")
+
+class MinimalProjectDescription(MinimalRepresentation):
+ type = "project"
+ _attr_list = ("title", "url", "description", "category", "image_url")
+
+ def __init__(self, title, url, description,
+ category, image_url = ""):
+ assert(category in _hub_categories)
+ self.title = title
+ self.url = url
+ self.description = description
+ self.category = category
+ self.image_url = image_url
+
+ def _generate_post(self):
+ metadata = self._attrs
+ chunks = []
+ return (metadata, ("chunks", []))
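The upload path above now fails gracefully when the Hub rejects an unauthenticated request. The pattern is ordinary urllib2 error handling; a self-contained sketch with a hypothetical URL:

    import urllib2

    request = urllib2.Request("https://hub.example.org/upload")  # hypothetical
    try:
        rv = urllib2.urlopen(request).read()
    except urllib2.HTTPError as ex:
        if ex.code == 401:
            print "You must create an API key before uploading."
            print "https://data.yt-project.org/getting_started.html"
        else:
            raise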
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -288,7 +288,7 @@
if size is None:
size = len(self.available_ranks)
if len(self.available_ranks) < size:
- print 'Not enough resources available'
+ print 'Not enough resources available', size, self.available_ranks
raise RuntimeError
if ranks is None:
ranks = [self.available_ranks.pop(0) for i in range(size)]
@@ -315,12 +315,32 @@
for wg in self.workgroups:
self.free_workgroup(wg)
+ @classmethod
+ def from_sizes(cls, sizes):
+ sizes = ensure_list(sizes)
+ pool = cls()
+ rank = pool.comm.rank
+ for i,size in enumerate(sizes):
+ if iterable(size):
+ size, name = size
+ else:
+ name = "workgroup_%02i" % i
+ pool.add_workgroup(size, name = name)
+ for wg in pool.workgroups:
+ if rank in wg.ranks: workgroup = wg
+ return pool, workgroup
+
+ def __getitem__(self, key):
+ for wg in self.workgroups:
+ if wg.name == key: return wg
+ raise KeyError(key)
+
class ResultsStorage(object):
slots = ['result', 'result_id']
result = None
result_id = None
-def parallel_objects(objects, njobs, storage = None):
+def parallel_objects(objects, njobs = 0, storage = None, barrier = True):
if not parallel_capable:
njobs = 1
mylog.warn("parallel_objects() is being used when parallel_capable is false. The loop is not being run in parallel. This may not be what was expected.")
@@ -362,6 +382,8 @@
new_storage = my_communicator.par_combine_object(
to_share, datatype = 'dict', op = 'join')
storage.update(new_storage)
+ if barrier:
+ my_communicator.barrier()
class CommunicationSystem(object):
communicators = []
@@ -395,6 +417,9 @@
self.communicators.pop()
self._update_parallel_state(self.communicators[-1])
+def _reconstruct_communicator():
+ return communication_system.communicators[-1]
+
class Communicator(object):
comm = None
_grids = None
@@ -409,6 +434,11 @@
functions for analyzing something in parallel.
"""
+ def __reduce__(self):
+ # We don't try to reconstruct any of the properties of the communicator
+ # or the processors. In general, we don't want to.
+ return (_reconstruct_communicator, ())
+
def barrier(self):
if not self._distributed: return
mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
@@ -507,24 +537,24 @@
raise NotImplementedError
@parallel_passthrough
- def mpi_bcast(self, data):
+ def mpi_bcast(self, data, root = 0):
# The second check below makes sure that we know how to communicate
# this type of array. Otherwise, we'll pickle it.
if isinstance(data, na.ndarray) and \
get_mpi_type(data.dtype) is not None:
- if self.comm.rank == 0:
+ if self.comm.rank == root:
info = (data.shape, data.dtype)
else:
info = ()
- info = self.comm.bcast(info, root=0)
- if self.comm.rank != 0:
+ info = self.comm.bcast(info, root=root)
+ if self.comm.rank != root:
data = na.empty(info[0], dtype=info[1])
mpi_type = get_mpi_type(info[1])
- self.comm.Bcast([data, mpi_type], root = 0)
+ self.comm.Bcast([data, mpi_type], root = root)
return data
else:
# Use pickled methods.
- data = self.comm.bcast(data, root = 0)
+ data = self.comm.bcast(data, root = root)
return data
def preload(self, grids, fields, io_handler):
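Two conveniences land in this hunk: pools of workgroups can be built declaratively from a list of sizes or (size, name) tuples and then indexed by name, and mpi_bcast gains a root argument. A sketch, assuming the enclosing class is yt's ProcessorPool and the script runs under MPI with at least six ranks:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    # An anonymous two-rank group plus a named four-rank group:
    pool, my_workgroup = ProcessorPool.from_sizes([2, (4, "render")])
    render_wg = pool["render"]          # __getitem__ lookup by name
    if my_workgroup.name == "render":
        pass                            # rendering-side work goes here
    # Broadcasting from a rank other than 0 is now possible, e.g.:
    #     data = comm.mpi_bcast(data, root=3)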
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -24,7 +24,10 @@
"""
from matplotlib import figure
+import shutil
+import tempfile
import numpy as na
+import os
from yt.funcs import *
@@ -47,6 +50,8 @@
PhasePlot, \
LineQueryPlot, \
ScatterPlot
+from yt.utilities.minimal_representation import \
+ MinimalImageCollectionData
# No better place to put this
def concatenate_pdfs(output_fn, input_fns):
@@ -60,6 +65,18 @@
def _fix_axis(axis):
return inv_axis_names.get(axis, axis)
+
+class ImageCollection(object):
+ def __init__(self, pf, name):
+ self.pf = pf
+ self.name = name
+ self.images = []
+ self.image_metadata = []
+
+ def add_image(self, fn, descr):
+ self.image_metadata.append(descr)
+ self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+
class PlotCollection(object):
__id_counter = 0
def __init__(self, pf, center=None):
@@ -117,6 +134,19 @@
for p in self.plots:
yield p
+ @property
+ def _mrep(self):
+ ic = ImageCollection(self.pf, "Plot Collection with center %s" % self.c)
+ dd = tempfile.mkdtemp()
+ fns = self.save(os.path.join(dd, "temp"))
+ for fn, p in zip(fns, self.plots):
+ ic.add_image(fn, p._pretty_name())
+ shutil.rmtree(dd)
+ return MinimalImageCollectionData(ic)
+
+ def hub_upload(self):
+ self._mrep.upload()
+
def save(self, basename=None, format="png", override=False, force_save=False):
r"""Save out all the plots hanging off this plot collection, using
generated names.
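With the _mrep property above, a PlotCollection can push its rendered images to the yt Hub in one call. A minimal sketch, assuming a hub_api_key is set in the yt configuration; the dataset path is hypothetical:

    from yt.mods import *

    pf = load("DD0010/data0010")        # hypothetical dataset
    pc = PlotCollection(pf, center=[0.5, 0.5, 0.5])
    pc.add_slice("Density", 0)
    pc.hub_upload()   # saves to a temp dir, wraps the images as a
                      # MinimalImageCollectionData, and uploads them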
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -295,6 +295,17 @@
if not hasattr(c, '_type_name'): continue
self.modify[c._type_name] = c
+ def _pretty_name(self):
+ width = self.im.get("Width", "NA")
+ unit = self.im.get("Unit", "NA")
+ field = self.axis_names.get("Z", self.axis_names.get("Field1"))
+ if hasattr(self.data, "_data_source"):
+ data = self.data._data_source
+ else:
+ data = self.data
+ return "%s: %s (%s %s) %s" % (self._type_name,
+ field, width, unit, data)
+
class VMPlot(RavenPlot):
_antialias = True
_period = (0.0, 0.0)
@@ -493,6 +504,7 @@
if self.colorbar != None:
self.colorbar.set_label(str(data_label), **self.label_kws)
+
class FixedResolutionPlot(VMPlot):
# This is a great argument in favor of changing the name
diff -r 80d3c224557cf42dea9a8f79691f809231758aed -r 165ac54e7602bf750d5c8085700b1aefda06dd31 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1291,8 +1291,13 @@
fields = [field]
if weight is not None:
# This is a temporary field, which we will remove at the end.
+ def _wf(f1, w1):
+ def WeightField(field, data):
+ return data[f1].astype("float64") * \
+ data[w1].astype("float64")
+ return WeightField
pf.field_info.add_field("temp_weightfield",
- function=lambda a,b:b[field]*b[weight])
+ function=_wf(field, weight))
fields = ["temp_weightfield", weight]
tf = ProjectionTransferFunction(n_fields = 2)
tf = ProjectionTransferFunction(n_fields = len(fields))
@@ -1313,4 +1318,7 @@
else:
image /= vals[:,:,1]
pf.field_info.pop("temp_weightfield")
+ for g in pf.h.grids:
+ if "temp_weightfield" in g.keys():
+ del g["temp_weightfield"]
return image
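The _wf factory above fixes a classic late-binding bug: a lambda closes over the names field and weight, not their current values, so the temporary field function could evaluate with whatever those variables hold when the field is finally computed. In miniature:

    # Late binding: both closures see the final value of x.
    fns = [lambda: x for x in (1, 2)]
    print [f() for f in fns]        # prints [2, 2]

    # A factory captures each value when the inner function is created.
    def make(v):
        def f():
            return v
        return f
    fns = [make(x) for x in (1, 2)]
    print [f() for f in fns]        # prints [1, 2]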
https://bitbucket.org/yt_analysis/yt/changeset/b958cd57b50c/
changeset: b958cd57b50c
branch: yt
user: jwise77
date: 2012-06-27 17:11:21
summary: Allow passing single particles to spectrum_builder, and check that the
stellar age is positive, since there can be small discrepancies
between pf.current_time and ComputeTimeFromRedshift.
affected #: 1 file
diff -r 165ac54e7602bf750d5c8085700b1aefda06dd31 -r b958cd57b50c64a9c76fd53554e6366b88e53e6c yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -342,9 +342,18 @@
# Initialize values
self.final_spec = na.zeros(self.wavelength.size, dtype='float64')
self._data_source = data_source
- self.star_mass = star_mass
- self.star_creation_time = star_creation_time
- self.star_metal = star_metallicity_fraction
+ if iterable(star_mass):
+ self.star_mass = star_mass
+ else:
+ self.star_mass = [star_mass]
+ if iterable(star_creation_time):
+ self.star_creation_time = star_creation_time
+ else:
+ self.star_creation_time = [star_creation_time]
+ if iterable(star_metallicity_fraction):
+ self.star_metal = star_metallicity_fraction
+ else:
+ self.star_metal = [star_metallicity_fraction]
self.min_age = min_age
# Check to make sure we have the right set of data.
@@ -381,8 +390,9 @@
self.star_metal /= Zsun
# Age of star in years.
dt = (self.time_now - self.star_creation_time * self._pf['Time']) / YEAR
+ dt = na.maximum(dt, 0.0)
# Remove young stars
- sub = dt > self.min_age
+ sub = dt >= self.min_age
self.star_metal = self.star_metal[sub]
dt = dt[sub]
self.star_creation_time = self.star_creation_time[sub]
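The repeated if/else above normalizes scalar-or-sequence arguments so a single star particle can be passed. The same pattern factored into a helper, with a hypothetical name:

    from yt.funcs import iterable

    def _listify(val):
        # Pass sequences through untouched; wrap a lone scalar in a list.
        return val if iterable(val) else [val]

    star_mass = _listify(1.0e5)          # a single star becomes [100000.0]
    star_mass = _listify([1e5, 2e5])     # a sequence is returned unchanged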
https://bitbucket.org/yt_analysis/yt/changeset/981831a1c241/
changeset: 981831a1c241
branch: yt
user: jwise77
date: 2012-06-27 17:11:39
summary: Merging
affected #: 287 files
Diff too large to display.
https://bitbucket.org/yt_analysis/yt/changeset/13b0fb9c78f6/
changeset: 13b0fb9c78f6
branch: yt
user: jwise77
date: 2012-06-27 17:16:37
summary: Fixing unit conversion for Enzo radiation fields.
affected #: 1 file
diff -r 981831a1c241c748431cf7dad72c654f386e5fb6 -r 13b0fb9c78f6018216deae3696e119736457521a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -251,7 +251,7 @@
f.take_log=False
def _convertkph(data):
- return data.convert("Time")
+ return 1.0/data.convert("Time")
for field in ["HI_kph", "HeI_kph", "HeII_kph", "H2I_kdiss"]:
f = KnownEnzoFields[field]
f._convert_function = _convertkph
https://bitbucket.org/yt_analysis/yt/changeset/7ec625f67ddd/
changeset: 7ec625f67ddd
branch: yt
user: jwise77
date: 2012-06-27 17:20:39
summary: Adding some information about Enzo's PhotoGamma field.
affected #: 1 file
diff -r 13b0fb9c78f6018216deae3696e119736457521a -r 7ec625f67ddd097914b07d1ad82939ef698b3714 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -250,14 +250,18 @@
f._units=r"\mathrm{Gau\ss}"
f.take_log=False
-def _convertkph(data):
+def _convertRadiation(data):
return 1.0/data.convert("Time")
for field in ["HI_kph", "HeI_kph", "HeII_kph", "H2I_kdiss"]:
f = KnownEnzoFields[field]
- f._convert_function = _convertkph
+ f._convert_function = _convertRadiation
f._units=r"\rm{s}^{-1}"
f.take_log=True
+KnownEnzoFields["PhotoGamma"]._convert_function = _convertRadiation
+KnownEnzoFields["PhotoGamma"]._units = r"\rm{eV} \rm{s}^{-1}"
+KnownEnzoFields["PhotoGamma"].take_log = True
+
def _convertRadiationAccel(data):
return data.convert("cm") / data.convert("Time")**2
for dim in range(1,4):
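With these conversion fixes in place, the radiation fields come out in physical units: the kph/kdiss rates scale as 1/TimeUnits (s^-1), RadAccel as cm/TimeUnits^2, and PhotoGamma as eV/s. A quick check on a loaded dataset; the path is hypothetical:

    from yt.mods import *

    pf = load("DD0010/data0010")    # hypothetical radiative transfer run
    dd = pf.h.all_data()
    print dd["HI_kph"].max()        # photoionization rate, s^-1
    print dd["PhotoGamma"].max()    # photoheating rate, eV / s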
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.