[yt-svn] commit/yt: 3 new changesets
Bitbucket
commits-noreply at bitbucket.org
Wed Jan 9 11:23:21 PST 2013
3 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/d9e6695b25c0/
changeset: d9e6695b25c0
branch: yt
user: brittonsmith
date: 2013-01-09 20:08:34
summary: Allowing the finding of outputs in simulation time series to work in
parallel by dividing up the list of possible pfs with parallel_objects.
affected #: 1 file
diff -r 554d144d9d248c6f70d8c665a5963aa39b2d6bb3 -r d9e6695b25c0400e4cf9e1bddebe2912a1a2b5b7 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -40,6 +40,8 @@
InvalidSimulationTimeSeries, \
MissingParameter, \
NoStoppingCondition
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ parallel_objects
from yt.convenience import \
load
@@ -528,7 +530,7 @@
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
self.all_outputs.sort(key=lambda obj: obj['time'])
- mylog.info("Located %d total outputs." % len(self.all_outputs))
+ only_on_root(mylog.info, "Located %d total outputs." % len(self.all_outputs))
# manually set final time and redshift with last output
if self.all_outputs:
@@ -539,11 +541,12 @@
def _check_for_outputs(self, potential_outputs):
r"""Check a list of files to see if they are valid datasets."""
- mylog.info("Checking %d potential outputs." %
- len(potential_outputs))
+ only_on_root(mylog.info, "Checking %d potential outputs." %
+ len(potential_outputs))
- my_outputs = []
- for output in potential_outputs:
+ my_outputs = {}
+ for my_storage, output in parallel_objects(potential_outputs,
+ storage=my_outputs):
if self.parameters['DataDumpDir'] in output:
dir_key = self.parameters['DataDumpDir']
output_key = self.parameters['DataDumpName']
@@ -558,12 +561,14 @@
try:
pf = load(filename)
if pf is not None:
- my_outputs.append({'filename': filename,
- 'time': pf.current_time})
+ my_storage.result = {'filename': filename,
+ 'time': pf.current_time}
if pf.cosmological_simulation:
- my_outputs[-1]['redshift'] = pf.current_redshift
+ my_storage.result['redshift'] = pf.current_redshift
except YTOutputNotIdentified:
mylog.error('Failed to load %s' % filename)
+ my_outputs = [my_output for my_output in my_outputs.values() \
+ if my_output is not None]
return my_outputs
https://bitbucket.org/yt_analysis/yt/commits/37455dc141c3/
changeset: 37455dc141c3
branch: yt
user: brittonsmith
date: 2013-01-09 20:21:12
summary: Changing mylog calls to not use % for string formatting.
affected #: 1 file
diff -r d9e6695b25c0400e4cf9e1bddebe2912a1a2b5b7 -r 37455dc141c31f7a3c3f3042e9f3184e0269f636 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -198,7 +198,7 @@
if not my_all_outputs:
TimeSeriesData.__init__(self, outputs=[], parallel=parallel)
- mylog.info("%d outputs loaded into time series." % 0)
+ mylog.info("0 outputs loaded into time series.")
return
# Apply selection criteria to the set.
@@ -253,7 +253,7 @@
init_outputs.append(output['filename'])
TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
- mylog.info("%d outputs loaded into time series." % len(init_outputs))
+ mylog.info("%d outputs loaded into time series.", len(init_outputs))
def _parse_parameter_file(self):
"""
@@ -417,7 +417,7 @@
elif self.parameters['dtDataDump'] > 0 and \
self.parameters['CycleSkipDataDump'] > 0:
- mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
+ mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set.", self.parameter_filename )
mylog.info(" Unable to calculate datasets. Attempting to search in the current directory")
self._find_outputs()
@@ -471,7 +471,7 @@
'StopCycle' in self.parameters):
raise NoStoppingCondition(self.parameter_filename)
if self.final_time is None:
- mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+ mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.',
self.parameter_filename)
def _set_parameter_defaults(self):
@@ -530,7 +530,7 @@
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
self.all_outputs.sort(key=lambda obj: obj['time'])
- only_on_root(mylog.info, "Located %d total outputs." % len(self.all_outputs))
+ only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))
# manually set final time and redshift with last output
if self.all_outputs:
@@ -541,7 +541,7 @@
def _check_for_outputs(self, potential_outputs):
r"""Check a list of files to see if they are valid datasets."""
- only_on_root(mylog.info, "Checking %d potential outputs." %
+ only_on_root(mylog.info, "Checking %d potential outputs.",
len(potential_outputs))
my_outputs = {}
@@ -566,7 +566,7 @@
if pf.cosmological_simulation:
my_storage.result['redshift'] = pf.current_redshift
except YTOutputNotIdentified:
- mylog.error('Failed to load %s' % filename)
+ mylog.error('Failed to load %s', filename)
my_outputs = [my_output for my_output in my_outputs.values() \
if my_output is not None]
@@ -610,7 +610,7 @@
and outputs[0] not in my_outputs:
my_outputs.append(outputs[0])
else:
- mylog.error("No dataset added for %s = %f." % (key, value))
+ mylog.error("No dataset added for %s = %f.", key, value)
outputs.sort(key=lambda obj: obj['time'])
return my_outputs
@@ -677,7 +677,7 @@
r"""Write cosmology output parameters for a cosmology splice.
"""
- mylog.info("Writing redshift output list to %s." % filename)
+ mylog.info("Writing redshift output list to %s.", filename)
f = open(filename, 'w')
for q, output in enumerate(outputs):
z_string = "%%s[%%d] = %%.%df" % decimals
https://bitbucket.org/yt_analysis/yt/commits/7044e4e1dcce/
changeset: 7044e4e1dcce
branch: yt
user: MatthewTurk
date: 2013-01-09 20:23:17
summary: Merged in brittonsmith/yt (pull request #393: Making simulation time series work better in parallel.)
affected #: 1 file
diff -r f218705c9a85854d9d1dbd52c8f48fc13521be43 -r 7044e4e1dcce838c1c0423396bd20060a3fd6b52 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -40,6 +40,8 @@
InvalidSimulationTimeSeries, \
MissingParameter, \
NoStoppingCondition
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ parallel_objects
from yt.convenience import \
load
@@ -196,7 +198,7 @@
if not my_all_outputs:
TimeSeriesData.__init__(self, outputs=[], parallel=parallel)
- mylog.info("%d outputs loaded into time series." % 0)
+ mylog.info("0 outputs loaded into time series.")
return
# Apply selection criteria to the set.
@@ -251,7 +253,7 @@
init_outputs.append(output['filename'])
TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
- mylog.info("%d outputs loaded into time series." % len(init_outputs))
+ mylog.info("%d outputs loaded into time series.", len(init_outputs))
def _parse_parameter_file(self):
"""
@@ -415,7 +417,7 @@
elif self.parameters['dtDataDump'] > 0 and \
self.parameters['CycleSkipDataDump'] > 0:
- mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
+ mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set.", self.parameter_filename )
mylog.info(" Unable to calculate datasets. Attempting to search in the current directory")
self._find_outputs()
@@ -469,7 +471,7 @@
'StopCycle' in self.parameters):
raise NoStoppingCondition(self.parameter_filename)
if self.final_time is None:
- mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+ mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.',
self.parameter_filename)
def _set_parameter_defaults(self):
@@ -528,7 +530,7 @@
self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
self.all_outputs.sort(key=lambda obj: obj['time'])
- mylog.info("Located %d total outputs." % len(self.all_outputs))
+ only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))
# manually set final time and redshift with last output
if self.all_outputs:
@@ -539,11 +541,12 @@
def _check_for_outputs(self, potential_outputs):
r"""Check a list of files to see if they are valid datasets."""
- mylog.info("Checking %d potential outputs." %
- len(potential_outputs))
+ only_on_root(mylog.info, "Checking %d potential outputs.",
+ len(potential_outputs))
- my_outputs = []
- for output in potential_outputs:
+ my_outputs = {}
+ for my_storage, output in parallel_objects(potential_outputs,
+ storage=my_outputs):
if self.parameters['DataDumpDir'] in output:
dir_key = self.parameters['DataDumpDir']
output_key = self.parameters['DataDumpName']
@@ -558,12 +561,14 @@
try:
pf = load(filename)
if pf is not None:
- my_outputs.append({'filename': filename,
- 'time': pf.current_time})
+ my_storage.result = {'filename': filename,
+ 'time': pf.current_time}
if pf.cosmological_simulation:
- my_outputs[-1]['redshift'] = pf.current_redshift
+ my_storage.result['redshift'] = pf.current_redshift
except YTOutputNotIdentified:
- mylog.error('Failed to load %s' % filename)
+ mylog.error('Failed to load %s', filename)
+ my_outputs = [my_output for my_output in my_outputs.values() \
+ if my_output is not None]
return my_outputs
@@ -605,7 +610,7 @@
and outputs[0] not in my_outputs:
my_outputs.append(outputs[0])
else:
- mylog.error("No dataset added for %s = %f." % (key, value))
+ mylog.error("No dataset added for %s = %f.", key, value)
outputs.sort(key=lambda obj: obj['time'])
return my_outputs
@@ -672,7 +677,7 @@
r"""Write cosmology output parameters for a cosmology splice.
"""
- mylog.info("Writing redshift output list to %s." % filename)
+ mylog.info("Writing redshift output list to %s.", filename)
f = open(filename, 'w')
for q, output in enumerate(outputs):
z_string = "%%s[%%d] = %%.%df" % decimals
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
it because you have the commit-notification service enabled and this
email is addressed to you.
More information about the yt-svn
mailing list