[yt-svn] commit/yt: 7 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Fri Nov 20 11:42:16 PST 2015
7 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/b197890829d3/
Changeset: b197890829d3
Branch: yt
User: chummels
Date: 2015-11-17 06:57:21+00:00
Summary: Not requiring a user to save a spectrum to disk while generating it.
Affected #: 1 file
diff -r 90f900be7a36433fdd48941cae4bc91066ff5c76 -r b197890829d3362f798526a9645fc840e21a7fa2 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -201,7 +201,9 @@
self.flux_field = np.exp(-self.tau_field)
- if output_file.endswith('.h5'):
+ if output_file is None:
+ pass
+ elif output_file.endswith('.h5'):
self._write_spectrum_hdf5(output_file)
elif output_file.endswith('.fits'):
self._write_spectrum_fits(output_file)
https://bitbucket.org/yt_analysis/yt/commits/644f20d47dce/
Changeset: 644f20d47dce
Branch: yt
User: chummels
Date: 2015-11-17 22:43:59+00:00
Summary: Making default behavior not save an hdf5 file to disk.
Affected #: 1 file
diff -r b197890829d3362f798526a9645fc840e21a7fa2 -r 644f20d47dce7b9dbf3e4321f31f66cc449ddcfc yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -114,7 +114,7 @@
'normalization': normalization,
'index': index})
- def make_spectrum(self, input_file, output_file="spectrum.h5",
+ def make_spectrum(self, input_file, output_file=None,
line_list_file="lines.txt",
use_peculiar_velocity=True,
subgrid_resolution=10, njobs="auto"):
@@ -127,10 +127,10 @@
input_file : string or dataset
path to input ray data or a loaded ray dataset
output_file : optional, string
- path for output file. File formats are chosen based on the
- filename extension. ``.h5`` for hdf5, ``.fits`` for fits,
- and everything else is ASCII.
- Default: "spectrum.h5"
+ File containing the wavelength, flux, and optical depth fields.
+ File formats are chosen based on the filename extension.
+ ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+ Default: None
line_list_file : optional, string
path to file in which the list of all deposited lines
will be saved. If set to None, the line list will not
https://bitbucket.org/yt_analysis/yt/commits/fed0e05e7a59/
Changeset: fed0e05e7a59
Branch: yt
User: chummels
Date: 2015-11-17 23:03:56+00:00
Summary: Changing line_list_file to output_absorber_file for more clarity and setting to None by default.
Affected #: 1 file
diff -r 644f20d47dce7b9dbf3e4321f31f66cc449ddcfc -r fed0e05e7a59ce09f5df83ea0c9e48880ff1757e yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -115,7 +115,7 @@
'index': index})
def make_spectrum(self, input_file, output_file=None,
- line_list_file="lines.txt",
+ output_absorbers_file=None,
use_peculiar_velocity=True,
subgrid_resolution=10, njobs="auto"):
"""
@@ -127,18 +127,16 @@
input_file : string or dataset
path to input ray data or a loaded ray dataset
output_file : optional, string
- File containing the wavelength, flux, and optical depth fields.
- File formats are chosen based on the filename extension.
+ Option to save a file containing the wavelength, flux, and optical
+ depth fields. File formats are chosen based on the filename extension.
``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
Default: None
- line_list_file : optional, string
- path to file in which the list of all deposited lines
- will be saved. If set to None, the line list will not
- be saved. Note, when running in parallel, combining the
- line lists can be quite slow, so it is recommended to set
- this to None when running in parallel unless you really
- want them.
- Default: "lines.txt"
+ output_absorbers_file : optional, string
+ Option to save a text file containing all of the absorbers and
+ corresponding wavelength and redshift information.
+ For parallel jobs, combining the lines lists can be slow so it
+ is recommended to set to None in such circumstances.
+ Default: None
use_peculiar_velocity : optional, bool
if True, include line of sight velocity for shifting lines.
Default: True
@@ -194,7 +192,7 @@
njobs = min(comm.size, len(self.line_list))
self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
- line_list_file is not None,
+ output_absorbers_file,
subgrid_resolution=subgrid_resolution,
njobs=njobs)
self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
@@ -209,8 +207,8 @@
self._write_spectrum_fits(output_file)
else:
self._write_spectrum_ascii(output_file)
- if line_list_file is not None:
- self._write_spectrum_line_list(line_list_file)
+ if output_absorbers_file is not None:
+ self._write_spectrum_line_list(output_absorbers_file)
del field_data
return (self.lambda_bins, self.flux_field)
https://bitbucket.org/yt_analysis/yt/commits/892cef9cba9c/
Changeset: 892cef9cba9c
Branch: yt
User: chummels
Date: 2015-11-18 00:36:18+00:00
Summary: Deprecating line_list_file keyword.
Affected #: 1 file
diff -r fed0e05e7a59ce09f5df83ea0c9e48880ff1757e -r 892cef9cba9cf807568ad94eacfb23c34e85cf12 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -115,7 +115,7 @@
'index': index})
def make_spectrum(self, input_file, output_file=None,
- output_absorbers_file=None,
+ line_list_file=None, output_absorbers_file=None,
use_peculiar_velocity=True,
subgrid_resolution=10, njobs="auto"):
"""
@@ -165,6 +165,10 @@
spectrum generation.
Default: "auto"
"""
+ if line_list_file is not None:
+ mylog.info("'line_list_file' keyword is deprecated. Please use " \
+ "'output_absorbers_file'.")
+ output_absorbers_file = line_list_file
input_fields = ['dl', 'redshift', 'temperature']
field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
https://bitbucket.org/yt_analysis/yt/commits/9c7d5c0251d6/
Changeset: 9c7d5c0251d6
Branch: yt
User: chummels
Date: 2015-11-18 01:02:21+00:00
Summary: Changing variable name "spectrum_line_list" to "absorbers_list" for clarity.
Affected #: 1 file
diff -r 892cef9cba9cf807568ad94eacfb23c34e85cf12 -r 9c7d5c0251d6fef4a67f13088bd6632ec605877f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -53,7 +53,7 @@
self.n_lambda = n_lambda
self.tau_field = None
self.flux_field = None
- self.spectrum_line_list = None
+ self.absorbers_list = None
self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
"angstrom")
self.bin_width = YTQuantity((lambda_max - lambda_min) /
@@ -189,7 +189,7 @@
field_data = input_ds.all_data()
self.tau_field = np.zeros(self.lambda_bins.size)
- self.spectrum_line_list = []
+ self.absorbers_list = []
if njobs == "auto":
comm = _get_comm(())
@@ -212,7 +212,7 @@
else:
self._write_spectrum_ascii(output_file)
if output_absorbers_file is not None:
- self._write_spectrum_line_list(output_absorbers_file)
+ self._write_absorbers_file(output_absorbers_file)
del field_data
return (self.lambda_bins, self.flux_field)
@@ -253,7 +253,8 @@
pbar.finish()
def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
- save_line_list, subgrid_resolution=10, njobs=-1):
+ output_absorbers_file, subgrid_resolution=10,
+ njobs=-1):
"""
Add the absorption lines to the spectrum.
"""
@@ -383,18 +384,23 @@
EW = np.array(EW)/self.bin_width.d
self.tau_field[left_index:right_index] += EW
- if save_line_list and line['label_threshold'] is not None and \
- cdens[i] >= line['label_threshold']:
+ # write out absorbers to file if the column density of
+ # an absorber is greater than the specified "label_threshold"
+ # of that absorption line
+ if output_absorbers_file and \
+ line['label_threshold'] is not None and \
+ cdens[i] >= line['label_threshold']:
+
if use_peculiar_velocity:
peculiar_velocity = vlos[i]
else:
peculiar_velocity = 0.0
- self.spectrum_line_list.append({'label': line['label'],
- 'wavelength': (lambda_0 + dlambda[i]),
- 'column_density': column_density[i],
- 'b_thermal': thermal_b[i],
- 'redshift': field_data['redshift'][i],
- 'v_pec': peculiar_velocity})
+ self.absorbers_list.append({'label': line['label'],
+ 'wavelength': (lambda_0 + dlambda[i]),
+ 'column_density': column_density[i],
+ 'b_thermal': thermal_b[i],
+ 'redshift': field_data['redshift'][i],
+ 'v_pec': peculiar_velocity})
pbar.update(i)
pbar.finish()
@@ -405,23 +411,23 @@
comm = _get_comm(())
self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
- if save_line_list:
- self.spectrum_line_list = comm.par_combine_object(
- self.spectrum_line_list, "cat", datatype="list")
+ if output_absorbers_file:
+ self.absorbers_list = comm.par_combine_object(
+ self.absorbers_list, "cat", datatype="list")
@parallel_root_only
- def _write_spectrum_line_list(self, filename):
+ def _write_absorbers_file(self, filename):
"""
- Write out list of spectral lines.
+ Write out ASCII list of all substantial absorbers found in spectrum
"""
if filename is None:
return
- mylog.info("Writing spectral line list: %s." % filename)
- self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
+ mylog.info("Writing absorber list: %s." % filename)
+ self.absorbers_list.sort(key=lambda obj: obj['wavelength'])
f = open(filename, 'w')
f.write('#%-14s %-14s %-12s %-12s %-12s %-12s\n' %
('Wavelength', 'Line', 'N [cm^-2]', 'b [km/s]', 'z', 'v_pec [km/s]'))
- for line in self.spectrum_line_list:
+ for line in self.absorbers_list:
f.write('%-14.6f %-14ls %e %e %e %e.\n' % (line['wavelength'], line['label'],
line['column_density'], line['b_thermal'],
line['redshift'], line['v_pec']))
https://bitbucket.org/yt_analysis/yt/commits/e90a01262f3a/
Changeset: e90a01262f3a
Branch: yt
User: chummels
Date: 2015-11-18 01:32:35+00:00
Summary: Including effective redshift in absorber list information.
Affected #: 1 file
diff -r 9c7d5c0251d6fef4a67f13088bd6632ec605877f -r e90a01262f3a74f17d90ed72a394cf67b0c025ad yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -400,6 +400,7 @@
'column_density': column_density[i],
'b_thermal': thermal_b[i],
'redshift': field_data['redshift'][i],
+ 'redshift_eff': field_data['redshift_eff'][i],
'v_pec': peculiar_velocity})
pbar.update(i)
pbar.finish()
@@ -425,12 +426,13 @@
mylog.info("Writing absorber list: %s." % filename)
self.absorbers_list.sort(key=lambda obj: obj['wavelength'])
f = open(filename, 'w')
- f.write('#%-14s %-14s %-12s %-12s %-12s %-12s\n' %
- ('Wavelength', 'Line', 'N [cm^-2]', 'b [km/s]', 'z', 'v_pec [km/s]'))
+ f.write('#%-14s %-14s %-12s %-14s %-15s %-9s %-10s\n' %
+ ('Wavelength', 'Line', 'N [cm^-2]', 'b [km/s]', 'z_cosmo', \
+ 'z_eff', 'v_pec [km/s]'))
for line in self.absorbers_list:
- f.write('%-14.6f %-14ls %e %e %e %e.\n' % (line['wavelength'], line['label'],
- line['column_density'], line['b_thermal'],
- line['redshift'], line['v_pec']))
+ f.write('%-14.6f %-14ls %e %e % e % e % e\n' % (line['wavelength'], \
+ line['label'], line['column_density'], line['b_thermal'], \
+ line['redshift'], line['redshift_eff'], line['v_pec']))
f.close()
@parallel_root_only
https://bitbucket.org/yt_analysis/yt/commits/27f664fe46a4/
Changeset: 27f664fe46a4
Branch: yt
User: chummels
Date: 2015-11-20 18:03:51+00:00
Summary: Merging with tip.
Affected #: 2 files
diff -r e90a01262f3a74f17d90ed72a394cf67b0c025ad -r 27f664fe46a4ec9361421167ffb7f963428ac912 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -51,11 +51,12 @@
def __init__(self, lambda_min, lambda_max, n_lambda):
self.n_lambda = n_lambda
+ # lambda, flux, and tau are wavelength, flux, and optical depth
+ self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max,
+ n_lambda), "angstrom")
self.tau_field = None
self.flux_field = None
self.absorbers_list = None
- self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
- "angstrom")
self.bin_width = YTQuantity((lambda_max - lambda_min) /
float(n_lambda - 1), "angstrom")
self.line_list = []
@@ -143,13 +144,14 @@
subgrid_resolution : optional, int
When a line is being added that is unresolved (ie its thermal
width is less than the spectral bin width), the voigt profile of
- the line is deposited into an array of virtual bins at higher
- resolution. The optical depth from these virtual bins is integrated
- and then added to the coarser spectral bin. The subgrid_resolution
- value determines the ratio between the thermal width and the
- bin width of the virtual bins. Increasing this value yields smaller
- virtual bins, which increases accuracy, but is more expensive.
- A value of 10 yields accuracy to the 4th significant digit.
+ the line is deposited into an array of virtual wavelength bins at
+ higher resolution. The optical depth from these virtual bins is
+ integrated and then added to the coarser spectral wavelength bin.
+ The subgrid_resolution value determines the ratio between the
+ thermal width and the bin width of the virtual bins. Increasing
+ this value yields smaller virtual bins, which increases accuracy,
+ but is more expensive. A value of 10 yields accuracy to the 4th
+ significant digit in tau.
Default: 10
njobs : optional, int or "auto"
the number of process groups into which the loop over
@@ -188,7 +190,7 @@
input_ds = input_file
field_data = input_ds.all_data()
- self.tau_field = np.zeros(self.lambda_bins.size)
+ self.tau_field = np.zeros(self.lambda_field.size)
self.absorbers_list = []
if njobs == "auto":
@@ -215,7 +217,7 @@
self._write_absorbers_file(output_absorbers_file)
del field_data
- return (self.lambda_bins, self.flux_field)
+ return (self.lambda_field, self.flux_field)
def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity):
"""
@@ -232,11 +234,11 @@
else:
delta_lambda = continuum['wavelength'] * field_data['redshift']
this_wavelength = delta_lambda + continuum['wavelength']
- right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+ right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
left_index = np.digitize((this_wavelength *
np.power((min_tau * continuum['normalization'] /
column_density), (1. / continuum['index']))),
- self.lambda_bins).clip(0, self.n_lambda)
+ self.lambda_field).clip(0, self.n_lambda)
valid_continuua = np.where(((column_density /
continuum['normalization']) > min_tau) &
@@ -245,7 +247,7 @@
(continuum['label'], continuum['wavelength']),
valid_continuua.size)
for i, lixel in enumerate(valid_continuua):
- line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+ line_tau = np.power((self.lambda_field[left_index[lixel]:right_index[lixel]] /
this_wavelength[lixel]), continuum['index']) * \
column_density[lixel] / continuum['normalization']
self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -277,8 +279,8 @@
delta_lambda = line['wavelength'] * field_data['redshift']
# lambda_obs is central wavelength of line after redshift
lambda_obs = line['wavelength'] + delta_lambda
- # bin index in lambda_bins of central wavelength of line after z
- center_index = np.digitize(lambda_obs, self.lambda_bins)
+ # bin index in lambda_field of central wavelength of line after z
+ center_index = np.digitize(lambda_obs, self.lambda_field)
# thermal broadening b parameter
thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
@@ -299,10 +301,11 @@
# When we actually deposit the voigt profile, sometimes we will
# have underresolved lines (ie lines with smaller widths than
- # the spectral bin size). Here, we create virtual bins small
- # enough in width to well resolve each line, deposit the voigt
- # profile into them, then numerically integrate their tau values
- # and sum them to redeposit them into the actual spectral bins.
+ # the spectral bin size). Here, we create virtual wavelength bins
+ # small enough in width to well resolve each line, deposit the
+ # voigt profile into them, then numerically integrate their tau
+ # values and sum them to redeposit them into the actual spectral
+ # bins.
# virtual bins (vbins) will be:
# 1) <= the bin_width; assures at least as good as spectral bins
@@ -360,7 +363,7 @@
my_n_vbins *= 2
# identify the extrema of the vbin_window so as to speed
- # up searching over the entire lambda_bins array
+ # up searching over the entire lambda_field array
bins_from_center = np.ceil((my_vbin_window_width/2.) / \
self.bin_width.d) + 1
left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
@@ -373,7 +376,7 @@
# this has the effect of assuring np.digitize will place
# the vbins in the closest bin center.
binned = np.digitize(vbins,
- self.lambda_bins[left_index:right_index] \
+ self.lambda_field[left_index:right_index] \
+ (0.5 * self.bin_width))
# numerically integrate the virtual bins to calculate a
@@ -443,8 +446,8 @@
mylog.info("Writing spectrum to ascii file: %s." % filename)
f = open(filename, 'w')
f.write("# wavelength[A] tau flux\n")
- for i in range(self.lambda_bins.size):
- f.write("%e %e %e\n" % (self.lambda_bins[i],
+ for i in range(self.lambda_field.size):
+ f.write("%e %e %e\n" % (self.lambda_field[i],
self.tau_field[i], self.flux_field[i]))
f.close()
@@ -454,7 +457,7 @@
Write spectrum to a fits file.
"""
mylog.info("Writing spectrum to fits file: %s." % filename)
- col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_bins)
+ col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field)
col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
cols = pyfits.ColDefs([col1, col2])
tbhdu = pyfits.BinTableHDU.from_columns(cols)
@@ -468,7 +471,7 @@
"""
mylog.info("Writing spectrum to hdf5 file: %s." % filename)
output = h5py.File(filename, 'w')
- output.create_dataset('wavelength', data=self.lambda_bins)
+ output.create_dataset('wavelength', data=self.lambda_field)
output.create_dataset('tau', data=self.tau_field)
output.create_dataset('flux', data=self.flux_field)
output.close()
diff -r e90a01262f3a74f17d90ed72a394cf67b0c025ad -r 27f664fe46a4ec9361421167ffb7f963428ac912 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -293,6 +293,11 @@
self.root_ncells)
self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
self.child_grid_offset = f.tell()
+ # lextra needs to be loaded as a string, but it's actually
+ # array values. So pop it off here, and then re-insert.
+ lextra = amr_header_vals.pop("lextra")
+ amr_header_vals['lextra'] = np.fromstring(
+ lextra, '>f4')
self.parameters.update(amr_header_vals)
amr_header_vals = None
# estimate the root level
@@ -314,6 +319,11 @@
n = particle_header_vals['Nspecies']
wspecies = np.fromfile(fh, dtype='>f', count=10)
lspecies = np.fromfile(fh, dtype='>i', count=10)
+ # extras needs to be loaded as a string, but it's actually
+ # array values. So pop it off here, and then re-insert.
+ extras = particle_header_vals.pop("extras")
+ particle_header_vals['extras'] = np.fromstring(
+ extras, '>f4')
self.parameters['wspecies'] = wspecies[:n]
self.parameters['lspecies'] = lspecies[:n]
for specie in range(n):
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list