[yt-svn] commit/yt: ngoldbaum: Merged in chummels/yt (pull request #1858)
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Mon Nov 16 13:27:28 PST 2015
1 new commit in yt:
https://bitbucket.org/yt_analysis/yt/commits/90f900be7a36/
Changeset: 90f900be7a36
Branch: yt
User: ngoldbaum
Date: 2015-11-16 21:27:16+00:00
Summary: Merged in chummels/yt (pull request #1858)
Enabling AbsorptionSpectrum to deposit unresolved spectral lines
Affected #: 2 files
diff -r 50f82a593a983ca8ee997a0650fc03eddfdbb30b -r 90f900be7a36433fdd48941cae4bc91066ff5c76 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -116,7 +116,8 @@
def make_spectrum(self, input_file, output_file="spectrum.h5",
line_list_file="lines.txt",
- use_peculiar_velocity=True, njobs="auto"):
+ use_peculiar_velocity=True,
+ subgrid_resolution=10, njobs="auto"):
"""
Make spectrum from ray data using the line list.
@@ -141,6 +142,17 @@
use_peculiar_velocity : optional, bool
if True, include line of sight velocity for shifting lines.
Default: True
+ subgrid_resolution : optional, int
+ When a line is being added that is unresolved (ie its thermal
+ width is less than the spectral bin width), the voigt profile of
+ the line is deposited into an array of virtual bins at higher
+ resolution. The optical depth from these virtual bins is integrated
+ and then added to the coarser spectral bin. The subgrid_resolution
+ value determines the ratio between the thermal width and the
+ bin width of the virtual bins. Increasing this value yields smaller
+ virtual bins, which increases accuracy, but is more expensive.
+ A value of 10 yields accuracy to the 4th significant digit.
+ Default: 10
njobs : optional, int or "auto"
the number of process groups into which the loop over
absorption lines will be divided. If set to -1, each
@@ -182,7 +194,9 @@
njobs = min(comm.size, len(self.line_list))
self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
- line_list_file is not None, njobs=njobs)
+ line_list_file is not None,
+ subgrid_resolution=subgrid_resolution,
+ njobs=njobs)
self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
self.flux_field = np.exp(-self.tau_field)
@@ -204,7 +218,7 @@
Add continuum features to the spectrum.
"""
# Only add continuum features down to tau of 1.e-4.
- tau_min = 1.e-4
+ min_tau = 1.e-3
for continuum in self.continuum_list:
column_density = field_data[continuum['field_name']] * field_data['dl']
@@ -216,12 +230,12 @@
this_wavelength = delta_lambda + continuum['wavelength']
right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
left_index = np.digitize((this_wavelength *
- np.power((tau_min * continuum['normalization'] /
+ np.power((min_tau * continuum['normalization'] /
column_density), (1. / continuum['index']))),
self.lambda_bins).clip(0, self.n_lambda)
valid_continuua = np.where(((column_density /
- continuum['normalization']) > tau_min) &
+ continuum['normalization']) > min_tau) &
(right_index - left_index > 1))[0]
pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
(continuum['label'], continuum['wavelength']),
@@ -235,98 +249,155 @@
pbar.finish()
def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
- save_line_list, njobs=-1):
+ save_line_list, subgrid_resolution=10, njobs=-1):
"""
Add the absorption lines to the spectrum.
"""
- # Only make voigt profile for slice of spectrum that is 10 times the line width.
- spectrum_bin_ratio = 5
- # Widen wavelength window until optical depth reaches a max value at the ends.
- max_tau = 0.001
+ # Widen wavelength window until optical depth falls below this tau
+ # value at the ends to assure that the wings of a line have been
+ # fully resolved.
+ min_tau = 1e-3
+ # step through each ionic transition (e.g. HI, HII, MgII) specified
+ # and deposit the lines into the spectrum
for line in parallel_objects(self.line_list, njobs=njobs):
column_density = field_data[line['field_name']] * field_data['dl']
+
# redshift_eff field combines cosmological and velocity redshifts
+ # so delta_lambda gives the offset in angstroms from the rest frame
+ # wavelength to the observed wavelength of the transition
if use_peculiar_velocity:
delta_lambda = line['wavelength'] * field_data['redshift_eff']
else:
delta_lambda = line['wavelength'] * field_data['redshift']
+ # lambda_obs is central wavelength of line after redshift
+ lambda_obs = line['wavelength'] + delta_lambda
+ # bin index in lambda_bins of central wavelength of line after z
+ center_index = np.digitize(lambda_obs, self.lambda_bins)
+
+ # thermal broadening b parameter
thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
field_data['temperature']) /
line['atomic_mass'])
- center_bins = np.digitize((delta_lambda + line['wavelength']),
- self.lambda_bins)
- # ratio of line width to bin width
- width_ratio = ((line['wavelength'] + delta_lambda) * \
- thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d
+ # the actual thermal width of the lines
+ thermal_width = (lambda_obs * thermal_b /
+ speed_of_light_cgs).convert_to_units("angstrom")
- if (width_ratio < 1.0).any():
- mylog.warn(("%d out of %d line components are unresolved, " +
- "consider increasing spectral resolution.") %
- ((width_ratio < 1.0).sum(), width_ratio.size))
+ # Sanitize units for faster runtime of the tau_profile machinery.
+ lambda_0 = line['wavelength'].d # line's rest frame; angstroms
+ lambda_1 = lambda_obs.d # line's observed frame; angstroms
+ cdens = column_density.in_units("cm**-2").d # cm**-2
+ thermb = thermal_b.in_cgs().d # thermal b coefficient; cm / s
+ dlambda = delta_lambda.d # lambda offset; angstroms
+ vlos = field_data['velocity_los'].in_units("km/s").d # km/s
- # do voigt profiles for a subset of the full spectrum
- left_index = (center_bins -
- spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
- right_index = (center_bins +
- spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
+ # When we actually deposit the voigt profile, sometimes we will
+ # have underresolved lines (ie lines with smaller widths than
+ # the spectral bin size). Here, we create virtual bins small
+ # enough in width to well resolve each line, deposit the voigt
+ # profile into them, then numerically integrate their tau values
+ # and sum them to redeposit them into the actual spectral bins.
- # loop over all lines wider than the bin width
- valid_lines = np.where((width_ratio >= 1.0) &
- (right_index - left_index > 1))[0]
- pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
- valid_lines.size)
+ # virtual bins (vbins) will be:
+ # 1) <= the bin_width; assures at least as good as spectral bins
+ # 2) <= 1/10th the thermal width; assures resolving voigt profiles
+ # (actually 1/subgrid_resolution value, default is 1/10)
+ # 3) a bin width will be divisible by vbin_width times a power of
+ # 10; this will assure we don't get spikes in the deposited
+ # spectra from uneven numbers of vbins per bin
+ resolution = thermal_width / self.bin_width
+ vbin_width = self.bin_width / \
+ 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+ vbin_width = vbin_width.in_units('angstrom').d
- # Sanitize units here
- column_density.convert_to_units("cm ** -2")
- lbins = self.lambda_bins.d # Angstroms
- lambda_0 = line['wavelength'].d # Angstroms
- v_doppler = thermal_b.in_cgs().d # cm / s
- cdens = column_density.d
- dlambda = delta_lambda.d # Angstroms
- vlos = field_data['velocity_los'].in_units("km/s").d
+ # the virtual window into which the line is deposited initially
+ # spans a region of 5 thermal_widths, but this may expand
+ n_vbins = np.ceil(5*thermal_width.d/vbin_width)
+ vbin_window_width = n_vbins*vbin_width
- for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
- my_bin_ratio = spectrum_bin_ratio
+ if (thermal_width < self.bin_width).any():
+ mylog.info(("%d out of %d line components will be " + \
+ "deposited as unresolved lines.") %
+ ((thermal_width < self.bin_width).sum(),
+ thermal_width.size))
+
+ valid_lines = np.arange(len(thermal_width))
+ pbar = get_pbar("Adding line - %s [%f A]: " % \
+ (line['label'], line['wavelength']),
+ thermal_width.size)
+
+ # for a given transition, step through each location in the
+ # observed spectrum where it occurs and deposit a voigt profile
+ for i in parallel_objects(valid_lines, njobs=-1):
+ my_vbin_window_width = vbin_window_width[i]
+ my_n_vbins = n_vbins[i]
+ my_vbin_width = vbin_width[i]
while True:
- lambda_bins, line_tau = \
+ vbins = \
+ np.linspace(lambda_1[i]-my_vbin_window_width/2.,
+ lambda_1[i]+my_vbin_window_width/2.,
+ my_n_vbins, endpoint=False)
+
+ vbins, vtau = \
tau_profile(
- lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
- cdens[lixel], delta_lambda=dlambda[lixel],
- lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+ lambda_0, line['f_value'], line['gamma'], thermb[i],
+ cdens[i], delta_lambda=dlambda[i],
+ lambda_bins=vbins)
- # Widen wavelength window until optical depth reaches a max value at the ends.
- if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
- (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
+ # If tau has not dropped below min tau threshold by the
+ # edges (ie the wings), then widen the wavelength
+ # window and repeat process.
+ if (vtau[0] < min_tau and vtau[-1] < min_tau):
break
- my_bin_ratio *= 2
- left_index[lixel] = (center_bins[lixel] -
- my_bin_ratio *
- width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
- right_index[lixel] = (center_bins[lixel] +
- my_bin_ratio *
- width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+ my_vbin_window_width *= 2
+ my_n_vbins *= 2
- self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+ # identify the extrema of the vbin_window so as to speed
+ # up searching over the entire lambda_bins array
+ bins_from_center = np.ceil((my_vbin_window_width/2.) / \
+ self.bin_width.d) + 1
+ left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
+ right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
+ window_width = right_index - left_index
+
+ # run digitize to identify which vbins are deposited into which
+ # global lambda bins.
+ # shift global lambda bins over by half a bin width;
+ # this has the effect of assuring np.digitize will place
+ # the vbins in the closest bin center.
+ binned = np.digitize(vbins,
+ self.lambda_bins[left_index:right_index] \
+ + (0.5 * self.bin_width))
+
+ # numerically integrate the virtual bins to calculate a
+ # virtual equivalent width; then sum the virtual equivalent
+ # widths and deposit into each spectral bin
+ vEW = vtau * my_vbin_width
+ EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
+ EW = np.array(EW)/self.bin_width.d
+ self.tau_field[left_index:right_index] += EW
+
if save_line_list and line['label_threshold'] is not None and \
- cdens[lixel] >= line['label_threshold']:
+ cdens[i] >= line['label_threshold']:
if use_peculiar_velocity:
- peculiar_velocity = vlos[lixel]
+ peculiar_velocity = vlos[i]
else:
peculiar_velocity = 0.0
self.spectrum_line_list.append({'label': line['label'],
- 'wavelength': (lambda_0 + dlambda[lixel]),
- 'column_density': column_density[lixel],
- 'b_thermal': thermal_b[lixel],
- 'redshift': field_data['redshift'][lixel],
+ 'wavelength': (lambda_0 + dlambda[i]),
+ 'column_density': column_density[i],
+ 'b_thermal': thermal_b[i],
+ 'redshift': field_data['redshift'][i],
'v_pec': peculiar_velocity})
pbar.update(i)
pbar.finish()
- del column_density, delta_lambda, thermal_b, \
- center_bins, width_ratio, left_index, right_index
+ del column_density, delta_lambda, lambda_obs, center_index, \
+ thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
+ vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
+ valid_lines, vbins, vtau, vEW
comm = _get_comm(())
self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
diff -r 50f82a593a983ca8ee997a0650fc03eddfdbb30b -r 90f900be7a36433fdd48941cae4bc91066ff5c76 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -12,24 +12,33 @@
import numpy as np
from yt.testing import \
- assert_allclose_units, requires_file, requires_module
+ assert_allclose_units, requires_file, requires_module, \
+ assert_almost_equal, assert_array_almost_equal
from yt.analysis_modules.absorption_spectrum.absorption_line import \
voigt_old, voigt_scipy
from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
from yt.analysis_modules.cosmological_observation.api import LightRay
+from yt.config import ytcfg
import tempfile
import os
import shutil
+from yt.utilities.on_demand_imports import \
+ _h5py as h5
+
+test_dir = ytcfg.get("yt", "test_data_dir")
COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
-
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+HI_SPECTRUM_COSMO = "absorption_spectrum_data/enzo_lyman_alpha_cosmo_spec.h5"
+HI_SPECTRUM_COSMO_FILE = os.path.join(test_dir, HI_SPECTRUM_COSMO)
+HI_SPECTRUM = "absorption_spectrum_data/enzo_lyman_alpha_spec.h5"
+HI_SPECTRUM_FILE = os.path.join(test_dir, HI_SPECTRUM)
@requires_file(COSMO_PLUS)
-def test_absorption_spectrum():
+@requires_file(HI_SPECTRUM_COSMO)
+def test_absorption_spectrum_cosmo():
"""
- This test is simply following the description in the docs for how to
- generate an absorption spectrum from a cosmological light ray for one
- of the sample datasets
+ This test generates an absorption spectrum from a cosmological light ray
"""
# Set up in a temp dir
@@ -37,7 +46,7 @@
curdir = os.getcwd()
os.chdir(tmpdir)
- lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+ lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
lr.make_light_ray(seed=1234567,
fields=['temperature', 'density', 'H_number_density'],
@@ -65,22 +74,30 @@
sp.add_continuum(my_label, field, wavelength, normalization, index)
wavelength, flux = sp.make_spectrum('lightray.h5',
- output_file='spectrum.txt',
+ output_file='spectrum.h5',
line_list_file='lines.txt',
use_peculiar_velocity=True)
+ # load just-generated hdf5 file of spectral data (for consistency)
+ f_new = h5.File('spectrum.h5', 'r')
+
+ # load standard data for comparison
+ f_old = h5.File(HI_SPECTRUM_COSMO_FILE, 'r')
+
+ # compare between standard data and current data for each array saved
+ # (wavelength, flux, tau)
+ for key in f_old.keys():
+ assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
# clean up
os.chdir(curdir)
shutil.rmtree(tmpdir)
-
-@requires_file(COSMO_PLUS)
-@requires_module("astropy")
-def test_absorption_spectrum_fits():
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_file(HI_SPECTRUM)
+def test_absorption_spectrum_non_cosmo():
"""
- This test is simply following the description in the docs for how to
- generate an absorption spectrum from a cosmological light ray for one
- of the sample datasets. Outputs to fits file if astropy is installed.
+ This test generates an absorption spectrum from a non-cosmological light ray
"""
# Set up in a temp dir
@@ -88,11 +105,114 @@
curdir = os.getcwd()
os.chdir(tmpdir)
- lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+ lr = LightRay(COSMO_PLUS_SINGLE)
- lr.make_light_ray(seed=1234567,
+ ray_start = [0,0,0]
+ ray_end = [1,1,1]
+ lr.make_light_ray(start_position=ray_start, end_position=ray_end,
fields=['temperature', 'density', 'H_number_density'],
- get_los_velocity=True,
+ data_filename='lightray.h5')
+
+ sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+ my_label = 'HI Lya'
+ field = 'H_number_density'
+    wavelength = 1215.6700 # Angstroms
+ f_value = 4.164E-01
+ gamma = 6.265e+08
+ mass = 1.00794
+
+ sp.add_line(my_label, field, wavelength, f_value,
+ gamma, mass, label_threshold=1.e10)
+
+ wavelength, flux = sp.make_spectrum('lightray.h5',
+ output_file='spectrum.h5',
+ line_list_file='lines.txt',
+ use_peculiar_velocity=True)
+
+ # load just-generated hdf5 file of spectral data (for consistency)
+ f_new = h5.File('spectrum.h5', 'r')
+
+ # load standard data for comparison
+ f_old = h5.File(HI_SPECTRUM_FILE, 'r')
+
+ # compare between standard data and current data for each array saved
+ # (wavelength, flux, tau)
+ for key in f_old.keys():
+ assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
+ # clean up
+ os.chdir(curdir)
+ shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_equivalent_width_conserved():
+ """
+ This tests that the equivalent width of the optical depth is conserved
+ regardless of the bin width employed in wavelength space.
+ Unresolved lines should still deposit optical depth into the spectrum.
+ """
+
+ # Set up in a temp dir
+ tmpdir = tempfile.mkdtemp()
+ curdir = os.getcwd()
+ os.chdir(tmpdir)
+
+ lr = LightRay(COSMO_PLUS_SINGLE)
+
+ ray_start = [0,0,0]
+ ray_end = [1,1,1]
+ lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+ fields=['temperature', 'density', 'H_number_density'],
+ data_filename='lightray.h5')
+
+ my_label = 'HI Lya'
+ field = 'H_number_density'
+    wave = 1215.6700 # Angstroms
+ f_value = 4.164E-01
+ gamma = 6.265e+08
+ mass = 1.00794
+
+ lambda_min= 1200
+ lambda_max= 1300
+ lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
+ total_tau = []
+
+ for lambda_bin_width in lambda_bin_widths:
+ n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
+ sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max,
+ n_lambda=n_lambda)
+ sp.add_line(my_label, field, wave, f_value, gamma, mass)
+ wavelength, flux = sp.make_spectrum('lightray.h5')
+ total_tau.append((lambda_bin_width * sp.tau_field).sum())
+
+ # assure that the total tau values are all within 1e-5 of each other
+ for tau in total_tau:
+ assert_almost_equal(tau, total_tau[0], 5)
+
+ # clean up
+ os.chdir(curdir)
+ shutil.rmtree(tmpdir)
+
+
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_module("astropy")
+def test_absorption_spectrum_fits():
+ """
+ This test generates an absorption spectrum and saves it as a fits file.
+ """
+
+ # Set up in a temp dir
+ tmpdir = tempfile.mkdtemp()
+ curdir = os.getcwd()
+ os.chdir(tmpdir)
+
+ lr = LightRay(COSMO_PLUS_SINGLE)
+
+ ray_start = [0,0,0]
+ ray_end = [1,1,1]
+ lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+ fields=['temperature', 'density', 'H_number_density'],
data_filename='lightray.h5')
sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list