[yt-svn] commit/yt: MatthewTurk: Merged in xarthisius/yt (pull request #1701)

commits-noreply at bitbucket.org
Thu Aug 27 09:35:09 PDT 2015


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/798706bc587f/
Changeset:   798706bc587f
Branch:      yt
User:        MatthewTurk
Date:        2015-08-27 16:34:57+00:00
Summary:     Merged in xarthisius/yt (pull request #1701)

[opt] Drop units from computationally expensive parts of absorption_spectrum module
Affected #:  2 files
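
For reference, the pattern this changeset applies: build the unit-ful prefactor once from yt's physical constants, strip it to a plain cgs float, and keep the hot path in pure NumPy. A minimal sketch mirroring the tau_factor caching added below, assuming the same physical-constant imports absorption_line.py already uses (the helper name "tau_x" is illustrative, not part of this changeset):

    import numpy as np
    from yt.utilities.physical_constants import \
        charge_proton_cgs, mass_electron_cgs, speed_of_light_cgs

    tau_factor = None  # cached, unit-stripped combination of constants

    def tau_x(column_density, f_value, v_doppler):
        """sqrt(pi) e^2 / (m_e c) * N * f / b as a plain cgs float."""
        global tau_factor
        if tau_factor is None:
            # build the unit-ful prefactor once, then keep only its cgs value
            tau_factor = (np.sqrt(np.pi) * charge_proton_cgs ** 2 /
                          (mass_electron_cgs * speed_of_light_cgs)).in_cgs().d
        # inputs are assumed to already be plain cgs floats
        # (cm**-2, dimensionless, cm/s)
        return tau_factor * column_density * f_value / v_doppler
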

diff -r 14a4cacf4dfaa1b0af3830e7d2efa1ab69db1b35 -r 798706bc587f36f15ab279807b69a420daed42df yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -21,18 +21,22 @@
 from yt.utilities.on_demand_imports import _scipy, NotAModule
 
 special = _scipy.special
+tau_factor = None
+_cs = None
+
 
 def voigt_scipy(a, u):
     x = np.asarray(u).astype(np.float64)
     y = np.asarray(a).astype(np.float64)
     return special.wofz(x + 1j * y).real
 
+
 def voigt_old(a, u):
     """
     NAME:
-        VOIGT 
+        VOIGT
     PURPOSE:
-        Implementation of Voigt function 
+        Implementation of Voigt function
     CATEGORY:
             Math
     CALLING SEQUENCE:
@@ -57,9 +61,10 @@
     OUTPUTS:
             An array of the same type as u
     RESTRICTIONS:
-            U must be an array, a should not be. Also this procedure is only valid
-            for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should be most 
-            astrophysical conditions (see the article below for further comments
+            U must be an array, a should not be. Also this procedure is only
+            valid for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should
+            be most astrophysical conditions (see the article below for further
+            comments
     PROCEDURE:
             Follows procedure in Armstrong JQSRT 7, 85 (1967)
             also the same as the intrinsic in the previous version of IDL
@@ -71,17 +76,17 @@
     y = np.asarray(a).astype(np.float64)
 
     # Hummer's Chebyshev Coefficients
-    c = ( 0.1999999999972224, -0.1840000000029998,   0.1558399999965025, 
-         -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907, 
-          0.0362157301623914, -0.0208497654398036,   0.0111960116346270, 
-         -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2, 
-          0.4899519978088e-3, -0.1933630801528e-3,   0.722877446788e-4, 
-         -0.256555124979e-4,   0.86620736841e-5,    -0.27876379719e-5, 
-          0.8566873627e-6,    -0.2518433784e-6,      0.709360221e-7, 
-         -0.191732257e-7,      0.49801256e-8,       -0.12447734e-8, 
-          0.2997777e-9,       -0.696450e-10,         0.156262e-10, 
-         -0.33897e-11,         0.7116e-12,          -0.1447e-12, 
-          0.285e-13,          -0.55e-14,             0.10e-14,
+    c = (0.1999999999972224, -0.1840000000029998,   0.1558399999965025,
+         -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907,
+         0.0362157301623914, -0.0208497654398036,   0.0111960116346270,
+         -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2,
+         0.4899519978088e-3, -0.1933630801528e-3,   0.722877446788e-4,
+         -0.256555124979e-4,   0.86620736841e-5,    -0.27876379719e-5,
+         0.8566873627e-6,    -0.2518433784e-6,      0.709360221e-7,
+         -0.191732257e-7,      0.49801256e-8,       -0.12447734e-8,
+         0.2997777e-9,       -0.696450e-10,         0.156262e-10,
+         -0.33897e-11,         0.7116e-12,          -0.1447e-12,
+         0.285e-13,          -0.55e-14,             0.10e-14,
          -0.2e-15)
 
     y2 = y * y
@@ -108,11 +113,11 @@
         x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
         x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
         x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
-        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
-        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
-        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
-        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
-        dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
+        x8 = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6 = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4 = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2 = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
+        dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 +
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
         dno2[q] = (1. - dno1[q]) / (2. * x[q])
@@ -130,81 +135,89 @@
                 yn = yn * y2
                 g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if np.max(np.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8:
+                    break
 
     k1 = u1 - 1.12837917 * funct
     k1 = k1.astype(np.float64).clip(0)
     return k1
 
-def tau_profile(lamba_0, f_value, gamma, v_doppler, column_density, 
+
+def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density,
                 delta_v=None, delta_lambda=None,
                 lambda_bins=None, n_lambda=12000, dlambda=0.01):
     r"""
-    Create an optical depth vs. wavelength profile for an 
+    Create an optical depth vs. wavelength profile for an
     absorption line using a voigt profile.
 
     Parameters
     ----------
-    
-    lamba_0 : float YTQuantity in length units
+
+    lambda_0 : float in angstroms
        central wavelength.
     f_value : float
        absorption line f-value.
     gamma : float
        absorption line gamma value.
-    v_doppler : float YTQuantity in velocity units
+    v_doppler : float in cm/s
        doppler b-parameter.
-    column_density : float YTQuantity in (length units)^-2
+    column_density : float in cm^-2
        column density.
-    delta_v : float YTQuantity in velocity units
-       velocity offset from lamba_0.
+    delta_v : float in cm/s
+       velocity offset from lambda_0.
        Default: None (no shift).
-    delta_lambda : float YTQuantity in length units
+    delta_lambda : float in angstroms
         wavelength offset.
         Default: None (no shift).
-    lambda_bins : YTArray in length units
-        wavelength array for line deposition.  If None, one will be 
+    lambda_bins : array in angstroms
+        wavelength array for line deposition.  If None, one will be
         created using n_lambda and dlambda.
         Default: None.
     n_lambda : int
         size of lambda bins to create if lambda_bins is None.
         Default: 12000.
-    dlambda : float 
+    dlambda : float in angstroms
         lambda bin width in angstroms if lambda_bins is None.
         Default: 0.01.
-        
+
     """
+    global tau_factor
+    if tau_factor is None:
+        tau_factor = (
+            np.sqrt(np.pi) * charge_proton_cgs ** 2 /
+            (mass_electron_cgs * speed_of_light_cgs)
+        ).in_cgs().d
 
-    ## shift lamba_0 by delta_v
+    global _cs
+    if _cs is None:
+        _cs = speed_of_light_cgs.d[()]
+
+    # shift lambda_0 by delta_v
     if delta_v is not None:
-        lam1 = lamba_0 * (1 + delta_v / speed_of_light_cgs)
+        lam1 = lambda_0 * (1 + delta_v / _cs)
     elif delta_lambda is not None:
-        lam1 = lamba_0 + delta_lambda
+        lam1 = lambda_0 + delta_lambda
     else:
-        lam1 = lamba_0
+        lam1 = lambda_0
 
-    ## conversions
-    nu1 = speed_of_light_cgs / lam1           # line freq in Hz
-    nudop = v_doppler / speed_of_light_cgs * nu1   # doppler width in Hz
+    # conversions
+    nudop = 1e8 * v_doppler / lam1   # doppler width in Hz
 
-    ## create wavelength
+    # create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
             np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2  # wavelength vector (angstroms)
-    nua = (speed_of_light_cgs / lambda_bins)  # frequency vector (Hz)
 
-    ## tau_0
-    tau_X = np.sqrt(np.pi) * charge_proton_cgs**2 / \
-      (mass_electron_cgs * speed_of_light_cgs) * \
-      column_density * f_value / v_doppler
-    tau0 = tau_X * lamba_0
+    # tau_0
+    tau_X = tau_factor * column_density * f_value / v_doppler
+    tau0 = tau_X * lambda_0 * 1e-8
 
     # dimensionless frequency offset in units of doppler freq
-    x = ((nua - nu1) / nudop).in_units("")
-    a = (gamma / (4 * np.pi * nudop)).in_units("s")  # damping parameter 
-    phi = voigt(a, x)                                # line profile
-    tauphi = (tau0 * phi).in_units("")               # profile scaled with tau0
+    x = _cs / v_doppler * (lam1 / lambda_bins - 1.0)
+    a = gamma / (4.0 * np.pi * nudop)               # damping parameter
+    phi = voigt(a, x)                               # line profile
+    tauphi = tau0 * phi              # profile scaled with tau0
 
     return (lambda_bins, tauphi)
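
With this change tau_profile takes and returns plain floats and NumPy arrays, per the updated docstring above. An illustrative call (the Lyman-alpha numbers are example inputs, not taken from this changeset):

    from yt.analysis_modules.absorption_spectrum.absorption_line import \
        tau_profile

    lambda_bins, tau = tau_profile(
        lambda_0=1215.67,       # angstroms
        f_value=0.4164,
        gamma=6.265e8,          # 1/s
        v_doppler=3.0e6,        # cm/s (b of roughly 30 km/s)
        column_density=1.0e13)  # cm^-2
    # lambda_bins and tau come back as plain ndarrays; re-attach units
    # afterwards if needed, e.g. YTArray(lambda_bins, "angstrom").
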
 

diff -r 14a4cacf4dfaa1b0af3830e7d2efa1ab69db1b35 -r 798706bc587f36f15ab279807b69a420daed42df yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -54,7 +54,7 @@
         self.spectrum_line_list = None
         self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
                                    "angstrom")
-        self.bin_width = YTQuantity((lambda_max - lambda_min) / 
+        self.bin_width = YTQuantity((lambda_max - lambda_min) /
                                     float(n_lambda - 1), "angstrom")
         self.line_list = []
         self.continuum_list = []
@@ -66,7 +66,7 @@
 
         Parameters
         ----------
-        
+
         label : string
            label for the line.
         field_name : string
@@ -124,15 +124,15 @@
         input_file : string
            path to input ray data.
         output_file : optional, string
-           path for output file.  File formats are chosen based on the 
-           filename extension.  ``.h5`` for hdf5, ``.fits`` for fits, 
+           path for output file.  File formats are chosen based on the
+           filename extension.  ``.h5`` for hdf5, ``.fits`` for fits,
            and everything else is ASCII.
            Default: "spectrum.h5"
         line_list_file : optional, string
-           path to file in which the list of all deposited lines 
-           will be saved.  If set to None, the line list will not 
-           be saved.  Note, when running in parallel, combining the 
-           line lists can be quite slow, so it is recommended to set 
+           path to file in which the list of all deposited lines
+           will be saved.  If set to None, the line list will not
+           be saved.  Note, when running in parallel, combining the
+           line lists can be quite slow, so it is recommended to set
            this to None when running in parallel unless you really
            want them.
            Default: "lines.txt"
@@ -141,15 +141,15 @@
            Default: True
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
-           absorption lines will be divided.  If set to -1, each 
+           absorption lines will be divided.  If set to -1, each
            absorption line will be deposited by exactly one processor.
-           If njobs is set to a value less than the total number of 
-           available processors (N), then the deposition of an 
+           If njobs is set to a value less than the total number of
+           available processors (N), then the deposition of an
            individual line will be parallelized over (N / njobs)
-           processors.  If set to "auto", it will first try to 
-           parallelize over the list of lines and only parallelize 
+           processors.  If set to "auto", it will first try to
+           parallelize over the list of lines and only parallelize
            the line deposition if there are more processors than
-           lines.  This is the optimal strategy for parallelizing 
+           lines.  This is the optimal strategy for parallelizing
            spectrum generation.
            Default: "auto"
         """
@@ -176,7 +176,7 @@
         if njobs == "auto":
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
-        
+
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     line_list_file is not None, njobs=njobs)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
@@ -273,17 +273,26 @@
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)
+
+            # Sanitize units here
+            column_density.convert_to_units("cm ** -2")
+            lbins = self.lambda_bins.d  # Angstroms
+            lambda_0 = line['wavelength'].d  # Angstroms
+            v_doppler = thermal_b.in_cgs().d  # cm / s
+            cdens = column_density.d
+            dlambda = delta_lambda.d  # Angstroms
+            vlos = field_data['velocity_los'].in_units("km/s").d
+
             for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
                 my_bin_ratio = spectrum_bin_ratio
+
                 while True:
                     lambda_bins, line_tau = \
                         tau_profile(
-                            line['wavelength'], line['f_value'],
-                            line['gamma'], thermal_b[lixel].in_units("km/s"),
-                            column_density[lixel],
-                            delta_lambda=delta_lambda[lixel],
-                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
-                        
+                            lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
+                            cdens[lixel], delta_lambda=dlambda[lixel],
+                            lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+
                     # Widen wavelength window until optical depth reaches a max value at the ends.
                     if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                       (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
@@ -295,16 +304,16 @@
                     right_index[lixel] = (center_bins[lixel] +
                                           my_bin_ratio *
                                           width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                 if save_line_list and line['label_threshold'] is not None and \
-                        column_density[lixel] >= line['label_threshold']:
+                        cdens[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
+                        peculiar_velocity = vlos[lixel]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
-                                                    'wavelength': (line['wavelength'] +
-                                                                   delta_lambda[lixel]),
+                                                    'wavelength': (lambda_0 + dlambda[lixel]),
                                                     'column_density': column_density[lixel],
                                                     'b_thermal': thermal_b[lixel],
                                                     'redshift': field_data['redshift'][lixel],
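
A rough sense of the overhead the unit-sanitizing block above removes: arithmetic on a YTArray carries unit bookkeeping through every operation, while the same arithmetic on its .d ndarray view does not. A micro-benchmark sketch, assuming yt is installed and using the usual YTArray import path (timings are illustrative and will vary by machine):

    import numpy as np
    from timeit import timeit
    from yt.units.yt_array import YTArray

    lam = YTArray(np.linspace(1150.0, 1250.0, 10000), "angstrom")
    lam_d = lam.d  # plain float64 view, no unit bookkeeping

    t_units = timeit(lambda: (lam * 2.0).sum(), number=1000)
    t_plain = timeit(lambda: (lam_d * 2.0).sum(), number=1000)
    print("with units: %.3fs  without: %.3fs" % (t_units, t_plain))
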

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

