[yt-svn] commit/yt: 10 new changesets

commits-noreply at bitbucket.org
Fri Sep 4 05:10:36 PDT 2015


10 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3fa9694c7b86/
Changeset:   3fa9694c7b86
Branch:      stable
User:        ngoldbaum
Date:        2015-08-14 16:17:26+00:00
Summary:     Add support for particle fields to the [Min,Max]Location derived quantities
Affected #:  1 file

diff -r 841164c022392ca2c2eacde930c9da6df0383520 -r 3fa9694c7b868c27fe113096ae5016290d08b314 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -33,6 +33,16 @@
 
 derived_quantity_registry = {}
 
+def get_position_fields(field, data):
+    axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]]
+    if field[0] in data.ds.particle_types:
+        position_fields = [(field[0], 'particle_position_%s' % d)
+                           for d in axis_names]
+    else:
+        position_fields = axis_names
+
+    return position_fields
+
 class RegisteredDerivedQuantity(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -545,16 +555,15 @@
         axis_names = data.ds.coordinates.axis_name
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, -HUGE, field)
-        mx = array_like_field(data, -1, axis_names[0])
-        my = array_like_field(data, -1, axis_names[1])
-        mz = array_like_field(data, -1, axis_names[2])
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         maxi = -1
         if data[field].size > 0:
             maxi = np.argmax(data[field])
             ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in (axis_names[0],
-                                                    axis_names[1],
-                                                    axis_names[2])]
+            mx, my, mz = [data[ax][maxi] for ax in position_fields]
         return (ma, maxi, mx, my, mz)
 
     def reduce_intermediate(self, values):
@@ -590,14 +599,15 @@
     def process_chunk(self, data, field):
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, HUGE, field)
-        mx = array_like_field(data, -1, "x")
-        my = array_like_field(data, -1, "y")
-        mz = array_like_field(data, -1, "z")
+        position_fields = get_position_fields(field, data)
+        mx = array_like_field(data, -1, position_fields[0])
+        my = array_like_field(data, -1, position_fields[1])
+        mz = array_like_field(data, -1, position_fields[2])
         mini = -1
         if data[field].size > 0:
             mini = np.argmin(data[field])
             ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in 'xyz']
+            mx, my, mz = [data[ax][mini] for ax in position_fields]
         return (ma, mini, mx, my, mz)
 
     def reduce_intermediate(self, values):
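
For context, a minimal sketch of what this change enables, assuming a dataset
that contains particle data (the IsolatedGalaxy sample dataset is used here
purely as an illustration):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()

    # With this change, the location columns resolve to
    # ('all', 'particle_position_x') and friends instead of the mesh
    # axis fields whenever the requested field is a particle field.
    result = ad.quantities.max_location(("all", "particle_mass"))
    # result holds the field maximum plus the coordinates where it occurs.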


https://bitbucket.org/yt_analysis/yt/commits/5cc69dbb9abf/
Changeset:   5cc69dbb9abf
Branch:      stable
User:        xarthisius
Date:        2015-08-16 00:56:17+00:00
Summary:     Backporting PR #1701 https://bitbucket.org/yt_analysis/yt/pull-requests/1701
Affected #:  2 files

diff -r 3fa9694c7b868c27fe113096ae5016290d08b314 -r 5cc69dbb9abf557616af6753d782576d1a0c6bd8 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -21,18 +21,22 @@
 from yt.utilities.on_demand_imports import _scipy, NotAModule
 
 special = _scipy.special
+tau_factor = None
+_cs = None
+
 
 def voigt_scipy(a, u):
     x = np.asarray(u).astype(np.float64)
     y = np.asarray(a).astype(np.float64)
     return special.wofz(x + 1j * y).real
 
+
 def voigt_old(a, u):
     """
     NAME:
-        VOIGT 
+        VOIGT
     PURPOSE:
-        Implementation of Voigt function 
+        Implementation of Voigt function
     CATEGORY:
             Math
     CALLING SEQUENCE:
@@ -57,9 +61,10 @@
     OUTPUTS:
             An array of the same type as u
     RESTRICTIONS:
-            U must be an array, a should not be. Also this procedure is only valid
-            for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should be most 
-            astrophysical conditions (see the article below for further comments
+            U must be an array, a should not be. Also this procedure is only
+            valid for the region a<1.0, u<4.0 or a<1.8(u+1), u>4, which should
+            be most astrophysical conditions (see the article below for further
+            comments
     PROCEDURE:
             Follows procedure in Armstrong JQSRT 7, 85 (1967)
             also the same as the intrinsic in the previous version of IDL
@@ -70,27 +75,27 @@
     x = np.asarray(u).astype(np.float64)
     y = np.asarray(a).astype(np.float64)
 
-    w = np.array([0.462243670,   0.286675505,   0.109017206, 
-                  0.0248105209,  0.00324377334, 0.000228338636, 
-                  7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206,
+                  0.0248105209,  0.00324377334, 0.000228338636,
+                  7.80255648e-6, 1.08606937e-7, 4.39934099e-10,
                   2.22939365e-13])
 
-    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
-                  2.25497400,  2.78880606,  3.34785457, 3.94476404, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771,
+                  2.25497400,  2.78880606,  3.34785457, 3.94476404,
                   4.60368245,  5.38748089])
 
     # Hummer's Chebyshev Coefficients
-    c = ( 0.1999999999972224, -0.1840000000029998,   0.1558399999965025, 
-         -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907, 
-          0.0362157301623914, -0.0208497654398036,   0.0111960116346270, 
-         -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2, 
-          0.4899519978088e-3, -0.1933630801528e-3,   0.722877446788e-4, 
-         -0.256555124979e-4,   0.86620736841e-5,    -0.27876379719e-5, 
-          0.8566873627e-6,    -0.2518433784e-6,      0.709360221e-7, 
-         -0.191732257e-7,      0.49801256e-8,       -0.12447734e-8, 
-          0.2997777e-9,       -0.696450e-10,         0.156262e-10, 
-         -0.33897e-11,         0.7116e-12,          -0.1447e-12, 
-          0.285e-13,          -0.55e-14,             0.10e-14,
+    c = (0.1999999999972224, -0.1840000000029998,   0.1558399999965025,
+         -0.1216640000043988,  0.0877081599940391,  -0.0585141248086907,
+         0.0362157301623914, -0.0208497654398036,   0.0111960116346270,
+         -0.56231896167109e-2, 0.26487634172265e-2, -0.11732670757704e-2,
+         0.4899519978088e-3, -0.1933630801528e-3,   0.722877446788e-4,
+         -0.256555124979e-4,   0.86620736841e-5,    -0.27876379719e-5,
+         0.8566873627e-6,    -0.2518433784e-6,      0.709360221e-7,
+         -0.191732257e-7,      0.49801256e-8,       -0.12447734e-8,
+         0.2997777e-9,       -0.696450e-10,         0.156262e-10,
+         -0.33897e-11,         0.7116e-12,          -0.1447e-12,
+         0.285e-13,          -0.55e-14,             0.10e-14,
          -0.2e-15)
 
     y2 = y * y
@@ -117,11 +122,11 @@
         x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
         x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
         x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
-        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
-        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
-        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
-        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
-        dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
+        x8 = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6 = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4 = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2 = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
+        dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 +
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
         dno2[q] = (1. - dno1[q]) / (2. * x[q])
@@ -139,82 +144,89 @@
                 yn = yn * y2
                 g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if np.max(np.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8:
+                    break
 
     k1 = u1 - 1.12837917 * funct
     k1 = k1.astype(np.float64).clip(0)
     return k1
 
-def tau_profile(lamba_0, f_value, gamma, v_doppler, column_density, 
+
+def tau_profile(lambda_0, f_value, gamma, v_doppler, column_density,
                 delta_v=None, delta_lambda=None,
                 lambda_bins=None, n_lambda=12000, dlambda=0.01):
     r"""
-    Create an optical depth vs. wavelength profile for an 
+    Create an optical depth vs. wavelength profile for an
     absorption line using a voigt profile.
 
     Parameters
     ----------
-    
-    lamba_0 : float YTQuantity in length units
+
+    lambda_0 : float in angstroms
        central wavelength.
     f_value : float
        absorption line f-value.
     gamma : float
        absorption line gamma value.
-    v_doppler : float YTQuantity in velocity units
+    v_doppler : float in cm/s
        doppler b-parameter.
-    column_density : float YTQuantity in (length units)^-2
+    column_density : float in cm^-2
        column density.
-    delta_v : float YTQuantity in velocity units
-       velocity offset from lamba_0.
+    delta_v : float in cm/s
+       velocity offset from lambda_0.
        Default: None (no shift).
-    delta_lambda : float YTQuantity in length units
+    delta_lambda : float in angstroms
         wavelength offset.
         Default: None (no shift).
-    lambda_bins : YTArray in length units
-        wavelength array for line deposition.  If None, one will be 
+    lambda_bins : array in angstroms
+        wavelength array for line deposition.  If None, one will be
         created using n_lambda and dlambda.
         Default: None.
     n_lambda : int
         size of lambda bins to create if lambda_bins is None.
         Default: 12000.
-    dlambda : float 
+    dlambda : float in angstroms
         lambda bin width in angstroms if lambda_bins is None.
         Default: 0.01.
-        
+
     """
+    global tau_factor
+    if tau_factor is None:
+        tau_factor = (
+            np.sqrt(np.pi) * charge_proton_cgs ** 2 /
+            (mass_electron_cgs * speed_of_light_cgs)
+        ).in_cgs().d
 
-    ## shift lamba_0 by delta_v
+    global _cs
+    if _cs is None:
+        _cs = speed_of_light_cgs.d[()]
+
+    # shift lambda_0 by delta_v
     if delta_v is not None:
-        lam1 = lamba_0 * (1 + delta_v / speed_of_light_cgs)
+        lam1 = lambda_0 * (1 + delta_v / _cs)
     elif delta_lambda is not None:
-        lam1 = lamba_0 + delta_lambda
+        lam1 = lambda_0 + delta_lambda
     else:
-        lam1 = lamba_0
+        lam1 = lambda_0
 
-    ## conversions
-    nu1 = speed_of_light_cgs / lam1           # line freq in Hz
-    nudop = v_doppler / speed_of_light_cgs * nu1   # doppler width in Hz
-    lamdop = v_doppler / speed_of_light_cgs * lam1 # doppler width in Ang
+    # conversions
+    nudop = 1e8 * v_doppler / lam1   # doppler width in Hz
 
-    ## create wavelength
+    # create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
             np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2  # wavelength vector (angstroms)
-    nua = (speed_of_light_cgs / lambda_bins)  # frequency vector (Hz)
 
-    ## tau_0
-    tau_X = np.sqrt(np.pi) * charge_proton_cgs**2 / \
-      (mass_electron_cgs * speed_of_light_cgs) * \
-      column_density * f_value / v_doppler
-    tau0 = tau_X * lamba_0
+    # tau_0
+    tau_X = tau_factor * column_density * f_value / v_doppler
+    tau0 = tau_X * lambda_0 * 1e-8
 
     # dimensionless frequency offset in units of doppler freq
-    x = ((nua - nu1) / nudop).in_units("")
-    a = (gamma / (4 * np.pi * nudop)).in_units("s")  # damping parameter 
-    phi = voigt(a, x)                                # line profile
-    tauphi = (tau0 * phi).in_units("")               # profile scaled with tau0
+    x = _cs / v_doppler * (lam1 / lambda_bins - 1.0)
+    a = gamma / (4.0 * np.pi * nudop)               # damping parameter
+    phi = voigt(a, x)                               # line profile
+    tauphi = tau0 * phi              # profile scaled with tau0
 
     return (lambda_bins, tauphi)
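
Two things are happening in this rewrite: the YTQuantity arithmetic has moved
out of the function (callers now pass plain floats in fixed units: angstroms,
cm/s, cm^-2), and the constant prefactor is computed once and memoized in a
module-level global. A self-contained sketch of that caching pattern, with
illustrative cgs values standing in for yt's physical-constant YTQuantities:

    import numpy as np

    _tau_factor = None

    def get_tau_factor():
        """Compute the unit-stripped prefactor once, then reuse it."""
        global _tau_factor
        if _tau_factor is None:
            e = 4.80320425e-10    # proton charge, esu (illustrative value)
            m_e = 9.10938291e-28  # electron mass, g
            c = 2.99792458e10     # speed of light, cm/s
            _tau_factor = np.sqrt(np.pi) * e ** 2 / (m_e * c)
        return _tau_factor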
 

diff -r 3fa9694c7b868c27fe113096ae5016290d08b314 -r 5cc69dbb9abf557616af6753d782576d1a0c6bd8 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -50,7 +50,7 @@
         self.spectrum_line_list = None
         self.lambda_bins = YTArray(np.linspace(lambda_min, lambda_max, n_lambda),
                                    "angstrom")
-        self.bin_width = YTQuantity((lambda_max - lambda_min) / 
+        self.bin_width = YTQuantity((lambda_max - lambda_min) /
                                     float(n_lambda - 1), "angstrom")
         self.line_list = []
         self.continuum_list = []
@@ -62,7 +62,7 @@
 
         Parameters
         ----------
-        
+
         label : string
            label for the line.
         field_name : string
@@ -238,17 +238,26 @@
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)
+
+            # Sanitize units here
+            column_density.convert_to_units("cm ** -2")
+            lbins = self.lambda_bins.d  # Angstroms
+            lambda_0 = line['wavelength'].d  # Angstroms
+            v_doppler = thermal_b.in_cgs().d  # cm / s
+            cdens = column_density.d
+            dlambda = delta_lambda.d  # Angstroms
+            vlos = field_data['velocity_los'].in_units("km/s").d
+
             for i, lixel in enumerate(valid_lines):
                 my_bin_ratio = spectrum_bin_ratio
+
                 while True:
                     lambda_bins, line_tau = \
                         tau_profile(
-                            line['wavelength'], line['f_value'],
-                            line['gamma'], thermal_b[lixel].in_units("km/s"),
-                            column_density[lixel],
-                            delta_lambda=delta_lambda[lixel],
-                            lambda_bins=self.lambda_bins[left_index[lixel]:right_index[lixel]])
-                        
+                            lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
+                            cdens[lixel], delta_lambda=dlambda[lixel],
+                            lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+
                     # Widen wavelength window until optical depth reaches a max value at the ends.
                     if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
                       (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
@@ -260,16 +269,16 @@
                     right_index[lixel] = (center_bins[lixel] +
                                           my_bin_ratio *
                                           width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
                 if line['label_threshold'] is not None and \
-                        column_density[lixel] >= line['label_threshold']:
+                        cdens[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = field_data['velocity_los'][lixel].in_units("km/s")
+                        peculiar_velocity = vlos[lixel]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
-                                                    'wavelength': (line['wavelength'] +
-                                                                   delta_lambda[lixel]),
+                                                    'wavelength': (lambda_0 + dlambda[lixel]),
                                                     'column_density': column_density[lixel],
                                                     'b_thermal': thermal_b[lixel],
                                                     'redshift': field_data['redshift'][lixel],
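
This is the caller-side half of the same optimization: units are validated and
converted a single time before the per-line loop, and the loop itself only
touches bare float64 arrays via the .d attribute. A minimal sketch of the
idiom:

    import numpy as np
    from yt.units.yt_array import YTArray

    column_density = YTArray(np.ones(1000), "m**-2")

    # Convert once, up front; inside the hot loop use the plain
    # ndarray view (.d) so no per-iteration unit checks happen.
    column_density.convert_to_units("cm**-2")
    cdens = column_density.d
    total = cdens.sum()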


https://bitbucket.org/yt_analysis/yt/commits/3c605214f515/
Changeset:   3c605214f515
Branch:      stable
User:        xarthisius
Date:        2015-08-19 17:12:43+00:00
Summary:     Backporting PR #1707 https://bitbucket.org/yt_analysis/yt/pull-requests/1707
Affected #:  4 files

diff -r 5cc69dbb9abf557616af6753d782576d1a0c6bd8 -r 3c605214f515de42acdf54ded7cbab22ebc6771a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -321,7 +321,7 @@
         _units_initialized = False
         with self.data_source._field_parameter_state(self.field_parameters):
             for chunk in parallel_objects(self.data_source.chunks(
-                                          [], "io", local_only = True)): 
+                                          [], "io", local_only = True)):
                 mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)",
                             chunk.ires.size, get_memory_usage()/1024.)
                 if _units_initialized is False:
@@ -910,22 +910,22 @@
         return ls
 
     def _minimal_box(self, dds):
-        LL = self.left_edge - self.ds.domain_left_edge
+        LL = self.left_edge.d - self.ds.domain_left_edge.d
         # Nudge in case we're on the edge
-        LL += LL.uq * np.finfo(np.float64).eps
-        LS = self.right_edge - self.ds.domain_left_edge
-        LS += LS.uq * np.finfo(np.float64).eps
+        LL += np.finfo(np.float64).eps
+        LS = self.right_edge.d - self.ds.domain_left_edge.d
+        LS += np.finfo(np.float64).eps
         cell_start = LL / dds  # This is the cell we're inside
         cell_end = LS / dds
         if self.level == 0:
             start_index = np.array(np.floor(cell_start), dtype="int64")
             end_index = np.array(np.ceil(cell_end), dtype="int64")
-            dims = np.rint((self.ActiveDimensions * self.dds) / dds).astype("int64")
+            dims = np.rint((self.ActiveDimensions * self.dds.d) / dds).astype("int64")
         else:
             # Give us one buffer
-            start_index = np.rint(cell_start.d).astype('int64') - 1
+            start_index = np.rint(cell_start).astype('int64') - 1
             # How many root cells do we occupy?
-            end_index = np.rint(cell_end.d).astype('int64')
+            end_index = np.rint(cell_end).astype('int64')
             dims = end_index - start_index + 1
         return start_index, end_index.astype("int64"), dims.astype("int32")
 
@@ -1211,9 +1211,9 @@
         >>> distf = 3.1e18*1e3 # distances into kpc
         >>> for i, r in enumerate(rhos):
         ...     surf = ds.surface(sp,'density',r)
-        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
-        ...                      color_field='temperature', dist_fac = distf, 
-        ...                      plot_index = i, color_field_max = ma, 
+        ...     surf.export_obj("my_galaxy", transparency=trans[i],
+        ...                      color_field='temperature', dist_fac = distf,
+        ...                      plot_index = i, color_field_max = ma,
         ...                      color_field_min = mi)
 
         >>> sp = ds.sphere("max", (10, "kpc"))

diff -r 5cc69dbb9abf557616af6753d782576d1a0c6bd8 -r 3c605214f515de42acdf54ded7cbab22ebc6771a yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -77,13 +77,12 @@
         if self.start_index is not None:
             return self.start_index
         if self.Parent is None:
-            left = self.LeftEdge - self.ds.domain_left_edge
-            start_index = left / self.dds
-            return np.rint(start_index).astype('int64').ravel().view(np.ndarray)
+            left = self.LeftEdge.d - self.ds.domain_left_edge.d
+            start_index = left / self.dds.d
+            return np.rint(start_index).astype('int64').ravel()
 
-        pdx = self.Parent.dds.ndarray_view()
-        di = np.rint( (self.LeftEdge.ndarray_view() -
-                       self.Parent.LeftEdge.ndarray_view()) / pdx)
+        pdx = self.Parent.dds.d
+        di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
         start_index = self.Parent.get_global_startindex() + di
         self.start_index = (start_index * self.ds.refine_by).astype('int64').ravel()
         return self.start_index
@@ -251,7 +250,7 @@
         field_parameters.update(self.field_parameters)
         if smoothed:
             cube = self.ds.smoothed_covering_grid(
-                level, new_left_edge, 
+                level, new_left_edge,
                 field_parameters = field_parameters,
                 **kwargs)
         else:

diff -r 5cc69dbb9abf557616af6753d782576d1a0c6bd8 -r 3c605214f515de42acdf54ded7cbab22ebc6771a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -109,6 +109,8 @@
 cdef class SelectorObject:
 
     def __cinit__(self, dobj, *args):
+        cdef np.float64_t [:] DLE
+        cdef np.float64_t [:] DRE
         self.min_level = getattr(dobj, "min_level", 0)
         self.max_level = getattr(dobj, "max_level", 99)
         self.overlap_cells = 0

diff -r 5cc69dbb9abf557616af6753d782576d1a0c6bd8 -r 3c605214f515de42acdf54ded7cbab22ebc6771a yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -399,7 +399,8 @@
         (conversion_factor, offset) = self.units.get_conversion_factor(new_units)
 
         self.units = new_units
-        self *= conversion_factor
+        values = self.d
+        values *= conversion_factor
 
         if offset:
             np.subtract(self, offset*self.uq, self)
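
The yt_array.py hunk is subtle: `self *= conversion_factor` would route
through the unit-aware in-place multiply, while .d returns a view, not a copy,
of the same buffer, so scaling the view rescales the array's data with no unit
machinery involved. A small demonstration of the view behavior:

    from yt.units.yt_array import YTArray

    a = YTArray([1.0, 2.0, 3.0], "km")
    v = a.d            # a view, not a copy, of a's float64 buffer
    v *= 2.0           # mutates a in place, bypassing unit arithmetic
    print(a)           # [2. 4. 6.] km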


https://bitbucket.org/yt_analysis/yt/commits/a7c4b9566c9e/
Changeset:   a7c4b9566c9e
Branch:      stable
User:        xarthisius
Date:        2015-08-19 18:22:44+00:00
Summary:     [py3] Use OrderedDict for tests, as it is included in the answer's name
Affected #:  1 file

diff -r 3c605214f515de42acdf54ded7cbab22ebc6771a -r a7c4b9566c9e65046c892ffb54d13e462dde6181 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -27,6 +27,7 @@
 from yt.visualization.api import \
     SlicePlot, ProjectionPlot, OffAxisSlicePlot, OffAxisProjectionPlot
 from yt.units.yt_array import YTArray, YTQuantity
+from collections import OrderedDict
 
 def setup():
     """Test specific setup."""
@@ -77,8 +78,10 @@
              "set_center": [(((0.4, 0.3), ), {})],
              "set_cmap": [(('density', 'RdBu'), {}),
                           (('density', 'kamae'), {})],
-             "set_font": [(({'family': 'sans-serif', 'style': 'italic',
-                             'weight': 'bold', 'size': 24}, ), {})],
+             "set_font": [((OrderedDict({'family': 'sans-serif',
+                                         'style': 'italic',
+                                         'weight': 'bold', 'size': 24}), ),
+                           {})],
              "set_log": [(('density', False), {})],
              "set_window_size": [((7.0, ), {})],
              "set_zlim": [(('density', 1e-25, 1e-23), {}),


https://bitbucket.org/yt_analysis/yt/commits/b95a5c3152c8/
Changeset:   b95a5c3152c8
Branch:      stable
User:        ngoldbaum
Date:        2015-08-19 21:01:11+00:00
Summary:     Backporting PR #1710 https://bitbucket.org/yt_analysis/yt/pull-requests/1710
Affected #:  2 files

diff -r a7c4b9566c9e65046c892ffb54d13e462dde6181 -r b95a5c3152c860a05775fb79eb4c9b2f5aa7e1a5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -683,6 +683,7 @@
         self.unit_registry.add("code_velocity", 1.0, dimensions.velocity)
         self.unit_registry.add("code_metallicity", 1.0,
                                dimensions.dimensionless)
+        self.unit_registry.add("a", 1.0, dimensions.dimensionless)
 
     def set_units(self):
         """
@@ -700,6 +701,7 @@
                 self.unit_registry.add(new_unit, self.unit_registry.lut[my_unit][0] /
                                        (1 + self.current_redshift),
                                        length, "\\rm{%s}/(1+z)" % my_unit)
+            self.unit_registry.modify('a', 1/(1+self.current_redshift))
 
         self.set_code_units()
 

diff -r a7c4b9566c9e65046c892ffb54d13e462dde6181 -r b95a5c3152c860a05775fb79eb4c9b2f5aa7e1a5 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -108,6 +108,14 @@
             raise RuntimeError("units_override is not supported for GadgetDataset. "+
                                "Use unit_base instead.")
         super(GadgetDataset, self).__init__(filename, dataset_type)
+        if self.cosmological_simulation:
+            self.time_unit.convert_to_units('s/h')
+            self.length_unit.convert_to_units('kpccm/h')
+            self.mass_unit.convert_to_units('g/h')
+        else:
+            self.time_unit.convert_to_units('s')
+            self.length_unit.convert_to_units('kpc')
+            self.mass_unit.convert_to_units('Msun')
 
     def _setup_binary_spec(self, spec, spec_dict):
         if isinstance(spec, str):
@@ -218,12 +226,21 @@
         self.length_unit = self.quan(length_unit[0], length_unit[1])
 
         unit_base = self._unit_base or {}
+
+        if self.cosmological_simulation:
+            # see http://www.mpa-garching.mpg.de/gadget/gadget-list/0113.html
+            # for why we need to include a factor of square root of the
+            # scale factor
+            vel_units = "cm/s * sqrt(a)"
+        else:
+            vel_units = "cm/s"
+
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
         elif "UnitVelocity_in_cm_per_s" in unit_base:
-            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
+            velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], vel_units)
         else:
-            velocity_unit = (1e5, "cm/s")
+            velocity_unit = (1e5, vel_units)
         velocity_unit = _fix_unit_ordering(velocity_unit)
         self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
 
@@ -238,10 +255,26 @@
                 mass_unit = (unit_base["UnitMass_in_g"], "g/h")
         else:
             # Sane default
-            mass_unit = (1.0, "1e10*Msun/h")
+            mass_unit = (1e10, "Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
         self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
-        self.time_unit = self.length_unit / self.velocity_unit
+        if self.cosmological_simulation:
+            # self.velocity_unit is the unit to rescale on-disk velocities, The
+            # actual internal velocity unit is really in comoving units
+            # since the time unit is derived from the internal velocity unit, we
+            # infer the internal velocity unit here and name it vel_unit
+            #
+            # see http://www.mpa-garching.mpg.de/gadget/gadget-list/0113.html
+            if 'velocity' in unit_base:
+                vel_unit = unit_base['velocity']
+            elif "UnitVelocity_in_cm_per_s" in unit_base:
+                vel_unit = (unit_base['UnitVelocity_in_cm_per_s'], 'cmcm/s')
+            else:
+                vel_unit = (1, 'kmcm/s')
+            vel_unit = self.quan(*vel_unit)
+        else:
+            vel_unit = self.velocity_unit
+        self.time_unit = self.length_unit / vel_unit
 
     @staticmethod
     def _validate_header(filename):
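
With 'a' registered as a dimensionless unit pinned to 1/(1+z), comoving unit
strings such as "cm/s * sqrt(a)" parse and convert like any other unit. A
hedged sketch, assuming a cosmological Gadget snapshot at a hypothetical path:

    import yt

    ds = yt.load("snapshot_033/snap_033.0.hdf5")  # hypothetical path

    # 'a' behaves as an ordinary dimensionless unit whose value is
    # the scale factor of this snapshot, i.e. 1/(1+z).
    print(ds.quan(1.0, "a").in_units(""))
    print(ds.quan(1e5, "cm/s * sqrt(a)").in_units("km/s"))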


https://bitbucket.org/yt_analysis/yt/commits/c0fc8c29ffc0/
Changeset:   c0fc8c29ffc0
Branch:      stable
User:        smumford
Date:        2015-08-26 10:54:39+00:00
Summary:     small doc theme css tweak
Affected #:  1 file

diff -r b95a5c3152c860a05775fb79eb4c9b2f5aa7e1a5 -r c0fc8c29ffc00c2e4fe7941e69ea456c49aaa193 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -109,3 +109,8 @@
 .table {
     width: 50%
 }
+
+
+.navbar-form.navbar-right:last-child {
+    margin-right: -20px;
+}


https://bitbucket.org/yt_analysis/yt/commits/30265878c27b/
Changeset:   30265878c27b
Branch:      stable
User:        xarthisius
Date:        2015-08-26 17:56:11+00:00
Summary:     Add proper start value for FNV hash
Affected #:  1 file

diff -r c0fc8c29ffc00c2e4fe7941e69ea456c49aaa193 -r 30265878c27bfcdf879e0d6bd292ac28bcbc0d43 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -607,7 +607,7 @@
     def __hash__(self):
         # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
         # http://www.eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
-        cdef np.int64_t hash_val = 0
+        cdef np.int64_t hash_val = 2166136261
         for v in self._hash_vals() + self._base_hash():
             # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
             hash_val = (hash_val * 16777619) ^ hash(v)
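
2166136261 is the 32-bit FNV-1 offset basis; starting from zero degrades the
dispersion the scheme is designed for. For reference, a pure-Python sketch of
canonical FNV-1 over bytes (the selector version above folds Python hash()
values into an int64 instead):

    def fnv1_32(data):
        """Canonical 32-bit FNV-1: multiply by the prime, XOR in each byte."""
        hval = 2166136261                           # FNV-1 offset basis
        for byte in bytearray(data):
            hval = (hval * 16777619) & 0xffffffff   # FNV prime, mod 2**32
            hval ^= byte
        return hval

    print(hex(fnv1_32(b"yt")))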


https://bitbucket.org/yt_analysis/yt/commits/1cbcfd369c62/
Changeset:   1cbcfd369c62
Branch:      stable
User:        ngoldbaum
Date:        2015-08-26 18:06:00+00:00
Summary:     Use truediv instead of div in units unit tests for py3 compat
Affected #:  1 file

diff -r 30265878c27bfcdf879e0d6bd292ac28bcbc0d43 -r 1cbcfd369c62aff9f43c5d92733bb5da505039ea yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -449,4 +449,4 @@
     u2 = Unit('degF')
 
     assert_raises(InvalidUnitOperation, operator.mul, u1, u2)
-    assert_raises(InvalidUnitOperation, operator.div, u1, u2)
+    assert_raises(InvalidUnitOperation, operator.truediv, u1, u2)
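
operator.div only exists on Python 2, where / defaults to classic division;
Python 3 removed it because / is always true division there. operator.truediv
is present on both, so the assertion now runs under either interpreter:

    import operator

    print(operator.truediv(1, 2))   # 0.5 on Python 2 and 3 alike
    # operator.div(1, 2) would raise AttributeError on Python 3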


https://bitbucket.org/yt_analysis/yt/commits/205102d5eb4a/
Changeset:   205102d5eb4a
Branch:      stable
User:        xarthisius
Date:        2015-08-26 21:00:13+00:00
Summary:     [tests] sort dictionary to ensure that the test name is reproducible under py3
Affected #:  1 file

diff -r 1cbcfd369c62aff9f43c5d92733bb5da505039ea -r 205102d5eb4ad4f716ae9c7c8fb6255b32d88592 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -67,6 +67,9 @@
 M7 = "DD0010/moving7_0010"
 WT = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
 
+FPROPS = {'family': 'sans-serif', 'style': 'italic',
+          'weight': 'bold', 'size': 24}
+
 ATTR_ARGS = {"pan": [(((0.1, 0.1), ), {})],
              "pan_rel": [(((0.1, 0.1), ), {})],
              "set_axes_unit": [(("kpc", ), {}),
@@ -78,9 +81,7 @@
              "set_center": [(((0.4, 0.3), ), {})],
              "set_cmap": [(('density', 'RdBu'), {}),
                           (('density', 'kamae'), {})],
-             "set_font": [((OrderedDict({'family': 'sans-serif',
-                                         'style': 'italic',
-                                         'weight': 'bold', 'size': 24}), ),
+             "set_font": [((OrderedDict(sorted(FPROPS.items(), key=lambda t: t[0])), ),
                            {})],
              "set_log": [(('density', False), {})],
              "set_window_size": [((7.0, ), {})],


https://bitbucket.org/yt_analysis/yt/commits/906a63f0edd1/
Changeset:   906a63f0edd1
Branch:      stable
User:        jzuhone
Date:        2015-08-26 21:59:58+00:00
Summary:     Backporting PR #1726 https://bitbucket.org/yt_analysis/yt/pull-requests/1726
Affected #:  3 files

diff -r 205102d5eb4ad4f716ae9c7c8fb6255b32d88592 -r 906a63f0edd1dd8cb1380cad7371553e1633bb6d doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -476,6 +476,7 @@
 .. code:: python
 
    import yt
+   import numpy as np
    from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
@@ -548,14 +549,15 @@
                                0.01, 20.0, 20000)
    abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
-   thermal_model = ThermalPhotonModel(apec_model)
+   thermal_model = ThermalPhotonModel(apec_model, photons_per_chunk=40000000)
    photons = PhotonList.from_scratch(sphere, redshift, A,
                                      exp_time, thermal_model, center="c")
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
                                     responses=["sim_arf.fits","sim_rmf.fits"], 
-                                    absorb_model=abs_model)
+                                    absorb_model=abs_model,
+                                    north_vector=[0.0,1.0,0.0])
 
    events.write_fits_image("img.fits", clobber=True)
 

diff -r 205102d5eb4ad4f716ae9c7c8fb6255b32d88592 -r 906a63f0edd1dd8cb1380cad7371553e1633bb6d yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -137,7 +137,7 @@
 
             idxs = np.argsort(kT)
 
-            kT_bins = np.linspace(kT_min, max(my_kT_max, kT_max), num=n_kT+1)
+            kT_bins = np.linspace(kT_min, max(my_kT_max.v, kT_max), num=n_kT+1)
             dkT = kT_bins[1]-kT_bins[0]
             kT_idxs = np.digitize(kT[idxs], kT_bins)
             kT_idxs = np.minimum(np.maximum(1, kT_idxs), n_kT) - 1

diff -r 205102d5eb4ad4f716ae9c7c8fb6255b32d88592 -r 906a63f0edd1dd8cb1380cad7371553e1633bb6d yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -26,7 +26,8 @@
 #-----------------------------------------------------------------------------
 from yt.extern.six import string_types
 import numpy as np
-from yt.funcs import *
+from yt.funcs import \
+    mylog, get_pbar, iterable, ensure_list
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
@@ -879,21 +880,24 @@
         f = h5py.File(h5file, "r")
 
         parameters["ExposureTime"] = YTQuantity(f["/exp_time"].value, "s")
-        parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
+        if isinstance(f["/area"].value, (string_types, bytes)):
+            parameters["Area"] = f["/area"].value.decode("utf8")
+        else:
+            parameters["Area"] = YTQuantity(f["/area"].value, "cm**2")
         parameters["Redshift"] = f["/redshift"].value
         parameters["AngularDiameterDistance"] = YTQuantity(f["/d_a"].value, "Mpc")
         if "rmf" in f:
-            parameters["RMF"] = f["/rmf"].value
+            parameters["RMF"] = f["/rmf"].value.decode("utf8")
         if "arf" in f:
-            parameters["ARF"] = f["/arf"].value
+            parameters["ARF"] = f["/arf"].value.decode("utf8")
         if "channel_type" in f:
-            parameters["ChannelType"] = f["/channel_type"].value
+            parameters["ChannelType"] = f["/channel_type"].value.decode("utf8")
         if "mission" in f:
-            parameters["Mission"] = f["/mission"].value
+            parameters["Mission"] = f["/mission"].value.decode("utf8")
         if "telescope" in f:
-            parameters["Telescope"] = f["/telescope"].value
+            parameters["Telescope"] = f["/telescope"].value.decode("utf8")
         if "instrument" in f:
-            parameters["Instrument"] = f["/instrument"].value
+            parameters["Instrument"] = f["/instrument"].value.decode("utf8")
 
         events["xpix"] = f["/xpix"][:]
         events["ypix"] = f["/ypix"][:]
@@ -923,7 +927,10 @@
         parameters = {}
 
         parameters["ExposureTime"] = YTQuantity(tblhdu.header["EXPOSURE"], "s")
-        parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
+        if isinstance(tblhdu.header["AREA"], (string_types, bytes)):
+            parameters["Area"] = tblhdu.header["AREA"]
+        else:
+            parameters["Area"] = YTQuantity(tblhdu.header["AREA"], "cm**2")
         parameters["Redshift"] = tblhdu.header["REDSHIFT"]
         parameters["AngularDiameterDistance"] = YTQuantity(tblhdu.header["D_A"], "Mpc")
         if "RMF" in tblhdu.header:

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


