[yt-svn] commit/yt: 9 new changesets

commits-noreply at bitbucket.org
Wed Mar 23 13:50:48 PDT 2016


9 new commits in yt:

Changeset:   e1d4435ed014
Branch:      yt
User:        chummels
Date:        2015-11-30 07:13:11+00:00
Summary:     Adding observing_redshift kwarg to make_spectrum()
Affected #:  1 file

diff -r 03a54b627189e63eaee9f2bc1d4a36c3ab6b9637 -r e1d4435ed014f2d6205d25b8166c3fd22af333bc yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -118,7 +118,8 @@
     def make_spectrum(self, input_file, output_file=None,
                       line_list_file=None, output_absorbers_file=None,
                       use_peculiar_velocity=True, 
-                      subgrid_resolution=10, njobs="auto"):
+                      subgrid_resolution=10, observing_redshift=0., 
+                      njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -153,6 +154,10 @@
            but is more expensive.  A value of 10 yields accuracy to the 4th 
            significant digit in tau.
            Default: 10
+        observing_redshift : optional, float
+           This is the redshift at which the observer is observing
+           the absorption spectrum.  
+           Default: 0
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
@@ -179,6 +184,11 @@
             input_fields.append('redshift_eff')
             field_units["velocity_los"] = "cm/s"
             field_units["redshift_eff"] = ""
+        if observing_redshift != 0.:
+            input_fields.append('redshift_dopp')
+            input_fields.append('redshift')
+            field_units["redshift_dopp"] = ""
+            field_units["redshift"] = ""
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -219,7 +229,8 @@
         del field_data
         return (self.lambda_field, self.flux_field)
 
-    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity):
+    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
+                                  observing_redshift):
         """
         Add continuum features to the spectrum.
         """

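A minimal usage sketch of the new kwarg (the AbsorptionSpectrum
constructor and the make_spectrum() signature follow the diff above;
the ray filename and the Lyman-alpha line parameters are illustrative):

    from yt.analysis_modules.absorption_spectrum.api import \
        AbsorptionSpectrum

    sp = AbsorptionSpectrum(900.0, 1800.0, n_lambda=10000)  # angstroms
    sp.add_line("Lya", "H_number_density", 1215.6700,
                f_value=0.4164, gamma=6.265e8, atomic_mass=1.00794)

    # Observer at z = 0.5 rather than the default z = 0; each line lands
    # at (1 + z12) times its rest wavelength, with
    # 1 + z12 = (1 + z2) / (1 + z1).
    wavelength, flux = sp.make_spectrum("ray.h5",
                                        output_file="spectrum.h5",
                                        observing_redshift=0.5)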

Changeset:   4be2c32b5c0a
Branch:      yt
User:        chummels
Date:        2015-12-05 18:28:45+00:00
Summary:     Adding observing_redshift to AbsorptionSpectrum analysis module.
Affected #:  1 file

diff -r e1d4435ed014f2d6205d25b8166c3fd22af333bc -r 4be2c32b5c0afe1e64279728e0ed788cf5cb60fe yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -118,7 +118,7 @@
     def make_spectrum(self, input_file, output_file=None,
                       line_list_file=None, output_absorbers_file=None,
                       use_peculiar_velocity=True, 
-                      subgrid_resolution=10, observing_redshift=0., 
+                      subgrid_resolution=10, observing_redshift=0.,
                       njobs="auto"):
         """
         Make spectrum from ray data using the line list.
@@ -140,7 +140,9 @@
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
-           if True, include line of sight velocity for shifting lines.
+           if True, include peculiar velocity for calculating doppler redshift
+           to shift lines.  Requires similar flag to be set in LightRay 
+           generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal
@@ -186,9 +188,7 @@
             field_units["redshift_eff"] = ""
         if observing_redshift != 0.:
             input_fields.append('redshift_dopp')
-            input_fields.append('redshift')
             field_units["redshift_dopp"] = ""
-            field_units["redshift"] = ""
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -210,8 +210,10 @@
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
+                                    observing_redshift=observing_redshift,
                                     njobs=njobs)
-        self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
+        self._add_continua_to_spectrum(field_data, use_peculiar_velocity,
+                                       observing_redshift=observing_redshift)
 
         self.flux_field = np.exp(-self.tau_field)
 
@@ -230,7 +232,7 @@
         return (self.lambda_field, self.flux_field)
 
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
-                                  observing_redshift):
+                                  observing_redshift=0.):
         """
         Add continuum features to the spectrum.
         """
@@ -239,11 +241,32 @@
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
+            if observing_redshift == 0.:
+                # This is already assumed in the generation of the LightRay
+                redshift = field_data['redshift']
+                if use_peculiar_velocity:
+                    redshift_eff = field_data['redshift_eff']
+            else:
+                # The intermediate redshift that is seen by an observer
+                # at a redshift other than z=0 is z12, where z1 is the 
+                # observing redshift and z2 is the emitted photon's redshift
+                # Hogg (2000) eq. 13:
+                # 1 + z12 = (1 + z2) / (1 + z1)
+                redshift = ((1 + field_data['redshift']) / \
+                            (1 + observing_redshift)) - 1.
+                # Combining cosmological redshift and doppler redshift 
+                # into an effective redshift is found in Peacock's 
+                # Cosmological Physics eqn 3.75:
+                # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
+                if use_peculiar_velocity:
+                    redshift_eff = ((1 + redshift) * \
+                                    (1 + field_data['redshift_dopp'])) - 1.
+
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+                delta_lambda = continuum['wavelength'] * redshift_eff
             else:
-                delta_lambda = continuum['wavelength'] * field_data['redshift']
+                delta_lambda = continuum['wavelength'] * redshift
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -267,7 +290,7 @@
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
                                output_absorbers_file, subgrid_resolution=10, 
-                               njobs=-1):
+                               observing_redshift=0., njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
@@ -281,13 +304,34 @@
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
 
+            if observing_redshift == 0.:
+                # This is already assumed in the generation of the LightRay
+                redshift = field_data['redshift']
+                if use_peculiar_velocity:
+                    redshift_eff = field_data['redshift_eff']
+            else:
+                # The intermediate redshift that is seen by an observer
+                # at a redshift other than z=0 is z12, where z1 is the 
+                # observing redshift and z2 is the emitted photon's redshift
+                # Hogg (2000) eq. 13:
+                # 1 + z12 = (1 + z2) / (1 + z1)
+                redshift = ((1 + field_data['redshift']) / \
+                            (1 + observing_redshift)) - 1.
+                # Combining cosmological redshift and doppler redshift 
+                # into an effective redshift is found in Peacock's 
+                # Cosmological Physics eqn 3.75:
+                # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
+                if use_peculiar_velocity:
+                    redshift_eff = ((1 + redshift) * \
+                                    (1 + field_data['redshift_dopp'])) - 1.
+
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
             # wavelength to the observed wavelength of the transition 
             if use_peculiar_velocity:
-                delta_lambda = line['wavelength'] * field_data['redshift_eff']
+                delta_lambda = line['wavelength'] * redshift_eff
             else:
-                delta_lambda = line['wavelength'] * field_data['redshift']
+                delta_lambda = line['wavelength'] * redshift
             # lambda_obs is central wavelength of line after redshift
             lambda_obs = line['wavelength'] + delta_lambda
             # bin index in lambda_field of central wavelength of line after z
@@ -413,8 +457,8 @@
                                                 'wavelength': (lambda_0 + dlambda[i]),
                                                 'column_density': column_density[i],
                                                 'b_thermal': thermal_b[i],
-                                                'redshift': field_data['redshift'][i],
-                                                'redshift_eff': field_data['redshift_eff'][i],
+                                                'redshift': redshift[i],
+                                                'redshift_eff': redshift_eff[i],
                                                 'v_pec': peculiar_velocity})
                 pbar.update(i)
             pbar.finish()

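The redshift composition added in both hunks can be read in isolation;
a short sketch (plain NumPy, names chosen here for illustration):

    import numpy as np

    def shift_to_observer(z_cosmo, z_obs=0.0, z_dopp=None):
        # Hogg (2000) eq. 13: an emitter at z2 seen by an observer at
        # z1 appears at 1 + z12 = (1 + z2) / (1 + z1).
        redshift = (1.0 + np.asarray(z_cosmo)) / (1.0 + z_obs) - 1.0
        if z_dopp is None:
            return redshift
        # Peacock, Cosmological Physics eq. 3.75: cosmological and
        # doppler redshifts combine as
        # 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp).
        return (1.0 + redshift) * (1.0 + np.asarray(z_dopp)) - 1.0

    # An emitter at z = 1 seen from z = 0.5 appears at z12 = 1/3:
    assert np.isclose(shift_to_observer(1.0, z_obs=0.5), 1.0 / 3.0)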

Changeset:   02417c5a8f7f
Branch:      yt
User:        chummels
Date:        2016-03-15 23:43:42+00:00
Summary:     Cleaning up code.
Affected #:  1 file

diff -r 4be2c32b5c0afe1e64279728e0ed788cf5cb60fe -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -52,7 +52,7 @@
     def __init__(self, lambda_min, lambda_max, n_lambda):
         self.n_lambda = n_lambda
         # lambda, flux, and tau are wavelength, flux, and optical depth
-        self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max, 
+        self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max,
                                     n_lambda), "angstrom")
         self.tau_field = None
         self.flux_field = None
@@ -117,7 +117,7 @@
 
     def make_spectrum(self, input_file, output_file=None,
                       line_list_file=None, output_absorbers_file=None,
-                      use_peculiar_velocity=True, 
+                      use_peculiar_velocity=True,
                       subgrid_resolution=10, observing_redshift=0.,
                       njobs="auto"):
         """
@@ -129,36 +129,37 @@
         input_file : string or dataset
            path to input ray data or a loaded ray dataset
         output_file : optional, string
-           Option to save a file containing the wavelength, flux, and optical 
-           depth fields.  File formats are chosen based on the filename extension.  
-           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+           Option to save a file containing the wavelength, flux, and optical
+           depth fields.  File formats are chosen based on the filename
+           extension. ``.h5`` for hdf5, ``.fits`` for fits, and everything
+           else is ASCII.
            Default: None
         output_absorbers_file : optional, string
-           Option to save a text file containing all of the absorbers and 
+           Option to save a text file containing all of the absorbers and
            corresponding wavelength and redshift information.
            For parallel jobs, combining the lines lists can be slow so it
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
            if True, include peculiar velocity for calculating doppler redshift
-           to shift lines.  Requires similar flag to be set in LightRay 
+           to shift lines.  Requires similar flag to be set in LightRay
            generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal
            width is less than the spectral bin width), the voigt profile of
-           the line is deposited into an array of virtual wavelength bins at 
-           higher resolution.  The optical depth from these virtual bins is 
-           integrated and then added to the coarser spectral wavelength bin.  
-           The subgrid_resolution value determines the ratio between the 
-           thermal width and the bin width of the virtual bins.  Increasing 
-           this value yields smaller virtual bins, which increases accuracy, 
-           but is more expensive.  A value of 10 yields accuracy to the 4th 
+           the line is deposited into an array of virtual wavelength bins at
+           higher resolution.  The optical depth from these virtual bins is
+           integrated and then added to the coarser spectral wavelength bin.
+           The subgrid_resolution value determines the ratio between the
+           thermal width and the bin width of the virtual bins.  Increasing
+           this value yields smaller virtual bins, which increases accuracy,
+           but is more expensive.  A value of 10 yields accuracy to the 4th
            significant digit in tau.
            Default: 10
         observing_redshift : optional, float
            This is the redshift at which the observer is observing
-           the absorption spectrum.  
+           the absorption spectrum.
            Default: 0
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
@@ -248,14 +249,14 @@
                     redshift_eff = field_data['redshift_eff']
             else:
                 # The intermediate redshift that is seen by an observer
-                # at a redshift other than z=0 is z12, where z1 is the 
+                # at a redshift other than z=0 is z12, where z1 is the
                 # observing redshift and z2 is the emitted photon's redshift
                 # Hogg (2000) eq. 13:
                 # 1 + z12 = (1 + z2) / (1 + z1)
                 redshift = ((1 + field_data['redshift']) / \
                             (1 + observing_redshift)) - 1.
-                # Combining cosmological redshift and doppler redshift 
-                # into an effective redshift is found in Peacock's 
+                # Combining cosmological redshift and doppler redshift
+                # into an effective redshift is found in Peacock's
                 # Cosmological Physics eqn 3.75:
                 # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
                 if use_peculiar_velocity:
@@ -289,13 +290,13 @@
             pbar.finish()
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
-                               output_absorbers_file, subgrid_resolution=10, 
+                               output_absorbers_file, subgrid_resolution=10,
                                observing_redshift=0., njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
-        # Widen wavelength window until optical depth falls below this tau 
-        # value at the ends to assure that the wings of a line have been 
+        # Widen wavelength window until optical depth falls below this tau
+        # value at the ends to assure that the wings of a line have been
         # fully resolved.
         min_tau = 1e-3
 
@@ -311,14 +312,14 @@
                     redshift_eff = field_data['redshift_eff']
             else:
                 # The intermediate redshift that is seen by an observer
-                # at a redshift other than z=0 is z12, where z1 is the 
+                # at a redshift other than z=0 is z12, where z1 is the
                 # observing redshift and z2 is the emitted photon's redshift
                 # Hogg (2000) eq. 13:
                 # 1 + z12 = (1 + z2) / (1 + z1)
                 redshift = ((1 + field_data['redshift']) / \
                             (1 + observing_redshift)) - 1.
-                # Combining cosmological redshift and doppler redshift 
-                # into an effective redshift is found in Peacock's 
+                # Combining cosmological redshift and doppler redshift
+                # into an effective redshift is found in Peacock's
                 # Cosmological Physics eqn 3.75:
                 # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
                 if use_peculiar_velocity:
@@ -327,7 +328,7 @@
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
-            # wavelength to the observed wavelength of the transition 
+            # wavelength to the observed wavelength of the transition
             if use_peculiar_velocity:
                 delta_lambda = line['wavelength'] * redshift_eff
             else:
@@ -343,7 +344,7 @@
                                   line['atomic_mass'])
 
             # the actual thermal width of the lines
-            thermal_width = (lambda_obs * thermal_b / 
+            thermal_width = (lambda_obs * thermal_b /
                              speed_of_light_cgs).convert_to_units("angstrom")
 
             # Sanitize units for faster runtime of the tau_profile machinery.
@@ -356,25 +357,25 @@
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
-            # the spectral bin size).  Here, we create virtual wavelength bins 
-            # small enough in width to well resolve each line, deposit the 
-            # voigt profile into them, then numerically integrate their tau 
-            # values and sum them to redeposit them into the actual spectral 
+            # the spectral bin size).  Here, we create virtual wavelength bins
+            # small enough in width to well resolve each line, deposit the
+            # voigt profile into them, then numerically integrate their tau
+            # values and sum them to redeposit them into the actual spectral
             # bins.
 
             # virtual bins (vbins) will be:
             # 1) <= the bin_width; assures at least as good as spectral bins
             # 2) <= 1/10th the thermal width; assures resolving voigt profiles
             #   (actually 1/subgrid_resolution value, default is 1/10)
-            # 3) a bin width will be divisible by vbin_width times a power of 
+            # 3) a bin width will be divisible by vbin_width times a power of
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
-            resolution = thermal_width / self.bin_width 
+            resolution = thermal_width / self.bin_width
             vbin_width = self.bin_width / \
                          10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
             vbin_width = vbin_width.in_units('angstrom').d
 
-            # the virtual window into which the line is deposited initially 
+            # the virtual window into which the line is deposited initially
             # spans a region of 5 thermal_widths, but this may expand
             n_vbins = np.ceil(5*thermal_width.d/vbin_width)
             vbin_window_width = n_vbins*vbin_width
@@ -382,7 +383,7 @@
             if (thermal_width < self.bin_width).any():
                 mylog.info(("%d out of %d line components will be " + \
                             "deposited as unresolved lines.") %
-                           ((thermal_width < self.bin_width).sum(), 
+                           ((thermal_width < self.bin_width).sum(),
                             thermal_width.size))
 
             valid_lines = np.arange(len(thermal_width))
@@ -390,7 +391,7 @@
                             (line['label'], line['wavelength']),
                             thermal_width.size)
 
-            # for a given transition, step through each location in the 
+            # for a given transition, step through each location in the
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(valid_lines, njobs=-1):
                 my_vbin_window_width = vbin_window_width[i]
@@ -400,7 +401,7 @@
                 while True:
                     vbins = \
                         np.linspace(lambda_1[i]-my_vbin_window_width/2.,
-                                    lambda_1[i]+my_vbin_window_width/2., 
+                                    lambda_1[i]+my_vbin_window_width/2.,
                                     my_n_vbins, endpoint=False)
 
                     vbins, vtau = \
@@ -410,8 +411,8 @@
                             lambda_bins=vbins)
 
                     # If tau has not dropped below min tau threshold by the
-                    # edges (ie the wings), then widen the wavelength 
-                    # window and repeat process. 
+                    # edges (ie the wings), then widen the wavelength
+                    # window and repeat process.
                     if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
                     my_vbin_window_width *= 2
@@ -427,10 +428,10 @@
 
                 # run digitize to identify which vbins are deposited into which
                 # global lambda bins.
-                # shift global lambda bins over by half a bin width; 
-                # this has the effect of assuring np.digitize will place 
+                # shift global lambda bins over by half a bin width;
+                # this has the effect of assuring np.digitize will place
                 # the vbins in the closest bin center.
-                binned = np.digitize(vbins, 
+                binned = np.digitize(vbins,
                                      self.lambda_field[left_index:right_index] \
                                      + (0.5 * self.bin_width))
 
@@ -443,7 +444,7 @@
                 self.tau_field[left_index:right_index] += EW
 
                 # write out absorbers to file if the column density of
-                # an absorber is greater than the specified "label_threshold" 
+                # an absorber is greater than the specified "label_threshold"
                 # of that absorption line
                 if output_absorbers_file and \
                    line['label_threshold'] is not None and \

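The vbin_width rule in the subgrid hunk above is easiest to check with
numbers; a sketch with illustrative values:

    import numpy as np

    bin_width = 0.1           # angstrom; spectral bin width
    thermal_width = 0.004     # angstrom; an unresolved line
    subgrid_resolution = 10

    # resolution < 1 means the line is narrower than a spectral bin.
    resolution = thermal_width / bin_width                     # 0.04
    # Smallest power of ten such that the virtual bins resolve the
    # thermal width by at least subgrid_resolution, while bin_width
    # stays an exact multiple of vbin_width (criteria 1-3 in the
    # comments above).
    exponent = np.ceil(np.log10(subgrid_resolution / resolution))
    vbin_width = bin_width / 10**exponent.clip(0, np.inf)

    print(vbin_width)   # 1e-4 angstrom -> 1000 virtual bins per bin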

Changeset:   8ac0f630b3f0
Branch:      yt
User:        chummels
Date:        2016-03-15 23:54:32+00:00
Summary:     Merging.
Affected #:  463 files

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -22,4 +22,21 @@
 ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
-fbogert = fbogert at ucsc.edu
\ No newline at end of file
+fbogert = fbogert at ucsc.edu
+bwoshea = oshea at msu.edu
+mornkr at slac.stanford.edu = me at jihoonkim.org
+kbarrow = kssbarrow at gatech.edu
+kssbarrow at gmail.com = kssbarrow at gatech.edu
+kassbarrow at gmail.com = kssbarrow at gatech.edu
+antoine.strugarek at cea.fr = strugarek at astro.umontreal.ca
+rosen at ucolick.org = alrosen at ucsc.edu
+jzuhone = jzuhone at gmail.com
+karraki at nmsu.edu = karraki at gmail.com
+hckr at eml.cc = astrohckr at gmail.com
+julian3 at illinois.edu = astrohckr at gmail.com
+cosmosquark = bthompson2090 at gmail.com
+chris.m.malone at lanl.gov = chris.m.malone at gmail.com
+jnaiman at ucolick.org = jnaiman
+migueld.deval = miguel at archlinux.net
+slevy at ncsa.illinois.edu = salevy at illinois.edu
+malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -28,31 +28,34 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
-yt/utilities/lib/CICDeposit.c
-yt/utilities/lib/ContourFinding.c
-yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/bounding_volume_hierarchy.c
+yt/utilities/lib/contour_finding.c
+yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c
-yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
 yt/utilities/lib/image_utilities.c
-yt/utilities/lib/Interpolators.c
+yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
 yt/utilities/lib/line_integral_convolution.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_intersection.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
-yt/utilities/lib/Octree.c
-yt/utilities/lib/GridTree.c
+yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/particle_mesh_operations.c
 yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
-yt/utilities/lib/PointsInVolume.c
-yt/utilities/lib/QuadTree.c
-yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/points_in_volume.c
+yt/utilities/lib/quad_tree.c
+yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
-yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -795,8 +795,8 @@
    rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
    rather than ``SpecialGrid.__init__()``.
  * Docstrings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
-   fiducial example of a docstring.
+   that occur on an object.  See :ref:`docstrings` below for a fiducial example
+   of a docstring.
  * Use only one top-level import per line. Unless there is a good reason not to,
    imports should happen at the top of the file, after the copyright blurb.
  * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
@@ -843,7 +843,7 @@
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
-.. _docstrings
+.. _docstrings:
 
 Docstrings
 ----------

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -4,20 +4,30 @@
                 Tom Abel (tabel at stanford.edu)
                 Gabriel Altay (gabriel.altay at gmail.com)
                 Kenza Arraki (karraki at gmail.com)
+                Kirk Barrow (kssbarrow at gatech.edu)
+                Ricarda Beckmann (Ricarda.Beckmann at astro.ox.ac.uk)
                 Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
                 Miguel de Val-Borro (miguel.deval at gmail.com)
+                Bili Dong (qobilidop at gmail.com)
+                Nicholas Earl (nchlsearl at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
+                Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
+                Adam Ginsburg (keflavich at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
+                William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
+                Anni Järvenpää (anni.jarvenpaa at gmail.com)
+                Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
@@ -25,11 +35,15 @@
                 Kacper Kowalik (xarthisius.kk at gmail.com)
                 Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
+                Meagan Lang (langmm.astro at gmail.com)
+                Doris Lee (dorislee at berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
+                Stuart Levy (salevy at illinois.edu)
                 Yuan Li (yuan at astro.columbia.edu)
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
+                Jonah Miller (jonah.maxwell.miller at gmail.com)
                 Chris Moody (cemoody at ucsc.edu)
                 Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
@@ -44,6 +58,7 @@
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
+                Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
@@ -59,6 +74,7 @@
                 Ji Suoqing (jisuoqing at gmail.com)
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
+                Robert Thompson (rthompsonj at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 clean.sh
--- a/clean.sh
+++ b/clean.sh
@@ -1,4 +1,1 @@
-find . -name "*.so" -exec rm -v {} \;
-find . -name "*.pyc" -exec rm -v {} \;
-find . -name "__config__.py" -exec rm -v {} \;
-rm -rvf build dist
+hg --config extensions.purge= purge --all yt

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import errno
-import os
-import shutil
-import string
-import re
-import tempfile
-import uuid
-from sphinx.util.compat import Directive
-from docutils import nodes
-from docutils.parsers.rst import directives
-from IPython.config import Config
-from IPython.nbconvert import html, python
-from IPython.nbformat import current as nbformat
-from runipy.notebook_runner import NotebookRunner, NotebookError
-
-class NotebookDirective(Directive):
-    """Insert an evaluated notebook into a document
-
-    This uses runipy and nbconvert to transform a path to an unevaluated notebook
-    into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 1
-    optional_arguments = 1
-    option_spec = {'skip_exceptions': directives.flag}
-    final_argument_whitespace = True
-
-    def run(self): # check if there are spaces in the notebook name
-        nb_path = self.arguments[0]
-        if ' ' in nb_path: raise ValueError(
-            "Due to issues with docutils stripping spaces from links, white "
-            "space is not allowed in notebook filenames '{0}'".format(nb_path))
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        # get path to notebook
-        nb_filename = self.arguments[0]
-        nb_basename = os.path.basename(nb_filename)
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
-
-        # Move files around.
-        rel_dir = os.path.relpath(rst_dir, setup.confdir)
-        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
-        dest_path = os.path.join(dest_dir, nb_basename)
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Ensure desination build directory exists
-        thread_safe_mkdir(os.path.dirname(dest_path))
-
-        # Copy unevaluated notebook
-        shutil.copyfile(nb_abs_path, dest_path)
-
-        # Construct paths to versions getting copied over
-        dest_path_eval = string.replace(dest_path, '.ipynb', '_evaluated.ipynb')
-        dest_path_script = string.replace(dest_path, '.ipynb', '.py')
-        rel_path_eval = string.replace(nb_basename, '.ipynb', '_evaluated.ipynb')
-        rel_path_script = string.replace(nb_basename, '.ipynb', '.py')
-
-        # Create python script vesion
-        script_text = nb_to_python(nb_abs_path)
-        f = open(dest_path_script, 'w')
-        f.write(script_text.encode('utf8'))
-        f.close()
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        ret = evaluate_notebook(
-            nb_abs_path, dest_path_eval, skip_exceptions=skip_exceptions)
-
-        try:
-            evaluated_text, resources = ret
-            evaluated_text = write_notebook_output(
-                resources, image_dir, image_rel_dir, evaluated_text)
-        except ValueError:
-            # This happens when a notebook raises an unhandled exception
-            evaluated_text = ret
-
-        # Create link to notebook and script files
-        link_rst = "(" + \
-                   formatted_link(nb_basename) + "; " + \
-                   formatted_link(rel_path_eval) + "; " + \
-                   formatted_link(rel_path_script) + \
-                   ")"
-
-        self.state_machine.insert_input([link_rst], rst_file)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # add dependency
-        self.state.document.settings.record_dependencies.add(nb_abs_path)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-
-class notebook_node(nodes.raw):
-    pass
-
-def nb_to_python(nb_path):
-    """convert notebook to python script"""
-    exporter = python.PythonExporter()
-    output, resources = exporter.from_filename(nb_path)
-    return output
-
-def nb_to_html(nb_path):
-    """convert notebook to html"""
-    c = Config({'ExtractOutputPreprocessor':{'enabled':True}})
-
-    exporter = html.HTMLExporter(template_file='full', config=c)
-    notebook = nbformat.read(open(nb_path), 'json')
-    output, resources = exporter.from_notebook_node(notebook)
-    header = output.split('<head>', 1)[1].split('</head>',1)[0]
-    body = output.split('<body>', 1)[1].split('</body>',1)[0]
-
-    # (upstream reference link; URL mangled by the list's click tracker)
-    header = header.replace('<style', '<style scoped="scoped"')
-    header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n',
-                            '')
-    header = header.replace("code,pre{", "code{")
-
-    # Filter out styles that conflict with the sphinx theme.
-    filter_strings = [
-        'navbar',
-        'body{',
-        'alert{',
-        'uneditable-input{',
-        'collapse{',
-    ]
-
-    filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
-
-    line_begin = [
-        'pre{',
-        'p{margin'
-    ]
-
-    filterfunc = lambda x: not any([s in x for s in filter_strings])
-    header_lines = filter(filterfunc, header.split('\n'))
-
-    filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
-    header_lines = filter(filterfunc, header_lines)
-
-    header = '\n'.join(header_lines)
-
-    # concatenate raw html lines
-    lines = ['<div class="ipynotebook">']
-    lines.append(header)
-    lines.append(body)
-    lines.append('</div>')
-    return '\n'.join(lines), resources
-
-def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
-    # Create evaluated version and save it to the dest path.
-    notebook = nbformat.read(open(nb_path), 'json')
-    nb_runner = NotebookRunner(notebook, pylab=False)
-    try:
-        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
-    except NotebookError as e:
-        print('')
-        print(e)
-        # Return the traceback, filtering out ANSI color codes.
-        # (upstream reference link; URL mangled by the list's click tracker)
-        return "Notebook conversion failed with the " \
-               "following traceback: \n%s" % \
-            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '',
-                   str(e))
-
-    if dest_path is None:
-        dest_path = 'temp_evaluated.ipynb'
-    nbformat.write(nb_runner.nb, open(dest_path, 'w'), 'json')
-    ret = nb_to_html(dest_path)
-    if dest_path is 'temp_evaluated.ipynb':
-        os.remove(dest_path)
-    return ret
-
-def formatted_link(path):
-    return "`%s <%s>`__" % (os.path.basename(path), path)
-
-def visit_notebook_node(self, node):
-    self.visit_raw(node)
-
-def depart_notebook_node(self, node):
-    self.depart_raw(node)
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook', NotebookDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def make_image_dir(setup, rst_dir):
-    image_dir = setup.app.builder.outdir + os.path.sep + '_images'
-    rel_dir = os.path.relpath(setup.confdir, rst_dir)
-    image_rel_dir = rel_dir + os.path.sep + '_images'
-    thread_safe_mkdir(image_dir)
-    return image_dir, image_rel_dir
-
-def write_notebook_output(resources, image_dir, image_rel_dir, evaluated_text):
-    my_uuid = uuid.uuid4().hex
-
-    for output in resources['outputs']:
-        new_name = image_dir + os.path.sep + my_uuid + output
-        new_relative_name = image_rel_dir + os.path.sep + my_uuid + output
-        evaluated_text = evaluated_text.replace(output, new_relative_name)
-        with open(new_name, 'wb') as f:
-            f.write(resources['outputs'][output])
-    return evaluated_text
-
-def thread_safe_mkdir(dirname):
-    try:
-        os.makedirs(dirname)
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-        pass

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import shutil
-import io
-import tempfile
-from sphinx.util.compat import Directive
-from docutils.parsers.rst import directives
-from IPython.nbformat import current
-from notebook_sphinxext import \
-    notebook_node, visit_notebook_node, depart_notebook_node, \
-    evaluate_notebook, make_image_dir, write_notebook_output
-
-
-class NotebookCellDirective(Directive):
-    """Insert an evaluated notebook cell into a document
-
-    This uses runipy and nbconvert to transform an inline python
-    script into html suitable for embedding in a Sphinx document.
-    """
-    required_arguments = 0
-    optional_arguments = 1
-    has_content = True
-    option_spec = {'skip_exceptions': directives.flag}
-
-    def run(self):
-        # check if raw html is supported
-        if not self.state.document.settings.raw_enabled:
-            raise self.warning('"%s" directive disabled.' % self.name)
-
-        cwd = os.getcwd()
-        tmpdir = tempfile.mkdtemp()
-        os.chdir(tmpdir)
-
-        rst_file = self.state_machine.document.attributes['source']
-        rst_dir = os.path.abspath(os.path.dirname(rst_file))
-
-        image_dir, image_rel_dir = make_image_dir(setup, rst_dir)
-
-        # Construct notebook from cell content
-        content = "\n".join(self.content)
-        with open("temp.py", "w") as f:
-            f.write(content)
-
-        convert_to_ipynb('temp.py', 'temp.ipynb')
-
-        skip_exceptions = 'skip_exceptions' in self.options
-
-        evaluated_text, resources = evaluate_notebook(
-            'temp.ipynb', skip_exceptions=skip_exceptions)
-
-        evaluated_text = write_notebook_output(
-            resources, image_dir, image_rel_dir, evaluated_text)
-
-        # create notebook node
-        attributes = {'format': 'html', 'source': 'nb_path'}
-        nb_node = notebook_node('', evaluated_text, **attributes)
-        (nb_node.source, nb_node.line) = \
-            self.state_machine.get_source_and_line(self.lineno)
-
-        # clean up
-        os.chdir(cwd)
-        shutil.rmtree(tmpdir, True)
-
-        return [nb_node]
-
-def setup(app):
-    setup.app = app
-    setup.config = app.config
-    setup.confdir = app.confdir
-
-    app.add_node(notebook_node,
-                 html=(visit_notebook_node, depart_notebook_node))
-
-    app.add_directive('notebook-cell', NotebookCellDirective)
-
-    retdict = dict(
-        version='0.1',
-        parallel_read_safe=True,
-        parallel_write_safe=True
-    )
-
-    return retdict
-
-def convert_to_ipynb(py_file, ipynb_file):
-    with io.open(py_file, 'r', encoding='utf-8') as f:
-        notebook = current.reads(f.read(), format='py')
-    with io.open(ipynb_file, 'w', encoding='utf-8') as f:
-        current.write(notebook, f, format='ipynb')

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 doc/extensions/numpydocmod/__init__.py
--- a/doc/extensions/numpydocmod/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-from numpydoc import setup

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 doc/extensions/numpydocmod/comment_eater.py
--- a/doc/extensions/numpydocmod/comment_eater.py
+++ /dev/null
@@ -1,158 +0,0 @@
-from cStringIO import StringIO
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from compiler_unparse import unparse
-
-
-class Comment(object):
-    """ A comment block.
-    """
-    is_comment = True
-    def __init__(self, start_lineno, end_lineno, text):
-        # int : The first line number in the block. 1-indexed.
-        self.start_lineno = start_lineno
-        # int : The last line number. Inclusive!
-        self.end_lineno = end_lineno
-        # str : The text block including '#' character but not any leading spaces.
-        self.text = text
-
-    def add(self, string, start, end, line):
-        """ Add a new comment line.
-        """
-        self.start_lineno = min(self.start_lineno, start[0])
-        self.end_lineno = max(self.end_lineno, end[0])
-        self.text += string
-
-    def __repr__(self):
-        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno, self.text)
-
-
-class NonComment(object):
-    """ A non-comment block of code.
-    """
-    is_comment = False
-    def __init__(self, start_lineno, end_lineno):
-        self.start_lineno = start_lineno
-        self.end_lineno = end_lineno
-
-    def add(self, string, start, end, line):
-        """ Add lines to the block.
-        """
-        if string.strip():
-            # Only add if not entirely whitespace.
-            self.start_lineno = min(self.start_lineno, start[0])
-            self.end_lineno = max(self.end_lineno, end[0])
-
-    def __repr__(self):
-        return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
-            self.end_lineno)
-
-
-class CommentBlocker(object):
-    """ Pull out contiguous comment blocks.
-    """
-    def __init__(self):
-        # Start with a dummy.
-        self.current_block = NonComment(0, 0)
-
-        # All of the blocks seen so far.
-        self.blocks = []
-
-        # The index mapping lines of code to their associated comment blocks.
-        self.index = {}
-
-    def process_file(self, file):
-        """ Process a file object.
-        """
-        for token in tokenize.generate_tokens(file.next):
-            self.process_token(*token)
-        self.make_index()
-
-    def process_token(self, kind, string, start, end, line):
-        """ Process a single token.
-        """
-        if self.current_block.is_comment:
-            if kind == tokenize.COMMENT:
-                self.current_block.add(string, start, end, line)
-            else:
-                self.new_noncomment(start[0], end[0])
-        else:
-            if kind == tokenize.COMMENT:
-                self.new_comment(string, start, end, line)
-            else:
-                self.current_block.add(string, start, end, line)
-
-    def new_noncomment(self, start_lineno, end_lineno):
-        """ We are transitioning from a noncomment to a comment.
-        """
-        block = NonComment(start_lineno, end_lineno)
-        self.blocks.append(block)
-        self.current_block = block
-
-    def new_comment(self, string, start, end, line):
-        """ Possibly add a new comment.
-        
-        Only adds a new comment if this comment is the only thing on the line.
-        Otherwise, it extends the noncomment block.
-        """
-        prefix = line[:start[1]]
-        if prefix.strip():
-            # Oops! Trailing comment, not a comment block.
-            self.current_block.add(string, start, end, line)
-        else:
-            # A comment block.
-            block = Comment(start[0], end[0], string)
-            self.blocks.append(block)
-            self.current_block = block
-
-    def make_index(self):
-        """ Make the index mapping lines of actual code to their associated
-        prefix comments.
-        """
-        for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
-            if not block.is_comment:
-                self.index[block.start_lineno] = prev
-
-    def search_for_comment(self, lineno, default=None):
-        """ Find the comment block just before the given line number.
-
-        Returns None (or the specified default) if there is no such block.
-        """
-        if not self.index:
-            self.make_index()
-        block = self.index.get(lineno, None)
-        text = getattr(block, 'text', default)
-        return text
-
-
-def strip_comment_marker(text):
-    """ Strip # markers at the front of a block of comment text.
-    """
-    lines = []
-    for line in text.splitlines():
-        lines.append(line.lstrip('#'))
-    text = textwrap.dedent('\n'.join(lines))
-    return text
-
-
-def get_class_traits(klass):
-    """ Yield all of the documentation for trait definitions on a class object.
-    """
-    # FIXME: gracefully handle errors here or in the caller?
-    source = inspect.getsource(klass)
-    cb = CommentBlocker()
-    cb.process_file(StringIO(source))
-    mod_ast = compiler.parse(source)
-    class_ast = mod_ast.node.nodes[0]
-    for node in class_ast.code.nodes:
-        # FIXME: handle other kinds of assignments?
-        if isinstance(node, compiler.ast.Assign):
-            name = node.nodes[0].name
-            rhs = unparse(node.expr).strip()
-            doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
-            yield name, rhs, doc
-

diff -r 02417c5a8f7f2fad0bcd7f9f6fc3bd970c3712af -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 doc/extensions/numpydocmod/compiler_unparse.py
--- a/doc/extensions/numpydocmod/compiler_unparse.py
+++ /dev/null
@@ -1,860 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
-    The unparse method takes a compiler.ast tree and transforms it back into
-    valid python code.  It is incomplete and currently only works for
-    import statements, function calls, function definitions, assignments, and
-    basic expressions.
-
-    Inspired by python-2.5-svn/Demo/parser/unparse.py
-
-    fixme: We may want to move to using _ast trees because the compiler for
-           them is about 6 times faster than compiler.compile.
-"""
-
-import sys
-import cStringIO
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-def unparse(ast, single_line_functions=False):
-    s = cStringIO.StringIO()
-    UnparseCompilerAst(ast, s, single_line_functions)
-    return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
-                  'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
-    """ Methods in this class recursively traverse an AST and
-        output source code for the abstract syntax; original formatting
-        is disregarged.
-    """
-
-    #########################################################################
-    # object interface.
-    #########################################################################
-
-    def __init__(self, tree, file = sys.stdout, single_line_functions=False):
-        """ Unparser(tree, file=sys.stdout) -> None.
-
-            Print the source for tree to file.
-        """
-        self.f = file
-        self._single_func = single_line_functions
-        self._do_indent = True
-        self._indent = 0
-        self._dispatch(tree)
-        self._write("\n")
-        self.f.flush()
-
-    #########################################################################
-    # Unparser private interface.
-    #########################################################################
-
-    ### format, output, and dispatch methods ################################
-
-    def _fill(self, text = ""):
-        "Indent a piece of text, according to the current indentation level"
-        if self._do_indent:
-            self._write("\n"+"    "*self._indent + text)
-        else:
-            self._write(text)
-
-    def _write(self, text):
-        "Append a piece of text to the current line."
-        self.f.write(text)
-
-    def _enter(self):
-        "Print ':', and increase the indentation."
-        self._write(": ")
-        self._indent += 1
-
-    def _leave(self):
-        "Decrease the indentation level."
-        self._indent -= 1
-
-    def _dispatch(self, tree):
-        "_dispatcher function, _dispatching tree type T to method _T."
-        if isinstance(tree, list):
-            for t in tree:
-                self._dispatch(t)
-            return
-        meth = getattr(self, "_"+tree.__class__.__name__)
-        if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
-            return
-        meth(tree)
-
-
-    #########################################################################
-    # compiler.ast unparsing methods.
-    #
-    # There should be one method per concrete grammar type. They are
-    # organized in alphabetical order.
-    #########################################################################
-
-    def _Add(self, t):
-        self.__binary_op(t, '+')
-
-    def _And(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") and (")
-        self._write(")")
-               
-    def _AssAttr(self, t):
-        """ Handle assigning an attribute of an object
-        """
-        self._dispatch(t.expr)
-        self._write('.'+t.attrname)
- 
-    def _Assign(self, t):
-        """ Expression Assignment such as "a = 1".
-
-            This only handles assignment in expressions.  Keyword assignment
-            is handled separately.
-        """
-        self._fill()
-        for target in t.nodes:
-            self._dispatch(target)
-            self._write(" = ")
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write('; ')
-
-    def _AssName(self, t):
-        """ Name on left hand side of expression.
-
-            Treat just like a name on the right side of an expression.
-        """
-        self._Name(t)
-
-    def _AssTuple(self, t):
-        """ Tuple on left hand side of an expression.
-        """
-
-        # _write each element, separated by a comma.
-        for element in t.nodes[:-1]:
-            self._dispatch(element)
-            self._write(", ")
-
-        # Handle the last one without writing comma
-        last_element = t.nodes[-1]
-        self._dispatch(last_element)
-
-    def _AugAssign(self, t):
-        """ +=,-=,*=,/=,**=, etc. operations
-        """
-        
-        self._fill()
-        self._dispatch(t.node)
-        self._write(' '+t.op+' ')
-        self._dispatch(t.expr)
-        if not self._do_indent:
-            self._write(';')
-            
-    def _Bitand(self, t):
-        """ Bit and operation.
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" & ")
-                
-    def _Bitor(self, t):
-        """ Bit or operation
-        """
-        
-        for i, node in enumerate(t.nodes):
-            self._write("(")
-            self._dispatch(node)
-            self._write(")")
-            if i != len(t.nodes)-1:
-                self._write(" | ")
-                
-    def _CallFunc(self, t):
-        """ Function call.
-        """
-        self._dispatch(t.node)
-        self._write("(")
-        comma = False
-        for e in t.args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._dispatch(e)
-        if t.star_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("*")
-            self._dispatch(t.star_args)
-        if t.dstar_args:
-            if comma: self._write(", ")
-            else: comma = True
-            self._write("**")
-            self._dispatch(t.dstar_args)
-        self._write(")")
-
-    def _Compare(self, t):
-        self._dispatch(t.expr)
-        for op, expr in t.ops:
-            self._write(" " + op + " ")
-            self._dispatch(expr)
-
-    def _Const(self, t):
-        """ A constant value such as an integer value, 3, or a string, "hello".
-        """
-        self._dispatch(t.value)
-
-    def _Decorators(self, t):
-        """ Handle function decorators (eg. @has_units)
-        """
-        for node in t.nodes:
-            self._dispatch(node)
-
-    def _Dict(self, t):
-        self._write("{")
-        for  i, (k, v) in enumerate(t.items):
-            self._dispatch(k)
-            self._write(": ")
-            self._dispatch(v)
-            if i < len(t.items)-1:
-                self._write(", ")
-        self._write("}")
-
-    def _Discard(self, t):
-        """ Node for when return value is ignored such as in "foo(a)".
-        """
-        self._fill()
-        self._dispatch(t.expr)
-
-    def _Div(self, t):
-        self.__binary_op(t, '/')
-
-    def _Ellipsis(self, t):
-        self._write("...")
-
-    def _From(self, t):
-        """ Handle "from xyz import foo, bar as baz".
-        """
-        # fixme: Are From and ImportFrom handled differently?
-        self._fill("from ")
-        self._write(t.modname)
-        self._write(" import ")
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-                
-    def _Function(self, t):
-        """ Handle function definitions
-        """
-        if t.decorators is not None:
-            self._fill("@")
-            self._dispatch(t.decorators)
-        self._fill("def "+t.name + "(")
-        defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
-        for i, arg in enumerate(zip(t.argnames, defaults)):
-            self._write(arg[0])
-            if arg[1] is not None:
-                self._write('=')
-                self._dispatch(arg[1])
-            if i < len(t.argnames)-1:
-                self._write(', ')
-        self._write(")")
-        if self._single_func:
-            self._do_indent = False
-        self._enter()
-        self._dispatch(t.code)
-        self._leave()
-        self._do_indent = True
-
-    def _Getattr(self, t):
-        """ Handle getting an attribute of an object
-        """
-        if isinstance(t.expr, (Div, Mul, Sub, Add)):
-            self._write('(')
-            self._dispatch(t.expr)
-            self._write(')')
-        else:
-            self._dispatch(t.expr)
-            
-        self._write('.'+t.attrname)
-        
-    def _If(self, t):
-        self._fill()
-        
-        for i, (compare,code) in enumerate(t.tests):
-            if i == 0:
-                self._write("if ")
-            else:
-                self._write("elif ")
-            self._dispatch(compare)
-            self._enter()
-            self._fill()
-            self._dispatch(code)
-            self._leave()
-            self._write("\n")
-
-        if t.else_ is not None:
-            self._write("else")
-            self._enter()
-            self._fill()
-            self._dispatch(t.else_)
-            self._leave()
-            self._write("\n")
-            
-    def _IfExp(self, t):
-        self._dispatch(t.then)
-        self._write(" if ")
-        self._dispatch(t.test)
-
-        if t.else_ is not None:
-            self._write(" else (")
-            self._dispatch(t.else_)
-            self._write(")")
-
-    def _Import(self, t):
-        """ Handle "import xyz.foo".
-        """
-        self._fill("import ")
-        
-        for i, (name,asname) in enumerate(t.names):
-            if i != 0:
-                self._write(", ")
-            self._write(name)
-            if asname is not None:
-                self._write(" as "+asname)
-
-    def _Keyword(self, t):
-        """ Keyword value assignment within function calls and definitions.
-        """
-        self._write(t.name)
-        self._write("=")
-        self._dispatch(t.expr)
-        
-    def _List(self, t):
-        self._write("[")
-        for  i,node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i < len(t.nodes)-1:
-                self._write(", ")
-        self._write("]")
-
-    def _Module(self, t):
-        if t.doc is not None:
-            self._dispatch(t.doc)
-        self._dispatch(t.node)
-
-    def _Mul(self, t):
-        self.__binary_op(t, '*')
-
-    def _Name(self, t):
-        self._write(t.name)
-
-    def _NoneType(self, t):
-        self._write("None")
-        
-    def _Not(self, t):
-        self._write('not (')
-        self._dispatch(t.expr)
-        self._write(')')
-        
-    def _Or(self, t):
-        self._write(" (")
-        for i, node in enumerate(t.nodes):
-            self._dispatch(node)
-            if i != len(t.nodes)-1:
-                self._write(") or (")
-        self._write(")")
-                
-    def _Pass(self, t):
-        self._write("pass\n")
-
-    def _Printnl(self, t):
-        self._fill("print ")
-        if t.dest:
-            self._write(">> ")
-            self._dispatch(t.dest)
-            self._write(", ")
-        comma = False
-        for node in t.nodes:
-            if comma: self._write(', ')
-            else: comma = True
-            self._dispatch(node)
-
-    def _Power(self, t):
-        self.__binary_op(t, '**')
-
-    def _Return(self, t):
-        self._fill("return ")
-        if t.value:
-            if isinstance(t.value, Tuple):
-                text = ', '.join([ name.name for name in t.value.asList() ])
-                self._write(text)
-            else:
-                self._dispatch(t.value)
-            if not self._do_indent:
-                self._write('; ')
-
-    def _Slice(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        if t.lower:
-            self._dispatch(t.lower)
-        self._write(":")
-        if t.upper:
-            self._dispatch(t.upper)
-        #if t.step:
-        #    self._write(":")
-        #    self._dispatch(t.step)
-        self._write("]")
-
-    def _Sliceobj(self, t):
-        for i, node in enumerate(t.nodes):
-            if i != 0:
-                self._write(":")
-            if not (isinstance(node, Const) and node.value is None):
-                self._dispatch(node)
-
-    def _Stmt(self, tree):
-        for node in tree.nodes:
-            self._dispatch(node)
-
-    def _Sub(self, t):
-        self.__binary_op(t, '-')
-
-    def _Subscript(self, t):
-        self._dispatch(t.expr)
-        self._write("[")
-        for i, value in enumerate(t.subs):
-            if i != 0:
-                self._write(",")
-            self._dispatch(value)
-        self._write("]")
-
-    def _TryExcept(self, t):
-        self._fill("try")
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-
-        for handler in t.handlers:
-            self._fill('except ')
-            self._dispatch(handler[0])
-            if handler[1] is not None:
-                self._write(', ')
-                self._dispatch(handler[1])
-            self._enter()
-            self._dispatch(handler[2])
-            self._leave()
-            
-        if t.else_:
-            self._fill("else")
-            self._enter()
-            self._dispatch(t.else_)
-            self._leave()
-
-    def _Tuple(self, t):
-
-        if not t.nodes:
-            # Empty tuple.
-            self._write("()")
-        else:
-            self._write("(")
-
-            # _write each element, separated by a comma.
-            for element in t.nodes[:-1]:
-                self._dispatch(element)
-                self._write(", ")
-
-            # Handle the last one without writing comma
-            last_element = t.nodes[-1]
-            self._dispatch(last_element)
-
-            self._write(")")
-            
-    def _UnaryAdd(self, t):
-        self._write("+")
-        self._dispatch(t.expr)
-        
-    def _UnarySub(self, t):
-        self._write("-")
-        self._dispatch(t.expr)        
-
-    def _With(self, t):
-        self._fill('with ')
-        self._dispatch(t.expr)
-        if t.vars:
-            self._write(' as ')
-            self._write(t.vars.name)
-        self._enter()
-        self._dispatch(t.body)
-        self._leave()
-        self._write('\n')
-        
-    def _int(self, t):
-        self._write(repr(t))
-
-    def __binary_op(self, t, symbol):
-        # Check if parenthesis are needed on left side and then dispatch
-        has_paren = False
-        left_class = str(t.left.__class__)
-        if (left_class in op_precedence.keys() and
-            op_precedence[left_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.left)
-        if has_paren:
-            self._write(')')
-        # Write the appropriate symbol for operator
-        self._write(symbol)
-        # Check if parenthesis are needed on the right side and then dispatch
-        has_paren = False
-        right_class = str(t.right.__class__)
-        if (right_class in op_precedence.keys() and
-            op_precedence[right_class] < op_precedence[str(t.__class__)]):
-            has_paren = True
-        if has_paren:
-            self._write('(')
-        self._dispatch(t.right)
-        if has_paren:
-            self._write(')')
-
-    def _float(self, t):
-        # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
-        # We prefer str here.
-        self._write(str(t))
-
-    def _str(self, t):
-        self._write(repr(t))
-        
-    def _tuple(self, t):
-        self._write(str(t))
-
-    #########################################################################
-    # These are the methods from the _ast module's unparse.
-    #
-    # As our needs to handle more advanced code increase, we may want to
-    # modify some of the methods below so that they work for compiler.ast.
-    #########################################################################
-
-#    # stmt
-#    def _Expr(self, tree):
-#        self._fill()
-#        self._dispatch(tree.value)
-#
-#    def _Import(self, t):
-#        self._fill("import ")
-#        first = True
-#        for a in t.names:
-#            if first:
-#                first = False
-#            else:
-#                self._write(", ")
-#            self._write(a.name)
-#            if a.asname:
-#                self._write(" as "+a.asname)
-#
-##    def _ImportFrom(self, t):
-##        self._fill("from ")
-##        self._write(t.module)
-##        self._write(" import ")
-##        for i, a in enumerate(t.names):
-##            if i != 0:
-##                self._write(", ")
-##            self._write(a.name)
-##            if a.asname:
-##                self._write(" as "+a.asname)
-##        # XXX(jpe) what is level for?
-##
-#
-#    def _Break(self, t):
-#        self._fill("break")
-#
-#    def _Continue(self, t):
-#        self._fill("continue")
-#
-#    def _Delete(self, t):
-#        self._fill("del ")
-#        self._dispatch(t.targets)
-#
-#    def _Assert(self, t):
-#        self._fill("assert ")
-#        self._dispatch(t.test)
-#        if t.msg:
-#            self._write(", ")
-#            self._dispatch(t.msg)
-#
-#    def _Exec(self, t):
-#        self._fill("exec ")
-#        self._dispatch(t.body)
-#        if t.globals:
-#            self._write(" in ")
-#            self._dispatch(t.globals)
-#        if t.locals:
-#            self._write(", ")
-#            self._dispatch(t.locals)
-#
-#    def _Print(self, t):
-#        self._fill("print ")
-#        do_comma = False
-#        if t.dest:
-#            self._write(">>")
-#            self._dispatch(t.dest)
-#            do_comma = True
-#        for e in t.values:
-#            if do_comma:self._write(", ")
-#            else:do_comma=True
-#            self._dispatch(e)
-#        if not t.nl:
-#            self._write(",")
-#
-#    def _Global(self, t):
-#        self._fill("global")
-#        for i, n in enumerate(t.names):
-#            if i != 0:
-#                self._write(",")
-#            self._write(" " + n)
-#
-#    def _Yield(self, t):
-#        self._fill("yield")
-#        if t.value:
-#            self._write(" (")
-#            self._dispatch(t.value)
-#            self._write(")")
-#
-#    def _Raise(self, t):
-#        self._fill('raise ')
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.inst:
-#            self._write(", ")
-#            self._dispatch(t.inst)
-#        if t.tback:
-#            self._write(", ")
-#            self._dispatch(t.tback)
-#
-#
-#    def _TryFinally(self, t):
-#        self._fill("try")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#        self._fill("finally")
-#        self._enter()
-#        self._dispatch(t.finalbody)
-#        self._leave()
-#
-#    def _excepthandler(self, t):
-#        self._fill("except ")
-#        if t.type:
-#            self._dispatch(t.type)
-#        if t.name:
-#            self._write(", ")
-#            self._dispatch(t.name)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _ClassDef(self, t):
-#        self._write("\n")
-#        self._fill("class "+t.name)
-#        if t.bases:
-#            self._write("(")
-#            for a in t.bases:
-#                self._dispatch(a)
-#                self._write(", ")
-#            self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _FunctionDef(self, t):
-#        self._write("\n")
-#        for deco in t.decorators:
-#            self._fill("@")
-#            self._dispatch(deco)
-#        self._fill("def "+t.name + "(")
-#        self._dispatch(t.args)
-#        self._write(")")
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#
-#    def _For(self, t):
-#        self._fill("for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave()
-#
-#    def _While(self, t):
-#        self._fill("while ")
-#        self._dispatch(t.test)
-#        self._enter()
-#        self._dispatch(t.body)
-#        self._leave()
-#        if t.orelse:
-#            self._fill("else")
-#            self._enter()
-#            self._dispatch(t.orelse)
-#            self._leave()
-#
-#    # expr
-#    def _Str(self, tree):
-#        self._write(repr(tree.s))
-##
-#    def _Repr(self, t):
-#        self._write("`")
-#        self._dispatch(t.value)
-#        self._write("`")
-#
-#    def _Num(self, t):
-#        self._write(repr(t.n))
-#
-#    def _ListComp(self, t):
-#        self._write("[")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write("]")
-#
-#    def _GeneratorExp(self, t):
-#        self._write("(")
-#        self._dispatch(t.elt)
-#        for gen in t.generators:
-#            self._dispatch(gen)
-#        self._write(")")
-#
-#    def _comprehension(self, t):
-#        self._write(" for ")
-#        self._dispatch(t.target)
-#        self._write(" in ")
-#        self._dispatch(t.iter)
-#        for if_clause in t.ifs:
-#            self._write(" if ")
-#            self._dispatch(if_clause)
-#
-#    def _IfExp(self, t):
-#        self._dispatch(t.body)
-#        self._write(" if ")
-#        self._dispatch(t.test)
-#        if t.orelse:
-#            self._write(" else ")
-#            self._dispatch(t.orelse)
-#
-#    unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-#    def _UnaryOp(self, t):
-#        self._write(self.unop[t.op.__class__.__name__])
-#        self._write("(")
-#        self._dispatch(t.operand)
-#        self._write(")")
-#
-#    binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-#                    "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-#                    "FloorDiv":"//", "Pow": "**"}
-#    def _BinOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.left)
-#        self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-#        self._dispatch(t.right)
-#        self._write(")")
-#
-#    boolops = {_ast.And: 'and', _ast.Or: 'or'}
-#    def _BoolOp(self, t):
-#        self._write("(")
-#        self._dispatch(t.values[0])
-#        for v in t.values[1:]:
-#            self._write(" %s " % self.boolops[t.op.__class__])
-#            self._dispatch(v)
-#        self._write(")")
-#
-#    def _Attribute(self,t):
-#        self._dispatch(t.value)
-#        self._write(".")
-#        self._write(t.attr)
-#
-##    def _Call(self, t):
-##        self._dispatch(t.func)
-##        self._write("(")
-##        comma = False
-##        for e in t.args:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        for e in t.keywords:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._dispatch(e)
-##        if t.starargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("*")
-##            self._dispatch(t.starargs)
-##        if t.kwargs:
-##            if comma: self._write(", ")
-##            else: comma = True
-##            self._write("**")
-##            self._dispatch(t.kwargs)
-##        self._write(")")
-#
-#    # slice
-#    def _Index(self, t):
-#        self._dispatch(t.value)
-#
-#    def _ExtSlice(self, t):
-#        for i, d in enumerate(t.dims):
-#            if i != 0:
-#                self._write(': ')
-#            self._dispatch(d)
-#
-#    # others
-#    def _arguments(self, t):
-#        first = True
-#        nonDef = len(t.args)-len(t.defaults)
-#        for a in t.args[0:nonDef]:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a)
-#        for a,d in zip(t.args[nonDef:], t.defaults):
-#            if first:first = False
-#            else: self._write(", ")
-#            self._dispatch(a),
-#            self._write("=")
-#            self._dispatch(d)
-#        if t.vararg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("*"+t.vararg)
-#        if t.kwarg:
-#            if first:first = False
-#            else: self._write(", ")
-#            self._write("**"+t.kwarg)
-#
-##    def _keyword(self, t):
-##        self._write(t.arg)
-##        self._write("=")
-##        self._dispatch(t.value)
-#
-#    def _Lambda(self, t):
-#        self._write("lambda ")
-#        self._dispatch(t.args)
-#        self._write(": ")
-#        self._dispatch(t.body)
-
-
-

This diff is so big that we needed to truncate the remainder.

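(For context on the removed module above: UnparseCompilerAst is a
getattr-based visitor, in which _dispatch routes each AST node to a method
named "_" plus the node's class name. A minimal standalone sketch of that
pattern follows; the names are illustrative, not part of yt:)

    import sys

    class MiniUnparser:
        """Tiny visitor in the style of the removed UnparseCompilerAst."""
        def __init__(self, out=sys.stdout):
            self.out = out

        def _dispatch(self, node):
            # Lists of nodes are walked element by element.
            if isinstance(node, list):
                for n in node:
                    self._dispatch(n)
                return
            # Route, e.g., a str node to self._str; node types without a
            # matching handler raise AttributeError, as in the original.
            getattr(self, "_" + node.__class__.__name__)(node)

        def _str(self, node):
            self.out.write(repr(node))

    MiniUnparser()._dispatch(["a", "b"])  # writes 'a''b'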
http://link.bitbucket.org/wf/click?upn=8USRlNyft-2BCzk2l4Ywl6gDx2lD2xxoS9E7MwXb2SMR-2BXcTD42YocdnOFkyGBVHOUoZ8PHTVVR-2B7gkeeh01YmxMmeSqcpO5dmuhR1zW2hyus-3D_ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27Chp4tbBaYQJh-2BYFo0ahY91tgRPGfZuSCITApOp1Z-2FKyMNMYluhhsymOLKDb4-2FV0gXDbbBLzWsLwTTnBDezmGQgiPC9y-2FRMeu1oKZSl0tbZszLcZxQutX2UeEJ6Q4H9Q-2BzrJwPFha02MRL6nqyKmc-2FEVWfJ47LZUtmXFjZ7Tyt7rcHDBaLN7VJ3kDGEdcp-2FHCg-3D
Changeset:   8274418a9da0
Branch:      yt
User:        chummels
Date:        2016-03-16 15:56:51+00:00
Summary:     Making observing_redshift loop happen in its own method.
Affected #:  1 file

diff -r 8ac0f630b3f0b6c0d12f907a4efb384794b3d0d7 -r 8274418a9da017df6f69946c00f1fe04664b099d yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -237,31 +237,16 @@
         """
         Add continuum features to the spectrum.
         """
+        # Change the redshifts of continuum sources to account for the 
+        # redshift at which the observer sits
+        redshift, redshift_eff = self._apply_observing_redshift(field_data, 
+                                 use_peculiar_velocity, observing_redshift)
+
         # Only add continuum features down to tau of 1.e-3.
         min_tau = 1.e-3
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
-            if observing_redshift == 0.:
-                # This is already assumed in the generation of the LightRay
-                redshift = field_data['redshift']
-                if use_peculiar_velocity:
-                    redshift_eff = field_data['redshift_eff']
-            else:
-                # The intermediate redshift that is seen by an observer
-                # at a redshift other than z=0 is z12, where z1 is the
-                # observing redshift and z2 is the emitted photon's redshift
-                # Hogg (2000) eq. 13:
-                # 1 + z12 = (1 + z2) / (1 + z1)
-                redshift = ((1 + field_data['redshift']) / \
-                            (1 + observing_redshift)) - 1.
-                # Combining cosmological redshift and doppler redshift
-                # into an effective redshift is found in Peacock's
-                # Cosmological Physics eqn 3.75:
-                # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
-                if use_peculiar_velocity:
-                    redshift_eff = ((1 + redshift) * \
-                                    (1 + field_data['redshift_dopp'])) - 1.
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
@@ -289,12 +274,54 @@
                 pbar.update(i)
             pbar.finish()
 
+    def _apply_observing_redshift(self, field_data, use_peculiar_velocity,
+                                 observing_redshift):
+        """
+        Change the redshifts of individual absorbers to account for the 
+        redshift at which the observer sits.
+
+        The intermediate redshift that is seen by an observer
+        at a redshift other than z=0 is z12, where z1 is the
+        observing redshift and z2 is the emitted photon's redshift;
+        see Hogg (2000) eq. 13:
+
+        1 + z12 = (1 + z2) / (1 + z1)
+        """
+        redshift_eff = None  # defined only when use_peculiar_velocity is True
+        if observing_redshift == 0.:
+            # This is already assumed in the generation of the LightRay
+            redshift = field_data['redshift']
+            if use_peculiar_velocity:
+                redshift_eff = field_data['redshift_eff']
+        else:
+            # The intermediate redshift that is seen by an observer
+            # at a redshift other than z=0 is z12, where z1 is the
+            # observing redshift and z2 is the emitted photon's redshift
+            # Hogg (2000) eq. 13:
+            # 1 + z12 = (1 + z2) / (1 + z1)
+            redshift = ((1 + field_data['redshift']) / \
+                        (1 + observing_redshift)) - 1.
+            # Combining cosmological redshift and doppler redshift
+            # into an effective redshift is found in Peacock's
+            # Cosmological Physics eqn 3.75:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
+            if use_peculiar_velocity:
+                redshift_eff = ((1 + redshift) * \
+                                (1 + field_data['redshift_dopp'])) - 1.
+
+        return redshift, redshift_eff
+
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
                                output_absorbers_file, subgrid_resolution=10,
                                observing_redshift=0., njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
+
+        # Change the redshifts of individual absorbers to account for the 
+        # redshift at which the observer sits
+        redshift, redshift_eff = self._apply_observing_redshift(field_data, 
+                                 use_peculiar_velocity, observing_redshift)
+
         # Widen wavelength window until optical depth falls below this tau
         # value at the ends to assure that the wings of a line have been
         # fully resolved.
@@ -305,27 +332,6 @@
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
 
-            if observing_redshift == 0.:
-                # This is already assumed in the generation of the LightRay
-                redshift = field_data['redshift']
-                if use_peculiar_velocity:
-                    redshift_eff = field_data['redshift_eff']
-            else:
-                # The intermediate redshift that is seen by an observer
-                # at a redshift other than z=0 is z12, where z1 is the
-                # observing redshift and z2 is the emitted photon's redshift
-                # Hogg (2000) eq. 13:
-                # 1 + z12 = (1 + z2) / (1 + z1)
-                redshift = ((1 + field_data['redshift']) / \
-                            (1 + observing_redshift)) - 1.
-                # Combining cosmological redshift and doppler redshift
-                # into an effective redshift is found in Peacock's
-                # Cosmological Physics eqn 3.75:
-                # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
-                if use_peculiar_velocity:
-                    redshift_eff = ((1 + redshift) * \
-                                    (1 + field_data['redshift_dopp'])) - 1.
-
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
             # wavelength to the observed wavelength of the transition


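(The two relations used by the new _apply_observing_redshift method are easy
to check numerically. A minimal sketch, independent of the yt API, using
plain floats rather than field arrays:)

    def shift_to_observer(z_emit, z_obs):
        # Hogg (2000) eq. 13: 1 + z12 = (1 + z2) / (1 + z1)
        return (1.0 + z_emit) / (1.0 + z_obs) - 1.0

    def effective_redshift(z_cosmo, z_doppler):
        # Peacock, Cosmological Physics eqn 3.75:
        # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
        return (1.0 + z_cosmo) * (1.0 + z_doppler) - 1.0

    # An absorber at z=3 seen by an observer sitting at z=0.5:
    z12 = shift_to_observer(3.0, 0.5)      # ~1.667
    z_eff = effective_redshift(z12, 1e-4)  # fold in a small doppler shift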
http://link.bitbucket.org/wf/click?upn=8USRlNyft-2BCzk2l4Ywl6gDx2lD2xxoS9E7MwXb2SMR-2BXcTD42YocdnOFkyGBVHOUMM-2B4Ak6BBeVYMIG6Y7Qc3pIjV2zvd9-2BRrjE-2BJ4e-2Blrg-3D_ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27Chp4tbBaYQJh-2BYFo0ahY91tgRPGfZuSCITApOp1Z-2FKyM3vAjLO-2BSQgROJ-2FEx8ggzG-2BPY3nAXHhmhdXTysXvTGh6hctYz1KloT1tWXhD0dVXopnIHTtMcl-2B-2FjI-2BE3TQ91dXDfYFEaQDWtIT5luL74v-2Fp-2FostpKFPEHzBAZqtF6r-2BL1oybuslEJs1RRgRjzos-2FQ-3D
Changeset:   ad805640beab
Branch:      yt
User:        chummels
Date:        2016-03-16 18:46:45+00:00
Summary:     Merging with tip.
Affected #:  13 files

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -415,9 +415,19 @@
 determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
 thinner.
 
+The above examples all involve 8-node hexahedral mesh elements. Here is another example from
+a dataset that uses 6-node wedge elements:
+
+.. python-script::
+   
+   import yt
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+   sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
+   sl.save()
+
 Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
-an example using another MOOSE dataset:
+an example using another MOOSE dataset that uses triangular mesh elements:
 
 .. python-script::
 

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -214,6 +214,29 @@
     # render and save
     sc.save()
 
+Here is an example using 6-node wedge elements:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+
+   # create a default scene
+   sc = yt.create_scene(ds, ('connect2', 'diffused'))
+
+   # override the default colormap
+   ms = sc.get_source(0)
+   ms.cmap = 'Eos A'
+
+   # adjust the camera position and orientation
+   cam = sc.camera
+   cam.set_position(ds.arr([1.0, -1.0, 1.0], 'code_length'))
+   cam.width = ds.arr([1.5, 1.5, 1.5], 'code_length')
+
+   # render and save
+   sc.save()
+
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 
@@ -273,7 +296,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
     cam.set_position(camera_position, north_vector)
     

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -39,12 +39,13 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_270:
+  local_varia_271:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
 
   local_orion_270:
     - yt/frontends/boxlib/tests/test_orion.py
@@ -55,7 +56,7 @@
   local_ytdata_270:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_270:
+  local_absorption_spectrum_271:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
 

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -52,7 +52,9 @@
     def __init__(self, lambda_min, lambda_max, n_lambda):
         self.n_lambda = n_lambda
         # lambda, flux, and tau are wavelength, flux, and optical depth
-        self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max,
+        self.lambda_min = lambda_min
+        self.lambda_max = lambda_max
+        self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max, 
                                     n_lambda), "angstrom")
         self.tau_field = None
         self.flux_field = None
@@ -341,8 +343,24 @@
                 delta_lambda = line['wavelength'] * redshift
             # lambda_obs is central wavelength of line after redshift
             lambda_obs = line['wavelength'] + delta_lambda
-            # bin index in lambda_field of central wavelength of line after z
-            center_index = np.digitize(lambda_obs, self.lambda_field)
+            # the total number of absorbers per transition
+            n_absorbers = len(lambda_obs)
+
+            # we want to know the bin index in the lambda_field array
+            # where each line has its central wavelength after being
+            # redshifted.  However, because we don't know a priori how wide
+            # a line will be (i.e., DLAs), we have to include bin indices
+            # *outside* the spectral range of the AbsorptionSpectrum
+            # object.  Thus, we find the "equivalent" bin index, which
+            # may be < 0 or > the size of the array.  In the end, we deposit
+            # the bins that actually overlap with the AbsorptionSpectrum's
+            # range in lambda.
+            
+            # this equation gives us the "equivalent" bin index for each line
+            # if it were placed into the self.lambda_field array
+            center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \
+                            / self.bin_width.d
+            center_index = np.ceil(center_index).astype('int')
 
             # thermal broadening b parameter
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
@@ -355,7 +373,6 @@
 
             # Sanitize units for faster runtime of the tau_profile machinery.
             lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
-            lambda_1 = lambda_obs.d # line's observed frame; angstroms
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
@@ -377,77 +394,90 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width
-            vbin_width = self.bin_width / \
-                         10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
-            vbin_width = vbin_width.in_units('angstrom').d
+            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            vbin_width = self.bin_width.d / n_vbins_per_bin
 
-            # the virtual window into which the line is deposited initially
-            # spans a region of 5 thermal_widths, but this may expand
-            n_vbins = np.ceil(5*thermal_width.d/vbin_width)
-            vbin_window_width = n_vbins*vbin_width
-
+            # a note to the user about which line components are unresolved
             if (thermal_width < self.bin_width).any():
                 mylog.info(("%d out of %d line components will be " + \
                             "deposited as unresolved lines.") %
                            ((thermal_width < self.bin_width).sum(),
-                            thermal_width.size))
+                            n_absorbers))
 
-            valid_lines = np.arange(len(thermal_width))
+            # provide a progress bar with information about lines processed
             pbar = get_pbar("Adding line - %s [%f A]: " % \
-                            (line['label'], line['wavelength']),
-                            thermal_width.size)
+                            (line['label'], line['wavelength']), n_absorbers)
 
             # for a given transition, step through each location in the
             # observed spectrum where it occurs and deposit a voigt profile
-            for i in parallel_objects(valid_lines, njobs=-1):
-                my_vbin_window_width = vbin_window_width[i]
-                my_n_vbins = n_vbins[i]
-                my_vbin_width = vbin_width[i]
+            for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
+ 
+                # the virtual window into which the line is deposited initially 
+                # spans a region of 2 coarse spectral bins 
+                # (one on each side of the center_index) but the window
+                # can expand as necessary.
+                # it will continue to expand until the tau value in the far
+                # edge of the wings is less than the min_tau value or it 
+                # reaches the edge of the spectrum
+                window_width_in_bins = 2
 
                 while True:
+                    left_index = (center_index[i] - window_width_in_bins/2)
+                    right_index = (center_index[i] + window_width_in_bins/2)
+                    n_vbins = (right_index - left_index) * n_vbins_per_bin[i]
+                    
+                    # the array of virtual bins in lambda space
                     vbins = \
-                        np.linspace(lambda_1[i]-my_vbin_window_width/2.,
-                                    lambda_1[i]+my_vbin_window_width/2.,
-                                    my_n_vbins, endpoint=False)
+                        np.linspace(self.lambda_min + self.bin_width.d * left_index, 
+                                    self.lambda_min + self.bin_width.d * right_index, 
+                                    n_vbins, endpoint=False)
 
+                    # the virtual bins and their corresponding opacities
                     vbins, vtau = \
                         tau_profile(
-                            lambda_0, line['f_value'], line['gamma'], thermb[i],
-                            cdens[i], delta_lambda=dlambda[i],
-                            lambda_bins=vbins)
+                            lambda_0, line['f_value'], line['gamma'], 
+                            thermb[i], cdens[i], 
+                            delta_lambda=dlambda[i], lambda_bins=vbins)
 
                     # If tau has not dropped below min tau threshold by the
                     # edges (ie the wings), then widen the wavelength
                     # window and repeat process.
                     if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
-                    my_vbin_window_width *= 2
-                    my_n_vbins *= 2
-
-                # identify the extrema of the vbin_window so as to speed
-                # up searching over the entire lambda_field array
-                bins_from_center = np.ceil((my_vbin_window_width/2.) / \
-                                           self.bin_width.d) + 1
-                left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
-                right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
-                window_width = right_index - left_index
-
-                # run digitize to identify which vbins are deposited into which
-                # global lambda bins.
-                # shift global lambda bins over by half a bin width;
-                # this has the effect of assuring np.digitize will place
-                # the vbins in the closest bin center.
-                binned = np.digitize(vbins,
-                                     self.lambda_field[left_index:right_index] \
-                                     + (0.5 * self.bin_width))
+                    window_width_in_bins *= 2
 
                 # numerically integrate the virtual bins to calculate a
                 # virtual equivalent width; then sum the virtual equivalent
                 # widths and deposit into each spectral bin
-                vEW = vtau * my_vbin_width
-                EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
-                EW = np.array(EW)/self.bin_width.d
-                self.tau_field[left_index:right_index] += EW
+                vEW = vtau * vbin_width[i]
+                EW = np.zeros(right_index - left_index)
+                EW_indices = np.arange(left_index, right_index)
+                for k, val in enumerate(EW_indices):
+                    EW[k] = vEW[n_vbins_per_bin[i] * k: \
+                                n_vbins_per_bin[i] * (k + 1)].sum()
+                EW = EW/self.bin_width.d
+
+                # only deposit EW bins that actually intersect the original
+                # spectral wavelength range (i.e. lambda_field)
+
+                # if EW bins don't intersect the original spectral range at all
+                # then skip the deposition
+                if ((left_index >= self.n_lambda) or \
+                    (right_index < 0)):
+                    pbar.update(i)
+                    continue
+
+                # otherwise, determine how much of the original spectrum
+                # is intersected by the expanded line window to be deposited, 
+                # and deposit the Equivalent Width data into that intersecting
+                # window in the original spectrum's tau
+                else:
+                    intersect_left_index = max(left_index, 0)
+                    intersect_right_index = min(right_index, self.n_lambda-1)
+                    self.tau_field[intersect_left_index:intersect_right_index] \
+                        += EW[(intersect_left_index - left_index): \
+                              (intersect_right_index - left_index)]
+
 
                 # write out absorbers to file if the column density of
                 # an absorber is greater than the specified "label_threshold"
@@ -471,9 +501,8 @@
             pbar.finish()
 
             del column_density, delta_lambda, lambda_obs, center_index, \
-                thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
-                vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
-                valid_lines, vbins, vtau, vEW
+                thermal_b, thermal_width, cdens, thermb, dlambda, \
+                vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")

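(The bookkeeping in the rewritten deposition loop above reduces to two steps:
map each line's observed wavelength to an integer bin index that may fall
outside the spectrum, then clip the line's window to the spectrum before
depositing. A rough standalone sketch of that indexing logic, with
illustrative names and values:)

    import numpy as np

    lambda_min, bin_width, n_lambda = 900.0, 0.1, 10000  # angstroms
    tau_field = np.zeros(n_lambda)

    def deposit(lambda_obs, ew_per_bin, half_window):
        # "equivalent" bin index; may be < 0 or >= n_lambda
        center = int(np.ceil((lambda_obs - lambda_min) / bin_width))
        left, right = center - half_window, center + half_window
        if left >= n_lambda or right < 0:
            return  # line window misses the spectrum entirely
        lo, hi = max(left, 0), min(right, n_lambda - 1)
        # deposit only the slice that overlaps the spectral range
        tau_field[lo:hi] += ew_per_bin[(lo - left):(hi - left)]

    deposit(950.0, np.full(20, 1e-3), 10)  # lands well inside the spectrum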
diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -180,9 +180,9 @@
         wavelength, flux = sp.make_spectrum('lightray.h5')
         total_tau.append((lambda_bin_width * sp.tau_field).sum())
         
-    # assure that the total tau values are all within 1e-5 of each other
+    # assure that the total tau values are all within 1e-3 of each other
     for tau in total_tau:
-        assert_almost_equal(tau, total_tau[0], 5)
+        assert_almost_equal(tau, total_tau[0], 3)
 
     # clean up
     os.chdir(curdir)

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -480,6 +480,9 @@
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
                     cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    # Protect against situations where velocity mag is exactly
+                    # zero, in which case zero / zero = NaN.
+                    cos_theta = np.nan_to_num(cos_theta)
                     redshift_dopp = \
                         (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
                          np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1

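(The quantity guarded in the light_ray change above is the special
relativistic doppler redshift. A minimal sketch of the same formula with the
0/0 protection; v_dot_los stands in for the dot product of the velocity with
the line of sight:)

    import numpy as np

    c = 2.99792458e10  # speed of light, cm/s

    def doppler_redshift(v_dot_los, v_mag):
        with np.errstate(invalid="ignore"):
            # cos(theta) is 0/0 = NaN for absorbers exactly at rest;
            # nan_to_num maps that case to 0, as the patch does.
            cos_theta = np.nan_to_num(v_dot_los / v_mag)
        return (1.0 + v_mag * cos_theta / c) / \
               np.sqrt(1.0 - v_mag**2 / c**2) - 1.0

    print(doppler_redshift(np.zeros(1), np.zeros(1)))  # [ 0.]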
diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -451,6 +451,7 @@
         deps, unloaded = self.field_info.check_derived_fields()
         self.field_dependencies.update(deps)
         self.fields = FieldTypeContainer(self)
+        self.index.field_list = sorted(self.field_list)
 
     def setup_deprecated_fields(self):
         from yt.fields.field_aliases import _field_name_aliases
@@ -520,7 +521,10 @@
         self.particle_types += (union.name,)
         self.particle_unions[union.name] = union
         fields = [ (union.name, field) for field in fields]
-        self.field_list.extend(fields)
+        new_fields = [_ for _ in fields if _ not in self.field_list]
+        self.field_list.extend(new_fields)
+        self.field_info.field_list.extend(new_fields)
+        self.index.field_list = sorted(self.field_list)
         # Give ourselves a chance to add them here, first, then...
         # ...if we can't find them, we set them up as defaults.
         new_fields = self._setup_particle_types([union.name])

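(The static_output change above extends field_list without introducing
duplicates when a particle union is re-registered. The same pattern in
isolation:)

    field_list = [("io", "mass"), ("io", "position")]
    fields = [("all", "mass"), ("io", "mass")]
    new_fields = [f for f in fields if f not in field_list]
    field_list.extend(new_fields)
    # field_list is now [("io", "mass"), ("io", "position"), ("all", "mass")]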
diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -41,6 +41,7 @@
     MaestroFieldInfo, \
     CastroFieldInfo
 
+
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
@@ -907,7 +908,7 @@
         self._read_particle_header()
 
     def _read_particle_header(self):
-        if not self.ds.parameters["particles.write_in_plotfile"]:
+        if not self.ds.parameters["particles"]:
             self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
             return
         for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
@@ -949,31 +950,48 @@
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args
-        pname = args[0].rstrip("/")
+        output_dir = args[0]
         # boxlib datasets are always directories
-        if not os.path.isdir(pname): return False
-        dn = os.path.dirname(pname)
-        if len(args) > 1:
-            kwargs['paramFilename'] = args[1]
-
-        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
-
-        # @todo: new Nyx output.
-        # We check for the job_info file's existence because this is currently
-        # what distinguishes Nyx data from MAESTRO data.
-        pfn = os.path.join(pfname)
-        if not os.path.exists(pfn) or os.path.isdir(pfn): return False
-        nyx = any(("nyx." in line for line in open(pfn)))
-        return nyx
+        if not os.path.isdir(output_dir): return False
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        if not os.path.exists(jobinfo_filename):
+            return False
+        # Now we check the job_info for a mention of Nyx
+        lines = open(jobinfo_filename).readlines()
+        if any(line.startswith("Nyx  ") for line in lines): return True
+        if any(line.startswith("nyx.") for line in lines): return True
+        return False
 
     def _parse_parameter_file(self):
         super(NyxDataset, self)._parse_parameter_file()
-        # return
+
         # Nyx is always cosmological.
         self.cosmological_simulation = 1
-        self.omega_lambda = self.parameters["comoving_OmL"]
-        self.omega_matter = self.parameters["comoving_OmM"]
-        self.hubble_constant = self.parameters["comoving_h"]
+
+        jobinfo_filename = os.path.join(self.output_dir, "job_info")
+        line = ""
+        with open(jobinfo_filename, "r") as f:
+            while not line.startswith(" Cosmology Information"):
+                # get the code git hashes
+                if "git hash" in line:
+                    # line format: codename git hash:  the-hash
+                    fields = line.split(":")
+                    self.parameters[fields[0]] = fields[1].strip()
+                line = next(f)
+
+            # get the cosmology
+            for line in f:
+                if "Omega_m (comoving)" in line:
+                    self.omega_matter = float(line.split(":")[1])
+                elif "Omega_lambda (comoving)" in line:
+                    self.omega_lambda = float(line.split(":")[1])
+                elif "h (comoving)" in line:
+                    self.hubble_constant = float(line.split(":")[1])
+
 
         # Read in the `comoving_a` file and parse the value. We should fix this
         # in the new Nyx output format...
@@ -987,7 +1005,9 @@
 
         # alias
         self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
-        if self.parameters["particles.write_in_plotfile"]:
+        if os.path.isdir(os.path.join(self.output_dir, "DM")):
+            # we have particles
+            self.parameters["particles"] = 1 
             self.particle_types = ("io",)
             self.particle_types_raw = self.particle_types
 

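(The new Nyx parameter parsing above reads "key : value" lines out of the
job_info file. A toy version of just that parse; the line layout is inferred
from the diff, not from Nyx documentation:)

    def read_cosmology(jobinfo_lines):
        params = {}
        for line in jobinfo_lines:
            key, sep, value = line.partition(":")
            if not sep:
                continue  # not a key/value line
            if "Omega_m (comoving)" in key:
                params["omega_matter"] = float(value)
            elif "Omega_lambda (comoving)" in key:
                params["omega_lambda"] = float(value)
            elif "h (comoving)" in key:
                params["hubble_constant"] = float(value)
        return params

    print(read_cosmology([" Omega_m (comoving): 0.275",
                          " Omega_lambda (comoving): 0.725",
                          " h (comoving): 0.702"]))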
diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -354,6 +354,11 @@
             displaced_coords = self._apply_displacement(coords, mesh_id)
             mi = np.minimum(displaced_coords.min(axis=0), mi)
             ma = np.maximum(displaced_coords.max(axis=0), ma)
+
+        # pad domain boundaries
+        width = ma - mi
+        mi -= 0.1 * width
+        ma += 0.1 * width
         return mi, ma
 
     @classmethod

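(The exodus_ii change above pads the computed domain bounds by 10% of the
extent on each side. In isolation:)

    import numpy as np

    mi = np.array([0.0, 0.0, 0.0])  # per-axis minima
    ma = np.array([1.0, 2.0, 4.0])  # per-axis maxima
    width = ma - mi
    mi = mi - 0.1 * width  # pad the low faces
    ma = ma + 0.1 * width  # pad the high faces
    # mi -> [-0.1 -0.2 -0.4], ma -> [1.1 2.2 4.4]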
diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -133,11 +133,13 @@
         self.light = None
         self.data_source = data_source_or_all(data_source)
         self._resolution = (512, 512)
+
         if self.data_source is not None:
             self.scene.set_new_unit_registry(self.data_source.ds.unit_registry)
             self._focus = self.data_source.ds.domain_center
             self._position = self.data_source.ds.domain_right_edge
-            self._width = 1.5*self.data_source.ds.domain_width
+            self._width = self.data_source.ds.arr(
+                [1.5*self.data_source.ds.domain_width.max()]*3)
             self._domain_center = self.data_source.ds.domain_center
             self._domain_width = self.data_source.ds.domain_width
         else:
@@ -175,7 +177,8 @@
                 raise RuntimeError(
                     'Cannot set the camera focus and position to the same value')
             self._position = position
-            self.switch_orientation()
+            self.switch_orientation(normal_vector=self.focus - self._position,
+                                    north_vector=None)
 
         def fdel(self):
             del self._position
@@ -232,7 +235,8 @@
                 raise RuntimeError(
                     'Cannot set the camera focus and position to the same value')
             self._focus = focus
-            self.switch_orientation()
+            self.switch_orientation(normal_vector=self.focus - self._position,
+                                    north_vector=None)
 
         def fdel(self):
             del self._focus
@@ -327,7 +331,7 @@
         if not isinstance(width, YTArray):
             width = data_source.ds.arr(width, input_units="code_length")
         if not isinstance(focus, YTArray):
-            focus = self.ds.arr(focus, input_units="code_length")
+            focus = data_source.ds.arr(focus, input_units="code_length")
 
         # We can't use the property setters yet, since they rely on attributes
         # that will not be set up until the base class initializer is called.

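(The camera change above replaces the anisotropic default width with an
isotropic one, so non-cubic domains get a well-defined square view. In
isolation:)

    import numpy as np

    domain_width = np.array([1.0, 1.0, 4.0])
    width = np.array([1.5 * domain_width.max()] * 3)  # [6., 6., 6.]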
diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -155,7 +155,7 @@
     camera.resolution = resolution
     if not iterable(width):
         width = data_source.ds.arr([width]*3)
-    camera.position = center - width[2]*camera.normal_vector
+    camera.position = center - width[2]*normal_vector
     camera.focus = center
     
     # If north_vector is None, we set the default here.

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/visualization/volume_rendering/tests/test_mesh_render.py
--- a/yt/visualization/volume_rendering/tests/test_mesh_render.py
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -11,11 +11,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import fake_tetrahedral_ds
-from yt.testing import fake_hexahedral_ds
-from yt.testing import requires_module
-from yt.visualization.volume_rendering.render_source import MeshSource
-from yt.visualization.volume_rendering.scene import Scene
+from yt.testing import \
+    fake_tetrahedral_ds, \
+    fake_hexahedral_ds, \
+    requires_module
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    data_dir_load, \
+    GenericImageTest
+from yt.visualization.volume_rendering.api import \
+    MeshSource, \
+    Scene, \
+    create_scene
 
 
 @requires_module("pyembree")
@@ -39,3 +46,101 @@
         images.append(im)
 
     return images
+
+
+def compare(ds, im, test_prefix, decimals=12):
+    def mesh_render_image_func(filename_prefix):
+        return im.write_image(filename_prefix)
+
+    test = GenericImageTest(ds, mesh_render_image_func, decimals)
+    test.prefix = test_prefix
+    return test
+
+hex8 = "MOOSE_sample_data/out.e-s010"
+hex8_fields = [('connect1', 'diffused'), ('connect2', 'convected')]
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_hex8_render():
+    for field in hex8_fields:
+        ds = data_dir_load(hex8, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_hex8_%s_%s" % field)
+
+
+tet4 = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
+tet4_fields = [("connect1", "u")]
+
+@requires_ds(tet4)
+@requires_module("pyembree")
+def test_tet4_render():
+    for field in tet4_fields:
+        ds = data_dir_load(tet4, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_tet4_%s_%s" % field)
+
+
+hex20 = "MOOSE_sample_data/mps_out.e"
+hex20_fields = [('connect2', 'temp')]
+
+@requires_ds(hex20)
+@requires_module("pyembree")
+def test_hex20_render():
+    for field in hex20_fields:
+        ds = data_dir_load(hex20, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_hex20_%s_%s" % field)
+
+
+wedge6 = "MOOSE_sample_data/wedge_out.e"
+wedge6_fields = [('connect1', 'diffused')]
+
+@requires_ds(wedge6)
+@requires_module("pyembree")
+def test_wedge6_render():
+    for field in wedge6_fields:
+        ds = data_dir_load(wedge6, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_wedge6_%s_%s" % field)
+
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_perspective_mesh_render():
+    ds = data_dir_load(hex8)
+    sc = create_scene(ds, ("connect2", "diffused"))
+
+    cam = sc.add_camera(ds, lens_type='perspective')
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
+    cam.resolution = (800, 800)
+    im = sc.render()
+    yield compare(ds, im, "perspective_mesh_render")
+
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_composite_mesh_render():
+    ds = data_dir_load(hex8)
+    sc = Scene()
+    cam = sc.add_camera(ds)
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
+                     ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
+    cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
+    cam.resolution = (800, 800)
+
+    ms1 = MeshSource(ds, ('connect1', 'diffused'))
+    ms2 = MeshSource(ds, ('connect2', 'diffused'))
+
+    sc.add_source(ms1)
+    sc.add_source(ms2)
+
+    im = sc.render()
+    yield compare(ds, im, "composite_mesh_render")
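
The new answer tests above all follow one pattern: load a MOOSE sample
dataset, build a scene around a single field with create_scene(), render
it, and wrap the resulting image in a GenericImageTest by way of the
compare() helper. As a minimal sketch, a test for a further, hypothetical
Exodus II file would look like this (the path and field tuple are
illustrative, not real sample data; compare() is the helper defined in
the diff above):

    from yt.testing import requires_module
    from yt.utilities.answer_testing.framework import \
        requires_ds, \
        data_dir_load
    from yt.visualization.volume_rendering.api import create_scene

    # Hypothetical sample data; substitute a real Exodus II file/field.
    my_mesh = "MOOSE_sample_data/my_mesh_out.e"
    my_mesh_fields = [('connect1', 'diffused')]

    @requires_ds(my_mesh)
    @requires_module("pyembree")
    def test_my_mesh_render():
        for field in my_mesh_fields:
            ds = data_dir_load(my_mesh, kwargs={'step': -1})
            sc = create_scene(ds, field)  # scene holding one MeshSource
            im = sc.render()              # render the scene to an image
            # compare() wraps the image in a GenericImageTest answer test
            yield compare(ds, im, "render_answers_my_mesh_%s_%s" % field)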

diff -r 8274418a9da017df6f69946c00f1fe04664b099d -r ad805640beab44a19be7d3e56ac39532d9dfb604 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -29,7 +29,7 @@
 
     Transfer functions are defined by boundaries, bins, and the value that
     governs transmission through that bin.  This is scaled between 0 and 1.
-    When integrating through a volume. the value through a given cell is
+    When integrating through a volume the value through a given cell is
     defined by the value calculated in the transfer function.
 
     Parameters
@@ -38,7 +38,7 @@
         The min and max for the transfer function.  Values below or above
         these values are discarded.
     nbins : int
-        How many bins to calculate; in betwee, linear interpolation is
+        How many bins to calculate; in between, linear interpolation is
         used, so low values are typically fine.
 
     Notes
@@ -63,7 +63,7 @@
     def add_gaussian(self, location, width, height):
         r"""Add a Gaussian distribution to the transfer function.
 
-        Typically, when rendering isocontours, a Guassian distribution is the
+        Typically, when rendering isocontours, a Gaussian distribution is the
         easiest way to draw out features.  The spread provides a softness.
         The values are calculated as :math:`f(x) = h \exp{-(x-x_0)^2 / w}`.
 
@@ -239,7 +239,7 @@
 
 class MultiVariateTransferFunction(object):
     r"""This object constructs a set of field tables that allow for
-    multiple field variables to control the integration through a volme.
+    multiple field variables to control the integration through a volume.
 
     The integration through a volume typically only utilizes a single field
     variable (for instance, Density) to set up and control the values
@@ -825,7 +825,7 @@
     this transfer function should be used.  It will create a very simple
     table that merely sums along each ray.  Note that the end product will
     need to be scaled by the total width through which the rays were cast,
-    a piece of information inacessible to the transfer function.
+    a piece of information inaccessible to the transfer function.
 
     Parameters
     ----------
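
For reference, the add_gaussian() helper whose docstring is cleaned up
above is the usual way to pull a soft isocontour out of a rendering. A
minimal sketch of its use on a ColorTransferFunction (the field bounds
and the (location, width, height) values are illustrative):

    from yt.visualization.volume_rendering.transfer_functions import \
        ColorTransferFunction

    # Transfer function over log10(density); the bounds are illustrative.
    tf = ColorTransferFunction((-28.0, -24.0), nbins=256)
    # Each Gaussian is f(x) = h * exp(-(x - x0)**2 / w): a soft isocontour
    # centered at location x0 with spread w.  For a color transfer
    # function the height h is an RGBA 4-list.
    tf.add_gaussian(-26.0, 0.01, [1.0, 0.0, 0.0, 1.0])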


http://link.bitbucket.org/wf/click?upn=8USRlNyft-2BCzk2l4Ywl6gDx2lD2xxoS9E7MwXb2SMR-2BXcTD42YocdnOFkyGBVHOUu2SOyHSzpMOSQAZl9RumugpTkheyO-2Bk2r742rJYXSpc-3D_ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27Chp4tbBaYQJh-2BYFo0ahY91tgRPGfZuSCITApOp1Z-2FKyD73CrRgde-2B2gGYjMsCUxJPfzaui4EM2YR8poUzj0v1ZvpM79rkCVXFLFE8CY4LSILBnuPXVpLh-2BT-2BuyovaGnc-2FLb3aBB7nsrGr4ndfdlAAavCQlW1NbGc9y-2FZliszYXLbJzjfcwMC7YWxLLYJ2UDDc-3D
Changeset:   0c002ab73f18
Branch:      yt
User:        chummels
Date:        2016-03-16 18:49:03+00:00
Summary:     Shifting absorption spectrum method order to retain logical grouping.
Affected #:  1 file

diff -r ad805640beab44a19be7d3e56ac39532d9dfb604 -r 0c002ab73f180837a684aa1f86e18026b5bfa02f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -234,6 +234,42 @@
         del field_data
         return (self.lambda_field, self.flux_field)
 
+    def _apply_observing_redshift(self, field_data, use_peculiar_velocity,
+                                 observing_redshift):
+        """
+        Change the redshifts of individual absorbers to account for the 
+        redshift at which the observer sits.
+
+        The intermediate redshift that is seen by an observer
+        at a redshift other than z=0 is z12, where z1 is the
+        observing redshift and z2 is the emitted photon's redshift
+        Hogg (2000) eq. 13:
+
+        1 + z12 = (1 + z2) / (1 + z1)
+        """
+        if observing_redshift == 0.:
+            # This is already assumed in the generation of the LightRay
+            redshift = field_data['redshift']
+            if use_peculiar_velocity:
+                redshift_eff = field_data['redshift_eff']
+        else:
+            # The intermediate redshift that is seen by an observer
+            # at a redshift other than z=0 is z12, where z1 is the
+            # observing redshift and z2 is the emitted photon's redshift
+            # Hogg (2000) eq. 13:
+            # 1 + z12 = (1 + z2) / (1 + z1)
+            redshift = ((1 + field_data['redshift']) / \
+                        (1 + observing_redshift)) - 1.
+            # Combining cosmological redshift and doppler redshift
+            # into an effective redshift is found in Peacock's
+            # Cosmological Physics eqn 3.75:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
+            if use_peculiar_velocity:
+                redshift_eff = ((1 + redshift) * \
+                                (1 + field_data['redshift_dopp'])) - 1.
+
+        return redshift, redshift_eff
+
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                   observing_redshift=0.):
         """
@@ -276,42 +312,6 @@
                 pbar.update(i)
             pbar.finish()
 
-    def _apply_observing_redshift(self, field_data, use_peculiar_velocity,
-                                 observing_redshift):
-        """
-        Change the redshifts of individual absorbers to account for the 
-        redshift at which the observer sits.
-
-        The intermediate redshift that is seen by an observer
-        at a redshift other than z=0 is z12, where z1 is the
-        observing redshift and z2 is the emitted photon's redshift
-        Hogg (2000) eq. 13:
-
-        1 + z12 = (1 + z2) / (1 + z1)
-        """
-        if observing_redshift == 0.:
-            # This is already assumed in the generation of the LightRay
-            redshift = field_data['redshift']
-            if use_peculiar_velocity:
-                redshift_eff = field_data['redshift_eff']
-        else:
-            # The intermediate redshift that is seen by an observer
-            # at a redshift other than z=0 is z12, where z1 is the
-            # observing redshift and z2 is the emitted photon's redshift
-            # Hogg (2000) eq. 13:
-            # 1 + z12 = (1 + z2) / (1 + z1)
-            redshift = ((1 + field_data['redshift']) / \
-                        (1 + observing_redshift)) - 1.
-            # Combining cosmological redshift and doppler redshift
-            # into an effective redshift is found in Peacock's
-            # Cosmological Physics eqn 3.75:
-            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
-            if use_peculiar_velocity:
-                redshift_eff = ((1 + redshift) * \
-                                (1 + field_data['redshift_dopp'])) - 1.
-
-        return redshift, redshift_eff
-
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
                                output_absorbers_file, subgrid_resolution=10,
                                observing_redshift=0., njobs=-1):
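
The arithmetic being regrouped here is small enough to check by hand. A
standalone sketch of the two relations _apply_observing_redshift applies,
Hogg (2000) eq. 13 for the redshift seen by an observer away from z=0 and
Peacock's Cosmological Physics eqn 3.75 for folding in the Doppler term
(the function name and the numbers are illustrative):

    def observed_redshift(z_absorber, z_observer, z_doppler=None):
        # Hogg (2000) eq. 13: 1 + z12 = (1 + z2) / (1 + z1) gives the
        # redshift z12 of an absorber at z2 seen by an observer at z1.
        z = (1. + z_absorber) / (1. + z_observer) - 1.
        if z_doppler is not None:
            # Peacock, Cosmological Physics eqn 3.75:
            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
            z = (1. + z) * (1. + z_doppler) - 1.
        return z

    # An absorber at z = 2 seen from z = 0.5 appears at z12 = 1.0:
    assert abs(observed_redshift(2.0, 0.5) - 1.0) < 1e-12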


http://link.bitbucket.org/wf/click?upn=8USRlNyft-2BCzk2l4Ywl6gDx2lD2xxoS9E7MwXb2SMR-2BXcTD42YocdnOFkyGBVHOUGKYWZmy7muRqC0LhKWfJ-2BH60MvWOatmJaM7S4EW-2BFC0-3D_ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27Chp4tbBaYQJh-2BYFo0ahY91tgRPGfZuSCITApOp1Z-2FKyP421kYJi8ifHvt02cPzk4vVExtfU5ZU-2FzLw5TIUl-2FjOIypufF5fZrlORdSAD14s5Zb-2FrO3mi2GDKms8kndZUixbPC4b0KquCzGbcqOv3dJbuT1mEHVfAp-2BjCVbt2jw-2F8SLSh6zdvpCa9RF1FOyxoxc-3D
Changeset:   44cd6dd59d4f
Branch:      yt
User:        chummels
Date:        2016-03-16 20:56:32+00:00
Summary:     Correcting pyflakes issue with bad indentation.
Affected #:  1 file

diff -r 0c002ab73f180837a684aa1f86e18026b5bfa02f -r 44cd6dd59d4f0b38313af14565aba9f4723a3b54 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -426,7 +426,7 @@
                     right_index = (center_index[i] + window_width_in_bins/2)
                     n_vbins = (right_index - left_index) * n_vbins_per_bin[i]
                     
-                   # the array of virtual bins in lambda space
+                    # the array of virtual bins in lambda space
                     vbins = \
                         np.linspace(self.lambda_min + self.bin_width.d * left_index, 
                                     self.lambda_min + self.bin_width.d * right_index, 


http://link.bitbucket.org/wf/click?upn=8USRlNyft-2BCzk2l4Ywl6gDx2lD2xxoS9E7MwXb2SMR-2BXcTD42YocdnOFkyGBVHOURVLnwI9JFXOUwxpRdpSKLAcIhiHmOPDaWGUUyUZVytk-3D_ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27Chp4tbBaYQJh-2BYFo0ahY91tgRPGfZuSCITApOp1Z-2FKyCi39SL9BgW18LMRZODMeQdnw-2FVx71hyQa-2FMSk9jvpFywnIMCpbPw4VQi4g5Yl1RuKV-2Fo8LefOrpLGM6upmyk3x28K5iCj-2FcIgCTFWgbOOFbGtlWoGUjB6j-2FImizMrshxpKIsTWTjgQ-2B05hh3uWk3pE-3D
Changeset:   b00711a542af
Branch:      yt
User:        ngoldbaum
Date:        2016-03-23 20:50:32+00:00
Summary:     Merged in chummels/yt (pull request #2044)

Adding observing_redshift kwarg to AbsorptionSpectrum
Affected #:  1 file

diff -r a58aff2c0219d6abd66ffc128fd82b3c6fbde296 -r b00711a542af2b45a03e6aec8ee35ee83b0d3ccc yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -119,8 +119,9 @@
 
     def make_spectrum(self, input_file, output_file=None,
                       line_list_file=None, output_absorbers_file=None,
-                      use_peculiar_velocity=True, 
-                      subgrid_resolution=10, njobs="auto"):
+                      use_peculiar_velocity=True,
+                      subgrid_resolution=10, observing_redshift=0.,
+                      njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -130,33 +131,38 @@
         input_file : string or dataset
            path to input ray data or a loaded ray dataset
         output_file : optional, string
-           Option to save a file containing the wavelength, flux, and optical 
-           depth fields.  File formats are chosen based on the filename extension.  
-           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+           Option to save a file containing the wavelength, flux, and optical
+           depth fields.  File formats are chosen based on the filename
+           extension. ``.h5`` for hdf5, ``.fits`` for fits, and everything
+           else is ASCII.
            Default: None
         output_absorbers_file : optional, string
-           Option to save a text file containing all of the absorbers and 
+           Option to save a text file containing all of the absorbers and
            corresponding wavelength and redshift information.
            For parallel jobs, combining the lines lists can be slow so it
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
            if True, include peculiar velocity for calculating doppler redshift
-           to shift lines.  Requires similar flag to be set in LightRay 
+           to shift lines.  Requires similar flag to be set in LightRay
            generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal
            width is less than the spectral bin width), the voigt profile of
-           the line is deposited into an array of virtual wavelength bins at 
-           higher resolution.  The optical depth from these virtual bins is 
-           integrated and then added to the coarser spectral wavelength bin.  
-           The subgrid_resolution value determines the ratio between the 
-           thermal width and the bin width of the virtual bins.  Increasing 
-           this value yields smaller virtual bins, which increases accuracy, 
-           but is more expensive.  A value of 10 yields accuracy to the 4th 
+           the line is deposited into an array of virtual wavelength bins at
+           higher resolution.  The optical depth from these virtual bins is
+           integrated and then added to the coarser spectral wavelength bin.
+           The subgrid_resolution value determines the ratio between the
+           thermal width and the bin width of the virtual bins.  Increasing
+           this value yields smaller virtual bins, which increases accuracy,
+           but is more expensive.  A value of 10 yields accuracy to the 4th
            significant digit in tau.
            Default: 10
+        observing_redshift : optional, float
+           This is the redshift at which the observer is observing
+           the absorption spectrum.
+           Default: 0
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
@@ -183,6 +189,9 @@
             input_fields.append('redshift_eff')
             field_units["velocity_los"] = "cm/s"
             field_units["redshift_eff"] = ""
+        if observing_redshift != 0.:
+            input_fields.append('redshift_dopp')
+            field_units["redshift_dopp"] = ""
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -204,8 +213,10 @@
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
+                                    observing_redshift=observing_redshift,
                                     njobs=njobs)
-        self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
+        self._add_continua_to_spectrum(field_data, use_peculiar_velocity,
+                                       observing_redshift=observing_redshift)
 
         self.flux_field = np.exp(-self.tau_field)
 
@@ -223,20 +234,63 @@
         del field_data
         return (self.lambda_field, self.flux_field)
 
-    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity):
+    def _apply_observing_redshift(self, field_data, use_peculiar_velocity,
+                                 observing_redshift):
+        """
+        Change the redshifts of individual absorbers to account for the 
+        redshift at which the observer sits.
+
+        The intermediate redshift that is seen by an observer
+        at a redshift other than z=0 is z12, where z1 is the
+        observing redshift and z2 is the emitted photon's redshift
+        Hogg (2000) eq. 13:
+
+        1 + z12 = (1 + z2) / (1 + z1)
+        """
+        if observing_redshift == 0.:
+            # This is already assumed in the generation of the LightRay
+            redshift = field_data['redshift']
+            if use_peculiar_velocity:
+                redshift_eff = field_data['redshift_eff']
+        else:
+            # The intermediate redshift that is seen by an observer
+            # at a redshift other than z=0 is z12, where z1 is the
+            # observing redshift and z2 is the emitted photon's redshift
+            # Hogg (2000) eq. 13:
+            # 1 + z12 = (1 + z2) / (1 + z1)
+            redshift = ((1 + field_data['redshift']) / \
+                        (1 + observing_redshift)) - 1.
+            # Combining cosmological redshift and doppler redshift
+            # into an effective redshift is found in Peacock's
+            # Cosmological Physics eqn 3.75:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
+            if use_peculiar_velocity:
+                redshift_eff = ((1 + redshift) * \
+                                (1 + field_data['redshift_dopp'])) - 1.
+
+        return redshift, redshift_eff
+
+    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
+                                  observing_redshift=0.):
         """
         Add continuum features to the spectrum.
         """
+        # Change the redshifts of continuum sources to account for the 
+        # redshift at which the observer sits
+        redshift, redshift_eff = self._apply_observing_redshift(field_data, 
+                                 use_peculiar_velocity, observing_redshift)
+
         # Only add continuum features down to tau of 1.e-3.
         min_tau = 1.e-3
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
+
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+                delta_lambda = continuum['wavelength'] * redshift_eff
             else:
-                delta_lambda = continuum['wavelength'] * field_data['redshift']
+                delta_lambda = continuum['wavelength'] * redshift
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -259,13 +313,19 @@
             pbar.finish()
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
-                               output_absorbers_file, subgrid_resolution=10, 
-                               njobs=-1):
+                               output_absorbers_file, subgrid_resolution=10,
+                               observing_redshift=0., njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
-        # Widen wavelength window until optical depth falls below this tau 
-        # value at the ends to assure that the wings of a line have been 
+
+        # Change the redshifts of individual absorbers to account for the 
+        # redshift at which the observer sits
+        redshift, redshift_eff = self._apply_observing_redshift(field_data, 
+                                 use_peculiar_velocity, observing_redshift)
+
+        # Widen wavelength window until optical depth falls below this tau
+        # value at the ends to assure that the wings of a line have been
         # fully resolved.
         min_tau = 1e-3
 
@@ -276,11 +336,11 @@
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
-            # wavelength to the observed wavelength of the transition 
+            # wavelength to the observed wavelength of the transition
             if use_peculiar_velocity:
-                delta_lambda = line['wavelength'] * field_data['redshift_eff']
+                delta_lambda = line['wavelength'] * redshift_eff
             else:
-                delta_lambda = line['wavelength'] * field_data['redshift']
+                delta_lambda = line['wavelength'] * redshift
             # lambda_obs is central wavelength of line after redshift
             lambda_obs = line['wavelength'] + delta_lambda
             # the total number of absorbers per transition
@@ -308,7 +368,7 @@
                                   line['atomic_mass'])
 
             # the actual thermal width of the lines
-            thermal_width = (lambda_obs * thermal_b / 
+            thermal_width = (lambda_obs * thermal_b /
                              speed_of_light_cgs).convert_to_units("angstrom")
 
             # Sanitize units for faster runtime of the tau_profile machinery.
@@ -320,20 +380,20 @@
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
-            # the spectral bin size).  Here, we create virtual wavelength bins 
-            # small enough in width to well resolve each line, deposit the 
-            # voigt profile into them, then numerically integrate their tau 
-            # values and sum them to redeposit them into the actual spectral 
+            # the spectral bin size).  Here, we create virtual wavelength bins
+            # small enough in width to well resolve each line, deposit the
+            # voigt profile into them, then numerically integrate their tau
+            # values and sum them to redeposit them into the actual spectral
             # bins.
 
             # virtual bins (vbins) will be:
             # 1) <= the bin_width; assures at least as good as spectral bins
             # 2) <= 1/10th the thermal width; assures resolving voigt profiles
             #   (actually 1/subgrid_resolution value, default is 1/10)
-            # 3) a bin width will be divisible by vbin_width times a power of 
+            # 3) a bin width will be divisible by vbin_width times a power of
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
-            resolution = thermal_width / self.bin_width 
+            resolution = thermal_width / self.bin_width
             n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
             vbin_width = self.bin_width.d / n_vbins_per_bin
 
@@ -341,17 +401,17 @@
             if (thermal_width < self.bin_width).any():
                 mylog.info(("%d out of %d line components will be " + \
                             "deposited as unresolved lines.") %
-                           ((thermal_width < self.bin_width).sum(), 
+                           ((thermal_width < self.bin_width).sum(),
                             n_absorbers))
 
             # provide a progress bar with information about lines processed
             pbar = get_pbar("Adding line - %s [%f A]: " % \
                             (line['label'], line['wavelength']), n_absorbers)
 
-            # for a given transition, step through each location in the 
+            # for a given transition, step through each location in the
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
-
+ 
                 # the virtual window into which the line is deposited initially 
                 # spans a region of 2 coarse spectral bins 
                 # (one on each side of the center_index) but the window
@@ -362,12 +422,9 @@
                 window_width_in_bins = 2
 
                 while True:
-                    left_index = (center_index[i] - \
-                            window_width_in_bins/2)
-                    right_index = (center_index[i] + \
-                            window_width_in_bins/2)
-                    n_vbins = (right_index - left_index) * \
-                              n_vbins_per_bin[i]
+                    left_index = (center_index[i] - window_width_in_bins/2)
+                    right_index = (center_index[i] + window_width_in_bins/2)
+                    n_vbins = (right_index - left_index) * n_vbins_per_bin[i]
                     
                     # the array of virtual bins in lambda space
                     vbins = \
@@ -384,8 +441,8 @@
 
                     # If tau has not dropped below min tau threshold by the
                     # edges (ie the wings), then widen the wavelength
-                    # window and repeat process. 
-                    if ((vtau[0] < min_tau) and (vtau[-1] < min_tau)):
+                    # window and repeat process.
+                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
                     window_width_in_bins *= 2
 
@@ -421,8 +478,9 @@
                         += EW[(intersect_left_index - left_index): \
                               (intersect_right_index - left_index)]
 
+
                 # write out absorbers to file if the column density of
-                # an absorber is greater than the specified "label_threshold" 
+                # an absorber is greater than the specified "label_threshold"
                 # of that absorption line
                 if output_absorbers_file and \
                    line['label_threshold'] is not None and \
@@ -436,15 +494,15 @@
                                                 'wavelength': (lambda_0 + dlambda[i]),
                                                 'column_density': column_density[i],
                                                 'b_thermal': thermal_b[i],
-                                                'redshift': field_data['redshift'][i],
-                                                'redshift_eff': field_data['redshift_eff'][i],
+                                                'redshift': redshift[i],
+                                                'redshift_eff': redshift_eff[i],
                                                 'v_pec': peculiar_velocity})
                 pbar.update(i)
             pbar.finish()
 
             del column_density, delta_lambda, lambda_obs, center_index, \
                 thermal_b, thermal_width, cdens, thermb, dlambda, \
-                vlos, resolution, vbin_width, n_vbins_per_bin
+                vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
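
The virtual-bin sizing rule restated in the comments above is easy to
verify numerically. A sketch with illustrative numbers, taking a thermal
width one quarter of the spectral bin width at the default
subgrid_resolution of 10:

    import numpy as np

    bin_width = 0.01          # Angstrom; illustrative
    thermal_width = 0.0025    # Angstrom; illustrative
    subgrid_resolution = 10   # the default

    resolution = thermal_width / bin_width                  # 0.25
    # Round up to a power of ten so every coarse bin holds a whole number
    # of virtual bins and each vbin resolves 1/10th of the thermal width.
    n_vbins_per_bin = 10 ** np.ceil(np.log10(subgrid_resolution / resolution))
    vbin_width = bin_width / n_vbins_per_bin
    # subgrid_resolution / resolution = 40 and ceil(log10(40)) = 2, so
    # each spectral bin is split into 100 virtual bins of width 1e-4 A.
    assert n_vbins_per_bin == 100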
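
With the merge in place, the observer's position is a single keyword
away. A minimal end-to-end sketch (the ray file and output names are
illustrative; the Lya atomic data follow the values used in the yt
documentation):

    from yt.analysis_modules.absorption_spectrum.api import \
        AbsorptionSpectrum

    # A spectrum spanning 900-1800 Angstroms in 10000 bins; illustrative.
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    # label, field, wavelength [A], f-value, gamma, atomic mass [amu]
    sp.add_line("Lya", "H_number_density", 1215.67,
                0.4164, 6.265e8, 1.00794, label_threshold=1.e10)
    # Place the observer at z = 0.5 instead of the default z = 0.
    wavelength, flux = sp.make_spectrum("ray.h5",
                                        output_file="spectrum.h5",
                                        observing_redshift=0.5)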

Repository URL: http://link.bitbucket.org/wf/click?upn=8USRlNyft-2BCzk2l4Ywl6gDx2lD2xxoS9E7MwXb2SMR-2BI0v8SbQq-2B8-2FZaaHaJT85r_ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27Chp4tbBaYQJh-2BYFo0ahY91tgRPGfZuSCITApOp1Z-2FKyB8RNDu3cmYW-2BcZ8LSR-2FFT1-2FVhHCQ1OpJFoJl2v7XMrYBq5xffJgm2h6kp1nMdjDjW7eXNJ7rmMC9rggYWHqRrpq0rwhoacd-2BylUYZKEp6eY1rQjRBF9gv2wjT5nQVRuzLcb-2BErfDrMIegbHGjlm3M8-3D

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.