[yt-svn] commit/yt: 65 new changesets

commits-noreply at bitbucket.org
Wed Oct 26 10:35:45 PDT 2016


65 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/143d0006df7d/
Changeset:   143d0006df7d
Branch:      stable
User:        ngoldbaum
Date:        2016-07-25 01:01:50+00:00
Summary:     Make the show() method display the scene instead of relying on return semantics of notebook cells
Affected #:  1 file

diff -r 8752de74f4e32a222e8af3bf9c2edd9c585479ee -r 143d0006df7d71a4f80186d0f4680c3e653c38cc yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -778,8 +778,9 @@
 
         """
         if "__IPYTHON__" in dir(builtins):
+            from IPython.display import display
             self._sigma_clip = sigma_clip
-            return self
+            display(self)
         else:
             raise YTNotInsideNotebook
 
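For context, a minimal usage sketch of the patched method, assuming a notebook session and a scene built with yt.create_scene (dataset path illustrative):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds)
    # Inside IPython/Jupyter, show() now hands the scene to
    # IPython.display.display() instead of returning it and relying on
    # the notebook's rendering of a cell's return value, so it also
    # works when show() is not the last statement in a cell.
    sc.show(sigma_clip=4.0)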


https://bitbucket.org/yt_analysis/yt/commits/fff04e67204f/
Changeset:   fff04e67204f
Branch:      stable
User:        mzingale
Date:        2016-07-25 17:56:45+00:00
Summary:     Store git hashes from Castro output
Affected #:  1 file

diff -r 143d0006df7d71a4f80186d0f4680c3e653c38cc -r fff04e67204f86cfc3973dc8f191ee4050b87e58 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -818,6 +818,10 @@
                 if any(b in line for b in bcs):
                     p, v = line.strip().split(":")
                     self.parameters[p] = v.strip()
+                if "git hash" in line:
+                    # line format: codename git hash:  the-hash
+                    fields = line.split(":")
+                    self.parameters[fields[0]] = fields[1].strip()
                 line = next(f)
             
             # runtime parameters that we overrode follow "Inputs File

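A standalone sketch of what the new branch extracts from a Castro job_info line (the sample hash is invented for illustration):

    # line format: codename git hash:  the-hash
    line = "Castro git hash: 0123abc"
    parameters = {}
    if "git hash" in line:
        fields = line.split(":")
        parameters[fields[0]] = fields[1].strip()
    # parameters == {'Castro git hash': '0123abc'}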

https://bitbucket.org/yt_analysis/yt/commits/9e148f24cae9/
Changeset:   9e148f24cae9
Branch:      stable
User:        jwise77
Date:        2014-12-27 20:56:15+00:00
Summary:     Adding rockstar halo properties from rockstar-galaxies fork.
Affected #:  1 file

diff -r fff04e67204f86cfc3973dc8f191ee4050b87e58 -r 9e148f24cae9e04b618bfd260696fe30bdca2392 yt/frontends/rockstar/definitions.py
--- a/yt/frontends/rockstar/definitions.py
+++ b/yt/frontends/rockstar/definitions.py
@@ -38,7 +38,7 @@
 # Note the final field here, which is a field for min/max format revision in
 # which the field appears.
 
-KNOWN_REVISIONS=[0, 1]
+KNOWN_REVISIONS=[0, 1, 2]
 
 halo_dt = [
     ('particle_identifier', np.int64),
@@ -101,6 +101,12 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
+    ('type', np.int32, (2, 100)),
+    ('sm', np.float32, (2, 100)),
+    ('gas', np.float32, (2, 100)),
+    ('bh', np.float32, (2, 100)),
+    ('peak_density', np.float32, (2, 100)),
+    ('av_density', np.float32, (2, 100)),
 ]
 
 halo_dts = {}

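The new entries use NumPy's three-element structured-dtype spec, where the trailing tuple gives each record a fixed-shape sub-array. A minimal sketch of that mechanism (fields abbreviated from the hunk above):

    import numpy as np

    halo_dt = np.dtype([
        ('particle_identifier', np.int64),
        ('sm', np.float32, (2, 100)),   # per-halo (2, 100) float32 block
    ])
    halos = np.zeros(5, dtype=halo_dt)
    print(halos['sm'].shape)            # (5, 2, 100)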

https://bitbucket.org/yt_analysis/yt/commits/2d280ba11c76/
Changeset:   2d280ba11c76
Branch:      stable
User:        ngoldbaum
Date:        2016-07-26 22:07:57+00:00
Summary:     Fix compatibility with latest version of Pint
Affected #:  2 files

diff -r 9e148f24cae9e04b618bfd260696fe30bdca2392 -r 2d280ba11c76aeb2723eec991a862566d9a832bc yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -926,12 +926,12 @@
     yt_quan2 = YTQuantity.from_pint(p_quan)
 
     yield assert_array_equal, p_arr, yt_arr.to_pint()
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_array_equal, yt_arr, YTArray.from_pint(p_arr)
     yield assert_array_equal, yt_arr, yt_arr2
 
     yield assert_equal, p_quan.magnitude, yt_quan.to_pint().magnitude
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_equal, yt_quan, YTQuantity.from_pint(p_quan)
     yield assert_equal, yt_quan, yt_quan2
 

diff -r 9e148f24cae9e04b618bfd260696fe30bdca2392 -r 2d280ba11c76aeb2723eec991a862566d9a832bc yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -713,7 +713,7 @@
         >>> c = yt.YTArray.from_pint(b)
         """
         p_units = []
-        for base, exponent in arr.units.items():
+        for base, exponent in arr._units.items():
             bs = convert_pint_units(base)
             p_units.append("%s**(%s)" % (bs, Rational(exponent)))
         p_units = "*".join(p_units)

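A round-trip sketch of the interface being fixed, assuming pint is installed. ._units is pint's internal UnitsContainer, which the fix switches to because in recent pint versions .units returns a Unit object with no .items() method:

    import pint
    from yt.units.yt_array import YTArray

    ureg = pint.UnitRegistry()
    p_arr = ureg.Quantity([1.0, 2.0, 3.0], 'km/s')

    yt_arr = YTArray.from_pint(p_arr)   # pint -> yt
    p_back = yt_arr.to_pint()           # yt -> pint
    for base, exponent in p_arr._units.items():
        print(base, exponent)           # kilometer 1, second -1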

https://bitbucket.org/yt_analysis/yt/commits/04c21ef0aa53/
Changeset:   04c21ef0aa53
Branch:      stable
User:        mzingale
Date:        2016-07-27 22:38:20+00:00
Summary:     Backporting PR #2312 https://bitbucket.org/yt_analysis/yt/pull-requests/2312
Affected #:  1 file

diff -r 2d280ba11c76aeb2723eec991a862566d9a832bc -r 04c21ef0aa53b56e690a879b30c236b8270578f9 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -181,6 +181,15 @@
         ("magmom", ("g*cm/s", ["momentum_magnitude"], r"\rho |\mathbf{U}|")),
         ("maggrav", ("cm/s**2", [], r"|\mathbf{g}|")),
         ("phiGrav", ("erg/g", [], r"\Phi")),
+        ("enuc", ("erg/(g*s)", [], r"\dot{e}_{\rm{nuc}}")),
+        ("rho_enuc", ("erg/(cm**3*s)", [], r"\rho \dot{e}_{\rm{nuc}}")),
+        ("angular_momentum_x", ("g/(cm*s)", [], r"\ell_x")),
+        ("angular_momentum_y", ("g/(cm*s)", [], r"\ell_y")),
+        ("angular_momentum_z", ("g/(cm*s)", [], r"\ell_z")),
+        ("phiRot", ("erg/g", [], r"\Phi_{\rm{rot}}")),
+        ("rot_x", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_x")),
+        ("rot_y", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_y")),
+        ("rot_z", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_z")),
     )
 
     def setup_fluid_fields(self):


https://bitbucket.org/yt_analysis/yt/commits/cd67a9891ce4/
Changeset:   cd67a9891ce4
Branch:      stable
User:        Rafael Ruggiero
Date:        2016-07-29 20:07:49+00:00
Summary:     Backporting PR #2313 https://bitbucket.org/yt_analysis/yt/pull-requests/2313
Affected #:  2 files

diff -r 04c21ef0aa53b56e690a879b30c236b8270578f9 -r cd67a9891ce4bf1212f4eb4d0e38e79290099fe5 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command, to make sure you are at
+the "yt" branch, and not other ones like "stable" (this will be important
+later when you want to submit your pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r 04c21ef0aa53b56e690a879b30c236b8270578f9 -r cd67a9891ce4bf1212f4eb4d0e38e79290099fe5 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1492,7 +1492,7 @@
          Defaults to None, which automatically picks an appropriate unit.
          If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
-    north-vector : a sequence of floats
+    north_vector : a sequence of floats
          A vector defining the 'up' direction in the plot.  This
          option sets the orientation of the slicing plane.  If not
          set, an arbitrary grid-aligned north-vector is chosen.
@@ -1628,7 +1628,7 @@
          Defaults to None, which automatically picks an appropriate unit.
          If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
-    north-vector : a sequence of floats
+    north_vector : a sequence of floats
          A vector defining the 'up' direction in the plot.  This
          option sets the orientation of the slicing plane.  If not
          set, an arbitrary grid-aligned north-vector is chosen.
@@ -1824,7 +1824,7 @@
          ('{yloc}', '{space}')                  ('lower', 'window')
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
-    north-vector : a sequence of floats
+    north_vector : a sequence of floats
         A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
         used in `AxisAlignedSlicePlot`.  This option sets the orientation of the
         slicing plane.  If not set, an arbitrary grid-aligned north-vector is


https://bitbucket.org/yt_analysis/yt/commits/5cfa8c2b5b59/
Changeset:   5cfa8c2b5b59
Branch:      stable
User:        chummels
Date:        2016-07-30 17:57:07+00:00
Summary:     Adding tau field to FITS output file for absorption spectrum
Affected #:  1 file

diff -r cd67a9891ce4bf1212f4eb4d0e38e79290099fe5 -r 5cfa8c2b5b5954d61e8007a75db20397b3583f0d yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -550,8 +550,9 @@
         """
         mylog.info("Writing spectrum to fits file: %s.", filename)
         col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field)
-        col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
-        cols = pyfits.ColDefs([col1, col2])
+        col2 = pyfits.Column(name='tau', format='E', array=self.tau_field)
+        col3 = pyfits.Column(name='flux', format='E', array=self.flux_field)
+        cols = pyfits.ColDefs([col1, col2, col3])
         tbhdu = pyfits.BinTableHDU.from_columns(cols)
         tbhdu.writeto(filename, clobber=True)
 

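A sketch of reading the widened table back, assuming astropy's FITS reader (the pyfits name above is yt's shim around it in recent installs; the file name is illustrative):

    from astropy.io import fits

    with fits.open('spectrum.fits') as hdulist:
        table = hdulist[1].data          # first binary table extension
        wavelength = table['wavelength']
        tau = table['tau']               # column added by this change
        flux = table['flux']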

https://bitbucket.org/yt_analysis/yt/commits/4a367b206486/
Changeset:   4a367b206486
Branch:      stable
User:        ngoldbaum
Date:        2016-08-01 22:24:06+00:00
Summary:     Adding an admonition to install using conda-forge if the install script detects an activated conda environment
Affected #:  1 file

diff -r 5cfa8c2b5b5954d61e8007a75db20397b3583f0d -r 4a367b2064860406e89cb616b4664e7f936ac0b5 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -115,7 +115,10 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"


https://bitbucket.org/yt_analysis/yt/commits/12a8747aa232/
Changeset:   12a8747aa232
Branch:      stable
User:        MatthewTurk
Date:        2016-08-02 18:52:15+00:00
Summary:     Quick units fix
Affected #:  1 file

diff -r 4a367b2064860406e89cb616b4664e7f936ac0b5 -r 12a8747aa2324ca96276e2d747e63274abec7d05 doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -118,7 +118,7 @@
     from yt.visualization.api import Streamlines
 
     ds = yt.load('DD1701') # Load ds
-    streamlines = Streamlines(ds, [0.5]*3)
+    streamlines = Streamlines(ds, ds.domain_center)
     streamlines.integrate_through_volume()
     stream = streamlines.path(0)
     matplotlib.pylab.semilogy(stream['t'], stream['density'], '-x')


https://bitbucket.org/yt_analysis/yt/commits/29c92b47af78/
Changeset:   29c92b47af78
Branch:      stable
User:        chummels
Date:        2016-08-04 15:10:23+00:00
Summary:     Backporting PR #2321 https://bitbucket.org/yt_analysis/yt/pull-requests/2321
Affected #:  1 file

diff -r 12a8747aa2324ca96276e2d747e63274abec7d05 -r 29c92b47af784cde6814032d886362057273d1eb yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -45,12 +45,12 @@
        lower wavelength bound in angstroms.
     lambda_max : float
        upper wavelength bound in angstroms.
-    n_lambda : float
+    n_lambda : int
        number of wavelength bins.
     """
 
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        self.n_lambda = n_lambda
+        self.n_lambda = int(n_lambda)
         # lambda, flux, and tau are wavelength, flux, and optical depth
         self.lambda_min = lambda_min
         self.lambda_max = lambda_max
@@ -301,7 +301,7 @@
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
+            pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):


https://bitbucket.org/yt_analysis/yt/commits/9b23afb00e6a/
Changeset:   9b23afb00e6a
Branch:      stable
User:        chummels
Date:        2016-08-17 18:45:52+00:00
Summary:     Backporting PR #2323 https://bitbucket.org/yt_analysis/yt/pull-requests/2323
Affected #:  5 files

diff -r 29c92b47af784cde6814032d886362057273d1eb -r 9b23afb00e6ace1cf2022867b2bac629da99b702 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -116,7 +116,12 @@
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
-Below, we will add H Lyman continuum.
+The wavelength refers to the location at which the continuum begins to be 
+applied to the dataset, and as it moves to lower wavelength values, the 
+optical depth value decreases according to the defined power law.  The 
+normalization value is the column density of the linked field which results
+in an optical depth of 1 at the defined wavelength.  Below, we add the hydrogen 
+Lyman continuum.
 
 .. code-block:: python
 
@@ -131,7 +136,7 @@
 Making the Spectrum
 ^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out
+Once all the lines and continuua are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python

diff -r 29c92b47af784cde6814032d886362057273d1eb -r 9b23afb00e6ace1cf2022867b2bac629da99b702 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,11 +67,12 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_001:
+  local_absorption_spectrum_003:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum
 
 other_tests:
   unittests:

diff -r 29c92b47af784cde6814032d886362057273d1eb -r 9b23afb00e6ace1cf2022867b2bac629da99b702 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -273,42 +273,85 @@
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                   observing_redshift=0.):
         """
-        Add continuum features to the spectrum.
+        Add continuum features to the spectrum.  Continuua are recorded as
+        a name, associated field, wavelength, normalization value, and index.
+        Continuua are applied at and below the denoted wavelength, where the
+        optical depth decreases as a power law of desired index.  For positive 
+        index values, this means optical depth is highest at the denoted 
+        wavelength, and it drops with shorter and shorter wavelengths.  
+        Consequently, transmitted flux undergoes a discontinuous cutoff at the 
+        denoted wavelength, and then slowly increases with decreasing wavelength 
+        according to the power law.
         """
         # Change the redshifts of continuum sources to account for the
         # redshift at which the observer sits
         redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                  use_peculiar_velocity, observing_redshift)
 
-        # Only add continuum features down to tau of 1.e-4.
-        min_tau = 1.e-3
+        # min_tau is the minimum optical depth value that warrants 
+        # accounting for an absorber.  for a single absorber, noticeable 
+        # continuum effects begin for tau = 1e-3 (leading to transmitted 
+        # flux of e^-tau ~ 0.999).  but we apply a cutoff to remove
+        # absorbers with insufficient column_density to contribute 
+        # significantly to a continuum (see below).  because lots of 
+        # low column density absorbers can add up to a significant
+        # continuum effect, we normalize min_tau by the n_absorbers.
+        n_absorbers = field_data['dl'].size
+        min_tau = 1.e-3/n_absorbers
 
         for continuum in self.continuum_list:
-            column_density = field_data[continuum['field_name']] * field_data['dl']
+
+            # Normalization is in cm**-2, so column density must be as well
+            column_density = (field_data[continuum['field_name']] * 
+                              field_data['dl']).in_units('cm**-2')
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
                 delta_lambda = continuum['wavelength'] * redshift_eff
             else:
                 delta_lambda = continuum['wavelength'] * redshift
+
+            # right index of continuum affected area is wavelength itself
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
+            right_index = np.digitize(this_wavelength, 
+                                      self.lambda_field).clip(0, self.n_lambda)
+            # left index of continuum affected area wavelength at which 
+            # optical depth reaches tau_min
             left_index = np.digitize((this_wavelength *
-                                     np.power((min_tau * continuum['normalization'] /
-                                               column_density), (1. / continuum['index']))),
-                                    self.lambda_field).clip(0, self.n_lambda)
+                              np.power((min_tau * continuum['normalization'] /
+                                        column_density),
+                                       (1. / continuum['index']))),
+                              self.lambda_field).clip(0, self.n_lambda)
 
+            # Only calculate the effects of continuua where normalized 
+            # column_density is greater than min_tau
+            # because lower column will not have significant contribution
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
+            if valid_continuua.size == 0:
+                mylog.info("Not adding continuum %s: insufficient column density" %
+                    continuum['label'])
+                return
+
             pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
+
+            # Tau value is (wavelength / continuum_wavelength)**index *
+            #              (column_dens / norm)
+            # i.e. a power law decreasing as wavelength decreases
+
+            # Step through the absorber list and add continuum tau for each to
+            # the total optical depth for all wavelengths
             for i, lixel in enumerate(valid_continuua):
-                line_tau = np.power((self.lambda_field[left_index[lixel]:right_index[lixel]] /
-                                     this_wavelength[lixel]), continuum['index']) * \
-                                     column_density[lixel] / continuum['normalization']
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                cont_tau = \
+                    np.power((self.lambda_field[left_index[lixel] :
+                                                right_index[lixel]] /
+                                   this_wavelength[lixel]), \
+                              continuum['index']) * \
+                    (column_density[lixel] / continuum['normalization'])
+                self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau
                 pbar.update(i)
             pbar.finish()
 
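A worked numeric sketch of the per-absorber continuum depth computed above, tau = (lambda / lambda_0)**index * (N / N_0), using the Lyman-continuum values that appear in the test below:

    import numpy as np

    lambda_0 = 912.32366   # continuum wavelength, Angstroms
    index = 3.0            # power-law index
    N = 1.6e17             # absorber column density, cm**-2
    N_0 = 1.6e17           # normalization, cm**-2

    lam = np.array([912.32366, 800.0, 600.0])
    tau = (lam / lambda_0)**index * (N / N_0)
    # tau ~= [1.00, 0.67, 0.28]: unity at lambda_0 when N == N_0,
    # falling off toward shorter wavelengths as the power law.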

diff -r 29c92b47af784cde6814032d886362057273d1eb -r 9b23afb00e6ace1cf2022867b2bac629da99b702 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -33,7 +33,7 @@
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
 GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
-
+ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
@@ -360,3 +360,64 @@
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+@requires_file(ISO_GALAXY)
+@requires_answer_testing()
+def test_absorption_spectrum_with_continuum():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset and adds Lyman alpha and Lyman continuum to it
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(ISO_GALAXY)
+    lr = LightRay(ds)
+
+    ray_start = ds.domain_left_edge
+    ray_end = ds.domain_right_edge
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    my_label = 'Ly C'
+    field = 'H_number_density'
+    wavelength = 912.323660  # Angstroms
+    normalization = 1.6e17
+    index = 3.0
+
+    sp.add_continuum(my_label, field, wavelength, normalization, index)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_continuum".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_with_continuum.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 29c92b47af784cde6814032d886362057273d1eb -r 9b23afb00e6ace1cf2022867b2bac629da99b702 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -817,6 +817,10 @@
             kwargs = self.kwargs
         return self.array_func(*args, **kwargs)
     def compare(self, new_result, old_result):
+        if not isinstance(new_result, dict):
+            new_result = {'answer': new_result}
+            old_result = {'answer': old_result}
+
         assert_equal(len(new_result), len(old_result),
                                           err_msg="Number of outputs not equal.",
                                           verbose=True)


https://bitbucket.org/yt_analysis/yt/commits/0ee2cd6d8943/
Changeset:   0ee2cd6d8943
Branch:      stable
User:        MatthewTurk
Date:        2016-08-08 19:52:25+00:00
Summary:     Account for some really tough round-off cases
Affected #:  1 file

diff -r 9b23afb00e6ace1cf2022867b2bac629da99b702 -r 0ee2cd6d8943c45652498ca22240acb8e0fbc8f7 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -110,6 +110,13 @@
             # We are somewhere in the middle of the face
             temp_x = intersect_t * v_dir[i] + v_pos[i] # current position
             temp_y = ((temp_x - vc.left_edge[i])*vc.idds[i])
+            # There are some really tough cases where we are just within a couple
+            # least significant places of the edge, and this helps prevent
+            # killing the calculation through a segfault in those cases.
+            if -1 < temp_y < 0 and step[i] > 0:
+                temp_y = 0.0
+            elif vc.dims[i] - 1 < temp_y < vc.dims[i] and step[i] < 0:
+                temp_y = vc.dims[i] - 1
             cur_ind[i] =  <int> (floor(temp_y))
         if step[i] > 0:
             temp_y = (cur_ind[i] + 1) * vc.dds[i] + vc.left_edge[i]

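A pure-Python sketch of the guard, with the traversal state simplified to scalars; temp_y is a fractional cell index, and the clamp keeps values that land a few ulps outside [0, dims - 1] from producing an out-of-bounds index after floor():

    from math import floor

    def clamped_cell_index(temp_y, dims, step):
        if -1 < temp_y < 0 and step > 0:
            temp_y = 0.0                # just below the left edge
        elif dims - 1 < temp_y < dims and step < 0:
            temp_y = dims - 1           # just past the right edge
        return int(floor(temp_y))

    print(clamped_cell_index(-1e-16, 8, step=1))   # 0; bare floor() gives -1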

https://bitbucket.org/yt_analysis/yt/commits/4cbe0ed719ad/
Changeset:   4cbe0ed719ad
Branch:      stable
User:        chummels
Date:        2016-08-10 05:38:48+00:00
Summary:     Adding metallicity to alias fields in stream frontend.
Affected #:  1 file

diff -r 0ee2cd6d8943c45652498ca22240acb8e0fbc8f7 -r 4cbe0ed719adade763a132de98e05625d65cf10d yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,7 @@
         ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
         ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
         ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
+        ("metallicity", ("Zsun", ["metallicity"], None)),
 
         # We need to have a bunch of species fields here, too
         ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),


https://bitbucket.org/yt_analysis/yt/commits/a95d12ad587d/
Changeset:   a95d12ad587d
Branch:      stable
User:        Chang-Goo Kim
Date:        2016-08-11 17:08:53+00:00
Summary:     Add gravitational_potential to known_other_fields for athena frontend
Affected #:  1 file

diff -r 4cbe0ed719adade763a132de98e05625d65cf10d -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -34,6 +34,7 @@
         ("cell_centered_B_x", (b_units, [], None)),
         ("cell_centered_B_y", (b_units, [], None)),
         ("cell_centered_B_z", (b_units, [], None)),
+        ("gravitational_potential", ("code_velocity**2", ["gravitational_potential"], None)),
     )
 
 # In Athena, conservative or primitive variables may be written out.


https://bitbucket.org/yt_analysis/yt/commits/61bc5c910a78/
Changeset:   61bc5c910a78
Branch:      stable
User:        ngoldbaum
Date:        2016-08-11 21:00:18+00:00
Summary:     Backporting PR #2334 https://bitbucket.org/yt_analysis/yt/pull-requests/2334
Affected #:  21 files

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -889,10 +889,12 @@
         return new_unit
 
     def set_code_units(self):
-        self._set_code_unit_attributes()
         # here we override units, if overrides have been provided.
         self._override_code_units()
 
+        # set attributes like ds.length_unit
+        self._set_code_unit_attributes()
+
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
@@ -932,19 +934,22 @@
     def _override_code_units(self):
         if len(self.units_override) == 0:
             return
-        mylog.warning("Overriding code units. This is an experimental and potentially "+
-                      "dangerous option that may yield inconsistent results, and must be used "+
-                      "very carefully, and only if you know what you want from it.")
+        mylog.warning(
+            "Overriding code units. This is an experimental and potentially "
+            "dangerous option that may yield inconsistent results, and must be "
+            "used very carefully, and only if you know what you want from it.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
-                          ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
+                          ("velocity","cm/s"), ("magnetic","gauss"), 
+                          ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)
             if val is not None:
                 if isinstance(val, YTQuantity):
                     val = (val.v, str(val.units))
                 elif not isinstance(val, tuple):
                     val = (val, cgs)
-                u = getattr(self, "%s_unit" % unit)
-                mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])
+                u = getattr(self, "%s_unit" % unit, None)
+                mylog.info("Overriding %s_unit: %g -> %g %s.",
+                           unit, u, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
 
     _arr = None

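The helper threaded through every frontend below behaves like dict.setdefault for attributes: it assigns only when the attribute is absent, which is why _override_code_units() can now run first without being clobbered. A sketch of those semantics, as the call sites here imply them (not yt's verbatim implementation):

    def setdefaultattr(obj, name, value):
        # assign only if nothing (e.g. a units_override value) set it already
        if not hasattr(obj, name):
            setattr(obj, name, value)
        return getattr(obj, name)

    class DS(object):
        pass

    ds = DS()
    ds.length_unit = '1.0 pc'                   # installed by an override
    setdefaultattr(ds, 'length_unit', '1.0 cm')
    print(ds.length_unit)                       # '1.0 pc' is preserved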
diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -25,7 +25,8 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.frontends.art.definitions import \
@@ -243,10 +244,10 @@
         mass = aM0 * 1.98892e33
 
         self.cosmological_simulation = True
-        self.mass_unit = self.quan(mass, "g*%s" % ng**3)
-        self.length_unit = self.quan(box_proper, "Mpc")
-        self.velocity_unit = self.quan(velocity, "cm/s")
-        self.time_unit = self.length_unit / self.velocity_unit
+        setdefaultattr(self, 'mass_unit', self.quan(mass, "g*%s" % ng**3))
+        setdefaultattr(self, 'length_unit', self.quan(box_proper, "Mpc"))
+        setdefaultattr(self, 'velocity_unit', self.quan(velocity, "cm/s"))
+        setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)
 
     def _parse_parameter_file(self):
         """

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -30,7 +30,8 @@
     ARTIOFieldInfo
 
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.geometry_handler import \
     Index, \
     YTDataChunk
@@ -354,10 +355,13 @@
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = self.quan(self.parameters["unit_m"], "g")
-        self.length_unit = self.quan(self.parameters["unit_l"], "cm")
-        self.time_unit = self.quan(self.parameters["unit_t"], "s")
-        self.velocity_unit = self.length_unit / self.time_unit
+        setdefaultattr(
+            self, 'mass_unit', self.quan(self.parameters["unit_m"], "g"))
+        setdefaultattr(
+            self, 'length_unit', self.quan(self.parameters["unit_l"], "cm"))
+        setdefaultattr(
+            self, 'time_unit', self.quan(self.parameters["unit_t"], "s"))
+        setdefaultattr(self, 'velocity_unit', self.length_unit / self.time_unit)
 
     def _parse_parameter_file(self):
         # hard-coded -- not provided by headers

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -471,12 +471,15 @@
 
     def _set_code_unit_attributes(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical _units based on the
+        parameter file
         """
         if "length_unit" not in self.units_override:
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We set these to cgs for now, but they may be overridden later.
+            # We set these to cgs for now, but they may have been overridden
+            if getattr(self, unit+'_unit', None) is not None:
+                continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
             setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
         self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -49,6 +49,20 @@
         test_blast.__name__ = test.description
         yield test
 
+uo_blast = {
+    'length_unit': (1.0, 'pc'),
+    'mass_unit': (2.38858753789e-24, 'g/cm**3*pc**3'),
+    'time_unit': (1.0, 's*pc/km'),
+}
+
+@requires_file(blast)
+def test_blast_override():
+    # verify that overriding units causes derived unit values to be updated.
+    # see issue #1259
+    ds = load(blast, units_override=uo_blast)
+    assert_equal(float(ds.magnetic_unit.in_units('gauss')),
+                 5.478674679698131e-07)
+
 uo_stripping = {"time_unit":3.086e14,
                 "length_unit":8.0236e22,
                 "mass_unit":9.999e-30*8.0236e22**3}

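A usage sketch of the pattern the new test exercises (blast is the dataset path defined earlier in the test module; the override values are those from the hunk above):

    ds = load(blast, units_override=uo_blast)
    # magnetic_unit is derived as sqrt(4*pi * mass_unit /
    # (time_unit**2 * length_unit)), so it now reflects the overridden
    # pc/g/s values instead of the cgs defaults.
    print(ds.magnetic_unit.in_units('gauss'))   # ~5.4787e-07 gauss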
diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -22,8 +22,9 @@
 import numpy as np
 
 from yt.funcs import \
+    ensure_tuple, \
     mylog, \
-    ensure_tuple
+    setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
@@ -608,10 +609,10 @@
             self._setup2d()
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
 
     def _setup1d(self):
         # self._index_class = BoxlibHierarchy1D
@@ -1016,10 +1017,11 @@
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = self.quan(1.0, "Msun")
-        self.time_unit = self.quan(1.0 / 3.08568025e19, "s")
-        self.length_unit = self.quan(1.0 / (1 + self.current_redshift), "Mpc")
-        self.velocity_unit = self.length_unit / self.time_unit
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "Msun"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0 / 3.08568025e19, "s"))
+        setdefaultattr(self, 'length_unit',
+                       self.quan(1.0 / (1 + self.current_redshift), "Mpc"))
+        setdefaultattr(self, 'velocity_unit', self.length_unit / self.time_unit)
 
 def _guess_pcast(vals):
     # Now we guess some things about the parameter and its type

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -23,7 +23,9 @@
 from stat import \
     ST_CTIME
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.extern import six
@@ -275,14 +277,19 @@
         self.parameters["EOSType"] = -1 # default
 
     def _set_code_unit_attributes(self):
-        mylog.warning("Setting code length to be 1.0 cm")
-        mylog.warning("Setting code mass to be 1.0 g")
-        mylog.warning("Setting code time to be 1.0 s")
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.magnetic_unit = self.quan(np.sqrt(4.*np.pi), "gauss")
-        self.velocity_unit = self.length_unit / self.time_unit
+        if not hasattr(self, 'length_unit'):
+            mylog.warning("Setting code length unit to be 1.0 cm")
+        if not hasattr(self, 'mass_unit'):
+            mylog.warning("Setting code mass unit to be 1.0 g")
+        if not hasattr(self, 'time_unit'):
+            mylog.warning("Setting code time unit to be 1.0 s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'magnetic_unit',
+                       self.quan(np.sqrt(4.*np.pi), "gauss"))
+        setdefaultattr(self, 'velocity_unit',
+                       self.length_unit / self.time_unit)
 
     def _localize(self, f, default):
         if f is None:

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -28,7 +28,8 @@
 from yt.funcs import \
     ensure_list, \
     ensure_tuple, \
-    get_pbar
+    get_pbar, \
+    setdefaultattr
 from yt.config import ytcfg
 from yt.data_objects.grid_patch import \
     AMRGridPatch
@@ -917,11 +918,12 @@
             if box_size is None:
                 box_size = self.parameters["Physics"]["Cosmology"]\
                     ["CosmologyComovingBoxSize"]
-            self.length_unit = self.quan(box_size, "Mpccm/h")
-            self.mass_unit = \
-                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3
-            self.time_unit = self.quan(k['utim'], 's')
-            self.velocity_unit = self.quan(k['uvel'], 'cm/s')
+            setdefaultattr(self, 'length_unit', self.quan(box_size, "Mpccm/h"))
+            setdefaultattr(
+                self, 'mass_unit',
+                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3)
+            setdefaultattr(self, 'time_unit', self.quan(k['utim'], 's'))
+            setdefaultattr(self, 'velocity_unit', self.quan(k['uvel'], 'cm/s'))
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
@@ -937,15 +939,16 @@
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
                 length_unit = mass_unit = time_unit = 1.0
 
-            self.length_unit = self.quan(length_unit, "cm")
-            self.mass_unit = self.quan(mass_unit, "g")
-            self.time_unit = self.quan(time_unit, "s")
-            self.velocity_unit = self.length_unit / self.time_unit
+            setdefaultattr(self, 'length_unit', self.quan(length_unit, "cm"))
+            setdefaultattr(self, 'mass_unit', self.quan(mass_unit, "g"))
+            setdefaultattr(self, 'time_unit', self.quan(time_unit, "s"))
+            setdefaultattr(
+                self, 'velocity_unit', self.length_unit / self.time_unit)
 
         magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                 (self.time_unit**2 * self.length_unit))
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        setdefaultattr(self, 'magnetic_unit', self.quan(magnetic_unit, "gauss"))
 
     def cosmology_get_units(self):
         """

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -14,6 +14,8 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.unstructured_mesh_handler import \
     UnstructuredIndex
 from yt.data_objects.unstructured_mesh import \
@@ -163,9 +165,9 @@
         # should be set, along with examples of how to set them to standard
         # values.
         #
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
         #
         # These can also be set:
         # self.velocity_unit = self.quan(1.0, "cm/s")

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -25,8 +25,9 @@
 
 from yt.config import ytcfg
 from yt.funcs import \
+    ensure_list, \
     mylog, \
-    ensure_list
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -447,10 +448,10 @@
             mylog.warning("No length conversion provided. Assuming 1 = 1 cm.")
             length_factor = 1.0
             length_unit = "cm"
-        self.length_unit = self.quan(length_factor,length_unit)
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
+        setdefaultattr(self, 'length_unit', self.quan(length_factor,length_unit))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
         if "beam_size" in self.specified_parameters:
             beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -22,7 +22,9 @@
     AMRGridPatch
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.geometry.particle_geometry_handler import \
@@ -246,13 +248,14 @@
         else:
             length_factor = 1.0
             temperature_factor = 1.0
-        self.magnetic_unit = self.quan(b_factor, "gauss")
 
-        self.length_unit = self.quan(length_factor, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
-        self.temperature_unit = self.quan(temperature_factor, "K")
+        setdefaultattr(self, 'magnetic_unit', self.quan(b_factor, "gauss"))
+        setdefaultattr(self, 'length_unit', self.quan(length_factor, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
+        setdefaultattr(
+            self, 'temperature_unit', self.quan(temperature_factor, "K"))
 
     def set_code_units(self):
         super(FLASHDataset, self).set_code_units()

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -33,7 +33,8 @@
     GadgetFOFFieldInfo, \
     GadgetFOFHaloFieldInfo
 from yt.funcs import \
-    only_on_root
+    only_on_root, \
+    setdefaultattr
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.utilities.cosmology import \
@@ -232,7 +233,8 @@
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
-        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit[0], length_unit[1]))
         
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
@@ -244,7 +246,8 @@
             else:
                 velocity_unit = (1e5, "cmcm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
-        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(velocity_unit[0], velocity_unit[1]))
 
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
@@ -259,7 +262,7 @@
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
-        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0], mass_unit[1]))
 
         if "time" in unit_base:
             time_unit = unit_base["time"]
@@ -267,7 +270,7 @@
             time_unit = (unit_base["UnitTime_in_s"], "s")
         else:
             time_unit = (1., "s")        
-        self.time_unit = self.quan(time_unit[0], time_unit[1])
+        setdefaultattr(self, 'time_unit', self.quan(time_unit[0], time_unit[1]))
 
     def __repr__(self):
         return self.basename.split(".", 1)[0]

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -18,7 +18,9 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -205,7 +207,7 @@
                           "Use units_override to specify the units")
 
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            setattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
+            setdefaultattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
 
             if len(self.units_override) == 0:
                 mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -19,7 +19,9 @@
 import os
 from yt.extern.six import string_types
 from yt.funcs import \
-    just_one, ensure_tuple
+    ensure_tuple, \
+    just_one, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -223,17 +225,17 @@
                     un = unit_name[:-5]
                     un = un.replace('magnetic', 'magnetic_field', 1)
                     unit = self.unit_system[un]
-                    setattr(self, unit_name, self.quan(value, unit))
-                setattr(self, unit_name, self.quan(value, unit))
+                    setdefaultattr(self, unit_name, self.quan(value, unit))
+                setdefaultattr(self, unit_name, self.quan(value, unit))
                 if unit_name in h5f["/field_types"]:
                     if unit_name in self.field_units:
                         mylog.warning("'field_units' was overridden by 'dataset_units/%s'"
                                       % (unit_name))
                     self.field_units[unit_name] = str(unit)
         else:
-            self.length_unit = self.quan(1.0, "cm")
-            self.mass_unit = self.quan(1.0, "g")
-            self.time_unit = self.quan(1.0, "s")
+            setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+            setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+            setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
 
         h5f.close()
 

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -23,6 +23,8 @@
 from .fields import \
     HaloCatalogFieldInfo
 
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.data_objects.static_output import \
@@ -76,10 +78,10 @@
         self.parameters.update(hvals)
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.velocity_unit = self.quan(1.0, "cm / s")
-        self.time_unit = self.quan(1.0, "s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm / s"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -19,6 +19,8 @@
 import weakref
 from yt.data_objects.unstructured_mesh import \
     SemiStructuredMesh
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.unstructured_mesh_handler import \
     UnstructuredIndex
 from yt.data_objects.static_output import \
@@ -78,9 +80,9 @@
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
         # not be used very much or at all.
-        self.length_unit = self.quan(1.0, "cm")
-        self.time_unit = self.quan(1.0, "s")
-        self.mass_unit = self.quan(1.0, "g")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
@@ -161,9 +163,9 @@
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
         # not be used very much or at all.
-        self.length_unit = self.quan(1.0, "cm")
-        self.time_unit = self.quan(1.0, "s")
-        self.mass_unit = self.quan(1.0, "g")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
 
     def _parse_parameter_file(self):
         #  not sure if this import has side-effects so I'm not deleting it

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -24,7 +24,9 @@
 from .fields import \
     OWLSSubfindFieldInfo
 
-from yt.funcs import only_on_root
+from yt.funcs import \
+    only_on_root, \
+    setdefaultattr
 from yt.utilities.exceptions import \
     YTException
 from yt.utilities.logger import ytLogger as \
@@ -176,7 +178,8 @@
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
-        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit[0], length_unit[1]))
 
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
@@ -185,7 +188,8 @@
         else:
             velocity_unit = (1e5, "cm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
-        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(velocity_unit[0], velocity_unit[1]))
 
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
@@ -200,7 +204,7 @@
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
-        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0], mass_unit[1]))
 
         if "time" in unit_base:
             time_unit = unit_base["time"]
@@ -208,7 +212,7 @@
             time_unit = (unit_base["UnitTime_in_s"], "s")
         else:
             time_unit = (1., "s")        
-        self.time_unit = self.quan(time_unit[0], time_unit[1])
+        setdefaultattr(self, 'time_unit', self.quan(time_unit[0], time_unit[1]))
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,8 @@
 
 from yt.extern.six import string_types
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.oct_geometry_handler import \
     OctreeIndex
 from yt.geometry.geometry_handler import \
@@ -565,17 +566,21 @@
         # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
         mean_molecular_weight_factor = _X**-1
 
-        self.density_unit = self.quan(density_unit, 'g/cm**3')
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
-        self.time_unit = self.quan(time_unit, "s")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2*mp* 
-                                 mean_molecular_weight_factor/kb).in_units('K')
+        setdefaultattr(self, 'density_unit', self.quan(density_unit, 'g/cm**3'))
+        setdefaultattr(self, 'magnetic_unit', self.quan(magnetic_unit, "gauss"))
+        setdefaultattr(self, 'pressure_unit',
+                       self.quan(pressure_unit, 'dyne/cm**2'))
+        setdefaultattr(self, 'time_unit', self.quan(time_unit, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit, "g"))
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(length_unit, 'cm') / self.time_unit)
+        temperature_unit = (
+            self.velocity_unit**2*mp*mean_molecular_weight_factor/kb)
+        setdefaultattr(self, 'temperature_unit', temperature_unit.in_units('K'))
 
         # Only the length unit gets scaled by a factor of boxlen
-        self.length_unit = self.quan(length_unit * boxlen, "cm")
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit * boxlen, "cm"))
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/rockstar/data_structures.py
--- a/yt/frontends/rockstar/data_structures.py
+++ b/yt/frontends/rockstar/data_structures.py
@@ -22,12 +22,14 @@
 from .fields import \
     RockstarFieldInfo
 
-from yt.utilities.cosmology import Cosmology
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
+from yt.funcs import \
+    setdefaultattr
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.utilities.cosmology import Cosmology
 import yt.utilities.fortran_utils as fpu
 
 from .definitions import \
@@ -92,10 +94,10 @@
 
     def _set_code_unit_attributes(self):
         z = self.current_redshift
-        self.length_unit = self.quan(1.0 / (1.0+z), "Mpc / h")
-        self.mass_unit = self.quan(1.0, "Msun / h")
-        self.velocity_unit = self.quan(1.0, "km / s")
-        self.time_unit = self.length_unit / self.velocity_unit
+        setdefaultattr(self, 'length_unit', self.quan(1.0 / (1.0+z), "Mpc / h"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "Msun / h"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "km / s"))
+        setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -28,7 +28,8 @@
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
 from yt.funcs import \
-    get_requests
+    get_requests, \
+    setdefaultattr
 from .fields import \
     SDFFieldInfo
 from yt.utilities.sdf import \
@@ -177,16 +178,22 @@
         return self._midx
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
-        self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
-        self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
+        setdefaultattr(
+            self, 'length_unit',
+            self.quan(1.0, self.parameters.get("length_unit", 'kpc')))
+        setdefaultattr(
+            self, 'velocity_unit',
+            self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr')))
+        setdefaultattr(
+            self, 'time_unit',
+            self.quan(1.0, self.parameters.get("time_unit", 'Gyr')))
         mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
         if ' ' in mass_unit:
             factor, unit = mass_unit.split(' ')
         else:
             factor = 1.0
             unit = mass_unit
-        self.mass_unit = self.quan(float(factor), unit)
+        setdefaultattr(self, 'mass_unit', self.quan(float(factor), unit))
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r a95d12ad587d2787a0d7b86b3988ac7c4a13306b -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -986,3 +986,12 @@
     except ImportError:
         pass
     return dummy_context_manager()
+
+def setdefaultattr(obj, name, value):
+    """Set attribute with *name* on *obj* with *value* if it doesn't exist yet
+
+    Analogous to dict.setdefault
+    """
+    if not hasattr(obj, name):
+        setattr(obj, name, value)
+    return getattr(obj, name)

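A minimal sketch of how the new helper behaves, analogous to
dict.setdefault (the Holder class is hypothetical, and the motivation is
inferred: presumably so that unit attributes set earlier, e.g. via
units_override, are not clobbered by the frontend defaults):

    from yt.funcs import setdefaultattr

    class Holder(object):
        pass

    obj = Holder()
    # First call sets the attribute, since it does not exist yet.
    setdefaultattr(obj, 'length_unit', 1.0)
    # Second call is a no-op: the existing value is kept and returned.
    assert setdefaultattr(obj, 'length_unit', 2.0) == 1.0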

https://bitbucket.org/yt_analysis/yt/commits/e6875164d199/
Changeset:   e6875164d199
Branch:      stable
User:        ngoldbaum
Date:        2016-08-12 01:05:55+00:00
Summary:     Backporting PR #2335 https://bitbucket.org/yt_analysis/yt/pull-requests/2335
Affected #:  2 files

diff -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 -r e6875164d1990a0fe2981f4efa4d2220bc7127ab yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -237,16 +237,18 @@
     def frb():
         doc = "The frb property."
         def fget(self):
-            if self._frb is None:
+            if self._frb is None or self._data_valid is False:
                 self._recreate_frb()
             return self._frb
 
         def fset(self, value):
             self._frb = value
+            self._data_valid = True
 
         def fdel(self):
             del self._frb
             self._frb = None
+            self._data_valid = False
 
         return locals()
     frb = property(**frb())
@@ -255,8 +257,8 @@
         old_fields = None
         # If we are regenerating an frb, we want to know what fields we had before
         if self._frb is not None:
-            old_fields = list(self.frb.keys())
-            old_units = [str(self.frb[of].units) for of in old_fields]
+            old_fields = list(self._frb.keys())
+            old_units = [str(self._frb[of].units) for of in old_fields]
 
         # Set the bounds
         if hasattr(self,'zlim'):
@@ -267,12 +269,11 @@
             bounds = np.array([b.in_units('code_length') for b in bounds])
 
         # Generate the FRB
-        self._frb = self._frb_generator(self.data_source, bounds,
-                                        self.buff_size, self.antialias,
-                                        periodic=self._periodic)
+        self.frb = self._frb_generator(self.data_source, bounds,
+                                       self.buff_size, self.antialias,
+                                       periodic=self._periodic)
 
         # At this point the frb has the valid bounds, size, aliasing, etc.
-        self._data_valid = True
         if old_fields is None:
             self._frb._get_data_source_fields()
         else:

diff -r 61bc5c910a781bd401383eb413b4b51f1402d1f8 -r e6875164d1990a0fe2981f4efa4d2220bc7127ab yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -442,3 +442,10 @@
         for field_name_list in field_names:
             assert_raises(
                 YTInvalidFieldType, object, ds, normal, field_name_list)
+
+
+def test_frb_regen():
+    ds = fake_random_ds(32)
+    slc = SlicePlot(ds, 2, 'density')
+    slc.set_buff_size(1200)
+    assert_equal(slc.frb['density'].shape, (1200, 1200))

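The new test pins down the intent of the change: mutating the buffer size
must mark the cached frb stale so that the next access regenerates it. A
stripped-down sketch of the same invalidation pattern, independent of yt
(Window and its methods are stand-ins, not yt API):

    class Window(object):
        def __init__(self):
            self._frb = None
            self._data_valid = False
            self.buff_size = (800, 800)

        @property
        def frb(self):
            # Regenerate when the buffer is missing *or* marked stale.
            if self._frb is None or self._data_valid is False:
                self._recreate_frb()
            return self._frb

        @frb.setter
        def frb(self, value):
            self._frb = value
            self._data_valid = True

        def set_buff_size(self, size):
            self.buff_size = (size, size)
            self._data_valid = False  # shape no longer matches the cache

        def _recreate_frb(self):
            # Assigning through the property marks the data valid again.
            self.frb = [[0.0] * self.buff_size[0]] * self.buff_size[1]

    w = Window()
    w.set_buff_size(1200)
    assert len(w.frb) == 1200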

https://bitbucket.org/yt_analysis/yt/commits/117d0dee8c90/
Changeset:   117d0dee8c90
Branch:      stable
User:        ngoldbaum
Date:        2016-08-13 01:24:31+00:00
Summary:     Backporting PR #2338 https://bitbucket.org/yt_analysis/yt/pull-requests/2338
Affected #:  3 files

diff -r e6875164d1990a0fe2981f4efa4d2220bc7127ab -r 117d0dee8c9094ad365b97b714072fdbf5b444b2 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -27,6 +27,8 @@
     YTQuantity
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
+from yt.utilities.exceptions import \
+    YTIllDefinedProfile
 from yt.utilities.lib.misc_utilities import \
     new_bin_profile1d, \
     new_bin_profile2d, \
@@ -940,10 +942,18 @@
     fields = ensure_list(fields)
     is_pfield = [data_source.ds._get_field_info(f).particle_type
                  for f in bin_fields + fields]
+    wf = None
+    if weight_field is not None:
+        wf = data_source.ds._get_field_info(weight_field)
+        is_pfield.append(wf.particle_type)
+        wf = wf.name
 
-    if len(bin_fields) == 1:
+    if any(is_pfield) and not all(is_pfield):
+        raise YTIllDefinedProfile(
+            bin_fields, data_source._determine_fields(fields), wf, is_pfield)
+    elif len(bin_fields) == 1:
         cls = Profile1D
-    elif len(bin_fields) == 2 and np.all(is_pfield):
+    elif len(bin_fields) == 2 and all(is_pfield):
         # log bin_fields set to False for Particle Profiles.
         # doesn't make much sense for CIC deposition.
         # accumulation and fractional set to False as well.

diff -r e6875164d1990a0fe2981f4efa4d2220bc7127ab -r 117d0dee8c9094ad365b97b714072fdbf5b444b2 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -8,7 +8,13 @@
 from yt.testing import \
     fake_random_ds, \
     assert_equal, \
+    assert_raises, \
     assert_rel_equal
+from yt.utilities.exceptions import \
+    YTIllDefinedProfile
+from yt.visualization.profile_plotter import \
+    ProfilePlot, \
+    PhasePlot
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")
@@ -158,3 +164,34 @@
                         weight_field = None)
         p3d.add_fields(["particle_ones"])
         yield assert_equal, p3d["particle_ones"].sum(), 32**3
+
+def test_mixed_particle_mesh_profiles():
+    ds = fake_random_ds(32, particles=10)
+    ad = ds.all_data()
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'particle_mass')
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius',
+        ['particle_mass', 'particle_ones'])
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius',
+        ['particle_mass', 'ones'])
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'particle_radius', 'particle_mass',
+        'cell_mass')
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'cell_mass',
+        'particle_ones')
+
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'radius', 'particle_mass',
+        'velocity_x')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
+        'cell_mass')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'radius', 'cell_mass',
+        'particle_ones')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
+        'particle_ones')

diff -r e6875164d1990a0fe2981f4efa4d2220bc7127ab -r 117d0dee8c9094ad365b97b714072fdbf5b444b2 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -593,3 +593,43 @@
         v += self.message
         v += " Specified bounds are %s" % self.bounds
         return v
+
+def screen_one_element_list(lis):
+    if len(lis) == 1:
+        return lis[0]
+    return lis
+
+class YTIllDefinedProfile(YTException):
+    def __init__(self, bin_fields, fields, weight_field, is_pfield):
+        nbin = len(bin_fields)
+        nfields = len(fields)
+        self.bin_fields = screen_one_element_list(bin_fields)
+        self.bin_fields_ptype = screen_one_element_list(is_pfield[:nbin])
+        self.fields = screen_one_element_list(fields)
+        self.fields_ptype = screen_one_element_list(is_pfield[nbin:nbin+nfields])
+        self.weight_field = weight_field
+        if self.weight_field is not None:
+            self.weight_field_ptype = is_pfield[-1]
+
+    def __str__(self):
+        msg = (
+            "\nCannot create a profile object that mixes particle and mesh "
+            "fields.\n\n"
+            "Received the following bin_fields:\n\n"
+            "   %s, particle_type = %s\n\n"
+            "Profile fields:\n\n"
+            "   %s, particle_type = %s\n"
+        )
+        msg = msg % (
+            self.bin_fields, self.bin_fields_ptype,
+            self.fields, self.fields_ptype
+        )
+
+        if self.weight_field is not None:
+            weight_msg = "\nAnd weight field:\n\n   %s, particle_type = %s\n"
+            weight_msg = weight_msg % (
+                self.weight_field, self.weight_field_ptype)
+        else:
+            weight_msg = ""
+
+        return msg + weight_msg

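The consistency check itself is compact; a sketch of what create_profile
now does before picking a profile class (RuntimeError stands in for
YTIllDefinedProfile here, and the field lists are illustrative):

    def check(is_pfield):
        # True where a field is particle-based, ordered bin_fields + fields.
        if any(is_pfield) and not all(is_pfield):
            # Particle data is sampled per particle, mesh data per cell,
            # so a histogram over both has no consistent binning.
            raise RuntimeError("cannot mix particle and mesh fields")

    check([True, True])    # fine: all particle
    check([False, False])  # fine: all mesh
    try:
        check([False, True])  # mixed: rejected
    except RuntimeError as err:
        print(err)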

https://bitbucket.org/yt_analysis/yt/commits/60ff1eb23a6e/
Changeset:   60ff1eb23a6e
Branch:      stable
User:        brittonsmith
Date:        2016-08-18 09:48:37+00:00
Summary:     Backporting PR #2342 https://bitbucket.org/yt_analysis/yt/pull-requests/2342
Affected #:  3 files

diff -r 117d0dee8c9094ad365b97b714072fdbf5b444b2 -r 60ff1eb23a6ee53ff97e106caea98504ebbe37f9 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,8 +67,9 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_003:
+  local_absorption_spectrum_004:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph

diff -r 117d0dee8c9094ad365b97b714072fdbf5b444b2 -r 60ff1eb23a6ee53ff97e106caea98504ebbe37f9 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -268,6 +268,9 @@
                 redshift_eff = ((1 + redshift) * \
                                 (1 + field_data['redshift_dopp'])) - 1.
 
+        if not use_peculiar_velocity:
+            redshift_eff = redshift
+
         return redshift, redshift_eff
 
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
@@ -419,7 +422,10 @@
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            if use_peculiar_velocity:
+                vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            else:
+                vlos = np.zeros(field_data['temperature'].size)
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than

diff -r 117d0dee8c9094ad365b97b714072fdbf5b444b2 -r 60ff1eb23a6ee53ff97e106caea98504ebbe37f9 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -145,6 +145,58 @@
     shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
+ at requires_answer_testing()
+def test_absorption_spectrum_non_cosmo_novpec():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5', use_peculiar_velocity=False)
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=False)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo_novpec".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+ at requires_file(COSMO_PLUS_SINGLE)
 def test_equivalent_width_conserved():
     """
     This tests that the equivalent width of the optical depth is conserved 

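Behaviorally, use_peculiar_velocity=False now means the effective redshift
collapses to the cosmological value and the line-of-sight velocities are
treated as zero; a sketch with made-up numbers:

    import numpy as np

    redshift = 0.05
    redshift_dopp = np.array([1e-4, -2e-4])  # per-element Doppler shifts
    use_peculiar_velocity = False

    if use_peculiar_velocity:
        redshift_eff = (1 + redshift) * (1 + redshift_dopp) - 1
    else:
        redshift_eff = redshift               # no Doppler contribution
        vlos = np.zeros(redshift_dopp.size)   # zero line-of-sight velocity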

https://bitbucket.org/yt_analysis/yt/commits/fe79f0be6bb8/
Changeset:   fe79f0be6bb8
Branch:      stable
User:        chummels
Date:        2016-08-19 22:27:17+00:00
Summary:     Backporting PR #2344 https://bitbucket.org/yt_analysis/yt/pull-requests/2344
Affected #:  3 files

diff -r 60ff1eb23a6ee53ff97e106caea98504ebbe37f9 -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -203,6 +203,13 @@
             input_ds = input_file
         field_data = input_ds.all_data()
 
+        # temperature field required to calculate voigt profile widths
+        if ('temperature' not in input_ds.derived_field_list) and \
+           (('gas', 'temperature') not in input_ds.derived_field_list):
+            raise RuntimeError(
+                "('gas', 'temperature') field required to be present in %s "
+                "for AbsorptionSpectrum to function." % input_file)
+
         self.tau_field = np.zeros(self.lambda_field.size)
         self.absorbers_list = []
 
@@ -210,6 +217,7 @@
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
 
+        mylog.info("Creating spectrum")
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
@@ -307,6 +315,9 @@
             # Normalization is in cm**-2, so column density must be as well
             column_density = (field_data[continuum['field_name']] * 
                               field_data['dl']).in_units('cm**-2')
+            if (column_density == 0).all():
+                mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
@@ -333,9 +344,9 @@
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
             if valid_continuua.size == 0:
-                mylog.info("Not adding continuum %s: insufficient column density" %
+                mylog.info("Not adding continuum %s: insufficient column density or out of range" %
                     continuum['label'])
-                return
+                continue
 
             pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
@@ -379,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density == 0).all():
+                mylog.info("Not adding line %s: insufficient column density" % line['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
@@ -462,6 +476,12 @@
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
 
+                # if there is a ray element with temperature = 0 or column
+                # density = 0, skip it
+                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
+                    pbar.update(i)
+                    continue
+
                 # the virtual window into which the line is deposited initially
                 # spans a region of 2 coarse spectral bins
                 # (one on each side of the center_index) but the window

diff -r 60ff1eb23a6ee53ff97e106caea98504ebbe37f9 -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -34,6 +34,7 @@
 GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
 GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
 ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
+FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
@@ -473,3 +474,85 @@
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+ at requires_file(FIRE)
+def test_absorption_spectrum_with_zero_field():
+    """
+    This test generates an absorption spectrum from a particle dataset,
+    exercising the handling of fields that are zero along the ray
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(FIRE)
+    lr = LightRay(ds)
+
+    # Define species and associated parameters to add to continuum
+    # Parameters used for both adding the transition to the spectrum
+    # and for fitting
+    # Note that for single species that produce multiple lines
+    # (as in the OVI doublet), 'numLines' will be equal to the number
+    # of lines, and f, gamma, and wavelength will have multiple values.
+
+    HI_parameters = {
+        'name': 'HI',
+        'field': 'H_number_density',
+        'f': [.4164],
+        'Gamma': [6.265E8],
+        'wavelength': [1215.67],
+        'mass': 1.00794,
+        'numLines': 1,
+        'maxN': 1E22, 'minN': 1E11,
+        'maxb': 300, 'minb': 1,
+        'maxz': 6, 'minz': 0,
+        'init_b': 30,
+        'init_N': 1E14
+    }
+
+    species_dicts = {'HI': HI_parameters}
+
+
+    # Get all fields that need to be added to the light ray
+    fields = [('gas','temperature')]
+    for s, params in species_dicts.items():
+        fields.append(params['field'])
+
+    # With a single dataset, a start_position and
+    # end_position or trajectory must be given.
+    # Trajectory should be given as (r, theta, phi)
+    lr.make_light_ray(
+        start_position=ds.arr([0., 0., 0.], 'unitary'),
+        end_position=ds.arr([1., 1., 1.], 'unitary'),
+        solution_filename='test_lightraysolution.txt',
+        data_filename='test_lightray.h5',
+        fields=fields)
+    
+    # Create an AbsorptionSpectrum object extending from
+    # lambda = 900 to lambda = 1800, with 10000 pixels
+    sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
+    
+    # Iterate over species
+    for s, params in species_dicts.items():
+        # Iterate over transitions for a single species
+        for i in range(params['numLines']):
+            # Add the lines to the spectrum
+            sp.add_line(
+                s, params['field'],
+                params['wavelength'][i], params['f'][i],
+                params['Gamma'][i], params['mass'],
+                label_threshold=1.e10)
+    
+    
+    # Make and save spectrum
+    wavelength, flux = sp.make_spectrum(
+        'test_lightray.h5',
+        output_file='test_spectrum.h5',
+        line_list_file='test_lines.txt',
+        use_peculiar_velocity=True)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 60ff1eb23a6ee53ff97e106caea98504ebbe37f9 -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -21,8 +21,6 @@
     load
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
-from yt.units.unit_object import \
-    Unit
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
@@ -391,7 +389,11 @@
 
         # Initialize data structures.
         self._data = {}
+        # temperature field is automatically added to fields
         if fields is None: fields = []
+        if (('gas', 'temperature') not in fields) and \
+           ('temperature' not in fields):
+           fields.append(('gas', 'temperature'))
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
@@ -604,19 +606,18 @@
               self.cosmology.t_from_z(ds["current_redshift"])
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
+
         # Only return LightRay elements with non-zero density
-        mask_field_units = ['K', 'cm**-3', 'g/cm**3']
-        mask_field_units = [Unit(u) for u in mask_field_units]
-        for f in data:
-            for u in mask_field_units:
-                if data[f].units.same_dimensions_as(u):
-                    mask = data[f] > 0
-                    if not np.any(mask):
-                        raise RuntimeError(
-                            "No zones along light ray with nonzero %s. "
-                            "Please modify your light ray trajectory." % (f,))
-                    for key in data.keys():
-                        data[key] = data[key][mask]
+        if 'temperature' in data: f = 'temperature'
+        if ('gas', 'temperature') in data: f = ('gas', 'temperature')
+        if 'temperature' in data or ('gas', 'temperature') in data:
+            mask = data[f] > 0
+            if not np.any(mask):
+                raise RuntimeError(
+                    "No zones along light ray with nonzero %s. "
+                    "Please modify your light ray trajectory." % (f,))
+            for key in data.keys():
+                data[key] = data[key][mask]
         save_as_dataset(ds, filename, data, field_types=field_types,
                         extra_attrs=extra_attrs)
 

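The new light-ray masking is worth a standalone sketch: only the
temperature field is consulted, and every stored field is cut by the same
mask (the arrays below are invented):

    import numpy as np

    data = {'temperature': np.array([0.0, 1.2e4, 3.4e4]),
            'density':     np.array([1e-28, 1e-26, 1e-25]),
            'dl':          np.array([0.1, 0.2, 0.3])}

    mask = data['temperature'] > 0
    if not np.any(mask):
        raise RuntimeError("No zones along light ray with nonzero "
                           "temperature. Please modify your light ray "
                           "trajectory.")
    for key in data.keys():
        data[key] = data[key][mask]  # drop the zero-temperature element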

https://bitbucket.org/yt_analysis/yt/commits/1207066c8059/
Changeset:   1207066c8059
Branch:      stable
User:        Astrodude87
Date:        2016-08-26 23:31:49+00:00
Summary:     Backporting PR #2346 https://bitbucket.org/yt_analysis/yt/pull-requests/2346
Affected #:  7 files

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -61,6 +61,7 @@
 yt/utilities/lib/quad_tree.c
 yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
+yt/utilities/lib/cosmology_time.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c setup.py
--- a/setup.py
+++ b/setup.py
@@ -152,6 +152,8 @@
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.cosmology_time",
+              ["yt/utilities/lib/cosmology_time.pyx"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -175,6 +175,14 @@
                 function=function, units=unit_system["velocity"], take_log=False,
                 validators=[ValidateSpatial(0)])
 
+    for method, name in zip(("cic", "sum"), ("cic", "nn")):
+        function = _get_density_weighted_deposit_field(
+            "age", "s", method)
+        registry.add_field(
+            ("deposit", ("%s_"+name+"_age") % (ptype)),
+            function=function, units=unit_system["time"], take_log=False,
+            validators=[ValidateSpatial(0)])
+
     # Now some translation functions.
 
     def particle_ones(field, data):

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -43,6 +43,9 @@
     RAMSESOctreeContainer
 from yt.arraytypes import blankRecordArray
 
+from yt.utilities.lib.cosmology_time import \
+    friedman
+
 class RAMSESDomainFile(object):
     _last_mask = None
     _last_selector_id = None
@@ -620,7 +623,6 @@
                 dom, mi, ma = f.readline().split()
                 self.hilbert_indices[int(dom)] = (float(mi), float(ma))
         self.parameters.update(rheader)
-        self.current_time = self.parameters['time'] * self.parameters['unit_t']
         self.domain_left_edge = np.zeros(3, dtype='float64')
         self.domain_dimensions = np.ones(3, dtype='int32') * \
                         2**(self.min_level+1)
@@ -643,6 +645,23 @@
         self.max_level = rheader['levelmax'] - self.min_level - 1
         f.close()
 
+
+        if self.cosmological_simulation == 0:
+            self.current_time = self.parameters['time'] * self.parameters['unit_t']
+        else :
+            self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
+                friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )
+
+            age = self.parameters['time']
+            iage = 1 + int(10.*age/self.dtau)
+            iage = np.min([iage,self.n_frw/2 + (iage - self.n_frw/2)/10])
+
+            self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
+                             self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])
+ 
+            self.current_time = (self.time_tot + self.time_simu)/(self.hubble_constant*1e7/3.08e24)/self.parameters['unit_t']
+
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not os.path.basename(args[0]).startswith("info_"): return False

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -80,7 +80,7 @@
         ("particle_mass", ("code_mass", [], None)),
         ("particle_identifier", ("", ["particle_index"], None)),
         ("particle_refinement_level", ("", [], None)),
-        ("particle_age", ("code_time", [], None)),
+        ("particle_age", ("code_time", ['age'], None)),
         ("particle_metallicity", ("", [], None)),
     )
 

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -19,7 +19,10 @@
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.physical_ratios import cm_per_km, cm_per_mpc
 import yt.utilities.fortran_utils as fpu
+from yt.utilities.lib.cosmology_time import \
+    get_ramses_ages
 from yt.extern.six import PY3
 
 if PY3:
@@ -101,4 +104,15 @@
             tr[field] = fpu.read_vector(f, dt)
             if field[1].startswith("particle_position"):
                 np.divide(tr[field], subset.domain.ds["boxlen"], tr[field])
+            cosmo = subset.domain.ds.cosmological_simulation
+            if cosmo == 1 and field[1] == "particle_age":
+                tf = subset.domain.ds.t_frw
+                dtau = subset.domain.ds.dtau
+                tauf = subset.domain.ds.tau_frw
+                tsim = subset.domain.ds.time_simu
+                h100 = subset.domain.ds.hubble_constant
+                nOver2 = subset.domain.ds.n_frw/2
+                t_scale = 1./(h100 * 100 * cm_per_km / cm_per_mpc)/subset.domain.ds['unit_t']
+                ages = tr[field]
+                tr[field] = get_ramses_ages(tf,tauf,dtau,tsim,t_scale,ages,nOver2,len(ages))            
         return tr

diff -r fe79f0be6bb81a2fa08d0e92ffc928bc1fcd8269 -r 1207066c8059bb4c39b6c53537b22bcfecdb349c yt/utilities/lib/cosmology_time.pyx
--- /dev/null
+++ b/yt/utilities/lib/cosmology_time.pyx
@@ -0,0 +1,100 @@
+cimport numpy as np
+import numpy as np
+
+
+cdef double dadtau(double aexp_tau,double O_mat_0,double O_vac_0,double O_k_0):
+    return ( aexp_tau**3 * (O_mat_0 + O_vac_0*aexp_tau**3 + O_k_0*aexp_tau) )**0.5
+
+cdef double dadt(double aexp_t,double O_mat_0,double O_vac_0,double O_k_0):
+    return ( (1./aexp_t)*(O_mat_0 + O_vac_0*aexp_t**3 + O_k_0*aexp_t) )**0.5
+
+
+cdef step_cosmo(double alpha,double tau,double aexp_tau,double t,double aexp_t,double O_mat_0,double O_vac_0,double O_k_0):
+    cdef double dtau,aexp_tau_pre,dt,aexp_t_pre
+
+    dtau = alpha * aexp_tau / dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)
+    aexp_tau_pre = aexp_tau - dadtau(aexp_tau,O_mat_0,O_vac_0,O_k_0)*dtau/2.0
+    aexp_tau = aexp_tau - dadtau(aexp_tau_pre,O_mat_0,O_vac_0,O_k_0)*dtau
+    tau = tau - dtau
+
+    dt = alpha * aexp_t / dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)
+    aexp_t_pre = aexp_t - dadt(aexp_t,O_mat_0,O_vac_0,O_k_0)*dt/2.0
+    aexp_t = aexp_t - dadt(aexp_t_pre,O_mat_0,O_vac_0,O_k_0)*dt
+    t = t - dt
+
+    return tau,aexp_tau,t,aexp_t
+
+
+cpdef friedman(double O_mat_0,double O_vac_0,double O_k_0):
+    cdef double alpha=1.e-5,aexp_min=1.e-3,aexp_tau=1.,aexp_t=1.,tau=0.,t=0.
+    cdef int nstep=0,ntable=1000,n_out
+    cdef np.ndarray[double,mode='c'] t_out=np.zeros([ntable+1]),tau_out=np.zeros([ntable+1])
+    cdef double age_tot,delta_tau,next_tau
+
+    while aexp_tau >= aexp_min or aexp_t >= aexp_min:
+       nstep = nstep + 1
+       tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
+
+    age_tot=-t
+    if nstep < ntable :
+        ntable = nstep
+        alpha = alpha / 2.
+
+    delta_tau = 20.*tau/ntable/11.
+
+    aexp_tau = 1.
+    aexp_t = 1.
+    tau = 0.
+    t = 0.
+
+    n_out = 0
+    t_out[n_out] = t
+    tau_out[n_out] = tau
+
+    next_tau = tau + delta_tau/10.
+
+    while n_out < ntable/2 :
+        tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
+
+        if tau < next_tau:
+            n_out = n_out + 1
+            t_out[n_out] = t
+            tau_out[n_out] = tau
+            next_tau = next_tau + delta_tau/10.
+
+    while aexp_tau >= aexp_min or aexp_t >= aexp_min:
+        tau,aexp_tau,t,aexp_t = step_cosmo(alpha,tau,aexp_tau,t,aexp_t,O_mat_0,O_vac_0,O_k_0)
+
+        if tau < next_tau:
+            n_out = n_out + 1
+            t_out[n_out] = t
+            tau_out[n_out] = tau
+            next_tau = next_tau + delta_tau
+
+    n_out = ntable
+    t_out[n_out] = t
+    tau_out[n_out] = tau
+
+    return tau_out,t_out,delta_tau,ntable,age_tot
+
+cpdef get_ramses_ages(np.ndarray[double,mode='c'] tf, 
+                     np.ndarray[double,mode='c'] tauf,  
+                     double dtau, 
+                     double tsim, 
+                     double t_scale, 
+                     np.ndarray[double,mode='c'] ages, 
+                     int nOver2, 
+                     int ntot):
+
+    cdef np.ndarray[double,mode='c'] t
+    cdef np.ndarray[double,mode='c'] dage
+    cdef np.ndarray[int,mode='c'] iage
+
+    dage = 1 + (10*ages/dtau)
+    dage = np.minimum(dage, nOver2 + (dage - nOver2)/10.)
+    iage = np.array(dage,dtype=np.int32)
+
+    t = (tf[iage]*(ages - tauf[iage - 1]) / (tauf[iage] - tauf[iage - 1]))
+    t = t + (tf[iage-1]*(ages-tauf[iage]) / (tauf[iage-1]-tauf[iage]))
+    return  (tsim - t)*t_scale
+ 

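A pure-Python rendering of the get_ramses_ages lookup, which may be easier
to follow than the Cython: each conformal (birth) time is linearly
interpolated from the tau_frw table onto the proper-time table t_frw built
by friedman(), then converted to an age relative to the simulation time
(same arithmetic as above, vectorized with numpy):

    import numpy as np

    def ramses_ages(t_frw, tau_frw, dtau, tsim, t_scale, ages, n_over_2):
        # Fractional index into the non-uniform table from friedman().
        dage = 1 + (10 * ages / dtau)
        dage = np.minimum(dage, n_over_2 + (dage - n_over_2) / 10.)
        iage = dage.astype(np.int32)
        # Linear interpolation between the bracketing table entries.
        t = (t_frw[iage] * (ages - tau_frw[iage - 1])
             / (tau_frw[iage] - tau_frw[iage - 1]))
        t += (t_frw[iage - 1] * (ages - tau_frw[iage])
              / (tau_frw[iage - 1] - tau_frw[iage]))
        # Age relative to the simulation's current proper time.
        return (tsim - t) * t_scale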

https://bitbucket.org/yt_analysis/yt/commits/fac0475d3110/
Changeset:   fac0475d3110
Branch:      stable
User:        ngoldbaum
Date:        2016-08-24 16:27:27+00:00
Summary:     Backporting PR #2347 https://bitbucket.org/yt_analysis/yt/pull-requests/2347
Affected #:  3 files

diff -r 1207066c8059bb4c39b6c53537b22bcfecdb349c -r fac0475d311000b814cc0674184b287884d8291c yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -28,7 +28,8 @@
     assert_array_equal, \
     assert_equal, assert_raises, \
     assert_array_almost_equal_nulp, \
-    assert_array_almost_equal
+    assert_array_almost_equal, \
+    assert_almost_equal
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -1221,3 +1222,16 @@
     arr = [1, 2, 3]*km
     assert_equal(sum(arr), 6*km)
 
+def test_initialization_different_registries():
+    from yt.testing import fake_random_ds
+
+    ds1 = fake_random_ds(32, length_unit=1)
+    ds2 = fake_random_ds(32, length_unit=3)
+
+    l1 = ds1.quan(0.3, 'unitary')
+    l2 = ds2.quan(l1, 'unitary')
+
+    assert_almost_equal(float(l1.in_cgs()), 0.3)
+    assert_almost_equal(float(l2.in_cgs()), 0.9)
+    assert_almost_equal(float(ds1.quan(0.3, 'unitary').in_cgs()), 0.3)
+    assert_almost_equal(float(ds2.quan(0.3, 'unitary').in_cgs()), 0.9)

diff -r 1207066c8059bb4c39b6c53537b22bcfecdb349c -r fac0475d311000b814cc0674184b287884d8291c yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -389,17 +389,18 @@
                     "ds.arr(%s, \"%s\")" % (input_array, input_units)
                     )
         if isinstance(input_array, YTArray):
+            ret = input_array.view(cls)
             if input_units is None:
                 if registry is None:
                     pass
                 else:
                     units = Unit(str(input_array.units), registry=registry)
-                    input_array.units = units
+                    ret.units = units
             elif isinstance(input_units, Unit):
-                input_array.units = input_units
+                ret.units = input_units
             else:
-                input_array.units = Unit(input_units, registry=registry)
-            return input_array.view(cls)
+                ret.units = Unit(input_units, registry=registry)
+            return ret
         elif isinstance(input_array, np.ndarray):
             pass
         elif iterable(input_array) and input_array:

diff -r 1207066c8059bb4c39b6c53537b22bcfecdb349c -r fac0475d311000b814cc0674184b287884d8291c yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -156,21 +156,26 @@
     camera.resolution = resolution
     if not iterable(width):
         width = data_source.ds.arr([width]*3)
-    camera.position = center - width[2]*normal_vector
+    normal = np.array(normal_vector)
+    normal = normal / np.linalg.norm(normal)
+
+    camera.position = center - width[2]*normal
     camera.focus = center
-    
+
     # If north_vector is None, we set the default here.
-    # This is chosen so that if normal_vector is one of the 
+    # This is chosen so that if normal_vector is one of the
     # cartesian coordinate axes, the projection will match
     # the corresponding on-axis projection.
     if north_vector is None:
         vecs = np.identity(3)
-        t = np.cross(vecs, normal_vector).sum(axis=1)
+        t = np.cross(vecs, normal).sum(axis=1)
         ax = t.argmax()
-        east_vector = np.cross(vecs[ax, :], normal_vector).ravel()
-        north_vector = np.cross(normal_vector, east_vector).ravel()
-    camera.switch_orientation(normal_vector,
-                              north_vector)
+        east_vector = np.cross(vecs[ax, :], normal).ravel()
+        north = np.cross(normal, east_vector).ravel()
+    else:
+        north = np.array(north_vector)
+        north = north / np.linalg.norm(north)
+    camera.switch_orientation(normal, north)
 
     sc.add_source(vol)
 

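The yt_array.py hunk fixes a mutation bug: the old code assigned the new
units to input_array before taking the view, silently changing the
caller's array. A toy reproduction of the corrected pattern using a
hypothetical ndarray subclass:

    import numpy as np

    class Tagged(np.ndarray):
        """Hypothetical ndarray subclass carrying a units tag."""
        units = None

    def with_units(arr, units):
        # Take the view first, then set metadata on the *view*, leaving
        # the input array untouched -- the pattern the fix adopts.
        ret = arr.view(Tagged)
        ret.units = units
        return ret

    a = np.arange(3).view(Tagged)
    a.units = 'unitary-registry-1'
    b = with_units(a, 'unitary-registry-2')
    assert a.units == 'unitary-registry-1'  # caller's array unchanged
    assert b.units == 'unitary-registry-2'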

https://bitbucket.org/yt_analysis/yt/commits/73aed87f5ed5/
Changeset:   73aed87f5ed5
Branch:      stable
User:        ngoldbaum
Date:        2016-08-28 14:51:28+00:00
Summary:     Ensure locally defined fields with physical units can still generate labels
Affected #:  1 file

diff -r fac0475d311000b814cc0674184b287884d8291c -r 73aed87f5ed510deefb44d65ad8a62a3810cf007 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -144,12 +144,18 @@
         return dd
 
     def get_units(self):
-        u = Unit(self.units, registry=self.ds.unit_registry)
+        if self.ds is not None:
+            u = Unit(self.units, registry=self.ds.unit_registry)
+        else:
+            u = Unit(self.units)
         return u.latex_representation()
 
     def get_projected_units(self):
-        u = Unit(self.units, registry=self.ds.unit_registry)*Unit('cm')
-        return u.latex_representation()
+        if self.ds is not None:
+            u = Unit(self.units, registry=self.ds.unit_registry)
+        else:
+            u = Unit(self.units)
+        return (u*Unit('cm')).latex_representation()
 
     def check_available(self, data):
         """
@@ -222,7 +228,10 @@
         if projected:
             raise NotImplementedError
         else:
-            units = Unit(self.units, registry=self.ds.unit_registry)
+            if self.ds is not None:
+                units = Unit(self.units, registry=self.ds.unit_registry)
+            else:
+                units = Unit(self.units)
         # Add unit label
         if not units.is_dimensionless:
             data_label += r"\ \ (%s)" % (units.latex_representation())


https://bitbucket.org/yt_analysis/yt/commits/e813322653ac/
Changeset:   e813322653ac
Branch:      stable
User:        jzuhone
Date:        2016-08-26 19:25:17+00:00
Summary:     If the particle file and the plot file are not at the same time, log a warning but don't error out. Ignore the particle file.
Affected #:  1 file

diff -r 73aed87f5ed510deefb44d65ad8a62a3810cf007 -r e813322653ac1200551317e013de6bea75f1d38a yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -214,7 +214,9 @@
             part_time = self._particle_handle.handle.get('real scalars')[0][1]
             plot_time = self._handle.handle.get('real scalars')[0][1]
             if not np.isclose(part_time, plot_time):
-                raise IOError('%s and  %s are not at the same time.' % (self.particle_filename, filename))
+                self._particle_handle = self._handle
+                mylog.warning('%s and %s are not at the same time. ' % (self.particle_filename, filename) +
+                              'This particle file will not be used.')
 
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better

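A sketch of the relaxed check (the times are made up); the dataset now
loads with a warning instead of refusing outright:

    import numpy as np
    from yt.utilities.logger import ytLogger as mylog

    part_time, plot_time = 1.00, 1.25  # invented, mismatched times
    if not np.isclose(part_time, plot_time):
        # Fall back to the plot file handle; warn instead of IOError.
        mylog.warning("particle file and plot file are not at the same "
                      "time; the particle file will not be used.")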

https://bitbucket.org/yt_analysis/yt/commits/db0a7feeb7ab/
Changeset:   db0a7feeb7ab
Branch:      stable
User:        ngoldbaum
Date:        2016-08-29 22:28:59+00:00
Summary:     Make smoothed_covering_grid work correctly with reduced dimensionality data. Closes #1266
Affected #:  2 files

diff -r e813322653ac1200551317e013de6bea75f1d38a -r db0a7feeb7ab90cdb0fbef4669395fb54d73bf96 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -940,8 +940,12 @@
             if level < min_level:
                 self._update_level_state(ls)
                 continue
-            domain_dims = self.ds.domain_dimensions.astype("int64") \
-                        * self.ds.relative_refinement(0, ls.current_level)
+            nd = self.ds.dimensionality
+            refinement = np.zeros_like(ls.base_dx)
+            refinement += self.ds.relative_refinement(0, ls.current_level)
+            refinement[nd:] = 1
+            domain_dims = self.ds.domain_dimensions * refinement
+            domain_dims = domain_dims.astype("int64")
             tot = ls.current_dims.prod()
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
@@ -955,7 +959,8 @@
                 raise RuntimeError
             self._update_level_state(ls)
         for name, v in zip(fields, ls.fields):
-            if self.level > 0: v = v[1:-1,1:-1,1:-1]
+            if self.level > 0:
+                v = v[1:-1, 1:-1, 1:-1]
             fi = self.ds._get_field_info(*name)
             self[name] = self.ds.arr(v, fi.units)
 
@@ -1009,8 +1014,11 @@
         rf = float(self.ds.relative_refinement(
                     ls.current_level, ls.current_level + 1))
         ls.current_level += 1
-        ls.current_dx = ls.base_dx / \
-            self.ds.relative_refinement(0, ls.current_level)
+        nd = self.ds.dimensionality
+        refinement = np.zeros_like(ls.base_dx)
+        refinement += self.ds.relative_refinement(0, ls.current_level)
+        refinement[nd:] = 1
+        ls.current_dx = ls.base_dx / refinement
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex, end_index, ls.current_dims = \
             self._minimal_box(ls.current_dx)

diff -r e813322653ac1200551317e013de6bea75f1d38a -r db0a7feeb7ab90cdb0fbef4669395fb54d73bf96 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -131,3 +131,11 @@
     cgrid = ds.covering_grid(0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
     density_field = cgrid["density"]
     assert_equal((density_field == 0.0).sum(), 0)
+
+ekh = 'EnzoKelvinHelmholtz/DD0011/DD0011'
+ at requires_file(ekh)
+def test_smoothed_covering_grid_2d_dataset():
+    ds = load(ekh)
+    ds.periodicity = (True, True, True)
+    scg = ds.smoothed_covering_grid(1, [0, 0, 0], [128, 128, 1])
+    assert_equal(scg['density'].shape, [128, 128, 1])

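The core of the reduced-dimensionality fix is a per-axis refinement
vector; a sketch for a 2D, 128x128x1 dataset refined to level 3 (the
numbers are illustrative):

    import numpy as np

    nd = 2                               # ds.dimensionality for 2D data
    base_dx = np.ones(3)
    refinement = np.zeros_like(base_dx)
    refinement += 2**3                   # ds.relative_refinement(0, 3)
    refinement[nd:] = 1                  # never refine the dummy z axis
    domain_dims = (np.array([128, 128, 1]) * refinement).astype('int64')
    print(domain_dims)                   # [1024 1024    1]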

https://bitbucket.org/yt_analysis/yt/commits/6a016accb9fe/
Changeset:   6a016accb9fe
Branch:      stable
User:        brittonsmith
Date:        2016-09-01 10:00:54+00:00
Summary:     Backporting PR #2357 https://bitbucket.org/yt_analysis/yt/pull-requests/2357
Affected #:  3 files

diff -r db0a7feeb7ab90cdb0fbef4669395fb54d73bf96 -r 6a016accb9feff43407162ae57e42dda97908161 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -372,8 +372,9 @@
         for ax in "xyz"[:ds.dimensionality]:
             setattr(self, ax, ds.data[ax])
             setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
-            setattr(self, "%s_field" % ax,
-                    tuple(ds.parameters["%s_field" % ax]))
+            field_name = tuple(ds.parameters["%s_field" % ax])
+            setattr(self, "%s_field" % ax, field_name)
+            self.field_info[field_name] = ds.field_info[field_name]
             setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
             exclude_fields.extend([ax, "%s_bins" % ax,
                                    ds.parameters["%s_field" % ax][1]])
@@ -384,6 +385,7 @@
         for field in profile_fields:
             self.field_map[field[1]] = field
             self.field_data[field] = ds.data[field]
+            self.field_info[field] = ds.field_info[field]
             self.field_units[field] = ds.data[field].units
 
 class Profile1D(ProfileND):

diff -r db0a7feeb7ab90cdb0fbef4669395fb54d73bf96 -r 6a016accb9feff43407162ae57e42dda97908161 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -78,14 +78,12 @@
         self.refine_by = 2
         with h5py.File(self.parameter_filename, "r") as f:
             for key in f.attrs.keys():
-                v = f.attrs[key]
-                if isinstance(v, bytes):
-                    v = v.decode("utf8")
+                v = parse_h5_attr(f, key)
                 if key == "con_args":
                     v = v.astype("str")
                 self.parameters[key] = v
             self.num_particles = \
-              dict([(group, f[group].attrs["num_elements"])
+              dict([(group, parse_h5_attr(f[group], "num_elements"))
                     for group in f if group != self.default_fluid_type])
         for attr in ["cosmological_simulation", "current_time", "current_redshift",
                      "hubble_constant", "omega_matter", "omega_lambda",
@@ -146,7 +144,7 @@
 class YTDataHDF5File(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         with h5py.File(filename, "r") as f:
-            self.header = dict((field, f.attrs[field]) \
+            self.header = dict((field, parse_h5_attr(f, field)) \
                                for field in f.attrs.keys())
 
         super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
@@ -329,7 +327,7 @@
                     field_name = (str(group), str(field))
                     self.field_list.append(field_name)
                     self.ds.field_units[field_name] = \
-                      f[group][field].attrs["units"]
+                      parse_h5_attr(f[group][field], "units")
 
 class YTGridHierarchy(YTDataHierarchy):
     grid = YTGrid
@@ -613,7 +611,7 @@
             self.parameters["weight_field"] = None
         elif isinstance(self.parameters["weight_field"], np.ndarray):
             self.parameters["weight_field"] = \
-              tuple(self.parameters["weight_field"])
+              tuple(self.parameters["weight_field"].astype(str))
 
         for a in ["profile_dimensions"] + \
           ["%s_%s" % (ax, attr)
@@ -647,13 +645,20 @@
                 self.parameters[bin_field] = None
             elif isinstance(self.parameters[bin_field], np.ndarray):
                 self.parameters[bin_field] = \
-                  tuple(self.parameters[bin_field])
+                  tuple(self.parameters[bin_field].astype(str))
             setattr(self, bin_field, self.parameters[bin_field])
 
+    def _setup_gas_alias(self):
+        "Alias the grid type to gas with a field alias."
+        for ftype, field in self.field_list:
+            if ftype == "data":
+                self.field_info.alias(("gas", field), (ftype, field))
+
     def create_field_info(self):
         super(YTProfileDataset, self).create_field_info()
-        self.field_info.alias(self.parameters["weight_field"],
-                              (self.default_fluid_type, "weight"))
+        if self.parameters["weight_field"] is not None:
+            self.field_info.alias(self.parameters["weight_field"],
+                                  (self.default_fluid_type, "weight"))
 
     def _set_derived_attrs(self):
         self.domain_center = 0.5 * (self.domain_right_edge +

diff -r db0a7feeb7ab90cdb0fbef4669395fb54d73bf96 -r 6a016accb9feff43407162ae57e42dda97908161 yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -15,6 +15,8 @@
 
 from yt.convenience import \
     load
+from yt.data_objects.api import \
+    create_profile
 from yt.frontends.ytdata.api import \
     YTDataContainerDataset, \
     YTSpatialPlotDataset, \
@@ -32,8 +34,9 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
-from yt.data_objects.api import \
-    create_profile
+from yt.visualization.profile_plotter import \
+    ProfilePlot, \
+    PhasePlot
 import numpy as np
 import tempfile
 import os
@@ -143,6 +146,10 @@
     prof_1d_ds = load(full_fn)
     assert isinstance(prof_1d_ds, YTProfileDataset)
 
+    p1 = ProfilePlot(prof_1d_ds.data, "density", "temperature",
+                     weight_field="cell_mass")
+    p1.save()
+
     yield YTDataFieldTest(full_fn, "temperature", geometric=False)
     yield YTDataFieldTest(full_fn, "x", geometric=False)
     yield YTDataFieldTest(full_fn, "density", geometric=False)
@@ -153,6 +160,11 @@
     full_fn = os.path.join(tmpdir, fn)
     prof_2d_ds = load(full_fn)
     assert isinstance(prof_2d_ds, YTProfileDataset)
+
+    p2 = PhasePlot(prof_2d_ds.data, "density", "temperature",
+                   "cell_mass", weight_field=None)
+    p2.save()
+
     yield YTDataFieldTest(full_fn, "density", geometric=False)
     yield YTDataFieldTest(full_fn, "x", geometric=False)
     yield YTDataFieldTest(full_fn, "temperature", geometric=False)

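Usage the backport enables, mirroring the new tests (the filename is
hypothetical; any profile previously saved with save_as_dataset should
work):

    import yt
    from yt.visualization.profile_plotter import ProfilePlot

    prof_1d_ds = yt.load("DD0046_Profile1D.h5")  # hypothetical filename
    p = ProfilePlot(prof_1d_ds.data, "density", "temperature",
                    weight_field="cell_mass")
    p.save()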

https://bitbucket.org/yt_analysis/yt/commits/8b94ecf10978/
Changeset:   8b94ecf10978
Branch:      stable
User:        al007
Date:        2016-08-31 22:58:37+00:00
Summary:     Backporting PR #2358 https://bitbucket.org/yt_analysis/yt/pull-requests/2358
Affected #:  1 file

diff -r 6a016accb9feff43407162ae57e42dda97908161 -r 8b94ecf109788c1767cd2609d4d8e7d42b947fb1 doc/source/quickstart/index.rst
--- a/doc/source/quickstart/index.rst
+++ b/doc/source/quickstart/index.rst
@@ -12,15 +12,27 @@
 on time, you can non-interactively go through the linked pages below and view the
 worked examples.
 
-To execute the quickstart interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
+To execute the quickstart interactively, you have a couple of options: 1) run
+the notebook from your own system or 2) run it from the URL
+https://demo.use.yt. Option 1 requires an existing installation of yt (see
+:ref:`getting-and-installing-yt`), a copy of the yt source (which you may
+already have depending on your installation choice), and a download of the
+tutorial datasets (about 3 GB total). If you know you are going to be a yt user
+and have the time to download the datasets, option 1 is a good choice. However,
+if you're only interested in getting a feel for yt and its capabilities, or you
+already have yt but don't want to spend time downloading the data, go ahead to
+https://demo.use.yt.
+
+If you're running the tutorial from your own system and you do not already have
+the yt repository, the easiest way to get the repository is to clone it using
+mercurial:
 
 .. code-block:: bash
 
    hg clone https://bitbucket.org/yt_analysis/yt
 
-Now start the IPython notebook from within the repository:
+Now start the IPython notebook from within the repository (we presume you have
+yt installed):
 
 .. code-block:: bash
 


https://bitbucket.org/yt_analysis/yt/commits/9ba2e683e6c0/
Changeset:   9ba2e683e6c0
Branch:      stable
User:        hyschive
Date:        2016-09-06 20:03:20+00:00
Summary:     Bugfix: _skeleton/fields.py
Affected #:  1 file

diff -r 8b94ecf109788c1767cd2609d4d8e7d42b947fb1 -r 9ba2e683e6c002b42c778894da2c1d1f5255d156 yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -42,5 +42,5 @@
         pass
 
     def setup_particle_fields(self, ptype):
+        super(SkeletonFieldInfo, self).setup_particle_fields(ptype)
         # This will get called for every particle type.
-        pass

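The one-line fix matters because the parent class's setup_particle_fields
registers the generic particle fields, so a frontend override must chain
up rather than pass. A minimal stand-in (Base mimics the parent, not
actual yt API):

    class Base(object):
        def setup_particle_fields(self, ptype):
            print("registering generic particle fields for %s" % ptype)

    class SkeletonFieldInfo(Base):
        def setup_particle_fields(self, ptype):
            # The fix: chain up instead of 'pass'; otherwise no generic
            # particle fields are ever registered for this frontend.
            super(SkeletonFieldInfo, self).setup_particle_fields(ptype)

    SkeletonFieldInfo().setup_particle_fields("io")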

https://bitbucket.org/yt_analysis/yt/commits/3da56963e330/
Changeset:   3da56963e330
Branch:      stable
User:        cphyc
Date:        2016-09-12 20:49:24+00:00
Summary:     Backporting PR #2358 https://bitbucket.org/yt_analysis/yt/pull-requests/2358
Affected #:  2 files

diff -r 9ba2e683e6c002b42c778894da2c1d1f5255d156 -r 3da56963e330099e1ebcfc97adb8a8c032779461 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -647,7 +647,7 @@
 
 
         if self.cosmological_simulation == 0:
-            self.current_time = self.parameters['time'] * self.parameters['unit_t']
+            self.current_time = self.parameters['time']
         else :
             self.tau_frw, self.t_frw, self.dtau, self.n_frw, self.time_tot = \
                 friedman( self.omega_matter, self.omega_lambda, 1. - self.omega_matter - self.omega_lambda )

diff -r 9ba2e683e6c002b42c778894da2c1d1f5255d156 -r 3da56963e330099e1ebcfc97adb8a8c032779461 yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -25,6 +25,8 @@
     FieldValuesTest, \
     create_obj
 from yt.frontends.ramses.api import RAMSESDataset
+import os
+import yt
 
 _fields = ("temperature", "density", "velocity_magnitude",
            ("deposit", "all_density"), ("deposit", "all_count"))
@@ -57,3 +59,15 @@
 def test_units_override():
     for test in units_override_check(output_00080):
         yield test
+
+
+ramsesNonCosmo = 'DICEGalaxyDisk_nonCosmological/output_00002'
+@requires_file(ramsesNonCosmo)
+def test_unit_non_cosmo():
+    ds = yt.load(os.path.join(ramsesNonCosmo, 'info_00002.txt'))
+
+    expected_raw_time = 0.0299468077820411 # in RAMSES code units
+    yield assert_equal, ds.current_time.value, expected_raw_time
+
+    expected_time = 14087886140997.336 # in seconds
+    yield assert_equal, ds.current_time.in_units('s').value, expected_time

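The behavior the new test pins down can be reproduced directly: current_time
now stores the raw value of the RAMSES 'time' parameter in code units and
leaves the conversion to yt's unit system. A short sketch, with the output
path hypothetical:

    import yt

    # A hypothetical non-cosmological RAMSES output.
    ds = yt.load("output_00002/info_00002.txt")

    print(ds.current_time)                # raw 'time' parameter, in code units
    print(ds.current_time.in_units("s"))  # conversion handled by unit_t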

https://bitbucket.org/yt_analysis/yt/commits/784bb3ccc436/
Changeset:   784bb3ccc436
Branch:      stable
User:        ngoldbaum
Date:        2016-09-13 20:17:20+00:00
Summary:     Backporting PR 2345 https://bitbucket.org/yt_analysis/yt/pull-requests/2345
Affected #:  12 files

diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -31,13 +31,13 @@
    print("hubble distance", co.hubble_distance())
 
    # distance from z = 0 to 0.5
-   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpccm/h"))
 
    # transverse distance
-   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpccm/h"))
 
    # comoving volume
-   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpccm**3"))
 
    # angular diameter distance
    print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
@@ -67,7 +67,16 @@
    # convert redshift to time after Big Bang (same as Hubble time)
    print("t from z", co.t_from_z(0.5).in_units("Gyr"))
 
-Note, that all distances returned are comoving distances.  All of the above
+.. warning::
+
+   Cosmological distance calculations return values that are either
+   in the comoving or proper frame, depending on the specific quantity.  For
+   simplicity, the proper and comoving frames are set equal to each other
+   within the cosmology calculator.  This means that for some distance value,
+   x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+   care to understand which reference frame is correct for the given calculation.
+
+All of the above
 functions accept scalar values and arrays.  The helper functions, `co.quan`
 and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
 unit registry of the cosmology calculator.  For more information on the usage

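The warning added above is easy to see in practice; a minimal sketch, assuming
the calculator's default-style parameters:

    from yt.utilities.cosmology import Cosmology

    co = Cosmology(hubble_constant=0.71, omega_matter=0.27, omega_lambda=0.73)
    d = co.comoving_radial_distance(0, 0.5)

    # Within the calculator the proper and comoving frames are set equal,
    # so these two conversions return the same value.
    print(d.to("Mpc"))
    print(d.to("Mpccm"))
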
diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -49,13 +49,18 @@
 * ``deltaz_min`` (*float*):  Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with
-  ``use_minimum_datasets`` set to False, this parameter specifies the
-  fraction of the total box size to be traversed before rerandomizing the
-  projection axis and center.  This was invented to allow light rays with
-  thin slices to sample coherent large scale structure, but in practice
-  does not work so well.  Try setting this parameter to 1 and see what
-  happens.  Default: 0.0.
+* ``max_box_fraction`` (*float*):  In terms of the size of the domain, the
+  maximum length a light ray segment can be in order to span the redshift interval
+  from one dataset to another.  If using a zoom-in simulation, this parameter can
+  be set to the length of the high resolution region so as to limit ray segments
+  to that size.  If the high resolution region is not cubical, the smallest side
+  should be used.  Default: 1.0 (the size of the box)
+
+* ``minimum_coherent_box_fraction`` (*float*): Used to specify the minimum
+  length of a ray, in terms of the size of the domain, before the trajectory
+  is re-randomized.  Set to 0 to have the ray trajectory randomized for every
+  dataset.  Set to np.inf (infinity) to use a single trajectory for the
+  entire ray.  Default: 0.0.
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
@@ -67,7 +72,7 @@
 ---------------------
 
 Once the LightRay object has been instantiated, the
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay,make_light_ray`
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`
 function will trace out the rays in each dataset and collect information for all the
 fields requested.  The output file will be an HDF5 file containing all the
 cell field values for all the cells that were intersected by the ray.  A
@@ -85,6 +90,21 @@
 
 * ``seed`` (*int*): Seed for the random number generator.  Default: None.
 
+* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic
+  boundaries.  If False, ray trajectories will not be periodic.  Default : True.
+
+* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the
+  region in which rays are to be generated.  If None, the left edge will be
+  that of the domain.  Default: None.
+
+* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of
+  the region in which rays are to be generated.  If None, the right edge
+  will be that of the domain.  Default: None.
+
+* ``min_level`` (*int*): The minimum refinement level of the spatial region
+  through which the ray passes.  Useful with zoom-in simulations where the
+  high resolution region does not keep a constant geometry.  Default: None.
+
 * ``start_position`` (*list* of floats): Used only if creating a light ray
   from a single dataset.  The coordinates of the starting position of the
   ray.  Default: None.
@@ -122,7 +142,82 @@
   slice and 1 to have all processors work together on each projection.
   Default: 1
 
-.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
+Useful Tips for Making LightRays
+--------------------------------
+
+Below are some tips that may come in handy for creating proper LightRays.
+
+How many snapshots do I need?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The number of snapshots required to traverse some redshift interval depends
+on the simulation box size and cosmological parameters.  Before running an
+expensive simulation only to find out that you don't have enough outputs
+to span the redshift interval you want, have a look at
+:ref:`planning-cosmology-simulations`.  The functionality described there
+will allow you to calculate the precise number of snapshots and specific
+redshifts at which they should be written.
+
+My snapshots are too far apart!
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``max_box_fraction`` keyword, provided when creating the `LightRay`,
+allows the user to control how long a ray segment can be for an
+individual dataset.  By default, the `LightRay` generator will try to
+make segments no longer than the size of the box to avoid sampling the
+same structures more than once.  However, this can be increased in the
+case that the redshift interval between datasets is longer than the
+box size.  Increasing this value should be done with caution as longer
+ray segments run a greater risk of coming back to somewhere near their
+original position.
+
+What if I have a zoom-in simulation?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A zoom-in simulation has a high resolution region embedded within a
+larger, low resolution volume.  In this type of simulation, it is likely
+that you will want the ray segments to stay within the high resolution
+region.  To do this, you must first specify the size of the high
+resolution region when creating the `LightRay` using the
+``max_box_fraction`` keyword.  This will make sure that
+the calculation of the spacing of the segment datasets only takes into
+account the high resolution region and not the full box size.  If your
+high resolution region is not a perfect cube, specify the smallest side.
+Then, in the call to
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
+use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
+precise location of the high resolution region.
+
+Technically speaking, the ray segments should no longer be periodic
+since the high resolution region is only a sub-volume within the
+larger domain.  To make the ray segments non-periodic, set the
+``periodic`` keyword to False.  The LightRay generator will continue
+to generate randomly oriented segments until it finds one that fits
+entirely within the high resolution region.  If you have a high
+resolution region that can move and change shape slightly as structure
+forms, use the ``min_level`` keyword to mandate that the ray segment only
+pass through cells that are refined to at least some minimum level.
+
+If the size of the high resolution region is not large enough to
+span the required redshift interval, the `LightRay` generator can
+be configured to treat the high resolution region as if it were
+periodic simply by setting the ``periodic`` keyword to True.  This
+option should be used with caution as it will lead to the creation
+of disconnected ray segments within a single dataset.
+
+I want a continuous trajectory over the entire ray.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Set the ``minimum_coherent_box_fraction`` keyword argument to a very
+large number, like infinity (`numpy.inf`).
+
+.. note::
+
+   As of :code:`yt-3.0`, the functionality for recording properties of
+   the nearest halo to each element of the ray no longer exists.  This
+   is still available in :code:`yt-2.x`.  If you would like to use this
+   feature in :code:`yt-3.x`, help is needed to port it over.  Contact
+   the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

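Taken together, the new keywords support a zoom-in workflow along these lines;
a sketch in which the parameter file, region bounds, and refinement level are
hypothetical placeholders:

    import numpy as np
    from yt.analysis_modules.cosmological_observation.api import LightRay

    # Limit segment spacing to the high resolution region (here, half the box).
    lr = LightRay("enzo_amr.par", "Enzo", 0.0, 0.03,  # hypothetical simulation
                  max_box_fraction=0.5)

    # Confine rays to the subvolume, forbid periodic wrapping, and require
    # that segments stay within cells refined to at least level 2.
    lr.make_light_ray(seed=1234567, periodic=False,
                      left_edge=np.ones(3) * 0.25,
                      right_edge=np.ones(3) * 0.75,
                      min_level=2,
                      fields=['temperature', 'density'],
                      data_filename='lightray.h5')
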
diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -4,7 +4,7 @@
 ===================================================
 
 If you want to run a cosmological simulation that will have just enough data
-outputs to create a cosmology splice, the
+outputs to create a light cone or light ray, the
 :meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice`
 function will calculate a list of redshift outputs that will minimally
 connect a redshift interval.

diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,7 +67,7 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_004:
+  local_absorption_spectrum_005:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo

diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -21,6 +21,8 @@
 from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.physical_constants import \
+    c
 
 class CosmologySplice(object):
     """
@@ -67,7 +69,11 @@
         max_box_fraction : float
             In terms of the size of the domain, the maximum length a light
             ray segment can be in order to span the redshift interval from
-            one dataset to another.
+            one dataset to another.  If using a zoom-in simulation, this
+            parameter can be set to the length of the high resolution
+            region so as to limit ray segments to that size.  If the
+            high resolution region is not cubical, the smallest side
+            should be used.
             Default: 1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
@@ -115,6 +121,7 @@
                 output['next'] = self.splice_outputs[i + 1]
 
         # Calculate maximum delta z for each data dump.
+        self.max_box_fraction = max_box_fraction
         self._calculate_deltaz_max()
 
         # Calculate minimum delta z for each data dump.
@@ -144,7 +151,7 @@
             self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
             cosmology_splice.append(self.splice_outputs[0])
             z = cosmology_splice[-1]["redshift"]
-            z_target = z - max_box_fraction * cosmology_splice[-1]["dz_max"]
+            z_target = z - cosmology_splice[-1]["dz_max"]
 
             # fill redshift space with datasets
             while ((z_target > near_redshift) and
@@ -172,7 +179,7 @@
 
                 cosmology_splice.append(current_slice)
                 z = current_slice["redshift"]
-                z_target = z - max_box_fraction * current_slice["dz_max"]
+                z_target = z - current_slice["dz_max"]
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
@@ -199,8 +206,8 @@
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
         
-        # change the 'next' and 'previous' pointers to point to the correct outputs for the created
-        # splice
+        # change the 'next' and 'previous' pointers to point to the correct outputs
+        # for the created splice
         for i, output in enumerate(cosmology_splice):
             if len(cosmology_splice) == 1:
                 output['previous'] = None
@@ -264,7 +271,8 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
+                                              self.max_box_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -282,72 +290,23 @@
         from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
+        target_distance = self.simulation.box_size * \
+          self.max_box_fraction
+        for output in self.splice_outputs:
+            output['dz_max'] = self._deltaz_forward(output['redshift'],
+                                                    target_distance)
 
-        target_distance = self.simulation.box_size
-
-        for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of the box
-            # at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.1 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            output['dz_max'] = np.abs(z2 - z)
-            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size / \
           self.simulation.domain_dimensions[0]
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of a
-            # top grid pixel at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.01 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            # Use this calculation or the absolute minimum specified by the user.
-            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
+            zf = self._deltaz_forward(output['redshift'],
+                                      target_distance)
+            output['dz_min'] = max(zf, deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,10 +316,13 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        # Calculate delta z that corresponds to the length of the
-        # box at a given redshift.
         z1 = z
-        z2 = z1 - 0.1 # just an initial guess
+        # Use Hubble's law for initial guess
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
+        v = self.cosmology.hubble_parameter(z) * target_distance
+        v = min(v, 0.9 * c)
+        dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
+        z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1

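The rewritten _deltaz_forward replaces the fixed initial guess with a
physically motivated one: Hubble's law gives the recession velocity at the
target distance, and the relativistic Doppler formula converts that velocity
into a starting redshift offset for Newton's method. The arithmetic in
isolation, as a sketch using the same Cosmology API:

    import numpy as np
    from yt.utilities.cosmology import Cosmology
    from yt.utilities.physical_constants import c

    co = Cosmology()
    z = 0.5
    target_distance = co.quan(100.0, "Mpccm / h")  # example distance

    # Hubble's law, capped below c so the Doppler formula stays finite.
    v = min(co.hubble_parameter(z) * target_distance, 0.9 * c)

    # Relativistic Doppler shift: the initial guess for the iteration.
    dz = np.sqrt((1. + v / c) / (1. - v / c)) - 1.
    print(dz)
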
diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -79,21 +79,23 @@
     max_box_fraction : optional, float
         In terms of the size of the domain, the maximum length a light
         ray segment can be in order to span the redshift interval from
-        one dataset to another.
+        one dataset to another.  If using a zoom-in simulation, this
+        parameter can be set to the length of the high resolution
+        region so as to limit ray segments to that size.  If the
+        high resolution region is not cubical, the smallest side
+        should be used.
         Default: 1.0 (the size of the box)
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
         datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
-        Used with use_minimum_datasets set to False, this parameter
-        specifies the fraction of the total box size to be traversed
-        before rerandomizing the projection axis and center.  This
-        was invented to allow light rays with thin slices to sample
-        coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.  
-        Do not use for simple rays.
-        Default: 0.0.
+        Used to specify the minimum length of a ray, in terms of the
+        size of the domain, before the trajectory is re-randomized.
+        Set to 0 to have the ray trajectory randomized for every dataset.
+        Set to np.inf (infinity) to use a single trajectory for the
+        entire ray.
+        Default: 0.0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
         datasets for time series.  Do not use for simple rays.
@@ -123,6 +125,11 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
+        if near_redshift is not None and far_redshift is not None and \
+          near_redshift >= far_redshift:
+            raise RuntimeError(
+                "near_redshift must be less than far_redshift.")
+
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
         self.use_minimum_datasets = use_minimum_datasets
@@ -154,8 +161,7 @@
                 self.cosmology = Cosmology(
                     hubble_constant=self.ds.hubble_constant,
                     omega_matter=self.ds.omega_matter,
-                    omega_lambda=self.ds.omega_lambda,
-                    unit_registry=self.ds.unit_registry)
+                    omega_lambda=self.ds.omega_lambda)
             else:
                 redshift = 0.
             self.light_ray_solution.append({"filename": self.parameter_filename,
@@ -169,20 +175,23 @@
             CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
-              self.create_cosmology_splice(self.near_redshift, self.far_redshift,
-                                           minimal=self.use_minimum_datasets,
-                                           max_box_fraction=max_box_fraction,
-                                           deltaz_min=self.deltaz_min,
-                                           time_data=time_data,
-                                           redshift_data=redshift_data)
+              self.create_cosmology_splice(
+                  self.near_redshift, self.far_redshift,
+                  minimal=self.use_minimum_datasets,
+                  max_box_fraction=max_box_fraction,
+                  deltaz_min=self.deltaz_min,
+                  time_data=time_data,
+                  redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
+                                      left_edge=None, right_edge=None,
+                                      min_level=None, periodic=True,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        np.random.seed(seed)
+        my_random = np.random.RandomState(seed)
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
@@ -192,9 +201,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.asarray(start_position)
+            self.light_ray_solution[0]['start'] = start_position
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.asarray(end_position)
+                self.light_ray_solution[0]['end'] = end_position
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -228,29 +237,40 @@
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
-                # enough of the box has been used,
-                # or if box_fraction_used will be greater than 1 after this slice.
-                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                        (box_fraction_used >
-                         self.minimum_coherent_box_fraction) or \
-                        (box_fraction_used +
-                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                    # Random start point
-                    self.light_ray_solution[q]['start'] = np.random.random(3)
-                    theta = np.pi * np.random.random()
-                    phi = 2 * np.pi * np.random.random()
-                    box_fraction_used = 0.0
+                # enough of the box has been used.
+                if (q == 0) or (box_fraction_used >=
+                                self.minimum_coherent_box_fraction):
+                    if periodic:
+                        self.light_ray_solution[q]['start'] = left_edge + \
+                          (right_edge - left_edge) * my_random.random_sample(3)
+                        theta = np.pi * my_random.random_sample()
+                        phi = 2 * np.pi * my_random.random_sample()
+                        box_fraction_used = 0.0
+                    else:
+                        ds = load(self.light_ray_solution[q]["filename"])
+                        ray_length = \
+                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
+                                  "unitary")
+                        self.light_ray_solution[q]['start'], \
+                          self.light_ray_solution[q]['end'] = \
+                          non_periodic_ray(ds, left_edge, right_edge, ray_length,
+                                           my_random=my_random, min_level=min_level)
+                        del ds
                 else:
-                    # Use end point of previous segment and same theta and phi.
+                    # Use end point of previous segment, adjusted for periodicity,
+                    # and the same trajectory.
                     self.light_ray_solution[q]['start'] = \
-                      self.light_ray_solution[q-1]['end'][:]
+                      periodic_adjust(self.light_ray_solution[q-1]['end'][:],
+                                      left=left_edge, right=right_edge)
 
-                self.light_ray_solution[q]['end'] = \
-                  self.light_ray_solution[q]['start'] + \
-                    self.light_ray_solution[q]['traversal_box_fraction'] * \
-                    np.array([np.cos(phi) * np.sin(theta),
-                              np.sin(phi) * np.sin(theta),
-                              np.cos(theta)])
+                if "end" not in self.light_ray_solution[q]:
+                    self.light_ray_solution[q]['end'] = \
+                      self.light_ray_solution[q]['start'] + \
+                        self.light_ray_solution[q]['traversal_box_fraction'] * \
+                        self.simulation.box_size * \
+                        np.array([np.cos(phi) * np.sin(theta),
+                                  np.sin(phi) * np.sin(theta),
+                                  np.cos(theta)])
                 box_fraction_used += \
                   self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -261,15 +281,18 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None,
+    def make_light_ray(self, seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
                        start_position=None, end_position=None,
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=None, use_peculiar_velocity=True, 
-                       redshift=None, njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True,
+                       redshift=None, field_parameters=None, njobs=-1):
         """
-        make_light_ray(seed=None, start_position=None, end_position=None,
+        make_light_ray(seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
+                       start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
                        use_peculiar_velocity=True, redshift=None,
@@ -285,6 +308,29 @@
         seed : optional, int
             Seed for the random number generator.
             Default: None.
+        periodic : optional, bool
+            If True, ray trajectories will make use of periodic
+            boundaries.  If False, ray trajectories will not be
+            periodic.
+            Default : True.
+        left_edge : optional, iterable of floats or YTArray
+            The left corner of the region in which rays are to be
+            generated.  If None, the left edge will be that of the
+            domain.  If specified without units, it is assumed to
+            be in code units.
+            Default: None.
+        right_edge : optional, iterable of floats or YTArray
+            The right corner of the region in which rays are to be
+            generated.  If None, the right edge will be that of the
+            domain.  If specified without units, it is assumed to
+            be in code units.
+            Default: None.
+        min_level : optional, int
+            The minimum refinement level of the spatial region through
+            which the ray passes.  This can be used with zoom-in
+            simulations where the high resolution region does not keep
+            a constant geometry.
+            Default: None.
         start_position : optional, iterable of floats or YTArray.
             Used only if creating a light ray from a single dataset.
             The coordinates of the starting position of the ray.
@@ -363,30 +409,56 @@
         ...                       use_peculiar_velocity=True)
 
         """
+        if self.simulation_type is None:
+            domain = self.ds
+        else:
+            domain = self.simulation
 
-        if start_position is not None and hasattr(start_position, 'units'):
-            start_position = start_position.to('unitary')
-        elif start_position is not None :
-            start_position = self.ds.arr(
-                start_position, 'code_length').to('unitary')
+        assumed_units = "code_length"
+        if left_edge is None:
+            left_edge = domain.domain_left_edge
+        elif not hasattr(left_edge, 'units'):
+            left_edge = domain.arr(left_edge, assumed_units)
+        left_edge.convert_to_units('unitary')
 
-        if end_position is not None and hasattr(end_position, 'units'):
-            end_position = end_position.to('unitary')
-        elif end_position is not None :
-            end_position = self.ds.arr(
-                end_position, 'code_length').to('unitary')
+        if right_edge is None:
+            right_edge = domain.domain_right_edge
+        elif not hasattr(right_edge, 'units'):
+            right_edge = domain.arr(right_edge, assumed_units)
+        right_edge.convert_to_units('unitary')
+
+        if start_position is not None:
+            if hasattr(start_position, 'units'):
+                start_position = start_position
+            else:
+                start_position = self.ds.arr(start_position, assumed_units)
+            start_position.convert_to_units('unitary')
+
+        if end_position is not None:
+            if hasattr(end_position, 'units'):
+                end_position = end_position
+            else:
+                end_position = self.ds.arr(end_position, assumed_units)
+            end_position.convert_to_units('unitary')
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity
-            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
+            mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
+                       "Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
+                                           left_edge=left_edge,
+                                           right_edge=right_edge,
+                                           min_level=min_level, periodic=periodic,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
 
+        if field_parameters is None:
+            field_parameters = {}
+
         # Initialize data structures.
         self._data = {}
         # temperature field is automatically added to fields
@@ -427,19 +499,11 @@
             if setup_function is not None:
                 setup_function(ds)
 
-            if start_position is not None:
-                my_segment["start"] = ds.arr(my_segment["start"], "unitary")
-                my_segment["end"] = ds.arr(my_segment["end"], "unitary")
-            else:
-                my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                  ds.domain_left_edge
-                my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                  ds.domain_left_edge
-
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
-                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                if isinstance(my_segment["traversal_box_fraction"], YTArray) and \
+                  not my_segment["traversal_box_fraction"].units.is_dimensionless:
                     segment_length = \
                       my_segment["traversal_box_fraction"].in_units("Mpccm / h")
                 else:
@@ -453,18 +517,18 @@
             else:
                 next_redshift = my_segment['next']['redshift']
 
+            # Make sure start, end, left, right
+            # are using the dataset's unit system.
+            my_start = ds.arr(my_segment['start'])
+            my_end   = ds.arr(my_segment['end'])
+            my_left  = ds.arr(left_edge)
+            my_right = ds.arr(right_edge)
             mylog.info("Getting segment at z = %s: %s to %s." %
-                       (my_segment['redshift'], my_segment['start'],
-                        my_segment['end']))
-
-            # Convert segment units from unitary to code length for sub_ray
-            my_segment['start'] = my_segment['start'].to('code_length')
-            my_segment['end'] = my_segment['end'].to('code_length')
+                       (my_segment['redshift'], my_start, my_end))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
-                                        left=ds.domain_left_edge,
-                                        right=ds.domain_right_edge)
+            sub_segments = periodic_ray(my_start, my_end,
+                                        left=my_left, right=my_right)
 
             # Prepare data structure for subsegment.
             sub_data = {}
@@ -477,6 +541,8 @@
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = ds.ray(sub_segment[0], sub_segment[1])
+                for key, val in field_parameters.items():
+                    sub_ray.set_field_parameter(key, val)
                 asort = np.argsort(sub_ray["t"])
                 sub_data['dl'].extend(sub_ray['dts'][asort] *
                                       vector_length(sub_ray.start_point,
@@ -515,7 +581,7 @@
                     # sight) and the velocity vectors: a dot b = ab cos(theta)
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
-                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
                     # Protect against situations where velocity mag is exactly
                     # zero, in which case zero / zero = NaN.
                     cos_theta = np.nan_to_num(cos_theta)
@@ -535,8 +601,7 @@
             # Get redshift for each lixel.  Assume linear relation between l 
             # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'],
-                                                my_segment['end']).in_cgs())
+                (sub_data['dl'] / vector_length(my_start, my_end).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
@@ -672,6 +737,22 @@
 
     return np.sqrt(np.power((end - start), 2).sum())
 
+def periodic_adjust(p, left=None, right=None):
+    """
+    Return the point p adjusted for periodic boundaries.
+
+    """
+    if isinstance(p, YTArray):
+        p.convert_to_units("unitary")
+    if left is None:
+        left = np.zeros_like(p)
+    if right is None:
+        right = np.ones_like(p)
+
+    w = right - left
+    p -= left
+    return np.mod(p, w)
+
 def periodic_distance(coord1, coord2):
     """
     periodic_distance(coord1, coord2)
@@ -713,7 +794,7 @@
     dim = right - left
 
     vector = end - start
-    wall = np.zeros(start.shape)
+    wall = np.zeros_like(start)
     close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
@@ -733,7 +814,6 @@
     this_end = end.copy()
     t = 0.0
     tolerance = 1e-6
-
     while t < 1.0 - tolerance:
         hit_left = (this_start <= left) & (vector < 0)
         if (hit_left).any():
@@ -751,8 +831,44 @@
         now = this_start + vector * dt
         close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([np.copy(this_start), np.copy(now)])
+        segments.append([this_start.copy(), now.copy()])
         this_start = now.copy()
         t += dt
 
     return segments
+
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000,
+                     min_level=None, my_random=None):
+
+    max_length = vector_length(left_edge, right_edge)
+    if ray_length > max_length:
+        raise RuntimeError(
+            ("The maximum segment length in the region %s to %s is %s, " +
+             "but the ray length requested is %s.  Decrease ray length.") %
+             (left_edge, right_edge, max_length, ray_length))
+
+    if my_random is None:
+        my_random = np.random.RandomState()
+    i = 0
+    while True:
+        start = my_random.random_sample(3) * \
+          (right_edge - left_edge) + left_edge
+        theta = np.pi * my_random.random_sample()
+        phi = 2 * np.pi * my_random.random_sample()
+        end = start + ray_length * \
+          np.array([np.cos(phi) * np.sin(theta),
+                    np.sin(phi) * np.sin(theta),
+                    np.cos(theta)])
+        i += 1
+        test_ray = ds.ray(start, end)
+        if (end >= left_edge).all() and (end <= right_edge).all() and \
+          (min_level is None or min_level <= 0 or
+           (test_ray["grid_level"] >= min_level).all()):
+            mylog.info("Found ray after %d attempts." % i)
+            del test_ray
+            return start, end
+        del test_ray
+        if i > max_iter:
+            raise RuntimeError(
+                ("Failed to create segment in %d attempts.  " +
+                 "Decreasing ray length is recommended") % i)

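The new periodic_adjust helper is a one-liner at heart: it maps a segment
endpoint back into the domain under periodic boundaries. The equivalent numpy
arithmetic, for a concrete feel (unit box assumed):

    import numpy as np

    left = np.zeros(3)
    right = np.ones(3)
    p = np.array([1.2, -0.1, 0.5])  # an endpoint that wandered out of the box

    # Wrap back into [left, right), exactly as periodic_adjust does.
    print(np.mod(p - left, right - left))  # -> [0.2, 0.9, 0.5]
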
diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -10,6 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.testing import \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay
@@ -41,6 +43,48 @@
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
+@requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nested():
+    """
+    This test generates a cosmological light ray, confining the ray to a subvolume.
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    left = np.ones(3) * 0.25
+    right = np.ones(3) * 0.75
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nonperiodic():
+    """
+    This test generates a cosmological light ray using non-periodic segments.
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, periodic=False,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
 def test_light_ray_non_cosmo():

diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -197,10 +197,20 @@
     def __init__(self, start_point, end_point, ds=None,
                  field_parameters=None, data_source=None):
         super(YTRay, self).__init__(ds, field_parameters, data_source)
-        self.start_point = self.ds.arr(start_point,
-                            'code_length', dtype='float64')
-        self.end_point = self.ds.arr(end_point,
-                            'code_length', dtype='float64')
+        if isinstance(start_point, YTArray):
+            self.start_point = \
+              self.ds.arr(start_point).to("code_length")
+        else:
+            self.start_point = \
+              self.ds.arr(start_point, 'code_length',
+                          dtype='float64')
+        if isinstance(end_point, YTArray):
+            self.end_point = \
+              self.ds.arr(end_point).to("code_length")
+        else:
+            self.end_point = \
+              self.ds.arr(end_point, 'code_length',
+                          dtype='float64')
         self.vec = self.end_point - self.start_point
         self._set_center(self.start_point)
         self.set_field_parameter('center', self.start_point)

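With this change, ray endpoints that carry units are converted to code length
rather than silently reinterpreted. A sketch, with the dataset path
hypothetical:

    import yt

    ds = yt.load("snapshot_000")  # hypothetical dataset

    # Unitful endpoints are now honored and converted to code_length.
    start = ds.arr([0.1, 0.1, 0.1], "Mpc")
    end = ds.arr([0.9, 0.9, 0.9], "Mpc")
    ray = ds.ray(start, end)
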
diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -868,8 +868,7 @@
             self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
-                              omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry)
+                              omega_lambda=self.omega_lambda)
             self.critical_density = \
                     self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)

diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -110,6 +110,8 @@
         self.domain_right_edge = self.domain_right_edge * self.length_unit
         self.unit_registry.modify("code_time", self.time_unit)
         self.unit_registry.modify("code_length", self.length_unit)
+        self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                               self.length_unit.units.dimensions)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None,

diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/frontends/gadget/simulation_handling.py
--- a/yt/frontends/gadget/simulation_handling.py
+++ b/yt/frontends/gadget/simulation_handling.py
@@ -102,6 +102,8 @@
             self.box_size = self.box_size * self.length_unit
             self.domain_left_edge = self.domain_left_edge * self.length_unit
             self.domain_right_edge = self.domain_right_edge * self.length_unit
+            self.unit_registry.add("unitary", float(self.box_size.in_base()),
+                                   self.length_unit.units.dimensions)
         else:
             # Read time from file for non-cosmological sim
             self.time_unit = self.quan(

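Registering "unitary" against the box size means fractions of the domain
convert like any other length through a simulation's unit registry. A sketch,
with the parameter file hypothetical:

    import yt

    sim = yt.simulation("enzo_amr.par", "Enzo")  # hypothetical parameter file

    # "unitary" now maps to the box size, so box fractions are convertible.
    print(sim.quan(0.5, "unitary").to("Mpccm/h"))
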
diff -r 3da56963e330099e1ebcfc97adb8a8c032779461 -r 784bb3ccc436b610c202f9898c349f2122bac5af yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -33,7 +33,14 @@
 
     For an explanation of the various cosmological measures, see, for example 
     Hogg (1999, http://xxx.lanl.gov/abs/astro-ph/9905116).
-    
+
+    WARNING: Cosmological distance calculations return values that are either
+    in the comoving or proper frame, depending on the specific quantity.  For
+    simplicity, the proper and comoving frames are set equal to each other
+    within the cosmology calculator.  This means that for some distance value,
+    x, x.to("Mpc") and x.to("Mpccm") will be the same.  The user should take
+    care to understand which reference frame is correct for the given calculation.
+
     Parameters
     ----------
     hubble_constant : float
@@ -58,7 +65,7 @@
     >>> from yt.utilities.cosmology import Cosmology
     >>> co = Cosmology()
     >>> print(co.hubble_time(0.0).in_units("Gyr"))
-    
+
     """
     def __init__(self, hubble_constant = 0.71,
                  omega_matter = 0.27,
@@ -66,9 +73,9 @@
                  omega_curvature = 0.0,
                  unit_registry = None,
                  unit_system = "cgs"):
-        self.omega_matter = omega_matter
-        self.omega_lambda = omega_lambda
-        self.omega_curvature = omega_curvature
+        self.omega_matter = float(omega_matter)
+        self.omega_lambda = float(omega_lambda)
+        self.omega_curvature = float(omega_curvature)
         if unit_registry is None:
             unit_registry = UnitRegistry()
             unit_registry.modify("h", hubble_constant)


https://bitbucket.org/yt_analysis/yt/commits/6202b72efeca/
Changeset:   6202b72efeca
Branch:      stable
User:        atmyers
Date:        2016-07-25 16:48:31+00:00
Summary:     Backporting PR #2290 https://bitbucket.org/yt_analysis/yt/pull-requests/2290
Affected #:  2 files

diff -r 784bb3ccc436b610c202f9898c349f2122bac5af -r 6202b72efeca9d19f0ce7a1270b4ad4cced3fe31 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -325,7 +325,7 @@
   of length 1.0 in "code length" which may produce strange results for volume
   quantities.
 
-.. _loading-fits-data:
+.. _loading-exodusii-data:
 
 Exodus II Data
 --------------
@@ -481,6 +481,7 @@
     ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
                   displacements={'connect2': (5.0, [0.0, 0.0, 1.0])})
 
+.. _loading-fits-data:
 
 FITS Data
 ---------
@@ -1042,6 +1043,8 @@
 
 yt will utilize length, mass and time to set up all other units.
 
+.. _loading-gamer-data:
+
 GAMER Data
 ----------
 

diff -r 784bb3ccc436b610c202f9898c349f2122bac5af -r 6202b72efeca9d19f0ce7a1270b4ad4cced3fe31 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -11,6 +11,316 @@
 The `CREDITS file <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ contains the
 most up-to-date list of everyone who has contributed to the yt source code.
 
+Version 3.3
+-----------
+
+Version 3.3 is the first major release of yt since July 2015. It includes more
+than 3000 commits from 41 contributors, including 12 new contributors.
+
+Major enhancements
+^^^^^^^^^^^^^^^^^^
+
+* Raw and processed data from selections, projections, profiles and so forth can
+  now be saved in a ytdata format and loaded back in by yt. See 
+  :ref:`saving_data`.
+* Totally re-worked volume rendering API. The old API is still available for users
+  who prefer it, however. See :ref:`volume_rendering`.
+* Support for unstructured mesh visualization. See 
+  :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`.
+* Interactive Data Visualization for AMR and unstructured mesh datasets. See
+  :ref:`interactive_data_visualization`.
+* Several new colormaps, including a new default, 'arbre'. The other new
+  colormaps are named 'octarine', 'kelp', and 'dusk'. All these new colormaps
+  were generated using the `viscm package
+  <https://github.com/matplotlib/viscm>`_ and should do a better job of
+  representing the data for colorblind viewers and when printed out in
+  grayscale. See :ref:`colormaps` for more detail.
+* New frontends for the :ref:`ExodusII <loading-exodusii-data>`, 
+  :ref:`GAMER <loading-gamer-data>`, and :ref:`Gizmo <loading-gizmo-data>` data 
+  formats.
+* The unit system associated with a dataset is now customizable, defaulting to
+  CGS. See :ref:`unit_systems`.
+* Enhancements and usability improvements for analysis modules, especially the
+  ``absorption_spectrum``, ``photon_simulator``, and ``light_ray`` modules. See
+  :ref:`synthetic-observations`.
+* Data objects can now be created via an alternative Numpy-like API. See
+  :ref:`quickly-selecting-data`.
+* A line integral convolution plot modification. See
+  :ref:`annotate-line-integral-convolution`.
+* Many speed optimizations, including to the volume rendering, units, tests,
+  covering grids, the absorption spectrum and photon simulator analysis modules,
+  and ghost zone generation.
+* Packaging and release-related improvements: better install and setup scripts,
+  automated PR backporting.
+* Readability improvements to the codebase, including linting, removing dead
+  code, and refactoring much of the Cython.
+* Improvements to the CI infrastructure, including more extensible answer tests
+  and automated testing for Python 3 and Windows.
+* Numerous documentation improvements, including formatting tweaks, bugfixes,
+  and many new cookbook recipes.
+* Support for geographic (lat/lon) coordinates.
+* Several improvements for SPH codes, including alternative smoothing kernels,
+  an ``add_smoothed_particle_field`` function, and particle type-aware octree
+  construction for Gadget data.
+* Roundtrip conversions between Pint and yt units.
+* Added halo data containers for gadget_fof frontend.
+* Enabled support for spherical datasets in the BoxLib frontend.
+* Many new tests have been added.
+* Better hashing for Selector objects.
+
+Minor enhancements and bugfixes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Fixed many bugs related to Python 3 compatibility
+* Fixed bugs related to compatibility issues with newer versions of numpy
+* Added the ability to export data objects to a Pandas dataframe
+* Added support for the fabs ufunc to YTArray
+* Fixed two licensing issues
+* Fixed a number of bugs related to Windows compatibility.
+* We now avoid hard-to-decipher tracebacks when loading empty files or
+  directories
+* Fixed a bug related to ART star particle creation time field
+* Fixed a bug caused by using the wrong int type for indexing in particle deposit
+* Fixed a NameError bug in comparing temperature units with offsets
+* Fixed an API bug in YTArray casting during coercion from YTQuantity
+* Added loadtxt and savetxt convenience functions for ``YTArray``
+* Fixed an issue caused by not sorting species names with Enzo
+* Fixed a units bug for RAMSES when ``boxlen > 1``.
+* Fixed ``process_chunk`` function for non-cartesian geometry.
+* Added ``scale_factor`` attribute to cosmological simulation datasets
+* Fixed a bug where "center" vectors are used instead of "normal" vectors in
+  get_sph_phi(), etc.
+* Fixed issues involving invalid FRBs when users called _setup_plots in their
+  scripts
+* Added a ``text_args`` keyword to ``annotate_scale()`` callback
+* Added a print_stats function for RAMSES
+* Fixed a number of bugs in the Photon Simulator
+* Added support for particle fields to the [Min,Max]Location derived quantities
+* Fixed some units bugs for Gadget cosmology simulations
+* Fixed a bug with Gadget/GIZMO StarFormationRate units
+* Fixed an issue in TimeSeriesData where all the filenames were getting passed
+  to ``load`` on each processor.
+* Fixed a units bug in the Tipsy frontend
+* Ensured that ARTIOIndex.get_smallest_dx() returns a quantity with units
+* Ensured that plots are valid after invalidating the figure
+* Fixed a bug regarding code unit labels
+* Fixed a bug with reading Tipsy Aux files
+* Added an effective redshift field to the Light Ray analysis module for use in
+  AbsorptionSpectrum
+* Fixed a bug with the redshift calculation in LightRay analysis module
+* Fixed a bug in the Orion frontend when you had more than 10 on-disk particle
+  fields in the file
+* Detect more types of ART files
+* Update derived_field_list in add_volume_weighted_smoothed_field
+* Fixed casting issues for 1D and 2D Enzo simulations
+* Avoid type indirection when setting up data object entry points
+* Fixed issues with SIMPUT files
+* Fixed loading athena data in python3 with provided parameters
+* Tipsy cosmology unit fixes
+* Fixed bad unit labels for compound units
+* Making the xlim and ylim of the PhasePlot plot axes controllable
+* Adding grid_arrays to grid_container
+* An Athena and a GDF bugfix
+* A small bugfix and some small enhancements for sunyaev_zeldovich
+* Defer to coordinate handlers for width
+* Make array_like_field return same units as get_data
+* Fixing bug in ray "dts" and "t" fields
+* Check against string_types not str
+* Closed a loophole that allowed improper LightRay use
+* Enabling AbsorptionSpectrum to deposit unresolved spectral lines
+* Fixed an ART byte/string/array issue
+* Changing AbsorptionSpectrum attribute lambda_bins to be lambda_field for
+  consistency
+* No longer require user to save to disk when generating an AbsorptionSpectrum
+* ParticlePlot FRBs can now use save_as_dataset and save attributes properly
+* Added checks to assure ARTIO creates a metal_density field from existing metal
+  fields.
+* Added mask to LightRay to assure output elements have non-zero density (a
+  problem in some SPH datasets)
+* Added a "fields" attribute to datasets
+* Updated the TransferFunctionHelper to work with new profiles
+* Fixed a bug where the field_units kwarg to load_amr_grids didn't do anything
+* Changed photon_simulator's output file structure
+* Fixed a bug related to setting output_units.
+* Implemented ptp operation.
+* Added effects of transverse doppler redshift to LightRay
+* Fixed a casting error for float and int64 multiplication in sdf class
+* Added ability to read and write YTArrays to and from groups within HDF5 files
+* Made ftype of "on-disk" stream fields "stream"
+* Fixed a strings decoding issue in the photon simulator
+* Fixed an incorrect docstring in load_uniform_grid
+* Made PlotWindow show/hide helpers for axes and colorbar return self
+* Made Profile objects store field metadata.
+* Ensured GDF unit names are strings
+* Taught off_axis_projection about its resolution keyword.
+* Reintroduced sanitize_width for polar/cyl coordinates.
+* We now fail early when load_uniform_grid is passed data with an incorrect shape
+* Replaced progress bar with tqdm
+* Fixed redshift scaling of "Overdensity" field in yt-2.x
+* Fixed several bugs in the eps_writer
+* Fixed bug affecting 2D BoxLib simulations.
+* Implemented to_json and from_json for the UnitRegistry object
+* Fixed a number of issues with ds.find_field_values_at_point[s]
+* Fixed a bug where sunrise_exporter was using wrong imports
+* Import HUGE from utilities.physical_ratios
+* Fixed bug in ARTIO table look ups
+* Added support for longitude and latitude
+* Added halo data containers for the gadget_fof frontend.
+* Can now compare YTArrays without copying them
+* Fixed several bugs related to active particle datasets
+* Angular_momentum_vector now only includes space for particle fields if they
+  exist.
+* Image comparison tests now print a meaningful error message if they fail.
+* Fixed numpy 1.11 compatibility issues.
+* Changed _skip_cache to be True by default.
+* Enable support for spherical datasets in the BoxLib frontend.
+* Fixed a bug in add_deposited_particle_field.
+* Fixed issues with input sanitization in the point data object.
+* Fixed a copy/paste error introduced by refactoring WeightedMeanParticleField
+* Fixed many formatting issues in the docs build
+* Now avoid creating particle unions for particle types that have no common
+  fields
+* Patched ParticlePlot to work with filtered particle fields.
+* Fixed a couple corner cases in gadget_fof frontend
+* We now properly normalise all normal vectors in functions that take a normal
+  vector (e.g. get_sph_theta)
+* Fixed a bug where the transfer function features were not always getting
+  cleared properly.
+* Made the Chombo frontend is_valid method smarter.
+* Added a get_hash() function to yt/funcs.py which returns a hash for a file
+* Added Sievert to the default unit symbol table
+* Corrected an issue with periodic "wiggle" in AbsorptionSpectrum instances
+* Made ``ds.field_list`` sorted by default
+* Bug fixes for the Nyx frontend
+* Fixed a bug where the index needed to be created before calling derived
+  quantities
+* Made latex_repr a property, computed on-demand
+* Fixed a bug in off-axis slice deposition
+* Fixed a bug with some types of octree block traversal
+* Ensured that mpi operations retain ImageArray type instead of downgrading to
+  YTArray parent class
+* Added a call to _setup_plots in the custom colorbar tickmark example
+* Fixed two minor bugs in save_annotated
+* Added ability to specify that DatasetSeries is not a mixed data type
+* Fixed a memory leak in ARTIO
+* Fixed copy/paste error in to_frb method.
+* Ensured that particle dataset max_level is consistent with the index max_level
+* Fixed an issue where fields were getting added multiple times to
+  field_info.field_list
+* Enhanced annotate_ray and annotate_arrow callbacks
+* Added GDF answer tests
+* Made the YTFieldTypeNotFound exception more informative
+* Added a new function, fake_vr_orientation_test_ds(), for use in testing
+* Ensured that instances of subclasses of YTArray have the correct type
+* Re-enabled max_level for projections, ProjectionPlot, and OffAxisProjectionPlot
+* Fixed a bug in the Orion 2 field definitions
+* Fixed a bug caused by matplotlib not being added to install_requires
+* Edited PhasePlot class to have an annotate_title method
+* Implemented annotate_cell_edges
+* Handled KeyboardInterrupt in volume rendering Cython loop
+* Made the old halo finders accept ptype
+* Updated the latex commands in yt cheatsheet
+* Fixed a circular dependency loop bug in abar field definition for FLASH
+  datasets
+* Added neutral species aliases as described in YTEP 0003
+* Fixed a logging issue: don't create a StreamHandler unless we will use it
+* Corrected how theta and phi are calculated in
+  ``_particle_velocity_spherical_radius``,
+  ``_particle_velocity_spherical_theta``,
+  ``_particle_velocity_cylindrical_radius``, and
+  ``_particle_velocity_cylindrical_theta``
+* Fixed a bug related to the field dictionary in ``load_particles``
+* Allowed for the special case of supplying width as a tuple of tuples
+* Made yt compile with MSVC on Windows
+* Fixed a bug involving mask for dt in octree
+* Merged the get_yt.sh and install_script.sh into one
+* Added tests for the install script
+* Allowed the use of axis names instead of dimensions for spherical pixelization
+* Fixed a bug where close() wasn't being called in HDF5FileHandler
+* Enhanced commandline image upload/delete
+* Added get_brewer_cmap to get brewer colormaps without importing palettable at
+  the top level
+* Fixed a bug where a parallel_root_only function was getting called inside
+  another parallel_root_only function
+* Exit the install script early if python can't import the '_ssl' module
+* Make PlotWindow's annotate_clear method invalidate the plot
+* Added an int wrapper to avoid a deprecation warning from numpy
+* Automatically create vector fields for magnetic_field
+* Allow users to completely specify the filename of a 1D profile
+* Force nose to produce meaningful traceback for cookbook recipes' tests
+* Fixed x-ray display_name and documentation
+* Try to guess and load particle file for FLASH dataset
+* Sped up top-level yt import
+* Set the field type correctly for fields added as particle fields
+* Added a position location method for octrees
+* Fixed a copy/paste error in uhstack function
+* Made trig functions give correct results when supplied data with dimensions of
+  angle but units that aren't radian
+* Print out some useful diagnostic information if check_for_openmp() fails
+* Give user-added derived fields a default field type
+* Added support for periodicity in annotate_particles.
+* Added a check for whether returned field has units in volume-weighted smoothed
+  fields
+* Cast array indices as ints in the colormaps infrastructure
+* Fixed a bug where the standard particle fields weren't getting set up
+  correctly for the Orion frontends
+* Enabled LightRay to accept loaded datasets instead of just filenames
+* Allowed for adding or subtracting arrays filled with zeros without checking
+  units.
+* Fixed a bug in selection for semistructured meshes.
+* Removed 'io' from enzo particle types for active particle datasets
+* Added support for FLASH particle datasets.
+* Silenced a deprecation warning from IPython
+* Eliminated segfaults in KDTree construction
+* Fixed add_field handling when passed a tuple
+* Ensure field parameters are correct for fields that need ghost zones
+* Made it possible to use DerivedField instances to access data
+* Added ds.particle_type_counts
+* Bug fix and improvement for generating Google Cardboard VR in
+  StereoSphericalLens
+* Made DarkMatterARTDataset more robust in its _is_valid
+* Added Earth radius to units
+* Deposit hydrogen fields to grid in gizmo frontend
+* Switch to index values being int64
+* ValidateParameter ensures parameter values are used during field detection
+* Switched to using cythonize to manage dependencies in the setup script
+* ProfilePlot style changes and refactoring
+* Cancel terms with identical LaTeX representations in a LaTeX representation of
+  a unit
+* Only return early from comparison validation if base values are equal
+* Enabled particle fields for clump objects
+* Added validation checks for data types in callbacks
+* Enabled modification of image axis names in coordinate handlers
+* Only add OWLS/EAGLE ion fields if they are present
+* Ensured that PlotWindow plots continue to look the same under matplotlib 2.0
+* Fixed bug in quiver callbacks for off-axis slice plots
+* Only visit octree children if going to next level
+* Check that CIC always gets at least two cells
+* Fixed compatibility with matplotlib 1.4.3 and earlier
+* Fixed two EnzoSimulation bugs
+* Moved extraction code from YTSearchCmd to its own utility module
+* Changed amr_kdtree functions to be Node class methods
+* Sort block indices in order of ascending levels to match order of grid patches
+* MKS code unit system fixes
+* Disabled bounds checking on pixelize_element_mesh
+* Updated light_ray.py for domain width != 1
+* Implemented a DOAP file generator
+* Fixed bugs for 2D and 1D enzo IO
+* Converted mutable Dataset attributes to be properties that return copies
+* Allowed LightRay segments to extend further than one box length
+* Fixed a divide-by-zero error that occasionally happens in
+  triangle_plane_intersect
+* Make sure we have an index in subclassed derived quantities
+* Added an initial draft of an extensions document
+* Made it possible to pass field tuples to command-line plotting
+* Ensured the positions of coordinate vector lines are in code units
+* Added a minus sign to definition of sz_kinetic field
+* Added grid_levels and grid_indices fields to octrees
+* Added a morton_index derived field
+* Added an exception to AMRKDTree in the case of particle or oct-based data
+
+
+
 Version 3.2
 -----------
 
@@ -611,7 +921,7 @@
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
  * Adaptive HEALPix support
- * Column density calculations
+ * Column density calculations (see :ref:`radial-column-density`)
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.
  * Substantial improvements to the documentation, including
@@ -733,9 +1043,9 @@
 -----------
 
 Version 1.6 is a point release, primarily notable for the new parallel halo
-finder (see :ref:`halo-analysis`)
+finder (see :ref:`halo_finding`)
 
- * (New) Parallel HOP ( http://arxiv.org/abs/1001.3411 , :ref:`halo-analysis` )
+ * (New) Parallel HOP ( http://arxiv.org/abs/1001.3411 , :ref:`halo_finding` )
  * (Beta) Software ray casting and volume rendering
    (see :ref:`volume_rendering`)
  * Rewritten, faster and better contouring engine for clump identification
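
As a quick illustration of the ``loadtxt``/``savetxt`` convenience functions
noted in the changelog above, here is a minimal sketch.  It assumes the
top-level ``yt.savetxt`` and ``yt.loadtxt`` entry points and writes to a
scratch file named ``sphere.dat``:

.. code-block:: python

    import numpy as np
    import yt

    # savetxt records each column's units in a commented header line,
    # so loadtxt can restore them as YTArrays on read-back.
    temp = yt.YTArray(np.logspace(4, 6, 5), "K")
    velx = yt.YTArray(np.linspace(0.0, 100.0, 5), "km/s")
    yt.savetxt("sphere.dat", [temp, velx], header="temperature and x-velocity")

    temp2, velx2 = yt.loadtxt("sphere.dat")
    print(temp2.units, velx2.units)  # expect K and km/s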


https://bitbucket.org/yt_analysis/yt/commits/811272326876/
Changeset:   811272326876
Branch:      stable
User:        ngoldbaum
Date:        2016-07-26 20:00:40+00:00
Summary:     Backporting PR #2307 https://bitbucket.org/yt_analysis/yt/pull-requests/2307
Affected #:  2 files

diff -r 6202b72efeca9d19f0ce7a1270b4ad4cced3fe31 -r 811272326876391c73053359137e56383bef3935 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -278,17 +278,17 @@
 Overplot Cell Edges
 ~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_cell_edges(line_width=1.0, alpha = 1.0,
-                                  color = (0.0, 0.0, 0.0))
+.. function:: annotate_cell_edges(line_width=0.002, alpha=1.0, color='black')
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.CellEdgesCallback`.)
 
-    Annotate the edges of cells, where the ``line_width`` in pixels is specified.
-    The ``alpha`` of the overlaid image and the ``color`` of the lines are also
-    specifiable.  Note that because the lines are drawn from both sides of a
-    cell, the image sometimes has the effect of doubling the line width.
-    Color here is in RGB float values (0 to 1).
+    Annotate the edges of cells, where the ``line_width`` relative to size of
+    the longest plot axis is specified.  The ``alpha`` of the overlaid image and
+    the ``color`` of the lines are also specifiable.  Note that because the
+    lines are drawn from both sides of a cell, the image sometimes has the
+    effect of doubling the line width.  Color here is a matplotlib color name or
+    a 3-tuple of RGB float values.
 
 .. python-script::
 

diff -r 6202b72efeca9d19f0ce7a1270b4ad4cced3fe31 -r 811272326876391c73053359137e56383bef3935 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -2451,7 +2451,7 @@
 
 class CellEdgesCallback(PlotCallback):
     """
-    annotate_cell_edges(line_width=1.0, alpha = 1.0, color = (0.0, 0.0, 0.0))
+    annotate_cell_edges(line_width=0.002, alpha = 1.0, color = 'black')
 
     Annotate cell edges.  This is done through a second call to pixelize, where
     the distance from a pixel to a cell boundary in pixels is compared against
@@ -2461,12 +2461,13 @@
     Parameters
     ----------
     line_width : float
-        Distance, in pixels, from a cell edge that will mark a pixel as being
-        annotated as a cell edge.  Default is 1.0.
+        The width of the cell edge lines in normalized units relative to the
+        size of the longest axis.  Default is 0.002, i.e. 0.2% of the size
+        of the longest axis.
     alpha : float
         When the second image is overlaid, it will have this level of alpha
         transparency.  Default is 1.0 (fully-opaque).
-    color : tuple of three floats
+    color : tuple of three floats or matplotlib color name
         This is the color of the cell edge values.  It defaults to black.
 
     Examples
@@ -2480,11 +2481,13 @@
     """
     _type_name = "cell_edges"
     _supported_geometries = ("cartesian", "spectral_cube")
-    def __init__(self, line_width=1.0, alpha = 1.0, color=(0.0, 0.0, 0.0)):
+    def __init__(self, line_width=0.002, alpha = 1.0, color='black'):
+        from matplotlib.colors import ColorConverter
+        conv = ColorConverter()
         PlotCallback.__init__(self)
         self.line_width = line_width
         self.alpha = alpha
-        self.color = (np.array(color) * 255).astype("uint8")
+        self.color = (np.array(conv.to_rgb(color)) * 255).astype("uint8")
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -2494,6 +2497,24 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0]
         ny = plot.image._A.shape[1]
+        aspect = float((y1 - y0) / (x1 - x0))
+        pixel_aspect = float(ny)/nx
+        relative_aspect = pixel_aspect / aspect
+        if relative_aspect > 1:
+            nx = int(nx/relative_aspect)
+        else:
+            ny = int(ny*relative_aspect)
+        if aspect > 1:
+            if nx < 1600:
+                nx = int(1600./nx*ny)
+                ny = 1600
+            long_axis = ny
+        else:
+            if ny < 1600:
+                nx = int(1600./ny*nx)
+                ny = 1600
+            long_axis = nx
+        line_width = max(self.line_width*long_axis, 1.0)
         im = pixelize_cartesian(plot.data['px'],
                                 plot.data['py'],
                                 plot.data['pdx'],
@@ -2501,16 +2522,15 @@
                                 plot.data['px'], # dummy field
                                 int(nx), int(ny),
                                 (x0, x1, y0, y1),
-                                line_width=self.line_width).transpose()
+                                line_width=line_width).transpose()
         # New image:
         im_buffer = np.zeros((nx, ny, 4), dtype="uint8")
-        im_buffer[im>0,3] = 255
-        im_buffer[im>0,:3] = self.color
+        im_buffer[im > 0, 3] = 255
+        im_buffer[im > 0, :3] = self.color
         plot._axes.imshow(im_buffer, origin='lower',
-                          interpolation='nearest',
-                          extent = [xx0, xx1, yy0, yy1],
-                          alpha = self.alpha)
-        plot._axes.set_xlim(xx0,xx1)
-        plot._axes.set_ylim(yy0,yy1)
+                          interpolation='bilinear',
+                          extent=[xx0, xx1, yy0, yy1],
+                          alpha=self.alpha)
+        plot._axes.set_xlim(xx0, xx1)
+        plot._axes.set_ylim(yy0, yy1)
         plot._axes.hold(False)
-
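
A minimal sketch of the updated callback, assuming the standard
``IsolatedGalaxy`` sample dataset; ``line_width`` is now interpreted relative
to the longest plot axis, and ``color`` accepts a matplotlib color name:

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(ds, 'z', 'density', width=(10, 'kpc'))
    # line_width=0.002 means 0.2% of the longest plot axis; color may be
    # an RGB tuple or, as of this change, a matplotlib color name.
    slc.annotate_cell_edges(line_width=0.002, alpha=0.7, color='white')
    slc.save('cell_edges.png')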


https://bitbucket.org/yt_analysis/yt/commits/5e1d7b3189a9/
Changeset:   5e1d7b3189a9
Branch:      stable
User:        ngoldbaum
Date:        2016-07-28 21:06:24+00:00
Summary:     Backporting PR #2310 https://bitbucket.org/yt_analysis/yt/pull-requests/2310
Affected #:  4 files

diff -r 811272326876391c73053359137e56383bef3935 -r 5e1d7b3189a93818f0f5fc2239f1f8db460166d4 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_001:
+  local_pw_003:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes

diff -r 811272326876391c73053359137e56383bef3935 -r 5e1d7b3189a93818f0f5fc2239f1f8db460166d4 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -480,6 +480,9 @@
     test_unit = Unit('m_geom/l_geom**3')
     assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}')
 
+    test_unit = Unit('1e9*cm')
+    assert_equal(test_unit.latex_repr, '1.0 \\times 10^{9}\\ \\rm{cm}')
+
 def test_latitude_longitude():
     lat = unit_symbols.lat
     lon = unit_symbols.lon

diff -r 811272326876391c73053359137e56383bef3935 -r 5e1d7b3189a93818f0f5fc2239f1f8db460166d4 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -127,10 +127,20 @@
         symbols = invert_symbols[val]
         for i in range(1, len(symbols)):
             expr = expr.subs(symbols[i], symbols[0])
-
+    prefix = None
+    if isinstance(expr, Mul):
+        coeffs = expr.as_coeff_Mul()
+        if coeffs[0] == 1 or not isinstance(coeffs[0], Float):
+            pass
+        else:
+            expr = coeffs[1]
+            prefix = Float(coeffs[0], 2)
     latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot",
                        fold_frac_powers=True, fold_short_frac=True)
 
+    if prefix is not None:
+        latex_repr = latex(prefix, mul_symbol="times") + '\\ ' + latex_repr
+
     if latex_repr == '1':
         return ''
     else:

diff -r 811272326876391c73053359137e56383bef3935 -r 5e1d7b3189a93818f0f5fc2239f1f8db460166d4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -829,7 +829,9 @@
                 h_power = expr.as_coeff_exponent(h_expr)[1]
                 # un is now the original unit, but with h factored out.
                 un = str(expr*h_expr**(-1*h_power))
-                if str(un).endswith('cm') and un != 'cm':
+                un_unit = Unit(un, registry=self.ds.unit_registry)
+                cm = Unit('cm').expr
+                if str(un).endswith('cm') and cm not in un_unit.expr.atoms():
                     comoving = True
                     un = un[:-2]
                 # no length units besides code_length end in h so this is safe
@@ -839,18 +841,22 @@
                     # It doesn't make sense to scale a position by anything
                     # other than h**-1
                     raise RuntimeError
-                if un in formatted_length_unit_names:
-                    un = formatted_length_unit_names[un]
-                pp = un[0]
-                if pp in latex_prefixes:
-                    symbol_wo_prefix = un[1:]
-                    if symbol_wo_prefix in prefixable_units:
-                        un = un.replace(pp, "{"+latex_prefixes[pp]+"}", 1)
                 if un not in ['1', 'u', 'unitary']:
-                    if hinv:
-                        un = un + '\,h^{-1}'
-                    if comoving:
-                        un = un + '\,(1+z)^{-1}'
+                    if un in formatted_length_unit_names:
+                        un = formatted_length_unit_names[un]
+                    else:
+                        un = Unit(un, registry=self.ds.unit_registry)
+                        un = un.latex_representation()
+                        if hinv:
+                            un = un + '\,h^{-1}'
+                        if comoving:
+                            un = un + '\,(1+z)^{-1}'
+                        pp = un[0]
+                        if pp in latex_prefixes:
+                            symbol_wo_prefix = un[1:]
+                            if symbol_wo_prefix in prefixable_units:
+                                un = un.replace(
+                                    pp, "{"+latex_prefixes[pp]+"}", 1)
                     axes_unit_labels[i] = '\ \ ('+un+')'
 
             if self.oblique:
@@ -1692,8 +1698,9 @@
 
 class WindowPlotMPL(ImagePlotMPL):
     """A container for a single PlotWindow matplotlib figure and axes"""
-    def __init__(self, data, cbname, cblinthresh, cmap, extent, zlim, figure_size,
-                 fontsize, aspect, figure, axes, cax):
+    def __init__(self, data, cbname, cblinthresh, cmap, extent, zlim,
+                 figure_size, fontsize, aspect, figure, axes, cax):
+        from matplotlib.ticker import ScalarFormatter
         self._draw_colorbar = True
         self._draw_axes = True
         self._fontsize = fontsize
@@ -1721,7 +1728,14 @@
 
         self._init_image(data, cbname, cblinthresh, cmap, extent, aspect)
 
-        self.image.axes.ticklabel_format(scilimits=(-2, 3))
+        # In matplotlib 2.1 and newer we'll be able to do this using
+        # self.image.axes.ticklabel_format
+        # See https://github.com/matplotlib/matplotlib/pull/6337
+        formatter = ScalarFormatter(useMathText=True)
+        formatter.set_scientific(True)
+        formatter.set_powerlimits((-2, 3))
+        self.image.axes.xaxis.set_major_formatter(formatter)
+        self.image.axes.yaxis.set_major_formatter(formatter)
         if cbname == 'linear':
             self.cb.formatter.set_scientific(True)
             self.cb.formatter.set_powerlimits((-2, 3))
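
The new unit test above exercises the prefix handling directly; a minimal
sketch of the resulting behavior, assuming ``Unit`` from
``yt.units.unit_object``:

.. code-block:: python

    from yt.units.unit_object import Unit

    # A float coefficient in a unit expression is now split off and
    # rendered as a "\times 10^{n}" prefix in the LaTeX representation.
    u = Unit('1e9*cm')
    print(u.latex_repr)  # 1.0 \times 10^{9}\ \rm{cm}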


https://bitbucket.org/yt_analysis/yt/commits/4c91f634b67a/
Changeset:   4c91f634b67a
Branch:      stable
User:        atmyers
Date:        2016-07-26 23:43:05+00:00
Summary:     Backporting PR #2311 https://bitbucket.org/yt_analysis/yt/pull-requests/2311
Affected #:  2 files

diff -r 5e1d7b3189a93818f0f5fc2239f1f8db460166d4 -r 4c91f634b67a9e4573d7a2166e17c7b8159ad477 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -487,21 +488,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -513,8 +512,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -531,6 +530,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -560,9 +570,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -1414,7 +1424,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1439,10 +1449,10 @@
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1497,7 +1507,7 @@
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi

diff -r 5e1d7b3189a93818f0f5fc2239f1f8db460166d4 -r 4c91f634b67a9e4573d7a2166e17c7b8159ad477 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -3,40 +3,46 @@
 Unstructured Mesh Rendering
 ===========================
 
-Installation
-^^^^^^^^^^^^
+Beginning with version 3.3, yt has the ability to volume render unstructured
+mesh data like that created by finite element calculations. No additional 
+dependencies are required in order to use this feature. However, it is 
+possible to speed up the rendering operation by installing with 
+`Embree <https://embree.github.io>`_ support. Embree is a fast ray-tracing
+library from Intel that can substantially speed up the mesh rendering operation
+on large datasets. You can read about how to install yt with Embree support 
+below, or you can skip to the examples.
 
-Beginning with version 3.3, yt has the ability to volume render unstructured
-mesh data like that created by finite element calculations. In order to use
-this capability, a few additional dependencies are required. The easiest way
-to install yt with unstructured mesh support is to use conda to install the
+Optional Embree Installation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to install yt with Embree support is to use conda to install the
 most recent development version of yt from our channel:
 
 .. code-block:: bash
 
     conda install -c http://use.yt/with_conda/ yt
 
-If you want to install from source, you can use the ``get_yt.sh`` script.
-Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the
-top of the script. The ``get_yt.sh`` script can be downloaded by doing:
+Alternatively, you can install yt from source using the ``install_script.sh`` 
+script. Be sure to set the INST_CONDA, INST_YT_SOURCE, INST_EMBREE, 
+and INST_NETCDF4 flags to 1 at the top of the script. The ``install_script.sh`` 
+script can be downloaded by doing:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/get_yt.sh
+  wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 and then run like so:
 
 .. code-block:: bash
 
-  bash get_yt.sh
+  bash install_script.sh
 
-Alternatively, you can install the additional dependencies by hand.
-First, `embree <https://embree.github.io>`_
-(a fast software ray-tracing library from Intel) must be installed, either
-by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>`_ page.
+Finally, you can install the additional dependencies by hand.
+First, you will need to install Embree, either by compiling from source 
+or by using one of the pre-built binaries available at Embree's 
+`downloads <https://embree.github.io/downloads.html>`_ page.
 
-Second, the python bindings for embree (called
+Second, the python bindings for Embree (called
 `pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
 do so, first obtain a copy by, e.g., cloning the repo:
 
@@ -54,7 +60,7 @@
 
     CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
 
-Once embree and pyembree are installed, you must rebuild yt from source in order to use
+Once Embree and pyembree are installed, you must rebuild yt from source in order to use
 the unstructured mesh rendering capability. Once again, if embree is installed in a
 location that is not part of your default search path, you must tell yt where to find it.
 There are a number of ways to do this. One way is to again manually pass in the flags
@@ -84,20 +90,6 @@
 necessary if you installed embree into a location that is in your default path, such
 as /usr/local.
 
-Once the pre-requisites are installed, unstructured mesh data can be rendered
-much like any other dataset. In particular, a new type of
-:class:`~yt.visualization.volume_rendering.render_source.RenderSource` object
-has been defined, called the
-:class:`~yt.visualization.volume_rendering.render_source.MeshSource`, that
-represents the unstructured mesh data that will be rendered. The user creates
-this object, and also defines a
-:class:`~yt.visualization.volume_rendering.camera.Camera`
-that specifies your viewpoint into the scene. When
-:class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
-a set of rays are cast at the source. Each time a ray strikes the source mesh,
-the data is sampled at the intersection point at the resulting value gets
-saved into an image. See below for examples.
-
 Examples
 ^^^^^^^^
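
For context, rendering an unstructured mesh no longer requires Embree at all;
a minimal sketch, assuming the ``MOOSE_sample_data`` file from yt's sample
data collection:

.. code-block:: python

    import yt

    # Software ray tracing is the default; Embree, if installed, only
    # accelerates this step.
    ds = yt.load("MOOSE_sample_data/out.e-s010")
    sc = yt.create_scene(ds)
    sc.save("mesh_render.png")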
 


https://bitbucket.org/yt_analysis/yt/commits/6395fb87a1fc/
Changeset:   6395fb87a1fc
Branch:      stable
User:        ngoldbaum
Date:        2016-08-08 16:46:40+00:00
Summary:     use absolute paths to executables in install script
Affected #:  1 file

diff -r 4c91f634b67a9e4573d7a2166e17c7b8159ad477 -r 6395fb87a1fcb2a4af2eb561573ab7ffdcc34eea doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1429,25 +1429,24 @@
         YT_DEPS+=('netcdf4')   
     fi
     
-    # Here is our dependency list for yt
-    log_cmd conda update --yes conda
+    log_cmd ${DEST_DIR}/bin/conda update --yes conda
     
     log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
     for YT_DEP in "${YT_DEPS[@]}"; do
         echo "Installing $YT_DEP"
-        log_cmd conda install --yes ${YT_DEP}
+        log_cmd ${DEST_DIR}/bin/conda install --yes ${YT_DEP}
     done
 
     if [ $INST_PY3 -eq 1 ]
     then
         echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial
         log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
     fi
 
-    log_cmd pip install python-hglib
+    log_cmd ${DEST_DIR}/bin/pip install python-hglib
 
-    log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
+    log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
     if [ $INST_EMBREE -eq 1 ]
     then
@@ -1474,17 +1473,17 @@
         ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
         log_cmd unzip ${DEST_DIR}/src/master.zip
         pushd ${DEST_DIR}/src/pyembree-master &> /dev/null
-        log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
+        log_cmd ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
         popd &> /dev/null
     fi
 
     if [ $INST_ROCKSTAR -eq 1 ]
     then
         echo "Building Rockstar"
-        ( hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
-        ROCKSTAR_PACKAGE=$(conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
-        log_cmd conda build ${DEST_DIR}/src/yt_conda/rockstar
-        log_cmd conda install $ROCKSTAR_PACKAGE
+        ( ${DEST_DIR}/bin/hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
+        ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
+        log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
+        log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
         ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
     fi
 
@@ -1493,20 +1492,20 @@
     then
         if [ $INST_PY3 -eq 1 ]
         then
-            log_cmd pip install pyx
+            log_cmd ${DEST_DIR}/bin/pip install pyx
         else
-            log_cmd pip install pyx==0.12.1
+            log_cmd ${DEST_DIR}/bin/pip install pyx==0.12.1
         fi
     fi
 
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install -c conda-forge --yes yt
+        log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
@@ -1517,7 +1516,7 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 


https://bitbucket.org/yt_analysis/yt/commits/f2c2eaa875aa/
Changeset:   f2c2eaa875aa
Branch:      stable
User:        xarthisius
Date:        2016-08-12 18:29:23+00:00
Summary:     Backporting PR #2337 https://bitbucket.org/yt_analysis/yt/pull-requests/2337
Affected #:  1 file

diff -r 6395fb87a1fcb2a4af2eb561573ab7ffdcc34eea -r f2c2eaa875aaa4f2fa713d4cd8bc9f9949597e62 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -81,30 +81,38 @@
         self.times = []
         self.suppress_logging = suppress_logging
 
-        # Default fields 
-        
         if fields is None: fields = []
-        fields.append("particle_position_x")
-        fields.append("particle_position_y")
-        fields.append("particle_position_z")
         fields = list(OrderedDict.fromkeys(fields))
 
         if self.suppress_logging:
             old_level = int(ytcfg.get("yt","loglevel"))
             mylog.setLevel(40)
+        
+        fds = {}
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+        idx_field = dd_first._determine_fields("particle_index")[0]
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            fds[field] = dd_first._determine_fields(field)[0]
+
         my_storage = {}
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
             dd = ds.all_data()
-            idx_field = dd._determine_fields("particle_index")[0]
             newtags = dd[idx_field].ndarray_view().astype("int64")
             mask = np.in1d(newtags, indices, assume_unique=True)
-            sorts = np.argsort(newtags[mask])
-            self.array_indices.append(np.where(np.in1d(indices, newtags, assume_unique=True))[0])
+            sort = np.argsort(newtags[mask])
+            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
+            self.array_indices.append(array_indices)
             self.masks.append(mask)
-            self.sorts.append(sorts)
+            self.sorts.append(sort)
+
+            pfields = {}
+            for field in ("particle_position_%s" % ax for ax in "xyz"):
+                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
             sto.result_id = ds.parameter_filename
-            sto.result = ds.current_time
+            sto.result = (ds.current_time, array_indices, pfields)
             pbar.update(i)
         pbar.finish()
 
@@ -112,17 +120,22 @@
             mylog.setLevel(old_level)
 
         times = []
-        for fn, time in sorted(my_storage.items()):
+        for fn, (time, indices, pfields) in sorted(my_storage.items()):
             times.append(time)
-
         self.times = self.data_series[0].arr([time for time in times], times[0].units)
 
         self.particle_fields = []
+        output_field = np.empty((self.num_indices, self.num_steps))
+        output_field.fill(np.nan)
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfields[field]
+            self.field_data[field] = array_like_field(
+                dd_first, output_field.copy(), fds[field])
+            self.particle_fields.append(field)
 
         # Instantiate fields the caller requested
-
-        for field in fields:
-            self._get_data(field)
+        self._get_data(fields)
 
     def has_key(self, key):
         return (key in self.field_data)
@@ -137,7 +150,7 @@
         if key == "particle_time":
             return self.times
         if key not in self.field_data:
-            self._get_data(key)
+            self._get_data([key])
         return self.field_data[key]
     
     def __setitem__(self, key, val):
@@ -188,65 +201,89 @@
         >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.add_fields(["particle_mass", "particle_gpot"])
         """
-        for field in fields:
-            if field not in self.field_data:
-                self._get_data(field)
+        self._get_data(fields)
                 
-    def _get_data(self, field):
+    def _get_data(self, fields):
         """
-        Get a field to include in the trajectory collection.
+        Get a list of fields to include in the trajectory collection.
         The trajectory collection itself is a dict of 2D numpy arrays,
         with shape (num_indices, num_steps)
         """
-        if field not in self.field_data:
-            if self.suppress_logging:
-                old_level = int(ytcfg.get("yt","loglevel"))
-                mylog.setLevel(40)
-            ds_first = self.data_series[0]
-            dd_first = ds_first.all_data()
-            fd = dd_first._determine_fields(field)[0]
+
+        missing_fields = [field for field in fields
+                          if field not in self.field_data]
+        if not missing_fields:
+            return
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+
+        fds = {}
+        new_particle_fields = []
+        for field in missing_fields:
+            fds[field] = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
-                if self.data_series[0]._get_field_info(*fd).particle_type:
+                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
                     self.particle_fields.append(field)
-            particles = np.empty((self.num_indices,self.num_steps))
-            particles[:] = np.nan
-            step = int(0)
-            pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
-            my_storage={}
-            for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-                mask = self.masks[i]
-                sort = self.sorts[i]
-                if field in self.particle_fields:
+                    new_particle_fields.append(field)
+                    
+
+        grid_fields = [field for field in missing_fields
+                       if field not in self.particle_fields]
+        step = int(0)
+        pbar = get_pbar("Generating [%s] fields in trajectories." %
+                        ", ".join(missing_fields), self.num_steps)
+        my_storage = {}
+        
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            mask = self.masks[i]
+            sort = self.sorts[i]
+            pfield = {}
+
+            if new_particle_fields:  # there's at least one particle field
+                dd = ds.all_data()
+                for field in new_particle_fields:
                     # This is easy... just get the particle fields
-                    dd = ds.all_data()
-                    pfield = dd[fd].ndarray_view()[mask][sort]
-                else:
-                    # This is hard... must loop over grids
-                    pfield = np.zeros((self.num_indices))
-                    x = self["particle_position_x"][:,step].ndarray_view()
-                    y = self["particle_position_y"][:,step].ndarray_view()
-                    z = self["particle_position_z"][:,step].ndarray_view()
-                    # This will fail for non-grid index objects
-                    particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
-                    for grid in particle_grids:
-                        cube = grid.retrieve_ghost_zones(1, [fd])
-                        CICSample_3(x,y,z,pfield,
+                    pfield[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
+            if grid_fields:
+                # This is hard... must loop over grids
+                for field in grid_fields:
+                    pfield[field] = np.zeros((self.num_indices))
+                x = self["particle_position_x"][:,step].ndarray_view()
+                y = self["particle_position_y"][:,step].ndarray_view()
+                z = self["particle_position_z"][:,step].ndarray_view()
+                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
+
+                # This will fail for non-grid index objects
+                for grid in particle_grids:
+                    cube = grid.retrieve_ghost_zones(1, grid_fields)
+                    for field in grid_fields:
+                        CICSample_3(x, y, z, pfield[field],
                                     self.num_indices,
-                                    cube[fd],
+                                    cube[fds[field]],
                                     np.array(grid.LeftEdge).astype(np.float64),
                                     np.array(grid.ActiveDimensions).astype(np.int32),
                                     grid.dds[0])
-                sto.result_id = ds.parameter_filename
-                sto.result = (self.array_indices[i], pfield)
-                pbar.update(step)
-                step += 1
-            pbar.finish()
+            sto.result_id = ds.parameter_filename
+            sto.result = (self.array_indices[i], pfield)
+            pbar.update(step)
+            step += 1
+        pbar.finish()
+
+        output_field = np.empty((self.num_indices,self.num_steps))
+        output_field.fill(np.nan)
+        for field in missing_fields:
+            fd = fds[field]
             for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
-                particles[indices,i] = pfield
-            self.field_data[field] = array_like_field(dd_first, particles, fd)
-            if self.suppress_logging:
-                mylog.setLevel(old_level)
-        return self.field_data[field]
+                output_field[indices, i] = pfield[field]
+            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
 
     def trajectory_from_index(self, index):
         """


https://bitbucket.org/yt_analysis/yt/commits/d12d6cd8f097/
Changeset:   d12d6cd8f097
Branch:      stable
User:        xarthisius
Date:        2016-08-12 20:55:32+00:00
Summary:     Drop embedded_webm_animation cookbook recipe
Affected #:  3 files

diff -r f2c2eaa875aaa4f2fa713d4cd8bc9f9949597e62 -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r f2c2eaa875aaa4f2fa713d4cd8bc9f9949597e62 -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r f2c2eaa875aaa4f2fa713d4cd8bc9f9949597e62 -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper


https://bitbucket.org/yt_analysis/yt/commits/d15463a16bb9/
Changeset:   d15463a16bb9
Branch:      stable
User:        xarthisius
Date:        2016-08-24 18:53:37+00:00
Summary:     Backporting PR #2341 https://bitbucket.org/yt_analysis/yt/pull-requests/2341
Affected #:  5 files

diff -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 -r d15463a16bb942443b58617444fbe8e9a3c1fd1f yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -1131,12 +1131,15 @@
                                        mask, sample_values = None,
                                        sample_type = "face",
                                        no_ghost = False):
-        vals = grid.get_vertex_centered_data(field, no_ghost = no_ghost)
+        # TODO: check if multiple fields can be passed here
+        vals = grid.get_vertex_centered_data([field], no_ghost=no_ghost)[field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
+            # TODO: is no_ghost=False correct here?
+            svals = grid.get_vertex_centered_data([sample_values])[sample_values]
         else:
             svals = None
-        sample_type = {"face":1, "vertex":2}[sample_type]
+
+        sample_type = {"face": 1, "vertex": 2}[sample_type]
         my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
                                     grid.dds, svals, sample_type)
         return my_verts
@@ -1208,15 +1211,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask,
             field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(self.surface_field)
+
+        vc_fields = [self.surface_field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[self.surface_field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f)
-                      for f in [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(
+            self.field_value, vc_data[self.surface_field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     @property
     def triangles(self):

diff -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 -r d15463a16bb942443b58617444fbe8e9a3c1fd1f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1595,13 +1595,18 @@
 
     def _extract_isocontours_from_grid(self, grid, mask, field, value,
                                        sample_values=None):
-        vals = grid.get_vertex_centered_data(field, no_ghost=False)
+        vc_fields = [field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
-        else:
+            vc_fields.append(sample_values)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False)
+        try:
+            svals = vc_data[sample_values]
+        except KeyError:
             svals = None
-        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
-                                    grid.dds, svals)
+
+        my_verts = march_cubes_grid(value, vc_data[field], mask,
+            grid.LeftEdge, grid.dds, svals)
         return my_verts
 
     def calculate_isocontour_flux(self, field, value,
@@ -1673,15 +1678,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(field)
+        
+        vc_fields = [field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
+
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
-                     [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(value, vc_data[field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                log_space=True, cumulative=True):

diff -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 -r d15463a16bb942443b58617444fbe8e9a3c1fd1f yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,8 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import warnings
 import weakref
 import numpy as np
+from six import string_types
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
@@ -251,33 +253,50 @@
         cube._base_grid = self
         return cube
 
-    def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
+    def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False):
+        _old_api = isinstance(fields, (string_types, tuple))
+        if _old_api:
+            message = (
+                'get_vertex_centered_data() requires list of fields, rather than '
+                'a single field as an argument.'
+            )
+            warnings.warn(message, DeprecationWarning, stacklevel=2)
+            fields = [fields]
+
+        # Make sure the field list has only unique entries
+        fields = list(set(fields))
+        new_fields = {}
+        for field in fields:
+            new_fields[field] = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
-            # Ensure we have the native endianness in this array.  Avoid making
-            # a copy if possible.
-            old_field = np.asarray(self[field], dtype="=f8")
-            # We'll use the ghost zone routine, which will naturally
-            # extrapolate here.
-            input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
-            output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
-            # rf = 1 here
-            ghost_zone_interpolate(1, old_field, input_left,
-                                   new_field, output_left)
+            for field in fields:
+                # Ensure we have the native endianness in this array.  Avoid making
+                # a copy if possible.
+                old_field = np.asarray(self[field], dtype="=f8")
+                # We'll use the ghost zone routine, which will naturally
+                # extrapolate here.
+                input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
+                output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
+                # rf = 1 here
+                ghost_zone_interpolate(1, old_field, input_left,
+                                       new_fields[field], output_left)
         else:
-            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            np.multiply(new_field, 0.125, new_field)
+            cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
+            for field in fields:
+                np.add(new_fields[field], cg[field][1: ,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field])
+                np.multiply(new_fields[field], 0.125, new_fields[field])
 
-        return new_field
+        if _old_api:
+            return new_fields[fields[0]]
+        return new_fields
 
     def select_icoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
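
For callers, the practical change is the return type (a minimal sketch using yt's testing helpers; the field names follow the new test below):

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    g = ds.index.grids[0]

    # New API: pass a list of fields, get back a dict of vertex-centered arrays.
    vc = g.get_vertex_centered_data([('gas', 'density')], no_ghost=True)
    density = vc[('gas', 'density')]

    # Old API: a single field still works but now emits a DeprecationWarning.
    density_old = g.get_vertex_centered_data('density', no_ghost=True)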

diff -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 -r d15463a16bb942443b58617444fbe8e9a3c1fd1f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -304,10 +304,13 @@
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
+            vcd = grid.get_vertex_centered_data(self.fields, smoothed=True,
+                                                no_ghost=self.no_ghost)
             for i, field in enumerate(self.fields):
-                vcd = grid.get_vertex_centered_data(field, smoothed=True, no_ghost=self.no_ghost).astype('float64')
-                if self.log_fields[i]: vcd = np.log10(vcd)
-                dds.append(vcd)
+                if self.log_fields[i]:
+                    dds.append(np.log10(vcd[field].astype('float64')))
+                else:
+                    dds.append(vcd[field].astype('float64'))
                 self.current_saved_grids.append(grid)
                 self.current_vcds.append(dds)
 

diff -r d12d6cd8f0974ba2fa1b82bef928d2140c5f7168 -r d15463a16bb942443b58617444fbe8e9a3c1fd1f yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -1,3 +1,4 @@
+import warnings
 import numpy as np
 
 from yt.testing import \
@@ -74,10 +75,10 @@
     ds = fake_random_ds(16)
 
     g = ds.index.grids[0]
+    vec = g.get_vertex_centered_data(['x', 'y', 'z'], no_ghost=True)
     for i, ax in enumerate('xyz'):
         xc = g[ax]
 
-        xv = g.get_vertex_centered_data(ax, no_ghost=True)
         tf = lin.TrilinearFieldInterpolator(xc,
                 (g.LeftEdge[0] + g.dds[0]/2.0,
                     g.RightEdge[0] - g.dds[0]/2.0,
@@ -97,6 +98,22 @@
                                   xz, np.array([0.0, 0.0, 0.0], dtype="f8"))
 
         ii = (lx, ly, lz)[i]
-        yield assert_array_equal, ii, xv
+        yield assert_array_equal, ii, vec[ax]
         yield assert_array_equal, ii, xi
         yield assert_array_equal, ii, xz
+
+
+def test_get_vertex_centered_data():
+    ds = fake_random_ds(16)
+    g = ds.index.grids[0]
+
+    vec_list = g.get_vertex_centered_data([('gas', 'density')], no_ghost=True)
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter('always')
+        vec_str = g.get_vertex_centered_data('density', no_ghost=True)
+        assert len(w) == 1
+        assert issubclass(w[-1].category, DeprecationWarning)
+        assert 'requires list of fields' in str(w[-1].message)
+    vec_tuple = g.get_vertex_centered_data(('gas', 'density'), no_ghost=True) 
+    assert_array_equal(vec_list[('gas', 'density')], vec_str)
+    assert_array_equal(vec_list[('gas', 'density')], vec_tuple)


https://bitbucket.org/yt_analysis/yt/commits/448305a014fd/
Changeset:   448305a014fd
Branch:      stable
User:        ngoldbaum
Date:        2016-09-23 17:51:20+00:00
Summary:     Backporting PR #2362 https://bitbucket.org/yt_analysis/yt/pull-requests/2362
Affected #:  3 files

diff -r d15463a16bb942443b58617444fbe8e9a3c1fd1f -r 448305a014fd00723fba05e384c9219d2a2d8e14 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1497,10 +1497,13 @@
     Parameters
     ----------
     octree_mask : np.ndarray[uint8_t]
-        This is a depth-first refinement mask for an Octree.  It should be of
-        size n_octs * 8, where each item is 1 for an oct-cell being refined and
-        0 for it not being refined.  Note that for over_refine_factors != 1,
-        the children count will still be 8, so this is always 8.
+        This is a depth-first refinement mask for an Octree.  It should be 
+        of size n_octs * 8 (but see note about the root oct below), where 
+        each item is 1 for an oct-cell being refined and 0 for it not being
+        refined.  For over_refine_factors != 1, the children count will 
+        still be 8, so there will still be n_octs * 8 entries. Note that if 
+        the root oct is not refined, there will be only one entry
+        for the root, so the size of the mask will be (n_octs - 1)*8 + 1.
     data : dict
         A dictionary of 1D arrays.  Note that these must be of the size of the
         number of "False" values in the ``octree_mask``.
@@ -1522,8 +1525,29 @@
         Determines whether the data will be treated as periodic along
         each axis
     partial_coverage : boolean
-        Whether or not an oct can be refined cell-by-cell, or whether all 8 get
-        refined.
+        Whether or not an oct can be refined cell-by-cell, or whether all 
+        8 get refined.
+
+    Example
+    -------
+
+    >>> import yt
+    >>> import numpy as np
+    >>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8,
+    ...             0, 0, 0, 0, 0, 0, 0, 0,
+    ...             8, 0, 0, 0, 0, 0, 0, 0,
+    ...             0]
+    >>>
+    >>> octree_mask = np.array(oct_mask, dtype=np.uint8)
+    >>> quantities = {}
+    >>> quantities['gas', 'density'] = np.random.random((22, 1))
+    >>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]])
+    >>>
+    >>> ds = yt.load_octree(octree_mask=octree_mask,
+    ...                     data=quantities,
+    ...                     bbox=bbox,
+    ...                     over_refine_factor=0,
+    ...                     partial_coverage=0)
 
     """
 

diff -r d15463a16bb942443b58617444fbe8e9a3c1fd1f -r 448305a014fd00723fba05e384c9219d2a2d8e14 yt/frontends/stream/tests/test_stream_octree.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_octree.py
@@ -0,0 +1,26 @@
+import yt
+import numpy as np
+
+OCT_MASK_LIST = [8, 0, 0, 0, 0, 8, 0, 0,
+                 0, 0, 0, 0, 0, 0, 0, 0,
+                 8, 0, 0, 0, 0, 0, 0, 0,
+                 0]
+
+
+def test_octree():
+    # See Issue #1272
+    octree_mask = np.array(OCT_MASK_LIST, dtype=np.uint8)
+
+    quantities = {}
+    quantities[('gas', 'density')] = np.ones((22, 1), dtype=float)
+
+    bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]])
+
+    ds = yt.load_octree(octree_mask=octree_mask,
+                        data=quantities,
+                        bbox=bbox,
+                        over_refine_factor=0,
+                        partial_coverage=0)
+
+    proj = ds.proj('density', 'x')
+    proj['density']
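
A quick sanity check on masks like this one (a sketch; the rule that data arrays carry one value per zero entry comes from the load_octree docstring above):

    import numpy as np

    OCT_MASK_LIST = [8, 0, 0, 0, 0, 8, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0,
                     8, 0, 0, 0, 0, 0, 0, 0,
                     0]
    mask = np.array(OCT_MASK_LIST, dtype=np.uint8)
    n_leaf = int((mask == 0).sum())
    print(n_leaf)  # 22, matching the (22, 1) shape of the density array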

diff -r d15463a16bb942443b58617444fbe8e9a3c1fd1f -r 448305a014fd00723fba05e384c9219d2a2d8e14 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -100,7 +100,7 @@
 
 cdef class FillFileIndicesO(OctVisitor):
     cdef np.uint8_t[:] levels
-    cdef np.uint8_t[:] file_inds
+    cdef np.int64_t[:] file_inds
     cdef np.uint8_t[:] cell_inds
 
 cdef class FillFileIndicesR(OctVisitor):


https://bitbucket.org/yt_analysis/yt/commits/3fe27d35c3f4/
Changeset:   3fe27d35c3f4
Branch:      stable
User:        MatthewTurk
Date:        2016-09-09 20:40:52+00:00
Summary:     Backporting PR #2370 https://bitbucket.org/yt_analysis/yt/pull-requests/2370
Affected #:  1 file

diff -r 448305a014fd00723fba05e384c9219d2a2d8e14 -r 3fe27d35c3f47b25bb62b39dac02d4ce0eb1e01b yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -321,8 +321,12 @@
         tot_parts = np.sum(list(data_file.total_particles.values()))
         endian = data_file.ds.endian
         self._aux_pdtypes = {}
-        self._aux_fields = [f.rsplit('.')[-1]
-                            for f in glob.glob(data_file.filename + '.*')]
+        self._aux_fields = []
+        for f in glob.glob(data_file.filename + '.*'):
+            afield = f.rsplit('.')[-1]
+            filename = data_file.filename + '.' + afield
+            if not os.path.exists(filename): continue
+            self._aux_fields.append(afield)
         for afield in self._aux_fields:
             filename = data_file.filename + '.' + afield
             # We need to do some fairly ugly detection to see what format the
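
One way the glob result and the reconstructed file name can disagree (a hypothetical layout; these file names are illustrative, not from the PR):

    import glob
    import os

    base = 'galaxy.00300'  # hypothetical Tipsy output
    for f in glob.glob(base + '.*'):
        afield = f.rsplit('.')[-1]
        # 'galaxy.00300.amiga.grp' yields afield 'grp', but 'galaxy.00300.grp'
        # may not exist, so such entries are now skipped instead of failing later.
        if not os.path.exists(base + '.' + afield):
            continue
        print('aux field:', afield)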


https://bitbucket.org/yt_analysis/yt/commits/c00cdd221c2b/
Changeset:   c00cdd221c2b
Branch:      stable
User:        brittonsmith
Date:        2016-09-15 10:15:02+00:00
Summary:     Backporting PR #2373 https://bitbucket.org/yt_analysis/yt/pull-requests/2373
Affected #:  1 file

diff -r 3fe27d35c3f47b25bb62b39dac02d4ce0eb1e01b -r c00cdd221c2bd90315e731a2670d75bcb80d1f97 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -19,6 +19,7 @@
 
 import matplotlib
 import numpy as np
+import re
 
 from distutils.version import LooseVersion
 from functools import wraps
@@ -1953,11 +1954,14 @@
         # If we're annotating the redshift, put it in the correct format
         if self.redshift:
             try:
-                z = np.abs(plot.data.ds.current_redshift)
+                z = plot.data.ds.current_redshift
             except AttributeError:
                 raise AttributeError("Dataset does not have current_redshift. "
                                      "Set redshift=False.")
+            # Replace instances of -0.0* with 0.0* to avoid
+            # negative null redshifts (e.g., "-0.00").
             self.text += self.redshift_format.format(redshift=float(z))
+            self.text = re.sub('-(0.0*)$', "\g<1>", self.text)
 
         # This is just a fancy wrapper around the TextLabelCallback
         tcb = TextLabelCallback(self.pos, self.text,
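
The substitution in isolation (a quick illustration of the regex added above):

    import re

    text = 't = 1.0 Gyr, z = -0.00'
    print(re.sub('-(0.0*)$', r'\g<1>', text))  # 't = 1.0 Gyr, z = 0.00'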


https://bitbucket.org/yt_analysis/yt/commits/98e6965ec7db/
Changeset:   98e6965ec7db
Branch:      stable
User:        ngoldbaum
Date:        2016-09-16 02:25:21+00:00
Summary:     Avoid a zero division error in the volume renderer by using C division
Affected #:  2 files

diff -r c00cdd221c2bd90315e731a2670d75bcb80d1f97 -r 98e6965ec7db18dc6f2ca3025e769b893cc0a042 yt/utilities/lib/lenses.pxd
--- a/yt/utilities/lib/lenses.pxd
+++ b/yt/utilities/lib/lenses.pxd
@@ -45,8 +45,8 @@
     np.float64_t *y_vec
 
 
-ctypedef void calculate_extent_function(ImageContainer *image,
-            VolumeContainer *vc, np.int64_t rv[4]) nogil
+ctypedef int calculate_extent_function(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil except -1
 
 ctypedef void generate_vector_info_function(ImageContainer *im,
             np.int64_t vi, np.int64_t vj,

diff -r c00cdd221c2bd90315e731a2670d75bcb80d1f97 -r 98e6965ec7db18dc6f2ca3025e769b893cc0a042 yt/utilities/lib/lenses.pyx
--- a/yt/utilities/lib/lenses.pyx
+++ b/yt/utilities/lib/lenses.pyx
@@ -20,8 +20,9 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-cdef void calculate_extent_plane_parallel(ImageContainer *image,
-            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+@cython.cdivision(True)
+cdef int calculate_extent_plane_parallel(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil except -1:
     # We do this for all eight corners
     cdef np.float64_t temp
     cdef np.float64_t *edges[2]
@@ -53,11 +54,13 @@
     rv[1] = rv[0] + lrint((extrema[1] - extrema[0])/image.pdx)
     rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
     rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
+    return 0
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-cdef void calculate_extent_perspective(ImageContainer *image,
-            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+@cython.cdivision(True)
+cdef int calculate_extent_perspective(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil except -1:
 
     cdef np.float64_t cam_pos[3]
     cdef np.float64_t cam_width[3]
@@ -156,19 +159,21 @@
     rv[1] = min(max_px, image.nv[0])
     rv[2] = max(min_py, 0)
     rv[3] = min(max_py, image.nv[1])
-
+    return 0
 
 # We do this for a bunch of lenses.  Fallback is to grab them from the vector
 # info supplied.
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-cdef void calculate_extent_null(ImageContainer *image,
-            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+@cython.cdivision(True)
+cdef int calculate_extent_null(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil except -1:
     rv[0] = 0
     rv[1] = image.nv[0]
     rv[2] = 0
     rv[3] = image.nv[1]
+    return 0
 
 @cython.boundscheck(False)
 @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/8da4789354c2/
Changeset:   8da4789354c2
Branch:      stable
User:        BW Keller
Date:        2016-09-16 19:40:31+00:00
Summary:     Fixed two small issues that prevented a successful build and nosetests on i386
Affected #:  2 files

diff -r 98e6965ec7db18dc6f2ca3025e769b893cc0a042 -r 8da4789354c224a038cd348c92a9b1c4eba55c68 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -227,8 +227,7 @@
         # Note that what we will be providing to our processing functions will
         # actually be indirectly-sorted fields.  This preserves memory at the
         # expense of additional pointer lookups.
-        pind = np.argsort(pdoms)
-        pind = np.asarray(pind, dtype='int64', order='C')
+        pind = np.asarray(np.argsort(pdoms), dtype='int64', order='C')
         # So what this means is that we now have all the oct-0 particle indices
         # in order, then the oct-1, etc etc.
         # This now gives us the indices to the particles for each domain.

diff -r 98e6965ec7db18dc6f2ca3025e769b893cc0a042 -r 8da4789354c224a038cd348c92a9b1c4eba55c68 yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -55,7 +55,7 @@
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
-    yield assert_array_equal, tfi(fv), random_data
+    yield assert_array_almost_equal, tfi(fv), random_data
 
     # randomly spaced bins
     size = 64


https://bitbucket.org/yt_analysis/yt/commits/932621c3264b/
Changeset:   932621c3264b
Branch:      stable
User:        jisuoqing
Date:        2016-09-16 23:26:18+00:00
Summary:     Fix zoom for perspective lens
Affected #:  1 file

diff -r 8da4789354c224a038cd348c92a9b1c4eba55c68 -r 932621c3264b571128c7e04a308877d7d4878ed2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -709,7 +709,7 @@
 
         """
 
-        self.set_width(self.width / factor)
+        self.width[:2] = self.width[:2] / factor
 
     def iter_zoom(self, final, n_steps):
         r"""Loop over a iter_zoom and return snapshots along the way.


https://bitbucket.org/yt_analysis/yt/commits/c96eb579389f/
Changeset:   c96eb579389f
Branch:      stable
User:        xarthisius
Date:        2016-09-19 19:26:26+00:00
Summary:     Disable randomly failing test_mesh_slices
Affected #:  1 file

diff -r 932621c3264b571128c7e04a308877d7d4878ed2 -r c96eb579389fd20eabff2c1ce6e9afe882300379 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -78,6 +78,7 @@
 other_tests:
   unittests:
      - '-v'
+     - '--exclude=test_mesh_slices'  # disable randomly failing test
   cookbook:
      - '-v'
      - 'doc/source/cookbook/tests/test_cookbook.py'


https://bitbucket.org/yt_analysis/yt/commits/04027cc4fe55/
Changeset:   04027cc4fe55
Branch:      stable
User:        ngoldbaum
Date:        2016-09-21 03:15:12+00:00
Summary:     Don't cast ndarrays or ndarray subclasses in the ds.box implementation

Instead handle this in the region initializer
Affected #:  2 files

diff -r c96eb579389fd20eabff2c1ce6e9afe882300379 -r 04027cc4fe559455ec4b6d5af7ab0acbe10e3679 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -780,8 +780,14 @@
         without having to specify a *center* value.  It assumes the center
         is the midpoint between the left_edge and right_edge.
         """
-        left_edge = np.array(left_edge)
-        right_edge = np.array(right_edge)
+        # we handle units in the region data object
+        # but need to check if left_edge or right_edge is a
+        # list or other non-array iterable before calculating
+        # the center
+        if not isinstance(left_edge, np.ndarray):
+            left_edge = np.array(left_edge)
+        if not isinstance(right_edge, np.ndarray):
+            right_edge = np.array(right_edge)
         c = (left_edge + right_edge)/2.0
         return self.region(c, left_edge, right_edge, **kwargs)
 

diff -r c96eb579389fd20eabff2c1ce6e9afe882300379 -r 04027cc4fe559455ec4b6d5af7ab0acbe10e3679 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -1,3 +1,5 @@
+import numpy as np
+
 from yt.testing import \
     assert_equal, \
     fake_amr_ds, \
@@ -6,6 +8,25 @@
 
 # This will test the "dataset access" method.
 
+def test_box_creation():
+    ds = fake_random_ds(32, length_unit=2)
+    left_edge = ds.arr([0.2, 0.2, 0.2], 'cm')
+    right_edge = ds.arr([0.6, 0.6, 0.6], 'cm')
+    center = (left_edge + right_edge)/2
+
+    boxes = [
+        ds.box(left_edge, right_edge),
+        ds.box(0.5*np.array(left_edge), 0.5*np.array(right_edge)),
+        ds.box((0.5*left_edge).tolist(), (0.5*right_edge).tolist())
+    ]
+
+    region = ds.region(center, left_edge, right_edge)
+
+    for b in boxes:
+        assert_equal(b.left_edge, region.left_edge)
+        assert_equal(b.right_edge, region.right_edge)
+        assert_equal(b.center, region.center)
+
 def test_region_from_d():
     ds = fake_amr_ds(fields=["density"])
     # We'll do a couple here


https://bitbucket.org/yt_analysis/yt/commits/2bbac65bea19/
Changeset:   2bbac65bea19
Branch:      stable
User:        atmyers
Date:        2016-09-22 00:09:50+00:00
Summary:     Need to use cdivision here to avoid divide-by-zero errors
Affected #:  1 file

diff -r 04027cc4fe559455ec4b6d5af7ab0acbe10e3679 -r 2bbac65bea19ff3065bacfd821d2c2f808b94d56 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -499,6 +499,7 @@
         p.points[p.count][j] = p0[j] + vec[j] * t
     p.count += 1
 
+@cython.cdivision(True)
 def triangle_plane_intersect(int ax, np.float64_t coord,
                              np.ndarray[np.float64_t, ndim=3] triangles):
     cdef np.float64_t p0[3]


https://bitbucket.org/yt_analysis/yt/commits/0bad82d11055/
Changeset:   0bad82d11055
Branch:      stable
User:        ngoldbaum
Date:        2016-09-22 04:23:05+00:00
Summary:     Install netcdf4 by default in the install script
Affected #:  1 file

diff -r 2bbac65bea19ff3065bacfd821d2c2f808b94d56 -r 0bad82d110550d88b823eaae3a6a3ec87a1397bd doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -75,7 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
-INST_NETCDF4=0  # Install netcdf4 and its python bindings?
+INST_NETCDF4=1  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -535,9 +535,9 @@
     if [ $INST_CONDA -eq 0 ]
     then
         echo "This script can only install netcdf4 through conda."
-        echo "Please set INST_CONDA to 1"
-        echo "and re-run the install script"
-        exit 1
+        echo "Please set INST_CONDA to 1 to install netcdf4"
+        echo "Setting INST_NETCDF4=0"
+        INST_NETCDF4=0
     fi
 fi
 


https://bitbucket.org/yt_analysis/yt/commits/70d7d453a218/
Changeset:   70d7d453a218
Branch:      stable
User:        ngoldbaum
Date:        2016-09-25 22:48:01+00:00
Summary:     Updating recommendations about yielding asserts
Affected #:  1 file

diff -r 0bad82d110550d88b823eaae3a6a3ec87a1397bd -r 70d7d453a2183365b3361510862e274f4862a673 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -32,10 +32,10 @@
 
 Unit tests are tests that operate on some small set of machinery, and verify
 that the machinery works.  yt uses the `Nose
-<http://nose.readthedocs.org/en/latest/>`_ framework for running unit tests.
-In practice, what this means is that we write scripts that ``yield``
-assertions, and Nose identifies those scripts, runs them, and verifies that the
-assertions are true.
+<http://nose.readthedocs.org/en/latest/>`_ framework for running unit tests.  In
+practice, what this means is that we write scripts containing assert statements, and
+Nose identifies those scripts, runs them, and verifies that the assertions are
+true and the code runs without crashing.
 
 How to Run the Unit Tests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -104,8 +104,9 @@
    functionality and should also verify that the results are correct using
    assert statements or functions.  
 #. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
-   ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
-   captured by nose as a test that asserts that 1.0 is equal to 1.0.
+   ``argument_two``, etc.  For example ``yield my_test, 'banana', 2.0`` would be
+   captured by nose and the ``my_test`` function will be run with the provided
+   arguments.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
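
In the new style, the yielded function does the asserting itself (a sketch of the pattern described above; check_double is an illustrative name):

    from yt.testing import assert_equal

    def check_double(a, b):
        assert_equal(2 * a, b)

    def test_doubling():
        # Nose runs check_double(1, 2) and check_double(3, 6) as separate tests.
        for a, b in [(1, 2), (3, 6)]:
            yield check_double, a, b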


https://bitbucket.org/yt_analysis/yt/commits/d24b64a35957/
Changeset:   d24b64a35957
Branch:      stable
User:        brittonsmith
Date:        2016-09-27 07:28:16+00:00
Summary:     Adding mention to ytree in halo merger tree docs.
Affected #:  1 file

diff -r 70d7d453a2183365b3361510862e274f4862a673 -r d24b64a3595770ffdcc2f9ca1b8e9e3fd590fec5 doc/source/analyzing/analysis_modules/halo_merger_tree.rst
--- a/doc/source/analyzing/analysis_modules/halo_merger_tree.rst
+++ b/doc/source/analyzing/analysis_modules/halo_merger_tree.rst
@@ -3,4 +3,15 @@
 Halo Merger Tree
 ================
 
-.. note:: As of :code:`yt-3.0`, the halo merger tree functionality has been removed to be replaced by machinery that works with the ``HaloCatalog`` object.  In the mean time, this functionality can still be found in :code:`yt-2.x`.
+The ``yt`` merger tree was removed as of :code:`yt-3.0`.  This
+functionality can still be found in :code:`yt-2.x`.  However,
+the recommended option is to use the
+`ytree <http://ytree.readthedocs.io>`_ package, which can be
+installed via pip:
+
+.. code-block:: bash
+
+    pip install ytree
+
+For more information on ``ytree``, see the documentation
+`here <http://ytree.readthedocs.io>`__.


https://bitbucket.org/yt_analysis/yt/commits/55bc6a9d4af2/
Changeset:   55bc6a9d4af2
Branch:      stable
User:        ngoldbaum
Date:        2016-09-27 21:21:39+00:00
Summary:     Backporting PR #2395 https://bitbucket.org/yt_analysis/yt/pull-requests/2395
Affected #:  2 files

diff -r d24b64a3595770ffdcc2f9ca1b8e9e3fd590fec5 -r 55bc6a9d4af29adc86a461c7f622a28e6ed5087d doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -68,7 +68,7 @@
 
 .. code-block:: bash
 
-   $ nosetests visualization/tests/test_plotwindow.py
+   $ nosetests yt/visualization/tests/test_plotwindow.py
 
 How to Write Unit Tests
 ^^^^^^^^^^^^^^^^^^^^^^^
@@ -309,7 +309,7 @@
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy yt.frontends.tipsy
 
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
@@ -321,7 +321,7 @@
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy yt.frontends.tipsy
 
 The results from a nose testing session are pretty straightforward to
 understand, the results for each test are printed directly to STDOUT.  If a test
@@ -334,7 +334,7 @@
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data frontends.owls
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data yt.frontends.owls
 
 
 How to Write Answer Tests

diff -r d24b64a3595770ffdcc2f9ca1b8e9e3fd590fec5 -r 55bc6a9d4af29adc86a461c7f622a28e6ed5087d setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,7 +4,6 @@
 
 [nosetests]
 detailed-errors=1
-where=yt
 exclude=answer_testing
 with-xunit=1
 


https://bitbucket.org/yt_analysis/yt/commits/65d11cab4895/
Changeset:   65d11cab4895
Branch:      stable
User:        john_regan
Date:        2016-09-28 16:23:22+00:00
Summary:     Updating Enzo Front End
Affected #:  1 file

diff -r 55bc6a9d4af29adc86a461c7f622a28e6ed5087d -r 65d11cab4895585618908239b4af5f244fab627a yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -33,6 +33,7 @@
     'H2I'     : 'H2',
     'H2II'    : 'H2_p1',
     'HM'      : 'H_m1',
+    'HeH'     : 'HeH',
     'DI'      : 'D',
     'DII'     : 'D_p1',
     'HDI'     : 'HD',
@@ -57,6 +58,8 @@
         ("HeI_kph", ("1/code_time", [], None)),
         ("HeII_kph", ("1/code_time", [], None)),
         ("H2I_kdiss", ("1/code_time", [], None)),
+        ("HM_kph", ("1/code_time", [], None)),
+        ("H2II_kdiss", ("1/code_time", [], None)),
         ("Bx", (b_units, [], None)),
         ("By", (b_units, [], None)),
         ("Bz", (b_units, [], None)),
@@ -70,7 +73,7 @@
         ("y-velocity", (vel_units, ["velocity_y"], None)),
         ("z-velocity", (vel_units, ["velocity_z"], None)),
         ("RaySegments", ("", ["ray_segments"], None)),
-        ("PhotoGamma", (ra_units, ["photo_gamma"], None)),
+        ("PhotoGamma", ("eV/code_time", ["photo_gamma"], None)),
         ("PotentialField", ("code_velocity**2", ["gravitational_potential"], None)),
         ("Density", (rho_units, ["density"], None)),
         ("Metal_Density", (rho_units, ["metal_density"], None)),


https://bitbucket.org/yt_analysis/yt/commits/a4531f19c98a/
Changeset:   a4531f19c98a
Branch:      stable
User:        ngoldbaum
Date:        2016-09-28 20:29:06+00:00
Summary:     Don't use /apjs macro in citation examples. Closes #1286
Affected #:  3 files

diff -r 65d11cab4895585618908239b4af5f244fab627a -r a4531f19c98a7ff4bad9ff8bf5e582ca35fcfb04 CITATION
--- a/CITATION
+++ b/CITATION
@@ -9,13 +9,13 @@
 For LaTex and BibTex users:
 
 \bibitem[Turk et al.(2011)]{2011ApJS..192....9T} Turk, M.~J., Smith, B.~D.,
-Oishi, J.~S., et al.\ 2011, \apjs, 192, 9
+Oishi, J.~S., et al.\ 2011, The Astrophysical Journal Supplement Series, 192, 9
 
 @ARTICLE{2011ApJS..192....9T,
    author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
 {Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
     title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
-  journal = {\apjs},
+  journal = {The Astrophysical Journal Supplement Series},
 archivePrefix = "arXiv",
    eprint = {1011.3514},
  primaryClass = "astro-ph.IM",

diff -r 65d11cab4895585618908239b4af5f244fab627a -r a4531f19c98a7ff4bad9ff8bf5e582ca35fcfb04 doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -73,7 +73,7 @@
       author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
    	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
        title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
-     journal = {\apjs},
+     journal = {The Astrophysical Journal Supplement Series},
    archivePrefix = "arXiv",
       eprint = {1011.3514},
     primaryClass = "astro-ph.IM",
@@ -81,7 +81,8 @@
         year = 2011,
        month = jan,
       volume = 192,
-       pages = {9-+},
+         eid = {9},
+       pages = {9},
          doi = {10.1088/0067-0049/192/1/9},
       adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
      adsnote = {Provided by the SAO/NASA Astrophysics Data System}

diff -r 65d11cab4895585618908239b4af5f244fab627a -r a4531f19c98a7ff4bad9ff8bf5e582ca35fcfb04 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -486,7 +486,7 @@
       author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
    	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
        title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
-     journal = {\apjs},
+     journal = {The Astrophysical Journal Supplement Series},
    archivePrefix = "arXiv",
       eprint = {1011.3514},
     primaryClass = "astro-ph.IM",
@@ -494,7 +494,8 @@
         year = 2011,
        month = jan,
       volume = 192,
-       pages = {9-+},
+         eid = {9},
+       pages = {9},
          doi = {10.1088/0067-0049/192/1/9},
       adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
      adsnote = {Provided by the SAO/NASA Astrophysics Data System}


https://bitbucket.org/yt_analysis/yt/commits/9948ce831f61/
Changeset:   9948ce831f61
Branch:      stable
User:        brittonsmith
Date:        2016-09-28 19:54:53+00:00
Summary:     Strip units of hubble constant before saving.
Affected #:  1 file

diff -r a4531f19c98a7ff4bad9ff8bf5e582ca35fcfb04 -r 9948ce831f6145a8940d78e73a3c8cec21b3d9bf yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -669,6 +669,9 @@
                 ds[attr] = getattr(self.cosmology, attr)
             ds["current_time"] = \
               self.cosmology.t_from_z(ds["current_redshift"])
+            if isinstance(ds["hubble_constant"], YTArray):
+                ds["hubble_constant"] = \
+                  ds["hubble_constant"].to("100*km/(Mpc*s)").d
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
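
The target unit works because 100 km/s/Mpc defines the dimensionless Hubble parameter h (a sketch using yt's unit machinery):

    import yt

    h0 = yt.YTQuantity(70.0, 'km/s/Mpc')
    h = h0.to('100*km/(Mpc*s)').d  # 0.7, now a plain float
    print(h)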
 


https://bitbucket.org/yt_analysis/yt/commits/c37234562322/
Changeset:   c37234562322
Branch:      stable
User:        ngoldbaum
Date:        2016-09-29 15:35:17+00:00
Summary:     Backporting PR #2400 https://bitbucket.org/yt_analysis/yt/pull-requests/2400
Affected #:  2 files

diff -r 9948ce831f6145a8940d78e73a3c8cec21b3d9bf -r c372345623227704e0be876c815808722ebfc91c doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -448,8 +448,8 @@
 
 .. code-block:: python
 
-   from yt.config import ytcfg
-   ytcfg["yt","loglevel"] = "40" # This sets the log level to "ERROR"
+   from yt.funcs import mylog
+   mylog.setLevel(40) # This sets the log level to "ERROR"
 
 which in this case would suppress everything below error messages. For reference, the numerical
 values corresponding to different log levels are:

diff -r 9948ce831f6145a8940d78e73a3c8cec21b3d9bf -r c372345623227704e0be876c815808722ebfc91c doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -62,27 +62,6 @@
 file. Note that a log level of 1 means that all log messages are printed to
 stdout.  To disable logging, set the log level to 50.
 
-Setting Configuration On the Command Line
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Options can also be set directly on the command line by specifying a
-command-line option.  For instance, if you are running the script
-``my_script.py`` you can specify a configuration option with the ``--config``
-argument.  As an example, to lower the log level (thus making it more verbose)
-you can specify:
-
-.. code-block:: bash
-
-   $ python2.7 my_script.py --config loglevel=1
-
-Any configuration option specific to yt can be specified in this manner.  One
-common configuration option would be to disable serialization:
-
-.. code-block:: bash
-
-   $ python2.7 my_script.py --config serialize=False
-
-This way projections are always re-created.
 
 Available Configuration Options
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^


https://bitbucket.org/yt_analysis/yt/commits/c7ff62f9fffd/
Changeset:   c7ff62f9fffd
Branch:      stable
User:        hyschive
Date:        2016-10-04 22:42:29+00:00
Summary:     Backporting PR #2407 https://bitbucket.org/yt_analysis/yt/pull-requests/2407
Affected #:  1 file

diff -r c372345623227704e0be876c815808722ebfc91c -r c7ff62f9fffde57cce3801665defb48c13ad0233 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -952,9 +952,7 @@
                     val = (val.v, str(val.units))
                 elif not isinstance(val, tuple):
                     val = (val, cgs)
-                u = getattr(self, "%s_unit" % unit, None)
-                mylog.info("Overriding %s_unit: %g -> %g %s.",
-                           unit, u, val[0], val[1])
+                mylog.info("Overriding %s_unit: %g %s.", unit, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
 
     _arr = None


https://bitbucket.org/yt_analysis/yt/commits/fdf0463de7a7/
Changeset:   fdf0463de7a7
Branch:      stable
User:        brittonsmith
Date:        2016-10-05 14:43:41+00:00
Summary:     Suppress logging while searching for datasets.
Affected #:  1 file

diff -r c7ff62f9fffde57cce3801665defb48c13ad0233 -r fdf0463de7a7becb967d695bc611a050c630b973 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -587,6 +587,10 @@
                      len(potential_outputs))
 
         my_outputs = {}
+        llevel = mylog.level
+        # suppress logging as we load every dataset, unless set to debug
+        if llevel > 10 and llevel < 40:
+            mylog.setLevel(40)
         for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
@@ -609,6 +613,7 @@
                             my_storage.result['redshift'] = ds.current_redshift
                 except YTOutputNotIdentified:
                     mylog.error('Failed to load %s', filename)
+        mylog.setLevel(llevel)
         my_outputs = [my_output for my_output in my_outputs.values() \
                       if my_output is not None]
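
The same save/restore pattern in isolation (a sketch; the try/finally is an extra safeguard, not part of the changeset):

    from yt.funcs import mylog

    llevel = mylog.level
    if 10 < llevel < 40:  # leave DEBUG (<= 10) and ERROR/CRITICAL (>= 40) alone
        mylog.setLevel(40)
    try:
        pass  # ... load each candidate dataset here ...
    finally:
        mylog.setLevel(llevel)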
 


https://bitbucket.org/yt_analysis/yt/commits/8367ac115eb9/
Changeset:   8367ac115eb9
Branch:      stable
User:        ngoldbaum
Date:        2016-10-24 16:57:09+00:00
Summary:     Backporting PR #2412 https://bitbucket.org/yt_analysis/yt/pull-requests/2412
Affected #:  3 files

diff -r fdf0463de7a7becb967d695bc611a050c630b973 -r 8367ac115eb9fe9ad08cd042d7d1db9502a51e4e tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -5,16 +5,16 @@
   local_athena_001:
     - yt/frontends/athena
 
-  local_chombo_000:
+  local_chombo_001:
     - yt/frontends/chombo/tests/test_outputs.py
 
-  local_enzo_001:
+  local_enzo_002:
     - yt/frontends/enzo
 
   local_fits_000:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_002:
+  local_flash_003:
     - yt/frontends/flash/tests/test_outputs.py
 
   local_gadget_000:
@@ -38,8 +38,8 @@
   
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
-  
-  local_pw_003:
+
+  local_pw_009:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes

diff -r fdf0463de7a7becb967d695bc611a050c630b973 -r 8367ac115eb9fe9ad08cd042d7d1db9502a51e4e yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -183,8 +183,8 @@
         if dlevel != 1:
             rf = rf**dlevel
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = np.maximum(0, cgi / rf - gi)
-        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi // rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) // rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],

diff -r fdf0463de7a7becb967d695bc611a050c630b973 -r 8367ac115eb9fe9ad08cd042d7d1db9502a51e4e yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -126,8 +126,8 @@
         if dlevel != 1:
             raise NotImplementedError
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = np.maximum(0, cgi / rf - gi)
-        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+        startIndex = np.maximum(0, cgi // rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) // rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
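
The underlying issue is Python 3 true division (a minimal illustration, independent of yt):

    import numpy as np

    cgi = np.array([8, 8, 8])
    rf = 2
    print((cgi / rf).dtype)   # float64 under Python 3 -- unusable as an index
    print((cgi // rf).dtype)  # integer dtype -- valid for the start/end slices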


https://bitbucket.org/yt_analysis/yt/commits/ab4c6d733c18/
Changeset:   ab4c6d733c18
Branch:      stable
User:        ngoldbaum
Date:        2016-10-10 15:17:00+00:00
Summary:     make annotate_clear return self

this makes the UX a little nicer in the notebook
Affected #:  1 file

diff -r 8367ac115eb9fe9ad08cd042d7d1db9502a51e4e -r ab4c6d733c18a60f1371c54f766177e70cbea8d4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -993,6 +993,7 @@
         else:
             del self._callbacks[index]
         self.setup_callbacks()
+        return self
 
     def run_callbacks(self):
         for f in self.fields:
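
With self returned, callback management chains like the other plot methods (a sketch; the sample dataset path is assumed to be available locally):

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    slc = yt.SlicePlot(ds, 'z', 'density')
    slc.annotate_timestamp()
    slc.annotate_clear().save()  # clear callbacks and save in one expression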


https://bitbucket.org/yt_analysis/yt/commits/b4796a89809c/
Changeset:   b4796a89809c
Branch:      stable
User:        ngoldbaum
Date:        2016-10-11 16:46:03+00:00
Summary:     Backporting PR #2414 https://bitbucket.org/yt_analysis/yt/pull-requests/2414
Affected #:  1 file

diff -r ab4c6d733c18a60f1371c54f766177e70cbea8d4 -r b4796a89809c223a7913ba57f5de6502c4e6f3f5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -652,6 +652,8 @@
         if axis is None:
             mv, pos0, pos1, pos2 = self.quantities.max_location(field)
             return pos0, pos1, pos2
+        if isinstance(axis, string_types):
+            axis = [axis]
         rv = self.quantities.sample_at_max_field_values(field, axis)
         if len(rv) == 2:
             return rv[1]


https://bitbucket.org/yt_analysis/yt/commits/cdfd0f196985/
Changeset:   cdfd0f196985
Branch:      stable
User:        ngoldbaum
Date:        2016-10-11 02:06:42+00:00
Summary:     Update outdated instructions in RockstarHaloFinder docstrings
Affected #:  1 file

diff -r b4796a89809c223a7913ba57f5de6502c4e6f3f5 -r cdfd0f196985c35b5480f1c9971592e6d4e27851 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -114,7 +114,7 @@
 
     Parameters
     ----------
-    ts   : DatasetSeries, Dataset
+    ts: DatasetSeries, Dataset
         This is the data source containing the DM particles. Because 
         halo IDs may change from one snapshot to the next, the only
         way to keep a consistent halo ID across time is to feed 
@@ -165,13 +165,12 @@
     --------
     
     To use the script below you must run it using MPI:
-    mpirun -np 4 python run_rockstar.py --parallel
+    mpirun -np 4 python run_rockstar.py
 
     >>> import yt
+    >>> yt.enable_parallelism()
     >>> from yt.analysis_modules.halo_finding.rockstar.api import \
-    ... RockstarHaloFinder
-    >>> from yt.data_objects.particle_filters import \
-    ... particle_filter
+    ...     RockstarHaloFinder
 
     >>> # create a particle filter to remove star particles
     >>> @yt.particle_filter("dark_matter", requires=["creation_time"])


https://bitbucket.org/yt_analysis/yt/commits/d3f4e0dad160/
Changeset:   d3f4e0dad160
Branch:      stable
User:        ngoldbaum
Date:        2016-10-14 16:10:29+00:00
Summary:     Backporting PR #2419 https://bitbucket.org/yt_analysis/yt/pull-requests/2419
Affected #:  2 files

diff -r cdfd0f196985c35b5480f1c9971592e6d4e27851 -r d3f4e0dad1603d0f4ce7fc25fa4832c11c748e34 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -33,10 +33,13 @@
 karraki at nmsu.edu = karraki at gmail.com
 hckr at eml.cc = astrohckr at gmail.com
 julian3 at illinois.edu = astrohckr at gmail.com
+aj at hckr.eml.cc = astrohckr at gmail.com
 cosmosquark = bthompson2090 at gmail.com
 chris.m.malone at lanl.gov = chris.m.malone at gmail.com
-jnaiman at ucolick.org = jnaiman
-migueld.deval = miguel at archlinux.net
+jnaiman at ucolick.org = jnaiman at cfa.harvard.edu
+jnaiman = jnaiman at cfa.harvard.edu
+migueld.deval = miguel.deval at gmail.com
+miguel at archlinux.net = miguel.deval at gmail.com
 slevy at ncsa.illinois.edu = salevy at illinois.edu
 malzraa at gmail.com = kellerbw at mcmaster.ca
 None = convert-repo
@@ -47,3 +50,10 @@
 Ben Thompson = bthompson2090 at gmail.com
 goldbaum at ucolick.org = ngoldbau at illinois.edu
 ngoldbau at ucsc.edu = ngoldbau at illinois.edu
+NTAuthority at honeypot.fritz.box = anokfireball at poseto.de
+NTAuthority at guest053.fz-rossendorf.de = anokfireball at poseto.de
+NTAuthority at guest692.fz-rossendorf.de = anokfireball at poseto.de
+Fabian Koller = anokfireball at poseto.de
+Rafael Ruggiero = rafael.ruggiero at usp.br
+john.regan at helsinki.fi = john.a.regan at durham.ac.uk
+code at andre-bubel.de = a.huebl at hzdr.de
\ No newline at end of file

diff -r cdfd0f196985c35b5480f1c9971592e6d4e27851 -r d3f4e0dad1603d0f4ce7fc25fa4832c11c748e34 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -8,11 +8,14 @@
                 Ricarda Beckmann (Ricarda.Beckmann at astro.ox.ac.uk)
                 Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                Robert Bradshaw (robertwb at gmail.com)
                 André-Patrick Bubel (code at andre-bubel.de)
+                Corentin Cadiou (corentin.cadiou at iap.fr)
                 Pengfei Chen (madcpf at gmail.com)
                 Yi-Hao Chen (yihaochentw at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
+                Weiguang Cui (weiguang.cui at uwa.edu.au)
                 Andrew Cunningham (ajcunn at gmail.com)
                 Miguel de Val-Borro (miguel.deval at gmail.com)
                 Bili Dong (qobilidop at gmail.com)
@@ -27,18 +30,22 @@
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
                 David Hannasch (David.A.Hannasch at gmail.com)
+                Axel Huebl (a.huebl at hzdr.de)
                 Cameron Hummels (chummels at gmail.com)
                 Anni Järvenpää (anni.jarvenpaa at gmail.com)
                 Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Maximilian Katz (maximilian.katz at stonybrook.edu)
                 Ben W. Keller (kellerbw at mcmaster.ca)
+                Chang-Goo Kim (changgoo at princeton.edu)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
+                Fabian Holler (anokfireball at poseto.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
                 Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
                 Meagan Lang (langmm.astro at gmail.com)
+                Erwin Tin-Hay Lau (ethlau at gmail.com)
                 Doris Lee (dorislee at berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
@@ -63,6 +70,7 @@
                 Anna Rosen (rosen at ucolick.org)
                 Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
+                Rafael Ruggiero (rafael.ruggiero at usp.br)
                 Hsi-Yu Schive (hyschive at gmail.com)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)


https://bitbucket.org/yt_analysis/yt/commits/028d6e436c56/
Changeset:   028d6e436c56
Branch:      stable
User:        xarthisius
Date:        2016-10-19 16:11:10+00:00
Summary:     Backporting PR #2421 https://bitbucket.org/yt_analysis/yt/pull-requests/2421
Affected #:  2 files

diff -r d3f4e0dad1603d0f4ce7fc25fa4832c11c748e34 -r 028d6e436c56a4d77ca7f6775ee6ffa24d2bca5d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -80,7 +80,11 @@
             self.num_grids = self.dataset._find_parameter(
                 "integer", "globalnumblocks", True)
         except KeyError:
-            self.num_grids = self._handle["/simulation parameters"][0][0]
+            try:
+                self.num_grids = \
+                    self._handle['simulation parameters']['total blocks'][0]
+            except KeyError:
+                self.num_grids = self._handle["/simulation parameters"][0][0]
         
     def _parse_index(self):
         f = self._handle # shortcut
@@ -317,11 +321,14 @@
                 if hn not in self._handle:
                     continue
                 if hn is 'simulation parameters':
-                    zipover = zip(self._handle[hn].dtype.names,self._handle[hn][0])
+                    zipover = ((name, self._handle[hn][name][0])
+                               for name in self._handle[hn].dtype.names)
                 else:
                     zipover = zip(self._handle[hn][:,'name'],self._handle[hn][:,'value'])
                 for varname, val in zipover:
                     vn = varname.strip()
+                    if hasattr(vn, 'decode'):
+                        vn = vn.decode("ascii", "ignore")
                     if hn.startswith("string"):
                         pval = val.strip()
                     else:
@@ -331,7 +338,7 @@
                                    "scalar of the same name".format(hn[:-1],vn))
                     if hasattr(pval, 'decode'):
                         pval = pval.decode("ascii", "ignore")
-                    self.parameters[vn.decode("ascii", "ignore")] = pval
+                    self.parameters[vn] = pval
         
         # Determine block size
         try:
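
The decode guard covers h5py under Python 3, where parameter names come back as bytes (illustration):

    vn = b'total blocks'
    if hasattr(vn, 'decode'):  # bytes (and py2 str) have decode; py3 str does not
        vn = vn.decode('ascii', 'ignore')
    print(vn.strip())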

diff -r d3f4e0dad1603d0f4ce7fc25fa4832c11c748e34 -r 028d6e436c56a4d77ca7f6775ee6ffa24d2bca5d yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -13,6 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
 from yt.testing import \
     assert_equal, \
     requires_file, \
@@ -75,6 +76,21 @@
 def test_FLASHParticleDataset():
     assert isinstance(data_dir_load(fid_1to3_b1), FLASHParticleDataset)
 
+
+dens_turb_mag = 'DensTurbMag/DensTurbMag_hdf5_plt_cnt_0015'
+@requires_file(dens_turb_mag)
+def test_FLASH25_dataset():
+    ds = data_dir_load(dens_turb_mag)
+    assert_equal(ds.parameters['time'], 751000000000.0)
+    assert_equal(ds.domain_dimensions, np.array([8, 8, 8]))
+    assert_equal(ds.domain_left_edge, 
+                 ds.arr([-2e18, -2e18, -2e18], 'code_length'))
+
+    assert_equal(ds.index.num_grids, 73)
+    dd = ds.all_data()
+    dd['density']
+
+
 @requires_ds(fid_1to3_b1, big_data=True)
 def test_fid_1to3_b1():
     ds = data_dir_load(fid_1to3_b1)


https://bitbucket.org/yt_analysis/yt/commits/83f72a489dae/
Changeset:   83f72a489dae
Branch:      stable
User:        ngoldbaum
Date:        2016-10-25 19:50:30+00:00
Summary:     Backporting PR #2423 https://bitbucket.org/yt_analysis/yt/pull-requests/2423
Affected #:  2 files

diff -r 028d6e436c56a4d77ca7f6775ee6ffa24d2bca5d -r 83f72a489dae57eb2e968c17154dd630b0c4cb4b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_009:
+  local_pw_010:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes

diff -r 028d6e436c56a4d77ca7f6775ee6ffa24d2bca5d -r 83f72a489dae57eb2e968c17154dd630b0c4cb4b yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -239,10 +239,10 @@
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            units = chunk.ds.field_info[field].units
+            units = chunk.ds.field_info[field].output_units
             arr[:,i] = chunk[field][filter].in_units(units)
         if self.weight_field is not None:
-            units = chunk.ds.field_info[self.weight_field].units
+            units = chunk.ds.field_info[self.weight_field].output_units
             weight_data = chunk[self.weight_field].in_units(units)
         else:
             weight_data = np.ones(filter.size, dtype="float64")


https://bitbucket.org/yt_analysis/yt/commits/0d0af4016c88/
Changeset:   0d0af4016c88
Branch:      stable
User:        ngoldbaum
Date:        2016-10-26 17:33:45+00:00
Summary:     Update version numbers
Affected #:  3 files

diff -r 83f72a489dae57eb2e968c17154dd630b0c4cb4b -r 0d0af4016c88476e134c46ce6c25d9ef88c84614 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,9 +67,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3.1'
+version = '3.3.2'
 # The full version, including alpha/beta/rc tags.
-release = '3.3.1'
+release = '3.3.2'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r 83f72a489dae57eb2e968c17154dd630b0c4cb4b -r 0d0af4016c88476e134c46ce6c25d9ef88c84614 setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,7 @@
 SHADERS_FILES = glob.glob(os.path.join(SHADERS_DIR, "*.vertexshader")) + \
     glob.glob(os.path.join(SHADERS_DIR, "*.fragmentshader"))
 
-VERSION = "3.3.1"
+VERSION = "3.3.2"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')

diff -r 83f72a489dae57eb2e968c17154dd630b0c4cb4b -r 0d0af4016c88476e134c46ce6c25d9ef88c84614 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -72,7 +72,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-__version__ = "3.3.1"
+__version__ = "3.3.2"
 
 # First module imports
 import numpy as np # For modern purposes


https://bitbucket.org/yt_analysis/yt/commits/472c5babbc8c/
Changeset:   472c5babbc8c
Branch:      stable
User:        ngoldbaum
Date:        2016-10-26 17:34:15+00:00
Summary:     Added tag yt-3.3.2 for changeset 0d0af4016c88
Affected #:  1 file

diff -r 0d0af4016c88476e134c46ce6c25d9ef88c84614 -r 472c5babbc8c178f1236284ee5a309fc89fea250 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5195,3 +5195,4 @@
 83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
 7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
 9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1
+0d0af4016c88476e134c46ce6c25d9ef88c84614 yt-3.3.2

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

