[yt-svn] commit/yt: 28 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Aug 27 09:36:52 PDT 2015


28 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/84a41a60d6f1/
Changeset:   84a41a60d6f1
Branch:      stable
User:        alrosen
Date:        2015-07-21 22:18:50+00:00
Summary:     Sink particle files that come from simulations using ray tracing have one extra field, so I made a quick
change to read in star properties correctly if the star has 18 or 19 properties.

-ALR, 7/21/15
Affected #:  1 file

diff -r ba0839873c03aff5f03c4612f021ad6aafdf074b -r 84a41a60d6f10972b5bc64b5e3df63fd3d22bc53 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -241,19 +241,18 @@
         index['particle_r']         = 11
         index['particle_mdeut']     = 12
         index['particle_n']         = 13
-        index['particle_mdot']      = 14,
+        index['particle_mdot']      = 14
         index['particle_burnstate'] = 15
 
-    elif len(line.strip().split()) == 18:
+    elif (len(line.strip().split()) == 18 or len(line.strip().split()) == 19):
         # these are the newer style, add luminosity as well
         index['particle_mlast']     = 10
         index['particle_r']         = 11
         index['particle_mdeut']     = 12
         index['particle_n']         = 13
-        index['particle_mdot']      = 14,
-        index['particle_burnstate'] = 15,
+        index['particle_mdot']      = 14
+        index['particle_burnstate'] = 15
         index['particle_luminosity']= 16
-
     else:
         # give a warning if none of the above apply:
         mylog.warning('Warning - could not figure out particle output file')


https://bitbucket.org/yt_analysis/yt/commits/9a7e2427d196/
Changeset:   9a7e2427d196
Branch:      stable
User:        cosmosquark
Date:        2015-07-30 18:01:52+00:00
Summary:     fixing a bug where particle_velocity_cylindrical_radius is being registered as particle_velocity_spherical_radius
Affected #:  1 file

diff -r 84a41a60d6f10972b5bc64b5e3df63fd3d22bc53 -r 9a7e2427d196337785d09050f07a4e068273c064 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -669,7 +669,7 @@
 
     registry.add_field(
         (ptype, "particle_velocity_cylindrical_radius"),
-        function=_particle_velocity_spherical_radius,
+        function=_particle_velocity_cylindrical_radius,
         particle_type=True,
         units="cm/s",
         validators=[ValidateParameter("normal"), ValidateParameter("center")])


https://bitbucket.org/yt_analysis/yt/commits/a9a3c6664f34/
Changeset:   a9a3c6664f34
Branch:      stable
User:        xarthisius
Date:        2015-07-28 19:36:10+00:00
Summary:     Backporting PR #1660 https://bitbucket.org/yt_analysis/yt/pull-requests/1660
Affected #:  2 files

diff -r 9a7e2427d196337785d09050f07a4e068273c064 -r a9a3c6664f34c1bda3817552c2f2bbea80481f8d yt/utilities/tests/test_fits_image.py
--- a/yt/utilities/tests/test_fits_image.py
+++ b/yt/utilities/tests/test_fits_image.py
@@ -17,7 +17,7 @@
 import os
 import numpy as np
 import shutil
-from yt.testing import fake_random_ds
+from yt.testing import fake_random_ds, requires_module
 from yt.convenience import load
 from numpy.testing import \
     assert_equal
@@ -29,6 +29,8 @@
 from yt.visualization.volume_rendering.camera import \
     off_axis_projection
 
+
+@requires_module("astropy")
 def test_fits_image():
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
@@ -87,7 +89,7 @@
     cut_frb = cut.to_frb((0.5, "unitary"), 128)
 
     fid3 = FITSImageData(cut_frb, fields=["density","temperature"], units="cm")
-    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"], 
+    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"],
                                 image_res=128, center=[0.5, 0.42, 0.6],
                                 width=(0.5,"unitary"))
 
@@ -103,26 +105,26 @@
     assert new_fid3.wcs.wcs.ctype[0] == "RA---TAN"
     assert new_fid3.wcs.wcs.ctype[1] == "DEC--TAN"
 
-    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9], 
+    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9],
                               0.5, 128, "density").swapaxes(0, 1)
     fid4 = FITSImageData(buf, fields="density", width=100.0)
-    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9], "density", 
-                                     width=(0.5,"unitary"), image_res=128, 
+    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9], "density",
+                                     width=(0.5,"unitary"), image_res=128,
                                      depth_res=128, depth=(0.5,"unitary"))
 
     yield assert_equal, fid4.get_data("density"), fits_oap.get_data("density")
 
-    cvg = ds.covering_grid(ds.index.max_level, [0.25,0.25,0.25], 
+    cvg = ds.covering_grid(ds.index.max_level, [0.25,0.25,0.25],
                            [32, 32, 32], fields=["density","temperature"])
     fid5 = FITSImageData(cvg, fields=["density","temperature"])
     assert fid5.dimensionality == 3
 
     fid5.update_header("density", "time", 0.1)
     fid5.update_header("all", "units", "cgs")
-    
+
     assert fid5["density"].header["time"] == 0.1
     assert fid5["temperature"].header["units"] == "cgs"
     assert fid5["density"].header["units"] == "cgs"
-    
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r 9a7e2427d196337785d09050f07a4e068273c064 -r a9a3c6664f34c1bda3817552c2f2bbea80481f8d yt/visualization/tests/test_filters.py
--- a/yt/visualization/tests/test_filters.py
+++ b/yt/visualization/tests/test_filters.py
@@ -10,28 +10,23 @@
 
 """
 
-from yt.testing import fake_amr_ds
+from yt.testing import fake_amr_ds, requires_module
 
 
-class TestFilters():
+@requires_module("scipy")
+def test_white_noise_filter():
+    ds = fake_amr_ds(fields=("density",))
+    p = ds.proj("density", "z")
+    frb = p.to_frb((1, 'unitary'), 64)
+    frb.apply_white_noise()
+    frb.apply_white_noise(1e-3)
+    frb["density"]
 
-    @classmethod
-    def setup_class(cls):
-        ds = fake_amr_ds(fields=("density",))
-        p = ds.proj("density", "z")
-        cls.frb = p.to_frb((1, 'unitary'), 64)
 
-    def teardown(self):
-        try:
-            del self.frb["density"]
-        except KeyError:
-            pass
-
-    def test_white_noise_filter(self):
-        self.frb.apply_white_noise()
-        self.frb.apply_white_noise(1e-3)
-        self.frb["density"]
-
-    def test_gauss_beam_filter(self):
-        self.frb.apply_gauss_beam(nbeam=15, sigma=1.0)
-        self.frb["density"]
+@requires_module("scipy")
+def test_gauss_beam_filter():
+    ds = fake_amr_ds(fields=("density",))
+    p = ds.proj("density", "z")
+    frb = p.to_frb((1, 'unitary'), 64)
+    frb.apply_gauss_beam(nbeam=15, sigma=1.0)
+    frb["density"]


https://bitbucket.org/yt_analysis/yt/commits/33b8451fa8f3/
Changeset:   33b8451fa8f3
Branch:      stable
User:        ngoldbaum
Date:        2015-08-05 18:24:33+00:00
Summary:     Backporting PR #1662 https://bitbucket.org/yt_analysis/yt/pull-requests/1662
Affected #:  4 files

diff -r a9a3c6664f34c1bda3817552c2f2bbea80481f8d -r 33b8451fa8f3d891f2db3a0b148ed86d40002ac2 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -264,7 +264,11 @@
         # or the byte swapped equivalents (65536 and 134217728).
         # The int32 following the header (first 4+256 bytes) must equal this
         # number.
-        (rhead,) = struct.unpack('<I',f.read(4))
+        try:
+            (rhead,) = struct.unpack('<I',f.read(4))
+        except struct.error:
+            f.close()
+            return False, 1
         # Use value to check endianess
         if rhead == 256:
             endianswap = '<'

diff -r a9a3c6664f34c1bda3817552c2f2bbea80481f8d -r 33b8451fa8f3d891f2db3a0b148ed86d40002ac2 yt/frontends/stream/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -0,0 +1,45 @@
+"""
+Tests for loading in-memory datasets
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from yt.testing import assert_raises
+from yt.utilities.answer_testing.framework import data_dir_load
+from yt.utilities.exceptions import YTOutputNotIdentified
+
+class TestEmptyLoad(unittest.TestCase):
+
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
+
+        # create 0 byte file
+        open("empty_file", "a")
+
+        # create empty directory
+        os.makedirs("empty_directory")
+
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
+
+    def test_load_empty_file(self):
+        assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
+        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
+        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")

diff -r a9a3c6664f34c1bda3817552c2f2bbea80481f8d -r 33b8451fa8f3d891f2db3a0b148ed86d40002ac2 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -255,7 +255,7 @@
             f.seek(0, os.SEEK_SET)
             #Read in the header
             t, n, ndim, ng, nd, ns = struct.unpack("<diiiii", f.read(28))
-        except IOError:
+        except (IOError, struct.error):
             return False, 1
         endianswap = "<"
         #Check Endianness

diff -r a9a3c6664f34c1bda3817552c2f2bbea80481f8d -r 33b8451fa8f3d891f2db3a0b148ed86d40002ac2 yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -32,6 +32,9 @@
 
     counters = [Counter(mro) for mro in mros]
 
+    if len(counters) == 0:
+        return counters
+
     count = reduce(lambda x, y: x + y, counters)
 
     return [x for x in count.keys() if count[x] == 1]


https://bitbucket.org/yt_analysis/yt/commits/5bcd67dcdfd7/
Changeset:   5bcd67dcdfd7
Branch:      stable
User:        ngoldbaum
Date:        2015-07-30 17:50:13+00:00
Summary:     Backporting PR #1663 https://bitbucket.org/yt_analysis/yt/pull-requests/1663
Affected #:  2 files

diff -r 33b8451fa8f3d891f2db3a0b148ed86d40002ac2 -r 5bcd67dcdfd7db8016f00443ca554af2a9102cbe yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -631,7 +631,7 @@
         # In-place copies do not drop units.
         assert_true(hasattr(out, 'units'))
         assert_true(not hasattr(ret, 'units'))
-    elif ufunc in (np.absolute, np.conjugate, np.floor, np.ceil,
+    elif ufunc in (np.absolute, np.fabs, np.conjugate, np.floor, np.ceil,
                    np.trunc, np.negative):
         ret = ufunc(a, out=out)
 

diff -r 33b8451fa8f3d891f2db3a0b148ed86d40002ac2 -r 5bcd67dcdfd7db8016f00443ca554af2a9102cbe yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -26,7 +26,7 @@
     greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
     logical_or, logical_xor, logical_not, maximum, minimum, isreal, iscomplex, \
     isfinite, isinf, isnan, signbit, copysign, nextafter, modf, frexp, \
-    floor, ceil, trunc, fmax, fmin
+    floor, ceil, trunc, fmax, fmin, fabs
 
 from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
@@ -139,7 +139,7 @@
     log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
     arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
     rad2deg, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
-    signbit, floor, ceil, trunc, modf, frexp,
+    signbit, floor, ceil, trunc, modf, frexp, fabs
 )
 
 binary_operators = (
@@ -223,6 +223,7 @@
         mod: preserve_units,
         fmod: preserve_units,
         absolute: passthrough_unit,
+        fabs: passthrough_unit,
         rint: return_without_unit,
         sign: return_without_unit,
         conj: passthrough_unit,
@@ -1072,7 +1073,8 @@
                                         unit.base_value, out=out_arr)
                             unit = Unit(registry=unit.registry)
         else:
-            raise RuntimeError("Operation is not defined.")
+            raise RuntimeError("Support for the %s ufunc has not been added "
+                               "to YTArray." % str(context[0]))
         if unit is None:
             out_arr = np.array(out_arr, copy=False)
             return out_arr


https://bitbucket.org/yt_analysis/yt/commits/7dc0048a0fee/
Changeset:   7dc0048a0fee
Branch:      stable
User:        brittonsmith
Date:        2015-08-03 20:22:24+00:00
Summary:     Removing enzo installation from install script.
Affected #:  1 file

diff -r 5bcd67dcdfd7db8016f00443ca554af2a9102cbe -r 7dc0048a0fee14c39b8eab590c73fcbb65f704fb doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -47,7 +47,6 @@
                 # lead to broken mercurial installations.
 INST_PNG=1      # Install a local libpng?  Same things apply as with zlib.
 INST_FTYPE=1    # Install FreeType2 locally?
-INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
@@ -109,7 +108,6 @@
     echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
     echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
     echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
-    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
     echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
     echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
     echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
@@ -426,10 +424,6 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-15s = %s so I " "INST_ENZO" "${INST_ENZO}"
-get_willwont ${INST_ENZO}
-echo "be checking out Enzo"
-
 printf "%-15s = %s so I " "INST_PYX" "${INST_PYX}"
 get_willwont ${INST_PYX}
 echo "be installing PyX"
@@ -1047,14 +1041,6 @@
     fi
 fi
 
-if [ $INST_ENZO -eq 1 ]
-then
-    echo "Cloning a copy of Enzo."
-    cd ${DEST_DIR}/src/
-    ${HG_EXEC} clone https://bitbucket.org/enzo/enzo-stable ./enzo-hg-stable
-    cd $MY_PWD
-fi
-
 if [ -e $HOME/.matplotlib/fontList.cache ] && \
    ( grep -q python2.6 $HOME/.matplotlib/fontList.cache )
 then
@@ -1106,16 +1092,6 @@
       echo "$DEST_DIR/bin/hg"
       echo
     fi
-    if [ $INST_ENZO -eq 1 ]
-    then
-      echo "Enzo has also been checked out, but not built."
-      echo
-      echo "$DEST_DIR/src/enzo-hg-stable"
-      echo
-      echo "The value of YT_DEST can be used as an HDF5 installation location."
-      echo "Questions about Enzo should be directed to the Enzo User List."
-      echo
-    fi
     echo
     echo "For support, see the website and join the mailing list:"
     echo


https://bitbucket.org/yt_analysis/yt/commits/36955cef7a41/
Changeset:   36955cef7a41
Branch:      stable
User:        MatthewTurk
Date:        2015-08-04 15:56:29+00:00
Summary:     Change to use 64 bit index for gind.  Closes #1057.
Affected #:  1 file

diff -r 7dc0048a0fee14c39b8eab590c73fcbb65f704fb -r 36955cef7a41d8318a1d02277d0ec75a55289558 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -351,7 +351,8 @@
                       np.int64_t domain_ind
                       ):
 
-        cdef int i, j, k, ii
+        cdef int i, j, k
+        cdef np.uint64_t ii
         cdef int ind[3]
         cdef np.float64_t rpos[3]
         cdef np.float64_t rdds[3][2]


https://bitbucket.org/yt_analysis/yt/commits/2ad03c540474/
Changeset:   2ad03c540474
Branch:      stable
User:        aajarven
Date:        2015-08-05 09:40:11+00:00
Summary:     Fixed multiplot crashing when supplied with profiles, and removed the need for the user to save individual plots before using them in multiplot.
Affected #:  1 file

diff -r 36955cef7a41d8318a1d02277d0ec75a55289558 -r 2ad03c540474ecfac07fe49bb61dc7507be4c2ef yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -290,7 +290,7 @@
         self.axes_drawn = True
 
 #=============================================================================
-
+    
     def axis_box_yt(self, plot, units=None, bare_axes=False,
                     tickcolor=None, xlabel=None, ylabel=None, **kwargs):
         r"""Wrapper around DualEPS.axis_box to automatically fill in the
@@ -355,7 +355,32 @@
                         _ylabel = 'y (%s)' % (units)
             if tickcolor == None:
                 _tickcolor = pyx.color.cmyk.white
-        elif isinstance(plot, (ProfilePlot, PhasePlot)):
+        elif isinstance(plot, ProfilePlot):
+            subplot = plot.axes.values()[0]
+            # limits for axes
+            xlimits = subplot.get_xlim()
+            _xrange = (YTQuantity(xlimits[0], 'm'), YTQuantity(xlimits[1], 'm')) # unit hardcoded but afaik it is not used anywhere so it doesn't matter
+            if list(plot.axes.ylim.viewvalues())[0][0] is None:
+                ylimits = subplot.get_ylim()
+            else:
+                ylimits = list(plot.axes.ylim.viewvalues())[0]
+            _yrange = (YTQuantity(ylimits[0], 'm'), YTQuantity(ylimits[1], 'm')) # unit hardcoded but afaik it is not used anywhere so it doesn't matter
+            # axis labels
+            xaxis = subplot.xaxis
+            _xlabel = pyxize_label(xaxis.label.get_text())
+            yaxis = subplot.yaxis
+            _ylabel = pyxize_label(yaxis.label.get_text())
+            # set log if necessary
+            if subplot.get_xscale() == "log":
+                 _xlog = True 
+            else:
+                 _xlog = False
+            if subplot.get_yscale() == "log":
+                 _ylog = True 
+            else:
+                 _ylog = False
+            _tickcolor = None 
+        elif isinstance(plot, PhasePlot):
             k = plot.plots.keys()[0]
             _xrange = plot[k].axes.get_xlim()
             _yrange = plot[k].axes.get_ylim()
@@ -502,10 +527,7 @@
             _p1 = plot.plots[self.field].figure
             force_square = True
         elif isinstance(plot, ProfilePlot):
-            plot._redraw_image()
-            # Remove colorbar
-            _p1 = plot._figure
-            _p1.delaxes(_p1.axes[1])
+            _p1 = plot.figures.items()[0][1]
         elif isinstance(plot, np.ndarray):
             fig = plt.figure()
             iplot = plt.figimage(plot)
@@ -689,6 +711,9 @@
         >>> d.colorbar_yt(p)
         >>> d.save_fig()
         """
+        
+        if isinstance(plot, ProfilePlot):
+            raise RuntimeError("When using ProfilePlots you must either set yt_nocbar=True or provide colorbar flags so that the profiles don't have colorbars")
         _cmap = None
         if field != None:
             self.field = plot.data_source._determine_fields(field)[0]
@@ -1108,6 +1133,7 @@
                 if yaxis_flags[index] != None:
                     yaxis = yaxis_flags[index]
             if _yt:
+                this_plot._setup_plots()
                 if xlabels != None:
                     xlabel = xlabels[i]
                 else:


https://bitbucket.org/yt_analysis/yt/commits/9cc3e93a3e59/
Changeset:   9cc3e93a3e59
Branch:      stable
User:        karraki
Date:        2015-08-05 19:49:57+00:00
Summary:     Backporting PR #1679 https://bitbucket.org/yt_analysis/yt/pull-requests/1679
Affected #:  4 files

diff -r 2ad03c540474ecfac07fe49bb61dc7507be4c2ef -r 9cc3e93a3e590e704a2e77c1e1f51eb22ba90816 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -28,10 +28,7 @@
     OctreeSubset
 from yt.geometry.oct_container import \
     ARTOctreeContainer
-from .fields import \
-    ARTFieldInfo
-from yt.utilities.definitions import \
-    mpc_conversion
+from .fields import ARTFieldInfo
 from yt.utilities.io_handler import \
     io_registry
 from yt.utilities.lib.misc_utilities import \
@@ -50,14 +47,10 @@
 from .io import b2t
 from .io import a2b
 
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
 from yt.utilities.io_handler import \
     io_registry
 from yt.fields.field_info_container import \
     FieldInfoContainer, NullFunc
-from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs, sec_per_Gyr
 
 
 class ARTIndex(OctreeIndex):
@@ -300,6 +293,7 @@
             self.iOctFree, self.nOct = fpu.read_vector(f, 'i', '>')
             self.child_grid_offset = f.tell()
             self.parameters.update(amr_header_vals)
+            amr_header_vals = None
             # estimate the root level
             float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                 f,
@@ -347,18 +341,18 @@
 
         # setup standard simulation params yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.omega_lambda = amr_header_vals['Oml0']
-        self.omega_matter = amr_header_vals['Om0']
-        self.hubble_constant = amr_header_vals['hubble']
-        self.min_level = amr_header_vals['min_level']
-        self.max_level = amr_header_vals['max_level']
+        self.omega_lambda = self.parameters['Oml0']
+        self.omega_matter = self.parameters['Om0']
+        self.hubble_constant = self.parameters['hubble']
+        self.min_level = self.parameters['min_level']
+        self.max_level = self.parameters['max_level']
         if self.limit_level is not None:
             self.max_level = min(
-                self.limit_level, amr_header_vals['max_level'])
+                self.limit_level, self.parameters['max_level'])
         if self.force_max_level is not None:
             self.max_level = self.force_max_level
         self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
-        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
         self.gamma = self.parameters["gamma"]
         mylog.info("Max level is %02i", self.max_level)
 
@@ -600,7 +594,7 @@
 #            self.max_level = self.force_max_level
         self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
         self.parameters['t'] = a2b(self.parameters['aexpn'])
-        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+        self.current_time = self.quan(b2t(self.parameters['t']), 'Gyr')
         self.gamma = self.parameters["gamma"]
         mylog.info("Max level is %02i", self.max_level)
 

diff -r 2ad03c540474ecfac07fe49bb61dc7507be4c2ef -r 9cc3e93a3e590e704a2e77c1e1f51eb22ba90816 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -110,7 +110,7 @@
 ]
 
 star_struct = [
-    ('>d', ('tdum', 'adum')),
+    ('>d', ('t_stars', 'a_stars')),
     ('>i', 'nstars'),
     ('>d', ('ws_old', 'ws_oldi')),
     ('>f', 'particle_mass'),

diff -r 2ad03c540474ecfac07fe49bb61dc7507be4c2ef -r 9cc3e93a3e590e704a2e77c1e1f51eb22ba90816 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -55,7 +55,7 @@
         ("particle_mass", ("code_mass", [], None)),
         ("particle_index", ("", [], None)),
         ("particle_species", ("", ["particle_type"], None)),
-        ("particle_creation_time", ("code_time", [], None)),
+        ("particle_creation_time", ("Gyr", [], None)),
         ("particle_mass_initial", ("code_mass", [], None)),
         ("particle_metallicity1", ("", [], None)),
         ("particle_metallicity2", ("", [], None)),

diff -r 2ad03c540474ecfac07fe49bb61dc7507be4c2ef -r 9cc3e93a3e590e704a2e77c1e1f51eb22ba90816 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -31,6 +31,7 @@
 from yt.utilities.physical_constants import sec_per_year
 from yt.utilities.lib.geometry_utils import compute_morton
 from yt.geometry.oct_container import _ORDER_MAX
+from yt.units.yt_array import YTQuantity
 
 
 class IOHandlerART(BaseIOHandler):
@@ -300,18 +301,19 @@
 def interpolate_ages(data, file_stars, interp_tb=None, interp_ages=None,
                      current_time=None):
     if interp_tb is None:
-        tdum, adum = read_star_field(file_stars,
-                                     field="tdum")
+        t_stars, a_stars = read_star_field(file_stars,
+                                     field="t_stars")
         # timestamp of file should match amr timestamp
         if current_time:
-            tdiff = b2t(tdum)-current_time/(sec_per_year*1e9)
-            if np.abs(tdiff) < 1e-4:
+            tdiff = YTQuantity(b2t(t_stars), 'Gyr') - current_time.in_units('Gyr')
+            if np.abs(tdiff) > 1e-4:
                 mylog.info("Timestamp mismatch in star " +
-                           "particle header")
+                           "particle header: %s", tdiff)
         mylog.info("Interpolating ages")
         interp_tb, interp_ages = b2t(data)
+        interp_tb = YTArray(interp_tb, 'Gyr')
+        interp_ages = YTArray(interp_ages, 'Gyr')
     temp = np.interp(data, interp_tb, interp_ages)
-    temp *= 1.0e9*sec_per_year
     return interp_tb, interp_ages, temp
 
 


https://bitbucket.org/yt_analysis/yt/commits/47338f8e28ed/
Changeset:   47338f8e28ed
Branch:      stable
User:        xarthisius
Date:        2015-08-06 22:34:45+00:00
Summary:     Backporting PR #1680 https://bitbucket.org/yt_analysis/yt/pull-requests/1680
Affected #:  1 file

diff -r 9cc3e93a3e590e704a2e77c1e1f51eb22ba90816 -r 47338f8e28edccebd4068c9b8ddfb56a51cfc98a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -446,7 +446,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int fill_mask_selector(self, np.float64_t left_edge[3],
-                                np.float64_t right_edge[3], 
+                                np.float64_t right_edge[3],
                                 np.float64_t dds[3], int dim[3],
                                 np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask,
                                 np.ndarray[np.uint8_t, ndim=3] mask,
@@ -603,9 +603,12 @@
         return mask.view("bool")
 
     def __hash__(self):
+        # https://bitbucket.org/yt_analysis/yt/issues/1052/field-access-tests-fail-under-python3
+        # http://www.eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
         cdef np.int64_t hash_val = 0
         for v in self._hash_vals() + self._base_hash():
-            hash_val ^= hash(v)
+            # FNV hash cf. http://www.isthe.com/chongo/tech/comp/fnv/index.html
+            hash_val = (hash_val * 16777619) ^ hash(v)
         return hash_val
 
     def _hash_vals(self):
@@ -1107,7 +1110,7 @@
 
     def _hash_vals(self):
         return (("norm_vec[0]", self.norm_vec[0]),
-                ("norm_vec[1]", self.norm_vec[1]), 
+                ("norm_vec[1]", self.norm_vec[1]),
                 ("norm_vec[2]", self.norm_vec[2]),
                 ("d", self.d))
 


https://bitbucket.org/yt_analysis/yt/commits/b648fbae2c3e/
Changeset:   b648fbae2c3e
Branch:      stable
User:        rthompson
Date:        2015-08-06 20:08:44+00:00
Summary:     fixing a typo in the cosmology docs!
Affected #:  1 file

diff -r 47338f8e28edccebd4068c9b8ddfb56a51cfc98a -r b648fbae2c3e7a9d56cdba556859584eeceb6b38 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -388,7 +388,7 @@
         --------
 
         >>> co = Cosmology()
-        >>> print co.t_from_z(4.e17)
+        >>> print co.z_from_t(4.e17)
 
         """
 


https://bitbucket.org/yt_analysis/yt/commits/7419eac8152d/
Changeset:   7419eac8152d
Branch:      stable
User:        xarthisius
Date:        2015-08-06 21:36:05+00:00
Summary:     [enzo] sort species names
Affected #:  1 file

diff -r b648fbae2c3e7a9d56cdba556859584eeceb6b38 -r 7419eac8152d0d3fab58483ec8d543b3dbb38656 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -137,7 +137,7 @@
                        ("enzo", "%s_Density" % species))
 
     def setup_species_fields(self):
-        species_names = [fn.rsplit("_Density")[0] for ft, fn in 
+        species_names = [fn.rsplit("_Density")[0] for ft, fn in
                          self.field_list if fn.endswith("_Density")]
         species_names = [sp for sp in species_names
                          if sp in known_species_names]
@@ -149,6 +149,7 @@
         for sp in species_names:
             self.add_species_field(sp)
             self.species_names.append(known_species_names[sp])
+        self.species_names.sort()  # bb #1059
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.


https://bitbucket.org/yt_analysis/yt/commits/f14f1ad56032/
Changeset:   f14f1ad56032
Branch:      stable
User:        mornkr
Date:        2015-08-08 08:26:14+00:00
Summary:     Fixing a bug where "center" vectors are used instead of "normal" vectors in get_sph_phi(), get_sph_theta(), and get_cyl_theta() in fields/particle_fields.py.
Affected #:  1 file

diff -r 7419eac8152d0d3fab58483ec8d543b3dbb38656 -r f14f1ad560320d1bc02b10952f0266fce1076481 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -490,8 +490,8 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
+        theta = get_sph_theta(pos, normal)
+        phi = get_sph_phi(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         sphr = get_sph_r_component(vel, theta, phi, normal)
@@ -533,8 +533,8 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_sph_theta(pos, center)
-        phi = get_sph_phi(pos, center)
+        theta = get_sph_theta(pos, normal)
+        phi = get_sph_phi(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         spht = get_sph_theta_component(vel, theta, phi, normal)
@@ -568,7 +568,7 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        phi = get_sph_phi(pos, center)
+        phi = get_sph_phi(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         sphp = get_sph_phi_component(vel, phi, normal)
@@ -661,7 +661,7 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_cyl_theta(pos, center)
+        theta = get_cyl_theta(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylr = get_cyl_r_component(vel, theta, normal)
@@ -685,7 +685,7 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_cyl_theta(pos, center)
+        theta = get_cyl_theta(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
         cylt = get_cyl_theta_component(vel, theta, normal)


https://bitbucket.org/yt_analysis/yt/commits/5bf2b6913319/
Changeset:   5bf2b6913319
Branch:      stable
User:        ngoldbaum
Date:        2015-08-09 18:57:25+00:00
Summary:     Backporting PR #1685 https://bitbucket.org/yt_analysis/yt/pull-requests/1685
Affected #:  2 files

diff -r f14f1ad560320d1bc02b10952f0266fce1076481 -r 5bf2b69133199a244d8831c88409fbd572c96681 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -21,6 +21,7 @@
     assert_approx_equal, assert_array_almost_equal_nulp, \
     assert_allclose, assert_raises
 from nose.tools import assert_true
+import operator
 from sympy import Symbol
 from yt.testing import fake_random_ds
 
@@ -30,7 +31,7 @@
 # functions
 from yt.units.unit_object import get_conversion_factor
 # classes
-from yt.units.unit_object import Unit, UnitParseError
+from yt.units.unit_object import Unit, UnitParseError, InvalidUnitOperation
 # objects
 from yt.units.unit_lookup_table import \
     default_unit_symbol_lut, unit_prefixes, prefixable_units
@@ -441,3 +442,10 @@
     yield assert_true, u4.is_code_unit
     yield assert_true, not u5.is_code_unit
     yield assert_true, not u6.is_code_unit
+
+def test_temperature_offsets():
+    u1 = Unit('degC')
+    u2 = Unit('degF')
+
+    assert_raises(InvalidUnitOperation, operator.mul, u1, u2)
+    assert_raises(InvalidUnitOperation, operator.div, u1, u2)

diff -r f14f1ad560320d1bc02b10952f0266fce1076481 -r 5bf2b69133199a244d8831c88409fbd572c96681 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -290,9 +290,9 @@
 
         base_offset = 0.0
         if self.base_offset or u.base_offset:
-            if u.dimensions is dims.temperature and self.is_dimensionless:
+            if u.dimensions is temperature and self.is_dimensionless:
                 base_offset = u.base_offset
-            elif self.dimensions is dims.temperature and u.is_dimensionless:
+            elif self.dimensions is temperature and u.is_dimensionless:
                 base_offset = self.base_offset
             else:
                 raise InvalidUnitOperation("Quantities with units of Farhenheit "


https://bitbucket.org/yt_analysis/yt/commits/fd8636902a75/
Changeset:   fd8636902a75
Branch:      stable
User:        ngoldbaum
Date:        2015-08-11 15:06:56+00:00
Summary:     Backporting PR #1686 https://bitbucket.org/yt_analysis/yt/pull-requests/1686
Affected #:  2 files

diff -r 5bf2b69133199a244d8831c88409fbd572c96681 -r fd8636902a75a384578f5da177aeedbd9aea0f26 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -1009,7 +1009,18 @@
     V = YTQuantity(1.0, "statV")
     V_mks = V.to_equivalent("V", "SI")
     yield assert_array_almost_equal, V_mks.v, 1.0e8*V.v/speed_of_light_cm_per_s
-    
+
+def test_ytarray_coercion():
+    a = YTArray([1, 2, 3], 'cm')
+    q = YTQuantity(3, 'cm')
+    na = np.array([1, 2, 3])
+
+    assert_isinstance(a*q, YTArray)
+    assert_isinstance(q*na, YTArray)
+    assert_isinstance(q*3, YTQuantity)
+    assert_isinstance(q*np.float64(3), YTQuantity)
+    assert_isinstance(q*np.array(3), YTQuantity)
+
 def test_numpy_wrappers():
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([2, 3, 4, 5, 6], 'cm')

diff -r 5bf2b69133199a244d8831c88409fbd572c96681 -r fd8636902a75a384578f5da177aeedbd9aea0f26 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1085,7 +1085,7 @@
             if ret_class is YTQuantity:
                 # This happens if you do ndarray * YTQuantity. Explicitly
                 # casting to YTArray avoids creating a YTQuantity with size > 1
-                return YTArray(np.array(out_arr, unit))
+                return YTArray(np.array(out_arr), unit)
             return ret_class(np.array(out_arr, copy=False), unit)
 
 


https://bitbucket.org/yt_analysis/yt/commits/b2a648a42647/
Changeset:   b2a648a42647
Branch:      stable
User:        xarthisius
Date:        2015-08-11 17:27:31+00:00
Summary:     Backporting PR #1689 https://bitbucket.org/yt_analysis/yt/pull-requests/1689
Affected #:  1 file

diff -r fd8636902a75a384578f5da177aeedbd9aea0f26 -r b2a648a426478d75b38261f2f0add4526c1ec999 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,12 +1,12 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION
+include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js *.gif *.css
-recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE*
+recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
 include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
-prune doc/build/
+prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
 prune tests


https://bitbucket.org/yt_analysis/yt/commits/842d4cc04cfa/
Changeset:   842d4cc04cfa
Branch:      stable
User:        karraki
Date:        2015-08-10 19:57:43+00:00
Summary:     Add search bar to main doc page
Affected #:  1 file

diff -r b2a648a426478d75b38261f2f0add4526c1ec999 -r 842d4cc04cfa90460e6714e4235c237daccb5408 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,6 +11,18 @@
 :ref:`sample data for each format <getting-sample-data>` with 
 :ref:`instructions on how to load and examine each data type <examining-data>`.
 
+.. raw:: html
+
+   <form action="search.html" method="get" _lpchecked="1">
+     <div class="form-group">
+       <input type="text" name="q" class="form-control" placeholder="Search" style="width: 70%">
+     </div>
+     <input type="hidden" name="check_keywords" value="yes">
+     <input type="hidden" name="area" value="default">
+   </form>
+
+
+
 Table of Contents
 -----------------
 


https://bitbucket.org/yt_analysis/yt/commits/fd2dc17ee2b7/
Changeset:   fd2dc17ee2b7
Branch:      stable
User:        xarthisius
Date:        2015-08-11 17:37:06+00:00
Summary:     Backporting PR #1693 https://bitbucket.org/yt_analysis/yt/pull-requests/1693
Affected #:  1 file

diff -r 842d4cc04cfa90460e6714e4235c237daccb5408 -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 doc/source/_templates/layout.html
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,5 +1,10 @@
 {% extends '!layout.html' %}
 
+{%- block linktags %}
+    <link href="http://yt-project.org/doc/{{ pagename }}.html" rel="canonical" />
+    {{ super() }}
+{%- endblock %}
+
 {%- block extrahead %}
     {{ super() }}
     <script type="text/javascript">


https://bitbucket.org/yt_analysis/yt/commits/ab4345a3feea/
Changeset:   ab4345a3feea
Branch:      stable
User:        RicardaBeckmann
Date:        2015-08-12 13:53:52+00:00
Summary:     Backporting PR #1694 https://bitbucket.org/yt_analysis/yt/pull-requests/1694
Affected #:  7 files

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -42,7 +42,18 @@
 
     $ pip install mpi4py
 
-Once that has been installed, you're all done!  You just need to launch your
+If you have an Anaconda installation of yt and there is no MPI library on the
+system you are using try:
+
+.. code-block:: bash
+
+    $ conda install mpi4py
+
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
+other MPI libraries that are already installed. Therefore, it is preferable to
+use the ``pip`` installation method.
+
+Once mpi4py has been installed, you're all done!  You just need to launch your
 scripts with ``mpirun`` (or equivalent) and signal to yt that you want to
 run them in parallel by invoking the ``yt.enable_parallelism()`` function in
 your script.  In general, that's all it takes to get a speed benefit on a

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -34,7 +34,7 @@
     try:
         subprocess.check_call(cmd)
         result = True
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         print(("Stdout output:\n", e.output))
         result = False
     assert result

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,12 @@
   have the the necessary compilers installed (e.g. the ``build-essentials``
   package on debian and ubuntu).
 
+.. note::
+  See `Parallel Computation
+  <http://yt-project.org/docs/dev/analyzing/parallel_computation.html>`_
+  for a discussion on using yt in parallel.
+
+
 .. _branches-of-yt:
 
 Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
@@ -201,7 +207,8 @@
 
   bash Miniconda-3.3.0-Linux-x86_64.sh
 
-Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
+For both the Anaconda and Miniconda installations, make sure that the Anaconda
+``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
@@ -209,6 +216,34 @@
 
 which will install yt along with all of its dependencies.
 
+Obtaining Source Code
+^^^^^^^^^^^^^^^^^^^^^
+
+There are two ways to get the yt source code when using an Anaconda
+installation.
+
+Option 1:
+
+Clone the yt repository with:
+
+.. code-block:: bash
+
+  hg clone https://bitbucket.org/yt_analysis/yt
+
+Once inside the yt directory, update to the appropriate branch and
+run ``setup.py``. For example, the following commands will allow you
+to see the tip of the development branch.
+
+.. code-block:: bash
+
+  hg up yt
+  python setup.py develop
+
+This will make sure you are running a version of yt corresponding to the 
+most up-to-date source code.
+
+Option 2:
+
 Recipes to build conda packages for yt are available at
 https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
 clone the conda-recipes repository

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -42,9 +42,9 @@
 ~~~~~~~~~~~~~~~~~~~~~~
 
 The :code:`yt` command-line tool allows you to access some of yt's basic
-funcionality without opening a python interpreter.  The tools is a collection of
+functionality without opening a python interpreter.  The tools is a collection of
 subcommands.  These can quickly making plots of slices and projections through a
-dataset, updating yt's codebase, print basic statistics about a dataset, laucnh
+dataset, updating yt's codebase, print basic statistics about a dataset, launch
 an IPython notebook session, and more.  To get a quick list of what is
 available, just type:
 

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -34,7 +34,7 @@
 called on it.  ``dir()`` will return the available commands and objects that
 can be directly called, and ``dir(something)`` will return information about
 all the commands that ``something`` provides.  This probably sounds a bit
-opaque, but it will become clearer with time -- it's also probably heldsul to
+opaque, but it will become clearer with time -- it's also probably helpful to
 call ``help`` on any or all of the objects we create during this orientation.
 
 To start up Python, at your prompt simply type:

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,26 +491,18 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
-        #Please note that for all units given in the info file, the boxlen
-        #still needs to be folded in, as shown below!
+        # loading the units from the info file
+        boxlen=self.parameters['boxlen']
+        length_unit = self.parameters['unit_l']
+        density_unit = self.parameters['unit_d']
+        time_unit = self.parameters['unit_t']
 
-        boxlen=self.parameters['boxlen']
-        length_unit = self.parameters['unit_l'] * boxlen
-        density_unit = self.parameters['unit_d']/ boxlen**3
-
-        # In the mass unit, the factors of boxlen cancel back out, so this 
-        #is equivalent to unit_d*unit_l**3
-
-        mass_unit = density_unit * length_unit**3
-
-        # Cosmological runs are done in lookback conformal time. 
-        # To convert to proper time, the time unit is calculated from 
-        # the expansion factor. This is not yet  done here!
-
-        time_unit = self.parameters['unit_t']
+        # calculating derived units (except velocity and temperature, done below)
+        mass_unit = density_unit * length_unit**3     
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
+
         # TODO:
         # Generalize the temperature field to account for ionization
         # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
@@ -518,13 +510,15 @@
 
         self.density_unit = self.quan(density_unit, 'g/cm**3')
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+        self.time_unit = self.quan(time_unit, "s")
+        self.mass_unit = self.quan(mass_unit, "g")
+        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
+        self.temperature_unit = (self.velocity_unit**2*mp* 
+                                 mean_molecular_weight_factor/kb).in_units('K')
+
+        # Only the length unit gets scaled by a factor of boxlen
         self.length_unit = self.quan(length_unit * boxlen, "cm")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.time_unit = self.quan(time_unit, "s")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2 * mp *
-                                 mean_molecular_weight_factor / kb)
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r ab4345a3feea70133ef91a56211608b6da52f706 yt/utilities/quantities.py
--- a/yt/utilities/quantities.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Some old field names.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.units.yt_array import YTArray
-from yt.units.unit_object import Unit
-from yt.utilities.exceptions import YTUnitOperationError
-
-
-class Quantity(YTArray):
-    """
-    A physical quantity. Attaches units to a scalar.
-
-    """
-    def __new__(cls, input_array, input_units=None):
-        if isinstance(input_array, Quantity):
-            return input_array
-
-        # Input array is an already formed ndarray instance
-        # We first cast to be our class type
-        obj = np.asarray(input_array).view(cls)
-
-        # Restrict the array to a scalar.
-        if obj.size != 1:
-            raise ValueError("A Quantity can contain only one element. The "
-                "caller provided the array %s with %s elements."
-                % (obj, obj.size))
-
-        return YTArray.__new__(cls, input_array, input_units)


https://bitbucket.org/yt_analysis/yt/commits/09defdab021f/
Changeset:   09defdab021f
Branch:      stable
User:        ngoldbaum
Date:        2015-08-12 14:35:28+00:00
Summary:     Use the nice latex representation for units in field labels
Affected #:  1 file

diff -r fd2dc17ee2b7a516b3ff2b723ac9b11b7e6aa4d0 -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -215,7 +215,7 @@
             units = Unit(self.units)
         # Add unit label
         if not units.is_dimensionless:
-            data_label += r"\ \ (%s)" % (units)
+            data_label += r"\ \ (%s)" % (units.latex_representation())
 
         data_label += r"$"
         return data_label


https://bitbucket.org/yt_analysis/yt/commits/238030456aee/
Changeset:   238030456aee
Branch:      stable
User:        ngoldbaum
Date:        2015-08-12 16:42:06+00:00
Summary:     Backporting PR #1696 https://bitbucket.org/yt_analysis/yt/pull-requests/1696
Affected #:  9 files

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -12,7 +12,7 @@
 
 import numpy as np
 from yt.testing import \
-    assert_allclose, requires_file, requires_module
+    assert_allclose_units, requires_file, requires_module
 from yt.analysis_modules.absorption_spectrum.absorption_line import \
     voigt_old, voigt_scipy
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
@@ -129,4 +129,4 @@
 def test_voigt_profiles():
     a = 1.7e-4
     x = np.linspace(5.0, -3.6, 60)
-    yield assert_allclose, voigt_old(a, x), voigt_scipy(a, x), 1e-8
+    yield assert_allclose_units, voigt_old(a, x), voigt_scipy(a, x), 1e-8

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/analysis_modules/ppv_cube/tests/test_ppv.py
--- a/yt/analysis_modules/ppv_cube/tests/test_ppv.py
+++ b/yt/analysis_modules/ppv_cube/tests/test_ppv.py
@@ -46,7 +46,7 @@
     a = cube.data.mean(axis=(0,1)).v
     b = dv*np.exp(-((cube.vmid+v_shift)/v_th)**2)/(np.sqrt(np.pi)*v_th)
 
-    yield assert_allclose, a, b, 1.0e-2
+    yield assert_allclose_units, a, b, 1.0e-2
 
     E_0 = 6.8*u.keV
 
@@ -58,4 +58,4 @@
 
     c = dE*np.exp(-((cube.vmid-E_shift)/delta_E)**2)/(np.sqrt(np.pi)*delta_E)
 
-    yield assert_allclose, a, c, 1.0e-2
+    yield assert_allclose_units, a, c, 1.0e-2

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -78,10 +78,10 @@
     prj2 = ds1.proj("density",0)
 
     yield assert_equal, sp1.quantities.extrema("pressure"), sp2.quantities.extrema("pressure")
-    yield assert_allclose, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
+    yield assert_allclose_units, sp1.quantities.total_quantity("pressure"), sp2.quantities.total_quantity("pressure")
     for ax in "xyz":
         yield assert_equal, sp1.quantities.extrema("velocity_%s" % ax), sp2.quantities.extrema("velocity_%s" % ax)
-    yield assert_allclose, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
+    yield assert_allclose_units, sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity()
     yield assert_equal, prj1["density"], prj2["density"]
 
     ytcfg["yt","skip_dataset_cache"] = "False"

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -751,3 +751,59 @@
     finally:
         os.chdir(initial_dir)
         mylog.setLevel(orig_level)
+
+def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs):
+    """Raise an error if two objects are not equal up to desired tolerance
+
+    This is a wrapper for :func:`numpy.testing.assert_allclose` that also
+    verifies unit consistency
+
+    Parameters
+    ----------
+    actual : array-like
+        Array obtained (possibly with attached units)
+    desired : array-like
+        Array to compare with (possibly with attached units)
+    rtol : float, optional
+        Relative tolerance, defaults to 1e-7
+    atol : float or quantity, optional
+        Absolute tolerance. If units are attached, they must be consistent
+        with the units of ``actual`` and ``desired``. If no units are attached,
+        assumes the same units as ``desired``. Defaults to zero.
+
+    Also accepts additional keyword arguments accepted by
+    :func:`numpy.testing.assert_allclose`, see the documentation of that
+    function for details.
+    """
+    # Create a copy to ensure this function does not alter input arrays
+    act = YTArray(actual)
+    des = YTArray(desired)
+
+    try:
+        des = des.in_units(act.units)
+    except YTUnitOperationError:
+        raise AssertionError("Units of actual (%s) and desired (%s) do not have "
+                             "equivalent dimensions" % (act.units, des.units))
+
+    rt = YTArray(rtol)
+    if not rt.units.is_dimensionless:
+        raise AssertionError("Units of rtol (%s) are not "
+                             "dimensionless" % rt.units)
+
+    if not isinstance(atol, YTArray):
+        at = YTQuantity(atol, des.units)
+
+    try:
+        at = at.in_units(act.units)
+    except YTUnitOperationError:
+        raise AssertionError("Units of atol (%s) and actual (%s) do not have "
+                             "equivalent dimensions" % (at.units, act.units))
+
+    # units have been validated, so we strip units before calling numpy
+    # to avoid spurious errors
+    act = act.value
+    des = des.value
+    rt = rt.value
+    at = at.value
+
+    return assert_allclose(act, des, rt, at, **kwargs)

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -18,12 +18,13 @@
 import nose
 import numpy as np
 from numpy.testing import \
-    assert_approx_equal, assert_array_almost_equal_nulp, \
-    assert_allclose, assert_raises
+    assert_array_almost_equal_nulp, \
+    assert_raises
 from nose.tools import assert_true
 import operator
 from sympy import Symbol
-from yt.testing import fake_random_ds
+from yt.testing import \
+    fake_random_ds, assert_allclose_units
 
 # dimensions
 from yt.units.dimensions import \
@@ -155,10 +156,10 @@
     yield assert_true, u3.expr == s3
     yield assert_true, u4.expr == s4
 
-    yield assert_allclose, u1.base_value, pc_cgs, 1e-12
-    yield assert_allclose, u2.base_value, yr_cgs, 1e-12
-    yield assert_allclose, u3.base_value, pc_cgs * yr_cgs, 1e-12
-    yield assert_allclose, u4.base_value, pc_cgs**2 / yr_cgs, 1e-12
+    yield assert_allclose_units, u1.base_value, pc_cgs, 1e-12
+    yield assert_allclose_units, u2.base_value, yr_cgs, 1e-12
+    yield assert_allclose_units, u3.base_value, pc_cgs * yr_cgs, 1e-12
+    yield assert_allclose_units, u4.base_value, pc_cgs**2 / yr_cgs, 1e-12
 
     yield assert_true, u1.dimensions == length
     yield assert_true, u2.dimensions == time
@@ -180,7 +181,7 @@
     yield assert_true, u1.base_value == 1
     yield assert_true, u1.dimensions == power
 
-    yield assert_allclose, u2.base_value, km_cgs / Mpc_cgs, 1e-12
+    yield assert_allclose_units, u2.base_value, km_cgs / Mpc_cgs, 1e-12
     yield assert_true, u2.dimensions == rate
 
 def test_create_new_symbol():
@@ -324,7 +325,7 @@
     u3 = u1 * u2
 
     yield assert_true, u3.expr == msun_sym * pc_sym
-    yield assert_allclose, u3.base_value, msun_cgs * pc_cgs, 1e-12
+    yield assert_allclose_units, u3.base_value, msun_cgs * pc_cgs, 1e-12
     yield assert_true, u3.dimensions == mass * length
 
     # Pow and Mul operations
@@ -334,7 +335,7 @@
     u6 = u4 * u5
 
     yield assert_true, u6.expr == pc_sym**2 * msun_sym * s_sym
-    yield assert_allclose, u6.base_value, pc_cgs**2 * msun_cgs, 1e-12
+    yield assert_allclose_units, u6.base_value, pc_cgs**2 * msun_cgs, 1e-12
     yield assert_true, u6.dimensions == length**2 * mass * time
 
 
@@ -358,7 +359,7 @@
     u3 = u1 / u2
 
     yield assert_true, u3.expr == pc_sym / (km_sym * s_sym)
-    yield assert_allclose, u3.base_value, pc_cgs / km_cgs, 1e-12
+    yield assert_allclose_units, u3.base_value, pc_cgs / km_cgs, 1e-12
     yield assert_true, u3.dimensions == 1 / time
 
 
@@ -377,12 +378,12 @@
     u2 = u1**2
 
     yield assert_true, u2.dimensions == u1_dims**2
-    yield assert_allclose, u2.base_value, (pc_cgs**2 * mK_cgs**4)**2, 1e-12
+    yield assert_allclose_units, u2.base_value, (pc_cgs**2 * mK_cgs**4)**2, 1e-12
 
     u3 = u1**(-1.0/3)
 
     yield assert_true, u3.dimensions == nsimplify(u1_dims**(-1.0/3))
-    yield assert_allclose, u3.base_value, (pc_cgs**2 * mK_cgs**4)**(-1.0/3), 1e-12
+    yield assert_allclose_units, u3.base_value, (pc_cgs**2 * mK_cgs**4)**(-1.0/3), 1e-12
 
 
 def test_equality():
@@ -414,7 +415,7 @@
     yield assert_true, u2.expr == u3.expr
     yield assert_true, u2 == u3
 
-    yield assert_allclose, u1.base_value, Msun_cgs / Mpc_cgs**3, 1e-12
+    yield assert_allclose_units, u1.base_value, Msun_cgs / Mpc_cgs**3, 1e-12
     yield assert_true, u2.base_value == 1
     yield assert_true, u3.base_value == 1
 
@@ -424,7 +425,7 @@
     yield assert_true, u2.dimensions == mass_density
     yield assert_true, u3.dimensions == mass_density
 
-    yield assert_allclose, get_conversion_factor(u1, u3)[0], \
+    yield assert_allclose_units, get_conversion_factor(u1, u3)[0], \
         Msun_cgs / Mpc_cgs**3, 1e-12
 
 def test_is_code_unit():

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -28,8 +28,7 @@
     assert_array_equal, \
     assert_equal, assert_raises, \
     assert_array_almost_equal_nulp, \
-    assert_array_almost_equal, \
-    assert_allclose
+    assert_array_almost_equal
 from numpy import array
 from yt.units.yt_array import \
     YTArray, YTQuantity, \
@@ -38,7 +37,9 @@
     uunion1d
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
-from yt.testing import fake_random_ds, requires_module
+from yt.testing import \
+    fake_random_ds, requires_module, \
+    assert_allclose_units
 from yt.funcs import fix_length
 from yt.units.unit_symbols import \
     cm, m, g
@@ -883,14 +884,14 @@
 
     E = mp.to_equivalent("keV","mass_energy")
     yield assert_equal, E, mp*clight*clight
-    yield assert_allclose, mp, E.to_equivalent("g", "mass_energy")
+    yield assert_allclose_units, mp, E.to_equivalent("g", "mass_energy")
 
     # Thermal
 
     T = YTQuantity(1.0e8,"K")
     E = T.to_equivalent("W*hr","thermal")
     yield assert_equal, E, (kboltz*T).in_units("W*hr")
-    yield assert_allclose, T, E.to_equivalent("K", "thermal")
+    yield assert_allclose_units, T, E.to_equivalent("K", "thermal")
 
     # Spectral
 
@@ -899,11 +900,11 @@
     yield assert_equal, nu, clight/l
     E = hcgs*nu
     l2 = E.to_equivalent("angstrom", "spectral")
-    yield assert_allclose, l, l2
+    yield assert_allclose_units, l, l2
     nu2 = clight/l2.in_units("cm")
-    yield assert_allclose, nu, nu2
+    yield assert_allclose_units, nu, nu2
     E2 = nu2.to_equivalent("keV", "spectral")
-    yield assert_allclose, E2, E.in_units("keV")
+    yield assert_allclose_units, E2, E.in_units("keV")
 
     # Sound-speed
 
@@ -911,13 +912,13 @@
     gg = 5./3.
     c_s = T.to_equivalent("km/s","sound_speed")
     yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
-    yield assert_allclose, T, c_s.to_equivalent("K","sound_speed")
+    yield assert_allclose_units, T, c_s.to_equivalent("K","sound_speed")
 
     mu = 0.5
     gg = 4./3.
     c_s = T.to_equivalent("km/s","sound_speed", mu=mu, gamma=gg)
     yield assert_equal, c_s, np.sqrt(gg*kboltz*T/(mu*mh))
-    yield assert_allclose, T, c_s.to_equivalent("K","sound_speed",
+    yield assert_allclose_units, T, c_s.to_equivalent("K","sound_speed",
                                                     mu=mu, gamma=gg)
 
     # Lorentz
@@ -925,21 +926,21 @@
     v = 0.8*clight
     g = v.to_equivalent("dimensionless","lorentz")
     g2 = YTQuantity(1./np.sqrt(1.-0.8*0.8), "dimensionless")
-    yield assert_allclose, g, g2
+    yield assert_allclose_units, g, g2
     v2 = g2.to_equivalent("mile/hr", "lorentz")
-    yield assert_allclose, v2, v.in_units("mile/hr")
+    yield assert_allclose_units, v2, v.in_units("mile/hr")
 
     # Schwarzschild
 
     R = mass_sun_cgs.to_equivalent("kpc","schwarzschild")
     yield assert_equal, R.in_cgs(), 2*G*mass_sun_cgs/(clight*clight)
-    yield assert_allclose, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
+    yield assert_allclose_units, mass_sun_cgs, R.to_equivalent("g", "schwarzschild")
 
     # Compton
 
     l = me.to_equivalent("angstrom","compton")
     yield assert_equal, l, hcgs/(me*clight)
-    yield assert_allclose, me, l.to_equivalent("g", "compton")
+    yield assert_allclose_units, me, l.to_equivalent("g", "compton")
 
     # Number density
 
@@ -947,18 +948,18 @@
 
     n = rho.to_equivalent("cm**-3","number_density")
     yield assert_equal, n, rho/(mh*0.6)
-    yield assert_allclose, rho, n.to_equivalent("g/cm**3","number_density")
+    yield assert_allclose_units, rho, n.to_equivalent("g/cm**3","number_density")
 
     n = rho.to_equivalent("cm**-3","number_density", mu=0.75)
     yield assert_equal, n, rho/(mh*0.75)
-    yield assert_allclose, rho, n.to_equivalent("g/cm**3","number_density", mu=0.75)
+    yield assert_allclose_units, rho, n.to_equivalent("g/cm**3","number_density", mu=0.75)
 
     # Effective temperature
 
     T = YTQuantity(1.0e4, "K")
     F = T.to_equivalent("erg/s/cm**2","effective_temperature")
     yield assert_equal, F, stefan_boltzmann_constant_cgs*T**4
-    yield assert_allclose, T, F.to_equivalent("K", "effective_temperature")
+    yield assert_allclose_units, T, F.to_equivalent("K", "effective_temperature")
 
 def test_electromagnetic():
     from yt.units.dimensions import charge_mks, pressure, current_cgs, \

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -403,8 +403,8 @@
             assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
-            assert_allclose(new_result, old_result, 10.**(-self.decimals),
-                             err_msg=err_msg, verbose=True)
+            assert_allclose_units(new_result, old_result, 10.**(-self.decimals),
+                                  err_msg=err_msg, verbose=True)
 
 class AllFieldValuesTest(AnswerTestingTest):
     _type_name = "AllFieldValues"
@@ -478,8 +478,8 @@
             if self.decimals is None:
                 assert_equal(nres, ores, err_msg=err_msg)
             else:
-                assert_allclose(nres, ores, 10.**-(self.decimals),
-                                err_msg=err_msg)
+                assert_allclose_units(nres, ores, 10.**-(self.decimals),
+                                      err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -727,7 +727,8 @@
             if self.decimals is None:
                 assert_equal(new_result[k], old_result[k])
             else:
-                assert_allclose(new_result[k], old_result[k], 10**(-self.decimals))
+                assert_allclose_units(new_result[k], old_result[k],
+                                      10**(-self.decimals))
 
 class GenericImageTest(AnswerTestingTest):
     _type_name = "GenericImage"

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -195,7 +195,7 @@
           "the %s base system of units." % (self.unit, self.units_base)
         return err
 
-class YTEquivalentDimsError(Exception):
+class YTEquivalentDimsError(YTUnitOperationError):
     def __init__(self, old_units, new_units, base):
         self.old_units = old_units
         self.new_units = new_units

diff -r 09defdab021f75fe6012c0445fb3a17f85cfe9ce -r 238030456aee76c57ea3b51af90d843507cfd343 yt/visualization/tests/test_export_frb.py
--- a/yt/visualization/tests/test_export_frb.py
+++ b/yt/visualization/tests/test_export_frb.py
@@ -16,7 +16,7 @@
 import numpy as np
 from yt.testing import \
     fake_random_ds, assert_equal, \
-    assert_allclose
+    assert_allclose_units
 
 def setup():
     """Test specific setup."""
@@ -35,5 +35,6 @@
     yield assert_equal, frb_ds.domain_right_edge.v, np.array([0.75,0.75,1.0])
     yield assert_equal, frb_ds.domain_width.v, np.array([0.5,0.5,1.0])
     yield assert_equal, frb_ds.domain_dimensions, np.array([64,64,1], dtype="int64")
-    yield assert_allclose, frb["density"].sum(), dd_frb.quantities.total_quantity("density")
+    yield assert_allclose_units, frb["density"].sum(), \
+        dd_frb.quantities.total_quantity("density")
     yield assert_equal, frb_ds.index.num_grids, 8


https://bitbucket.org/yt_analysis/yt/commits/7a1bb5851343/
Changeset:   7a1bb5851343
Branch:      stable
User:        ngoldbaum
Date:        2015-08-12 20:20:40+00:00
Summary:     Cast enzo grid left edges to int arrays to avoid errors in newer numpys
Affected #:  1 file

diff -r 238030456aee76c57ea3b51af90d843507cfd343 -r 7a1bb5851343ba0636ff54e18dd6c119799c5ccd yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -338,7 +338,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions.flat[:] = ei
-        self.grid_dimensions -= np.array(si, self.float_type)
+        self.grid_dimensions -= np.array(si, dtype='i4')
         self.grid_dimensions += 1
         self.grid_left_edge.flat[:] = LE
         self.grid_right_edge.flat[:] = RE


https://bitbucket.org/yt_analysis/yt/commits/ec665f371750/
Changeset:   ec665f371750
Branch:      stable
User:        ngoldbaum
Date:        2015-08-13 20:10:19+00:00
Summary:     Backporting PR #1698 https://bitbucket.org/yt_analysis/yt/pull-requests/1698
Affected #:  2 files

diff -r 7a1bb5851343ba0636ff54e18dd6c119799c5ccd -r ec665f371750b42756205f74f424fe2c120084da yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -71,9 +71,9 @@
             if not args[0]._data_valid:
                 args[0]._recreate_frb()
         if not args[0]._plot_valid:
+            # it is the responsibility of _setup_plots to call
+            # args[0].run_callbacks()
             args[0]._setup_plots()
-            if hasattr(args[0], 'run_callbacks'):
-                args[0].run_callbacks()
         rv = f(*args, **kwargs)
         return rv
     return newfunc
@@ -81,7 +81,6 @@
 def apply_callback(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
-        #rv = f(*args[1:], **kwargs)
         args[0]._callbacks.append((f.__name__, (args, kwargs)))
         return args[0]
     return newfunc

diff -r 7a1bb5851343ba0636ff54e18dd6c119799c5ccd -r ec665f371750b42756205f74f424fe2c120084da yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -731,7 +731,11 @@
         return xc, yc
 
     def _setup_plots(self):
-        if self._plot_valid: return
+        if self._plot_valid:
+            return
+        if not self._data_valid:
+            self._recreate_frb()
+            self._data_valid = True
         self._colorbar_valid = True
         for f in list(set(self.data_source._determine_fields(self.fields))):
             axis_index = self.data_source.axis
@@ -973,7 +977,7 @@
                 self.plots[f]._toggle_colorbar(draw_colorbar)
 
         self._set_font_properties()
-
+        self.run_callbacks()
         self._plot_valid = True
 
     def setup_callbacks(self):


https://bitbucket.org/yt_analysis/yt/commits/470ff85b370b/
Changeset:   470ff85b370b
Branch:      stable
User:        ngoldbaum
Date:        2015-08-13 20:28:07+00:00
Summary:     Backporting PR #1699 https://bitbucket.org/yt_analysis/yt/pull-requests/1699
Affected #:  2 files

diff -r ec665f371750b42756205f74f424fe2c120084da -r 470ff85b370bd444b6d37457c92d270e7230729c yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -215,7 +215,11 @@
             return new_field
         return func
 
-    grad_units = "(%s) / cm" % field_units
+    if field_units != "":
+        grad_units = "(%s) / cm" % field_units
+    else:
+        grad_units = "1 / cm"
+
     for axi, ax in enumerate('xyz'):
         f = grad_func(axi, ax)
         registry.add_field((ftype, "%s_gradient_%s" % (fname, ax)),

diff -r ec665f371750b42756205f74f424fe2c120084da -r 470ff85b370bd444b6d37457c92d270e7230729c yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -196,15 +196,23 @@
 
 def test_add_gradient_fields():
     gfields = base_ds.add_gradient_fields(("gas","density"))
+    gfields += base_ds.add_gradient_fields(("index", "ones"))
     field_list = [('gas', 'density_gradient_x'),
                   ('gas', 'density_gradient_y'),
                   ('gas', 'density_gradient_z'),
-                  ('gas', 'density_gradient_magnitude')]
+                  ('gas', 'density_gradient_magnitude'),
+                  ('index', 'ones_gradient_x'),
+                  ('index', 'ones_gradient_y'),
+                  ('index', 'ones_gradient_z'),
+                  ('index', 'ones_gradient_magnitude')]
     assert_equal(gfields, field_list)
     ad = base_ds.all_data()
     for field in field_list:
         ret = ad[field]
-        assert str(ret.units) == "g/cm**4"
+        if field[0] == 'gas':
+            assert str(ret.units) == "g/cm**4"
+        else:
+            assert str(ret.units) == "1/cm"
 
 def get_data(ds, field_name):
     # Need to create a new data object otherwise the errors we are


https://bitbucket.org/yt_analysis/yt/commits/bd5e1bcce116/
Changeset:   bd5e1bcce116
Branch:      stable
User:        ngoldbaum
Date:        2015-08-18 22:32:05+00:00
Summary:     Correcting two incorrect paths in the loading data docs
Affected #:  1 file

diff -r 470ff85b370bd444b6d37457c92d270e7230729c -r bd5e1bcce116defa5c151251852422e7a5a2670e doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -771,7 +771,7 @@
 
 .. code-block:: python
 
-   from yt.frontends.sph.definitions import gadget_field_specs
+   from yt.frontends.gadget.definitions import gadget_field_specs
    gadget_field_specs["my_field_def"] = my_field_def
 
 Please also feel free to issue a pull request with any new field
@@ -871,7 +871,7 @@
 ----------------
 
 See :ref:`loading-numpy-array` and
-:func:`~yt.frontends.sph.data_structures.load_amr_grids` for more detail.
+:func:`~yt.frontends.stream.data_structures.load_amr_grids` for more detail.
 
 It is possible to create native yt dataset from Python's dictionary
 that describes set of rectangular patches of data of possibly varying


https://bitbucket.org/yt_analysis/yt/commits/e7f167df6e23/
Changeset:   e7f167df6e23
Branch:      stable
User:        jisuoqing
Date:        2015-08-20 00:22:11+00:00
Summary:     Fix ds.h.find_max like functions for non-cartesian geometry
Affected #:  1 file

diff -r bd5e1bcce116defa5c151251852422e7a5a2670e -r e7f167df6e233bdf9dd0d7c791d7e733c3c3f106 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -542,16 +542,19 @@
         return rv
 
     def process_chunk(self, data, field):
+        axis_names = data.ds.coordinates.axis_name
         field = data._determine_fields(field)[0]
         ma = array_like_field(data, -HUGE, field)
-        mx = array_like_field(data, -1, "x")
-        my = array_like_field(data, -1, "y")
-        mz = array_like_field(data, -1, "z")
+        mx = array_like_field(data, -1, axis_names[0])
+        my = array_like_field(data, -1, axis_names[1])
+        mz = array_like_field(data, -1, axis_names[2])
         maxi = -1
         if data[field].size > 0:
             maxi = np.argmax(data[field])
             ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in 'xyz']
+            mx, my, mz = [data[ax][maxi] for ax in (axis_names[0],
+                                                    axis_names[1],
+                                                    axis_names[2])]
         return (ma, maxi, mx, my, mz)
 
     def reduce_intermediate(self, values):


https://bitbucket.org/yt_analysis/yt/commits/4de01dedab27/
Changeset:   4de01dedab27
Branch:      stable
User:        ngoldbaum
Date:        2015-08-20 18:19:37+00:00
Summary:     Clarify formatting of a comment in plot_container.py
Affected #:  1 file

diff -r e7f167df6e233bdf9dd0d7c791d7e733c3c3f106 -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -71,8 +71,8 @@
             if not args[0]._data_valid:
                 args[0]._recreate_frb()
         if not args[0]._plot_valid:
-            # it is the responsibility of _setup_plots to call
-            # args[0].run_callbacks()
+            # it is the responsibility of _setup_plots to
+            # call args[0].run_callbacks()
             args[0]._setup_plots()
         rv = f(*args, **kwargs)
         return rv


https://bitbucket.org/yt_analysis/yt/commits/841164c02239/
Changeset:   841164c02239
Branch:      stable
User:        ngoldbaum
Date:        2015-08-21 16:58:00+00:00
Summary:     Merge an extra head
Affected #:  7 files

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -42,7 +42,18 @@
 
     $ pip install mpi4py
 
-Once that has been installed, you're all done!  You just need to launch your
+If you have an Anaconda installation of yt and there is no MPI library on the
+system you are using try:
+
+.. code-block:: bash
+
+    $ conda install mpi4py
+
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
+other MPI libraries that are already installed. Therefore, it is preferable to
+use the ``pip`` installation method.
+
+Once mpi4py has been installed, you're all done!  You just need to launch your
 scripts with ``mpirun`` (or equivalent) and signal to yt that you want to
 run them in parallel by invoking the ``yt.enable_parallelism()`` function in
 your script.  In general, that's all it takes to get a speed benefit on a

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -34,7 +34,7 @@
     try:
         subprocess.check_call(cmd)
         result = True
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         print(("Stdout output:\n", e.output))
         result = False
     assert result

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,12 @@
   have the the necessary compilers installed (e.g. the ``build-essentials``
   package on debian and ubuntu).
 
+.. note::
+  See `Parallel Computation
+  <http://yt-project.org/docs/dev/analyzing/parallel_computation.html>`_
+  for a discussion on using yt in parallel.
+
+
 .. _branches-of-yt:
 
 Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
@@ -201,7 +207,8 @@
 
   bash Miniconda-3.3.0-Linux-x86_64.sh
 
-Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
+For both the Anaconda and Miniconda installations, make sure that the Anaconda
+``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
@@ -209,6 +216,34 @@
 
 which will install yt along with all of its dependencies.
 
+Obtaining Source Code
+^^^^^^^^^^^^^^^^^^^^^
+
+There are two ways to get the yt source code when using an Anaconda
+installation.
+
+Option 1:
+
+Clone the yt repository with:
+
+.. code-block:: bash
+
+  hg clone https://bitbucket.org/yt_analysis/yt
+
+Once inside the yt directory, update to the appropriate branch and
+run ``setup.py``. For example, the following commands will allow you
+to see the tip of the development branch.
+
+.. code-block:: bash
+
+  hg up yt
+  python setup.py develop
+
+This will make sure you are running a version of yt corresponding to the 
+most up-to-date source code.
+
+Option 2:
+
 Recipes to build conda packages for yt are available at
 https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
 clone the conda-recipes repository

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -42,9 +42,9 @@
 ~~~~~~~~~~~~~~~~~~~~~~
 
 The :code:`yt` command-line tool allows you to access some of yt's basic
-funcionality without opening a python interpreter.  The tools is a collection of
+functionality without opening a python interpreter.  The tool is a collection of
 subcommands.  These can quickly make plots of slices and projections through a
-dataset, updating yt's codebase, print basic statistics about a dataset, laucnh
+dataset, updating yt's codebase, print basic statistics about a dataset, launch
 an IPython notebook session, and more.  To get a quick list of what is
 available, just type:
 

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -34,7 +34,7 @@
 called on it.  ``dir()`` will return the available commands and objects that
 can be directly called, and ``dir(something)`` will return information about
 all the commands that ``something`` provides.  This probably sounds a bit
-opaque, but it will become clearer with time -- it's also probably heldsul to
+opaque, but it will become clearer with time -- it's also probably helpful to
 call ``help`` on any or all of the objects we create during this orientation.
 
 To start up Python, at your prompt simply type:

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -491,26 +491,18 @@
         """
         Generates the conversion to various physical _units based on the parameter file
         """
-        #Please note that for all units given in the info file, the boxlen
-        #still needs to be folded in, as shown below!
+        # loading the units from the info file
+        boxlen=self.parameters['boxlen']
+        length_unit = self.parameters['unit_l']
+        density_unit = self.parameters['unit_d']
+        time_unit = self.parameters['unit_t']
 
-        boxlen=self.parameters['boxlen']
-        length_unit = self.parameters['unit_l'] * boxlen
-        density_unit = self.parameters['unit_d']/ boxlen**3
-
-        # In the mass unit, the factors of boxlen cancel back out, so this 
-        #is equivalent to unit_d*unit_l**3
-
-        mass_unit = density_unit * length_unit**3
-
-        # Cosmological runs are done in lookback conformal time. 
-        # To convert to proper time, the time unit is calculated from 
-        # the expansion factor. This is not yet  done here!
-
-        time_unit = self.parameters['unit_t']
+        # calculating derived units (except velocity and temperature, done below)
+        mass_unit = density_unit * length_unit**3     
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
                                 (time_unit**2 * length_unit))
         pressure_unit = density_unit * (length_unit / time_unit)**2
+
         # TODO:
         # Generalize the temperature field to account for ionization
         # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
@@ -518,13 +510,15 @@
 
         self.density_unit = self.quan(density_unit, 'g/cm**3')
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
+        self.time_unit = self.quan(time_unit, "s")
+        self.mass_unit = self.quan(mass_unit, "g")
+        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
+        self.temperature_unit = (self.velocity_unit**2*mp* 
+                                 mean_molecular_weight_factor/kb).in_units('K')
+
+        # Only the length unit gets scaled by a factor of boxlen
         self.length_unit = self.quan(length_unit * boxlen, "cm")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.time_unit = self.quan(time_unit, "s")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2 * mp *
-                                 mean_molecular_weight_factor / kb)
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r 4de01dedab27175f736dd6bdb7e00bd03b6c5669 -r 841164c022392ca2c2eacde930c9da6df0383520 yt/utilities/quantities.py
--- a/yt/utilities/quantities.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Some old field names.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.units.yt_array import YTArray
-from yt.units.unit_object import Unit
-from yt.utilities.exceptions import YTUnitOperationError
-
-
-class Quantity(YTArray):
-    """
-    A physical quantity. Attaches units to a scalar.
-
-    """
-    def __new__(cls, input_array, input_units=None):
-        if isinstance(input_array, Quantity):
-            return input_array
-
-        # Input array is an already formed ndarray instance
-        # We first cast to be our class type
-        obj = np.asarray(input_array).view(cls)
-
-        # Restrict the array to a scalar.
-        if obj.size != 1:
-            raise ValueError("A Quantity can contain only one element. The "
-                "caller provided the array %s with %s elements."
-                % (obj, obj.size))
-
-        return YTArray.__new__(cls, input_array, input_units)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the yt-svn mailing list