[yt-svn] commit/yt: 5 new changesets

commits-noreply at bitbucket.org
Thu Dec 8 15:55:54 PST 2016


5 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/be112664c55d/
Changeset:   be112664c55d
Branch:      yt
User:        kausik93
Date:        2016-10-28 17:51:09+00:00
Summary:     set_background_color added to ImagePlotContainer class
Affected #:  3 files

diff -r 334cceb2c6e197acf1a8c7f273ee9dd0bd592f58 -r be112664c55d313f05ae3233436727044e8fdd78 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -643,6 +643,19 @@
    slc.set_log('x-velocity', True, linthresh=1.e1)
    slc.save()
 
+The :meth:`~yt.visualization.plot_container.ImagePlotContainer.set_background_color`
+function accepts a field name and, optionally, a color. If a color is given, the
+plot's background is set to that color; if not, it is set to the bottom value of
+the color map.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'x-velocity', width=(30,'kpc'))
+   slc.set_background_color('density')
+   slc.save()
+
 Lastly, the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_zlim`
 function makes it possible to set a custom colormap range.
 

diff -r 334cceb2c6e197acf1a8c7f273ee9dd0bd592f58 -r be112664c55d313f05ae3233436727044e8fdd78 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 from yt.extern.six.moves import builtins
 from yt.extern.six import iteritems
+from yt.extern.six import string_types
 
 import base64
 import errno
@@ -34,6 +35,8 @@
     ensure_list
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
+from yt.visualization.color_maps import \
+    yt_colormaps
 
 def invalidate_data(f):
     @wraps(f)
@@ -291,6 +294,29 @@
         return self
 
     @invalidate_plot
+    def set_background_color(self, field, color=None):
+        """set the background color to match provided color
+
+        Parameters
+        ----------
+        field : string
+            the field to set the colormap
+            if field == 'all', applies to all plots.
+        color : string or RGBA tuple (optional)
+            if set, set the background color to this color
+            if unset, background color is set to the bottom value of 
+            the color map
+
+        """
+        if color is None:
+            cmap = self._colormaps[field]
+            if isinstance(cmap, string_types): 
+                cmap = yt_colormaps[cmap]
+            color = cmap(0)
+        self.plots[field].axes.set_axis_bgcolor(color)
+        return self
+
+    @invalidate_plot
     def set_zlim(self, field, zmin, zmax, dynamic_range=None):
         """set the scale of the colormap
 

diff -r 334cceb2c6e197acf1a8c7f273ee9dd0bd592f58 -r be112664c55d313f05ae3233436727044e8fdd78 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -495,3 +495,10 @@
     slc = SlicePlot(ds, 2, 'density')
     slc.set_buff_size(1200)
     assert_equal(slc.frb['density'].shape, (1200, 1200))
+
+def test_set_background_color():
+    ds = fake_random_ds(32)
+    plot = SlicePlot(ds, 2, 'density')
+    plot.set_background_color('density', 'red')
+    ax = plot.plots['density'].axes
+    assert_equal(ax.get_axis_bgcolor(), 'red')
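
For reference, the fallback path of the new method simply asks the colormap for
its lowest entry and hands that color to matplotlib. A minimal standalone sketch
of that behavior (assuming only matplotlib; note that set_axis_bgcolor, the call
used in this commit, was later deprecated in favor of set_facecolor):

    import matplotlib.pyplot as plt
    from matplotlib import cm

    fig, ax = plt.subplots()
    cmap = cm.get_cmap('viridis')
    color = cmap(0)  # RGBA tuple at the bottom end of the colormap
    # mirror what set_background_color does when color is None
    if hasattr(ax, 'set_facecolor'):
        ax.set_facecolor(color)
    else:
        ax.set_axis_bgcolor(color)
    fig.savefig('background_demo.png')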


https://bitbucket.org/yt_analysis/yt/commits/5b8b97be86e2/
Changeset:   5b8b97be86e2
Branch:      yt
User:        kausik93
Date:        2016-11-01 20:20:45+00:00
Summary:     import heading changed
Affected #:  1 file

diff -r be112664c55d313f05ae3233436727044e8fdd78 -r 5b8b97be86e2b30fe9b5ef34eb37478b2be73852 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -13,8 +13,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 from yt.extern.six.moves import builtins
-from yt.extern.six import iteritems
-from yt.extern.six import string_types
+from yt.extern.six import  \
+    iteritems, \
+    string_types
 
 import base64
 import errno


https://bitbucket.org/yt_analysis/yt/commits/9719806bb427/
Changeset:   9719806bb427
Branch:      yt
User:        kausik93
Date:        2016-12-08 18:30:41+00:00
Summary:     used determine_fields to resolve field to field tuple in set_background_color
Affected #:  2 files

diff -r 5b8b97be86e2b30fe9b5ef34eb37478b2be73852 -r 9719806bb4279736c4e39721324f5ca58b5f898c yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -309,12 +309,13 @@
             the color map
 
         """
+        actual_field = self.data_source._determine_fields(field)[0]
         if color is None:
-            cmap = self._colormaps[field]
+            cmap = self._colormaps[actual_field]
             if isinstance(cmap, string_types): 
                 cmap = yt_colormaps[cmap]
             color = cmap(0)
-        self.plots[field].axes.set_axis_bgcolor(color)
+        self.plots[actual_field].axes.set_axis_bgcolor(color)
         return self
 
     @invalidate_plot

diff -r 5b8b97be86e2b30fe9b5ef34eb37478b2be73852 -r 9719806bb4279736c4e39721324f5ca58b5f898c yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -499,6 +499,7 @@
 def test_set_background_color():
     ds = fake_random_ds(32)
     plot = SlicePlot(ds, 2, 'density')
-    plot.set_background_color('density', 'red')
-    ax = plot.plots['density'].axes
-    assert_equal(ax.get_axis_bgcolor(), 'red')
+    for field in ['density', ('gas', 'density')]:
+        plot.set_background_color(field, 'red')
+        ax = plot.plots[field].axes
+        assert_equal(ax.get_axis_bgcolor(), 'red')
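
The point of this changeset: data objects normalize bare field names into full
(field type, field name) tuples, so 'density' and ('gas', 'density') address the
same plot. An illustrative sketch using yt's fake_random_ds test helper and the
private _determine_fields method the commit relies on:

    from yt.testing import fake_random_ds

    ds = fake_random_ds(32)
    ad = ds.all_data()
    # both spellings resolve to the same field tuple
    print(ad._determine_fields('density'))           # [('gas', 'density')]
    print(ad._determine_fields(('gas', 'density')))  # [('gas', 'density')]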


https://bitbucket.org/yt_analysis/yt/commits/e902d814f979/
Changeset:   e902d814f979
Branch:      yt
User:        kausik93
Date:        2016-12-08 18:38:11+00:00
Summary:     merging
Affected #:  94 files

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/helper_scripts/generate_doap.py
--- a/doc/helper_scripts/generate_doap.py
+++ b/doc/helper_scripts/generate_doap.py
@@ -75,47 +75,47 @@
 lastname_sort = lambda a: a.rsplit(None, 1)[-1]
 
 def get_release_tags():
-    c = hglib.open(yt_path)
-    releases = {}
-    for name, rev, node, islocal in c.tags():
-        if name.startswith("yt-"):
-            releases[name] = node
-    rr = []
-    for name, node in sorted(releases.items()):
-        date = c.log(node)[-1][-1]
-        rr.append((date, name[3:]))
+    with hglib.open(yt_path) as c:
+        releases = {}
+        for name, rev, node, islocal in c.tags():
+            if name.startswith("yt-"):
+                releases[name] = node
+        rr = []
+        for name, node in sorted(releases.items()):
+            date = c.log(node)[-1][-1]
+            rr.append((date, name[3:]))
     rr.sort()
     return [(_[1], _[0].strftime("%Y-%M-%d")) for _ in rr]
 
 def developer_names():
     cmd = hglib.util.cmdbuilder("churn", "-c")
-    c = hglib.open(yt_path)
-    emails = set([])
-    for dev in c.rawcommand(cmd).split("\n"):
-        if len(dev.strip()) == 0: continue
-        emails.add(dev.rsplit(None, 2)[0])
-    print("Generating real names for {0} emails".format(len(emails)))
-    names = set([])
-    for email in sorted(emails):
-        if email in name_ignores:
-            continue
-        if email in name_mappings:
-            names.add(name_mappings[email])
-            continue
-        cset = c.log(revrange="last(author('%s'))" % email)
-        if len(cset) == 0:
-            print("Error finding {0}".format(email))
-            realname = email
-        else:
-            realname, addr = parseaddr(cset[0][4])
-        if realname == '':
-            realname = email
-        if realname in name_mappings:
-            names.add(name_mappings[realname])
-            continue
-        realname = realname.decode('utf-8')
-        realname = realname.encode('ascii', 'xmlcharrefreplace')
-        names.add(realname)
+    with hglib.open(yt_path) as c:
+        emails = set([])
+        for dev in c.rawcommand(cmd).split("\n"):
+            if len(dev.strip()) == 0: continue
+            emails.add(dev.rsplit(None, 2)[0])
+        print("Generating real names for {0} emails".format(len(emails)))
+        names = set([])
+        for email in sorted(emails):
+            if email in name_ignores:
+                continue
+            if email in name_mappings:
+                names.add(name_mappings[email])
+                continue
+            cset = c.log(revrange="last(author('%s'))" % email)
+            if len(cset) == 0:
+                print("Error finding {0}".format(email))
+                realname = email
+            else:
+                realname, addr = parseaddr(cset[0][4])
+            if realname == '':
+                realname = email
+            if realname in name_mappings:
+                names.add(name_mappings[realname])
+                continue
+            realname = realname.decode('utf-8')
+            realname = realname.encode('ascii', 'xmlcharrefreplace')
+            names.add(realname)
     #with open("devs.txt", "w") as f:
     #    for name in sorted(names, key=lastname_sort):
     #        f.write("%s\n" % name)

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -64,6 +64,24 @@
 You can use this to easily explore available fields, particularly through
 tab-completion in Jupyter/IPython.
 
+It's also possible to iterate over the list of fields associated with each
+field type. For example, to print all of the ``'gas'`` fields, one might do:
+
+.. code-block:: python
+
+   for field in ds.fields.gas:
+       print(field)
+
+You can also check if a given field is associated with a field type using
+standard Python syntax:
+
+.. code-block:: python
+
+   # these examples evaluate to True for a dataset that has ('gas', 'density')
+   'density' in ds.fields.gas
+   ('gas', 'density') in ds.fields.gas
+   ds.fields.gas.density in ds.fields.gas
+
 For a more programmatic method of accessing fields, you can utilize the
 ``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
 information about fields.  The full list of fields available for a dataset can

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -64,6 +64,18 @@
        print("(%f,  %f,  %f)    %f" %
              (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i]))
 
+Data objects can also be cloned; for instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
+   sp_copy = sp.clone()
+
+This can be useful when manually chunking data or exploring different field
+parameters.
+
 .. _quickly-selecting-data:
 
 Slicing Syntax for Selecting Data

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1698,7 +1698,9 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cosmological parameters can be specified to Tipsy to enable computation of
-default units.  The parameters recognized are of this form:
+default units.  For example, do the following to load a Tipsy dataset whose
+path is stored in the variable ``my_filename`` with specified cosmology
+parameters:
 
 .. code-block:: python
 
@@ -1707,14 +1709,21 @@
                            'omega_matter': 0.272,
                            'hubble_constant': 0.702}
 
-If you wish to set the default units directly, you can do so by using the
+   ds = yt.load(my_filename,
+                cosmology_parameters=cosmology_parameters)
+
+If you wish to set the unit system directly, you can do so by using the
 ``unit_base`` keyword in the load statement.
 
  .. code-block:: python
 
     import yt
+
    ds = yt.load(filename, unit_base={'length': (1.0, 'Mpc')})
 
+See the documentation for the
+:class:`~yt.frontends.tipsy.data_structures.TipsyDataset` class for more
+information.
 
 Loading Cosmological Simulations
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/source/visualizing/FITSImageData.ipynb
--- a/doc/source/visualizing/FITSImageData.ipynb
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -15,8 +15,7 @@
    },
    "outputs": [],
    "source": [
-    "import yt\n",
-    "from yt.utilities.fits_image import FITSImageData, FITSProjection"
+    "import yt"
    ]
   },
   {
@@ -27,9 +26,9 @@
    },
    "outputs": [],
    "source": [
-    "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
-    "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
-    "                                                               \"time_unit\":(1.0,\"Myr\")})"
+    "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", units_override={\"length_unit\":(1.0,\"Mpc\"),\n",
+    "                                                                   \"mass_unit\":(1.0e14,\"Msun\"),\n",
+    "                                                                   \"time_unit\":(1.0,\"Myr\")})"
    ]
   },
   {
@@ -73,7 +72,7 @@
    },
    "outputs": [],
    "source": [
-    "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+    "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
    ]
   },
   {
@@ -236,7 +235,7 @@
    "source": [
     "slc3 = ds.slice(0, 0.0)\n",
     "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
-    "fid_frb = FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+    "fid_frb = yt.FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
    ]
   },
   {
@@ -255,7 +254,7 @@
    "outputs": [],
    "source": [
     "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
-    "fid_cvg = FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+    "fid_cvg = yt.FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
    ]
   },
   {
@@ -280,7 +279,7 @@
    },
    "outputs": [],
    "source": [
-    "fid = FITSImageData.from_file(\"sloshing.fits\")\n",
+    "fid = yt.FITSImageData.from_file(\"sloshing.fits\")\n",
     "fid.info()"
    ]
   },
@@ -299,8 +298,8 @@
    },
    "outputs": [],
    "source": [
-    "prj_fits2 = FITSProjection(ds, \"z\", [\"density\"])\n",
-    "prj_fits3 = FITSImageData.from_images([prj_fits, prj_fits2])\n",
+    "prj_fits2 = yt.FITSProjection(ds, \"z\", [\"density\"])\n",
+    "prj_fits3 = yt.FITSImageData.from_images([prj_fits, prj_fits2])\n",
     "prj_fits3.info()"
    ]
   },
@@ -348,7 +347,27 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "So far, the FITS images we have shown have linear spatial coordinates. One may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
+    "So far, the FITS images we have shown have linear spatial coordinates. We can see this by looking at the header for one of the fields, and examining the `CTYPE1` and `CTYPE2` keywords:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "prj_fits[\"temperature\"].header"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `WCSNAME` keyword is set to `\"yt\"` by default. \n",
+    "\n",
+    "However, one may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
    ]
   },
   {
@@ -368,7 +387,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "By the default, a tangent RA/Dec projection is used, but one could also use another projection using the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
+    "By default, a tangent RA/Dec projection is used, but one could also use another projection using the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
    ]
   },
   {
@@ -386,6 +405,49 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "and now the `WCSNAME` has been set to `\"celestial\"`. If you don't want to override the default WCS but to add another one, then you can make the call to `create_sky_wcs` and set `replace_old_wcs=False`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "prj_fits3.create_sky_wcs(sky_center, sky_scale, ctype=[\"RA---TAN\",\"DEC--TAN\"], replace_old_wcs=False)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We now can see that there are two WCSes in the header, with the celestial WCS keywords having the \"A\" designation:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "prj_fits3[\"temperature\"].header"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Any further WCSes that are added will have \"B\", \"C\", etc."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:"
    ]
   },
@@ -415,22 +477,11 @@
   }
  ],
  "metadata": {
+  "anaconda-cloud": {},
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python [default]",
    "language": "python",
    "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.1"
   }
  },
  "nbformat": 4,

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -50,6 +50,25 @@
 colorblind/printer/grayscale-friendly plots. For more information, visit
 `http://colorbrewer2.org <http://colorbrewer2.org>`_.
 
+.. _cmocean-cmaps:
+
+Colormaps from cmocean
+~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to ``palettable``, yt will also import colormaps defined in the
+`cmocean <http://matplotlib.org/cmocean>`_ package. These colormaps are
+`perceptually uniform <http://bids.github.io/colormap/>`_ and were originally
+designed for oceanography applications, but can be used for any kind of plots.
+
+Since ``cmocean`` is not installed as a dependency of yt by default, it must be
+installed separately to access the ``cmocean`` colormaps with yt. The easiest
+way to install ``cmocean`` is via ``pip``: ``pip install cmocean``.  To access
+the colormaps in yt, simply specify the name of the ``cmocean`` colormap in any
+context where you would specify a colormap. One caveat is the ``cmocean``
+colormap ``algae``. Since yt already defines a colormap named ``algae``, the
+``cmocean`` version of ``algae`` must be specified with the name
+``algae_cmocean``.
+
 .. _custom-colormaps:
 
 Making and Viewing Custom Colormaps
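
A short, hedged example of the usage described above, once the optional cmocean
package is installed ('thermal' is one of its colormap names; the dataset is the
standard IsolatedGalaxy sample):

    import yt
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(ds, 'z', 'density')
    slc.set_cmap('density', 'thermal')  # a cmocean colormap
    slc.save()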

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -522,6 +522,33 @@
 The same result could have been accomplished by explicitly setting the ``width``
 to ``(.01, 'Mpc')``.
 
+Set image units
+~~~~~~~~~~~~~~~
+
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_axes_unit` allows
+the customization of the units used for the image and colorbar.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(10,'kpc'))
+   slc.set_unit('density', 'Msun/pc**3')
+   slc.save()
+
+If the unit you would like to convert to needs an equivalency, this can be
+specified via the ``equivalency`` keyword argument of ``set_unit``. For
+example, let's make a plot of the temperature field, but present it using
+an energy unit instead of a temperature unit:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'temperature', width=(10,'kpc'))
+   slc.set_unit('temperature', 'keV', equivalency='thermal')
+   slc.save()
+
 Set the plot center
 ~~~~~~~~~~~~~~~~~~~
 

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,6 +13,6 @@
 #      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 setup.py
--- a/setup.py
+++ b/setup.py
@@ -180,7 +180,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses", "distance_queue"
+    "amr_kdtools", "lenses", "distance_queue", "allocation_container"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(
@@ -298,6 +298,15 @@
                 fobj.write("hg_version = '%s'\n" % changeset)
         _build_py.run(self)
 
+    def get_outputs(self):
+        # http://bitbucket.org/yt_analysis/yt/issues/1296
+        outputs = _build_py.get_outputs(self)
+        outputs.append(
+            os.path.join(self.build_lib, 'yt', '__hg_version__.py')
+        )
+        return outputs
+
+
 class build_ext(_build_ext):
     # subclass setuptools extension builder to avoid importing cython and numpy
     # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -45,7 +45,7 @@
         
         if exit_code != 0:
             print("Compilation of OpenMP test code failed with the error: ")
-            print(err)
+            print(err.decode('utf8'))
             print("Disabling OpenMP support. ")
 
         # Clean up
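
The decode matters because subprocess pipes return bytes under Python 3, so
printing them directly shows b'...' noise. An illustration with a made-up
error message:

    err = b"cc: error: unrecognized command line option '-fopenmp'\n"
    print(err)                 # b"cc: error: ..." under Python 3
    print(err.decode('utf8'))  # clean, human-readable output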

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -6,6 +6,8 @@
 from yt.extern.six import StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
+import numpy
+numpy.set_printoptions(threshold=5, edgeitems=1, precision=4)
 
 class NoseWorker(multiprocessing.Process):
 
@@ -67,7 +69,7 @@
                       if DROP_TAG not in line])
     tests = yaml.load(data)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v',
+    base_argv = ['--local-dir=%s' % answers_dir,
                  '--with-answer-testing', '--answer-big-data', '--local']
     args = []
 

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -1,45 +1,45 @@
 answer_tests:
-  local_artio_000:
+  local_artio_001:
     - yt/frontends/artio/tests/test_outputs.py
 
-  local_athena_001:
+  local_athena_002:
     - yt/frontends/athena
 
-  local_chombo_001:
+  local_chombo_002:
     - yt/frontends/chombo/tests/test_outputs.py
 
-  local_enzo_002:
+  local_enzo_003:
     - yt/frontends/enzo
 
-  local_fits_000:
+  local_fits_001:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_003:
+  local_flash_004:
     - yt/frontends/flash/tests/test_outputs.py
 
-  local_gadget_000:
+  local_gadget_001:
     - yt/frontends/gadget/tests/test_outputs.py
 
-  local_gamer_001:
+  local_gamer_002:
     - yt/frontends/gamer/tests/test_outputs.py
 
-  local_gdf_000:
+  local_gdf_001:
     - yt/frontends/gdf/tests/test_outputs.py
 
-  local_gizmo_001:
+  local_gizmo_002:
     - yt/frontends/gizmo/tests/test_outputs.py
 
-  local_halos_000:
+  local_halos_001:
     - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py  # [py2]
     - yt/analysis_modules/halo_finding/tests/test_rockstar.py  # [py2]
     - yt/frontends/owls_subfind/tests/test_outputs.py
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
 
-  local_owls_000:
+  local_owls_001:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_010:
+  local_pw_012:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -47,10 +47,10 @@
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
     - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
 
-  local_tipsy_001:
+  local_tipsy_002:
     - yt/frontends/tipsy/tests/test_outputs.py
 
-  local_varia_006:
+  local_varia_007:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -59,13 +59,13 @@
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
 
-  local_orion_000:
+  local_orion_001:
     - yt/frontends/boxlib/tests/test_orion.py
 
-  local_ramses_000:
+  local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py
 
-  local_ytdata_000:
+  local_ytdata_002:
     - yt/frontends/ytdata
 
   local_absorption_spectrum_005:
@@ -81,8 +81,6 @@
 
 other_tests:
   unittests:
-     - '-v'
      - '--exclude=test_mesh_slices'  # disable randomly failing test
   cookbook:
-     - '-v'
      - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -156,7 +156,9 @@
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, add_cmap, make_colormap, \
     ProfilePlot, PhasePlot, ParticlePhasePlot, \
-    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot
+    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot, \
+    FITSImageData, FITSSlice, FITSProjection, FITSOffAxisSlice, \
+    FITSOffAxisProjection
 
 from yt.visualization.volume_rendering.api import \
     volume_render, create_scene, ColorTransferFunction, TransferFunction, \

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -390,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density < 0).any():
+                mylog.warn("Setting negative densities for field %s to 0! Bad!" % line['field_name'])
+                np.clip(column_density, 0, np.inf, out=column_density)
             if (column_density == 0).all():
                 mylog.info("Not adding line %s: insufficient column density" % line['label'])
                 continue
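
The added clip rewrites the column density array in place. A tiny illustration
of np.clip with out= (the values are made up):

    import numpy as np

    column_density = np.array([-1.0e12, 0.0, 3.0e12])
    np.clip(column_density, 0, np.inf, out=column_density)
    print(column_density)  # the negative entry is now 0.0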

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -673,6 +673,25 @@
                 ds["hubble_constant"] = \
                   ds["hubble_constant"].to("100*km/(Mpc*s)").d
         extra_attrs = {"data_type": "yt_light_ray"}
+
+        # save the light ray solution
+        if len(self.light_ray_solution) > 0:
+            # Convert everything to base unit system now to avoid
+            # problems with different units for each ds.
+            for s in self.light_ray_solution:
+                for f in s:
+                    if isinstance(s[f], YTArray):
+                        s[f].convert_to_base()
+            for key in self.light_ray_solution[0]:
+                if key in ["next", "previous"]:
+                    continue
+                lrsa = [sol[key] for sol in self.light_ray_solution]
+                if isinstance(lrsa[-1], YTArray):
+                    to_arr = YTArray
+                else:
+                    to_arr = np.array
+                extra_attrs["light_ray_solution_%s" % key] = to_arr(lrsa)
+
         field_types = dict([(field, "grid") for field in data.keys()])
 
         # Only return LightRay elements with non-zero density

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -12,7 +12,10 @@
 
 import numpy as np
 
+from yt.convenience import \
+    load
 from yt.testing import \
+    assert_array_equal, \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay
 import os
@@ -23,6 +26,19 @@
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 
+def compare_light_ray_solutions(lr1, lr2):
+    assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution)
+    if len(lr1.light_ray_solution) == 0:
+        return
+    for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution):
+        for field in s1:
+            if field in ["next", "previous"]:
+                continue
+            if isinstance(s1[field], np.ndarray):
+                assert_array_equal(s1[field], s2[field])
+            else:
+                assert s1[field] == s2[field]
+
 @requires_file(COSMO_PLUS)
 def test_light_ray_cosmo():
     """
@@ -39,6 +55,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -62,6 +81,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -82,6 +104,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -105,6 +130,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -130,6 +158,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -99,7 +99,7 @@
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
             dd = ds.all_data()
-            newtags = dd[idx_field].ndarray_view().astype("int64")
+            newtags = dd[idx_field].d.astype("int64")
             mask = np.in1d(newtags, indices, assume_unique=True)
             sort = np.argsort(newtags[mask])
             array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
@@ -197,7 +197,6 @@
 
         Examples
         ________
-        >>> from yt.mods import *
         >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.add_fields(["particle_mass", "particle_gpot"])
         """
@@ -247,15 +246,15 @@
                 dd = ds.all_data()
                 for field in new_particle_fields:
                     # This is easy... just get the particle fields
-                    pfield[field] = dd[fds[field]].ndarray_view()[mask][sort]
+                    pfield[field] = dd[fds[field]].d[mask][sort]
 
             if grid_fields:
                 # This is hard... must loop over grids
                 for field in grid_fields:
-                    pfield[field] = np.zeros((self.num_indices))
-                x = self["particle_position_x"][:,step].ndarray_view()
-                y = self["particle_position_y"][:,step].ndarray_view()
-                z = self["particle_position_z"][:,step].ndarray_view()
+                    pfield[field] = np.zeros(self.num_indices)
+                x = self["particle_position_x"][:,step].d
+                y = self["particle_position_y"][:,step].d
+                z = self["particle_position_z"][:,step].d
                 particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
 
                 # This will fail for non-grid index objects
@@ -375,10 +374,10 @@
         >>> trajs.write_out_h5("orbit_trajectories")                
         """
         fid = h5py.File(filename, "w")
-        fields = [field for field in sorted(self.field_data.keys())]
         fid.create_dataset("particle_indices", dtype=np.int64,
                            data=self.indices)
-        fid.create_dataset("particle_time", data=self.times)
+        fid.close()
+        self.times.write_hdf5(filename, dataset_name="particle_times")
+        fields = [field for field in sorted(self.field_data.keys())]
         for field in fields:
-            fid.create_dataset("%s" % field, data=self[field])
-        fid.close()
+            self[field].write_hdf5(filename, dataset_name="%s" % field)

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/photon_simulator/api.py
--- a/yt/analysis_modules/photon_simulator/api.py
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -10,6 +10,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.funcs import issue_deprecation_warning
+
+issue_deprecation_warning("The photon_simulator module is deprecated. Please use pyXSIM "
+                          "(http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim) instead.")
+
 from .photon_models import \
      PhotonModel, \
      ThermalPhotonModel

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -31,7 +31,7 @@
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import assert_same_wcs
+from yt.visualization.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     communication_system, parallel_root_only, get_mpi_type, \
     parallel_capable

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -13,7 +13,7 @@
 import numpy as np
 from yt.utilities.on_demand_imports import _astropy
 from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageData, sanitize_fits_unit
+from yt.visualization.fits_image import FITSImageData, sanitize_fits_unit
 from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
 from yt.funcs import get_pbar
 from yt.utilities.physical_constants import clight, mh

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -375,7 +375,7 @@
         >>> sky_center = (30., 45., "deg")
         >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        from yt.utilities.fits_image import FITSImageData
+        from yt.visualization.fits_image import FITSImageData
 
         dx = self.dx.in_units("kpc")
         dy = dx

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -411,9 +411,11 @@
                 path_length_unit = self.ds.field_info[path_element_name].units
                 path_length_unit = Unit(path_length_unit,
                                         registry=self.ds.unit_registry)
-                # Only convert to CGS for path elements that aren't angles
+                # Only convert to appropriate unit system for path
+                # elements that aren't angles
                 if not path_length_unit.is_dimensionless:
-                    path_length_unit = path_length_unit.get_cgs_equivalent()
+                    path_length_unit = path_length_unit.get_base_equivalent(
+                        unit_system=self.ds.unit_system)
             if self.weight_field is None:
                 self._projected_units[field] = field_unit*path_length_unit
             else:

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1040,6 +1040,35 @@
                      [self.field_parameters])
         return (_reconstruct_object, args)
 
+    def clone(self):
+        r"""Clone a data object.
+
+        This will make a duplicate of a data object; note that the
+        `field_parameters` may not necessarily be deeply-copied.  If you modify
+        the field parameters in-place, they may or may not be shared between the
+        objects, depending on the type of that particular field parameter.
+
+        Notes
+        -----
+        One use case for this is to have multiple identical data objects that
+        are being chunked over in different orders.
+
+        Examples
+        --------
+
+        >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> sp = ds.sphere("c", 0.1)
+        >>> sp_clone = sp.clone()
+        >>> sp["density"]
+        >>> print sp.field_data.keys()
+        [("gas", "density")]
+        >>> print sp_clone.field_data.keys()
+        []
+        """
+        args = self.__reduce__()
+        return args[0](self.ds, *args[1][1:])[1]
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
@@ -1189,7 +1218,16 @@
         # This is an iterator that will yield the necessary chunks.
         self.get_data() # Ensure we have built ourselves
         if fields is None: fields = []
-        for chunk in self.index._chunk(self, chunking_style, **kwargs):
+        # chunk_ind can be supplied in the keyword arguments.  If it's a
+        # scalar, that'll be the only chunk that gets returned; if it's a list,
+        # those are the ones that will be.
+        chunk_ind = kwargs.pop("chunk_ind", None)
+        if chunk_ind is not None:
+            chunk_ind = ensure_list(chunk_ind)
+        for ci, chunk in enumerate(self.index._chunk(self, chunking_style,
+                                   **kwargs)):
+            if chunk_ind is not None and ci not in chunk_ind:
+                continue
             with self._chunked_read(chunk):
                 self.get_data(fields)
                 # NOTE: we yield before releasing the context
@@ -1978,6 +2016,9 @@
     return narg
 
 def _get_ds_by_hash(hash):
+    from yt.data_objects.static_output import Dataset
+    if isinstance(hash, Dataset):
+        return hash
     from yt.data_objects.static_output import _cached_datasets
     for ds in _cached_datasets.values():
         if ds._hash() == hash: return ds
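
The new chunk_ind keyword lets a caller iterate over only selected chunks. A
sketch using yt's fake_random_ds helper (the 'io' chunking style and the scalar
index here are illustrative):

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    ad = ds.all_data()
    # a scalar yields only that chunk; a list of indices selects several
    for chunk in ad.chunks(['density'], 'io', chunk_ind=0):
        print(chunk['density'].shape)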

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -93,7 +93,7 @@
     def _reshape_vals(self, arr):
         nz = self.nz
         if len(arr.shape) <= 2:
-            n_oct = arr.shape[0] / (nz**3)
+            n_oct = arr.shape[0] // (nz**3)
         else:
             n_oct = max(arr.shape)
         if arr.size == nz*nz*nz*n_oct:
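
The / to // change is a Python 3 porting fix: true division returns a float,
which is not a valid oct count, while floor division keeps an integer:

    nz = 2
    print(16 / nz**3)   # 2.0, a float under Python 3
    print(16 // nz**3)  # 2, an int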

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -276,7 +276,13 @@
 
     def _get_bins(self, mi, ma, n, take_log):
         if take_log:
-            return np.logspace(np.log10(mi), np.log10(ma), n+1)
+            ret = np.logspace(np.log10(mi), np.log10(ma), n+1)
+            # at this point ret[0] and ret[-1] are not exactly equal to
+            # mi and ma due to round-off error. Let's force them to be
+            # mi and ma exactly to avoid incorrectly discarding cells near
+            # the edges. See Issue #1300.
+            ret[0], ret[-1] = mi, ma
+            return ret
         else:
             return np.linspace(mi, ma, n+1)
 
@@ -1010,6 +1016,11 @@
     if extrema is None:
         ex = [data_source.quantities["Extrema"](f, non_zero=l)
               for f, l in zip(bin_fields, logs)]
+        # pad extrema by epsilon so cells at bin edges are not excluded
+        for i, (mi, ma) in enumerate(ex):
+            mi = mi - np.spacing(mi)
+            ma = ma + np.spacing(ma)
+            ex[i][0], ex[i][1] = mi, ma
     else:
         ex = []
         for bin_field in bin_fields:
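
The round-off problem this hunk fixes (Issue #1300) is easy to reproduce:
np.logspace only recovers the endpoints up to floating-point error, so cells
sitting exactly at the extrema could fall outside the outermost bins. The
extrema values below are made up:

    import numpy as np

    mi, ma = 3.0e-31, 7.0e-27
    bins = np.logspace(np.log10(mi), np.log10(ma), 65)
    print(bins[0] == mi, bins[-1] == ma)  # often False False
    bins[0], bins[-1] = mi, ma            # the committed workaround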

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -106,8 +106,32 @@
             return self.__getattribute__(attr)
         return fnc
 
+    _field_types = None
+    @property
+    def field_types(self):
+        if self._field_types is None:
+            self._field_types = set(t for t, n in self.ds.field_info)
+        return self._field_types
+
     def __dir__(self):
-        return list(set(t for t, n in self.ds.field_info))
+        return list(self.field_types)
+
+    def __iter__(self):
+        for ft in self.field_types:
+            fnc = FieldNameContainer(self.ds, ft)
+            if len(dir(fnc)) == 0:
+                yield self.__getattribute__(ft)
+            else:
+                yield fnc
+
+    def __contains__(self, obj):
+        ob = None
+        if isinstance(obj, FieldNameContainer):
+            ob = obj.field_type
+        elif isinstance(obj, string_types):
+            ob = obj
+
+        return ob in self.field_types
 
 class FieldNameContainer(object):
     def __init__(self, ds, field_type):
@@ -125,6 +149,26 @@
         return [n for t, n in self.ds.field_info
                 if t == self.field_type]
 
+    def __iter__(self):
+        for t, n in self.ds.field_info:
+            if t == self.field_type:
+                yield self.ds.field_info[t, n]
+
+    def __contains__(self, obj):
+        if isinstance(obj, DerivedField):
+            if self.field_type == obj.name[0] and obj.name in self.ds.field_info:
+                # e.g. from a completely different dataset
+                if self.ds.field_info[obj.name] is not obj:
+                    return False
+                return True
+        elif isinstance(obj, tuple):
+            if self.field_type == obj[0] and obj in self.ds.field_info:
+                return True
+        elif isinstance(obj, string_types):
+            if (self.field_type, obj) in self.ds.field_info:
+                return True
+        return False
+
 class IndexProxy(object):
     # This is a simple proxy for Index objects.  It enables backwards
     # compatibility so that operations like .h.sphere, .h.print_stats and

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/data_objects/tests/test_clone.py
--- /dev/null
+++ b/yt/data_objects/tests/test_clone.py
@@ -0,0 +1,24 @@
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_array_equal
+
+def test_clone_sphere():
+    # Now we test that we can get different radial velocities based on field
+    # parameters.
+
+    # Get the first sphere
+    ds = fake_random_ds(16, fields = ("density",
+      "velocity_x", "velocity_y", "velocity_z"))
+    sp0 = ds.sphere(ds.domain_center, 0.25)
+
+    assert_equal(list(sp0.keys()), [])
+
+    sp1 = sp0.clone()
+    sp0["density"]
+    assert_equal(list(sp0.keys()), (("gas","density"),))
+    assert_equal(list(sp1.keys()), [])
+
+    sp1["density"]
+
+    assert_array_equal(sp0["density"], sp1["density"])

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -145,32 +145,53 @@
     unit_system = registry.ds.unit_system
     elements = _get_all_elements(registry.species_names)
     for element in elements:
-        registry.add_field((ftype, "%s_nuclei_density" % element), sampling_type="cell", 
+        registry.add_field((ftype, "%s_nuclei_density" % element),
+                           sampling_type="cell",
                            function = _nuclei_density,
                            particle_type = particle_type,
                            units = unit_system["number_density"])
-    if len(elements) == 0:
-        for element in ["H", "He"]:
-            registry.add_field((ftype, "%s_nuclei_density" % element), sampling_type="cell", 
-                               function = _default_nuclei_density,
-                               particle_type = particle_type,
-                               units = unit_system["number_density"])
+
+    for element in ["H", "He"]:
+        if element in elements:
+            continue
+        registry.add_field((ftype, "%s_nuclei_density" % element),
+                           sampling_type="cell",
+                           function = _default_nuclei_density,
+                           particle_type = particle_type,
+                           units = unit_system["number_density"])
 
 def _default_nuclei_density(field, data):
+    ftype = field.name[0]
     element = field.name[1][:field.name[1].find("_")]
-    return data["gas", "density"] * _primordial_mass_fraction[element] / \
+    return data[ftype, "density"] * _primordial_mass_fraction[element] / \
       ChemicalFormula(element).weight / amu_cgs
         
 def _nuclei_density(field, data):
+    ftype = field.name[0]
     element = field.name[1][:field.name[1].find("_")]
-    field_data = np.zeros_like(data["gas", "%s_number_density" % 
+
+    nuclei_mass_field = "%s_nuclei_mass_density" % element
+    if (ftype, nuclei_mass_field) in data.ds.field_info:
+        return data[(ftype, nuclei_mass_field)] / \
+          ChemicalFormula(element).weight / amu_cgs
+    metal_field = "%s_metallicity" % element
+    if (ftype, metal_field) in data.ds.field_info:
+        return data[ftype, "density"] * data[(ftype, metal_field)] / \
+          ChemicalFormula(element).weight / amu_cgs
+
+    field_data = np.zeros_like(data[ftype, "%s_number_density" %
                                     data.ds.field_info.species_names[0]])
     for species in data.ds.field_info.species_names:
         nucleus = species
         if "_" in species:
             nucleus = species[:species.find("_")]
+        # num is the number of nuclei contributed by this species.
         num = _get_element_multiple(nucleus, element)
-        field_data += num * data["gas", "%s_number_density" % species]
+        # Since this is a loop over all species existing in this dataset,
+        # we will encounter species that contribute nothing, so we skip them.
+        if num == 0:
+            continue
+        field_data += num * data[ftype, "%s_number_density" % species]
     return field_data
 
 def _get_all_elements(species_list):
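
As a sanity check on the formula used by _nuclei_density and
_default_nuclei_density (number density = mass density * mass fraction /
(atomic weight * atomic mass unit)), with illustrative input values:

    amu_g = 1.660539e-24   # atomic mass unit in grams
    rho = 1.0e-24          # gas mass density in g/cm**3 (made up)
    X_H = 0.76             # primordial hydrogen mass fraction
    n_H = rho * X_H / (1.008 * amu_g)
    print("%.3e cm^-3" % n_H)  # ~4.5e-01 cm^-3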

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/fields/tests/test_field_name_container.py
--- a/yt/fields/tests/test_field_name_container.py
+++ b/yt/fields/tests/test_field_name_container.py
@@ -1,13 +1,28 @@
-from yt.utilities.answer_testing.framework import \
-    requires_ds, \
-    data_dir_load
+from yt import \
+    load
+from yt.testing import \
+    requires_file
+
+def do_field_type(ft):
+    for field_name in dir(ft):
+        f = getattr(ft, field_name)
+        assert ((ft.field_type, field_name) == f.name)
+    for field in ft:
+        f = getattr(ft, field.name[1])
+        assert (f == field)
+        assert (f in ft)
+        assert (f.name in ft)
+        assert (f.name[1] in ft)
+
 
 enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
-@requires_ds(enzotiny)
-def test_simulated_halo_mass_function():
-    ds = data_dir_load(enzotiny)
+@requires_file(enzotiny)
+def test_field_name_container():
+    ds = load(enzotiny)
     for field_type in dir(ds.fields):
+        assert (field_type in ds.fields)
         ft = getattr(ds.fields, field_type)
-        for field_name in dir(ft):
-            f = getattr(ft, field_name)
-            assert ((field_type, field_name) == f.name)
+        do_field_type(ft)
+    for field_type in ds.fields:
+        assert (field_type in ds.fields)
+        do_field_type(field_type)

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -7,7 +7,7 @@
     SelectorObject, AlwaysSelector, OctreeSubsetSelector
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
-    SparseOctreeContainer
+    SparseOctreeContainer, OctObjectPool
 from yt.geometry.oct_visitors cimport Oct
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
@@ -923,7 +923,7 @@
         super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
         self.artio_handle = range_handler.artio_handle
         self.level_offset = 1
-        self.domains = NULL
+        self.domains = OctObjectPool()
         self.root_nodes = NULL
 
     @cython.boundscheck(False)
@@ -949,7 +949,7 @@
 
         # We only allow one root oct.
         self.append_domain(oct_count)
-        self.domains[self.num_domains - 1].con_id = sfc
+        self.domains.containers[self.num_domains - 1].con_id = sfc
 
         oct_ind = -1
         ipos = 0
@@ -1009,7 +1009,7 @@
         source_arrays = []
         ipos = -1
         for i in range(self.num_domains):
-            ipos = imax(ipos, self.domains[i].n)
+            ipos = imax(ipos, self.domains.containers[i].n)
         for i in range(nf):
             field_ind[i] = field_indices[i]
             # Note that we subtract one, because we're not using the root mesh.
@@ -1029,13 +1029,13 @@
         #     double-loop to calculate domain_counts
         # The cons should be in order
         cdef np.int64_t sfc_start, sfc_end
-        sfc_start = self.domains[0].con_id
-        sfc_end = self.domains[self.num_domains - 1].con_id
+        sfc_start = self.domains.containers[0].con_id
+        sfc_end = self.domains.containers[self.num_domains - 1].con_id
         status = artio_grid_cache_sfc_range(handle, sfc_start, sfc_end)
         check_artio_status(status)
         cdef np.int64_t offset = 0
         for si in range(self.num_domains):
-            sfc = self.domains[si].con_id
+            sfc = self.domains.containers[si].con_id
             status = artio_grid_read_root_cell_begin( handle, sfc,
                     dpos, NULL, &num_oct_levels, num_octs_per_level)
             check_artio_status(status)

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -17,7 +17,6 @@
 from yt.funcs import mylog
 from yt.utilities.on_demand_imports import _astropy
 from yt.units.yt_array import YTQuantity, YTArray
-from yt.utilities.fits_image import FITSImageData
 if PY3:
     from io import BytesIO as IO
 else:
@@ -109,6 +108,7 @@
     ...                            nan_mask=0.0)
     """
     from spectral_cube import SpectralCube
+    from yt.visualization.fits_image import FITSImageData
     from yt.frontends.fits.api import FITSDataset
     cube = SpectralCube.read(filename)
     if not isinstance(slab_width, YTQuantity):
@@ -171,14 +171,18 @@
     nx = ds.domain_dimensions[ds.lon_axis]
     ny = ds.domain_dimensions[ds.lat_axis]
     mask = filter.mask((ny,nx)).transpose()
+    if ds.events_data:
+        prefix = "event_"
+    else:
+        prefix = ""
     def _reg_field(field, data):
-        i = data["xyz"[ds.lon_axis]].ndarray_view().astype("int")-1
-        j = data["xyz"[ds.lat_axis]].ndarray_view().astype("int")-1
+        i = data[prefix+"xyz"[ds.lon_axis]].d.astype("int")-1
+        j = data[prefix+"xyz"[ds.lat_axis]].d.astype("int")-1
         new_mask = mask[i,j]
-        ret = data["zeros"].copy()
+        ret = np.zeros(data[prefix+"x"].shape)
         ret[new_mask] = 1.
         return ret
-    ds.add_field(("gas",reg_name), sampling_type="cell",  function=_reg_field)
+    ds.add_field(("gas", reg_name), sampling_type="cell",  function=_reg_field)
     if obj is None:
         obj = ds.all_data()
     if field_parameters is not None:

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/gadget/definitions.py
--- a/yt/frontends/gadget/definitions.py
+++ b/yt/frontends/gadget/definitions.py
@@ -84,3 +84,12 @@
                    ("StellarAge", "Stars")
     ),
 )
+
+gadget_hdf5_ptypes  = (
+    "PartType0",
+    "PartType1",
+    "PartType2",
+    "PartType3",
+    "PartType4",
+    "PartType5"
+)

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -93,6 +93,7 @@
             sampling_type="particle",
             function=_temperature,
             units=self.ds.unit_system["temperature"])
+        self.alias((ptype, 'temperature'), (ptype, 'Temperature'))
 
         # For now, we hardcode num_neighbors.  We should make this configurable
         # in the future.

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ b/yt/frontends/gadget/io.py
@@ -19,18 +19,191 @@
 import os
 
 from yt.extern.six import string_types
-from yt.frontends.owls.io import \
-    IOHandlerOWLS
 from yt.utilities.io_handler import \
     BaseIOHandler
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.on_demand_imports import _h5py as h5py
 
-from .data_structures import _get_gadget_format
+from .data_structures import \
+    _get_gadget_format
 
-class IOHandlerGadgetHDF5(IOHandlerOWLS):
+from .definitions import \
+    gadget_hdf5_ptypes
+
+
+class IOHandlerGadgetHDF5(BaseIOHandler):
     _dataset_type = "gadget_hdf5"
+    _vector_fields = ("Coordinates", "Velocity", "Velocities")
+    _known_ptypes = gadget_hdf5_ptypes
+    _var_mass = None
+    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
+                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
+
+
+    @property
+    def var_mass(self):
+        if self._var_mass is None:
+            vm = []
+            for i, v in enumerate(self.ds["Massarr"]):
+                if v == 0:
+                    vm.append(self._known_ptypes[i])
+            self._var_mass = tuple(vm)
+        return self._var_mass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files, key=lambda x: x.filename):
+            f = h5py.File(data_file.filename, "r")
+            # This double-reads
+            for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
+                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
+                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
+                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
+                yield ptype, (x, y, z)
+            f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files, key=lambda x: x.filename):
+            f = h5py.File(data_file.filename, "r")
+            for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
+                g = f["/%s" % ptype]
+                coords = g["Coordinates"][:].astype("float64")
+                mask = selector.select_points(
+                            coords[:,0], coords[:,1], coords[:,2], 0.0)
+                del coords
+                if mask is None: continue
+                for field in field_list:
+
+                    if field in ("Mass", "Masses") and \
+                        ptype not in self.var_mass:
+                        data = np.empty(mask.sum(), dtype="float64")
+                        ind = self._known_ptypes.index(ptype)
+                        data[:] = self.ds["Massarr"][ind]
+
+                    elif field in self._element_names:
+                        rfield = 'ElementAbundance/' + field
+                        data = g[rfield][:][mask,...]
+                    elif field.startswith("Metallicity_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["Metallicity"][:,col][mask]
+                    elif field.startswith("Chemistry_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["ChemistryAbundances"][:,col][mask]
+                    else:
+                        data = g[field][:][mask,...]
+
+                    yield (ptype, field), data
+            f.close()
+
+    def _initialize_index(self, data_file, regions):
+        index_ptype = self.index_ptype
+        f = h5py.File(data_file.filename, "r")
+        if index_ptype == "all":
+            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
+            keys = f.keys()
+        else:
+            pt = int(index_ptype[-1])
+            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
+            keys = [index_ptype]
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        for key in keys:
+            if not key.startswith("PartType"): continue
+            if "Coordinates" not in f[key]: continue
+            ds = f[key]["Coordinates"]
+            dt = ds.dtype.newbyteorder("N") # Native
+            pos = np.empty(ds.shape, dtype=dt)
+            pos[:] = ds
+            regions.add_data_file(pos, data_file.file_id,
+                                  data_file.ds.filter_bbox)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge,
+                data_file.ds.filter_bbox)
+            ind += pos.shape[0]
+        f.close()
+        return morton
+
+    def _count_particles(self, data_file):
+        f = h5py.File(data_file.filename, "r")
+        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
+        f.close()
+        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
+        return npart
+
+
+    def _identify_fields(self, data_file):
+        f = h5py.File(data_file.filename, "r")
+        fields = []
+        cname = self.ds._particle_coordinates_name  # Coordinates
+        mname = self.ds._particle_mass_name  # Mass
+
+        # loop over all keys in the Gadget HDF5 file
+        #--------------------------------------------------
+        for key in f.keys():
+
+            # only want particle data
+            #--------------------------------------
+            if not key.startswith("PartType"):
+                continue
+
+            # particle data group
+            #--------------------------------------
+            g = f[key]
+            if cname not in g:
+                continue
+
+            # note str => not unicode!
+            ptype = str(key)
+            if ptype not in self.var_mass:
+                fields.append((ptype, mname))
+
+            # loop over all keys in PartTypeX group
+            #----------------------------------------
+            for k in g.keys():
+
+                if k == 'ElementAbundance':
+                    gp = g[k]
+                    for j in gp.keys():
+                        kk = j
+                        fields.append((ptype, str(kk)))
+                elif k == 'Metallicity' and len(g[k].shape) > 1:
+                    # Vector of metallicity
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Metallicity_%02i" % i))
+                elif k == "ChemistryAbundances" and len(g[k].shape) > 1:
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Chemistry_%03i" % i))
+                else:
+                    kk = k
+                    if not hasattr(g[kk], "shape"):
+                        continue
+                    if len(g[kk].shape) > 1:
+                        self._vector_fields[kk] = g[kk].shape[1]
+                    fields.append((ptype, str(kk)))
+
+        f.close()
+        return fields, {}
 
 ZeroMass = object()
 
@@ -139,7 +312,7 @@
         arr = np.fromfile(f, dtype=dt, count = count)
         if name in self._vector_fields:
             factor = self._vector_fields[name]
-            arr = arr.reshape((count/factor, factor), order="C")
+            arr = arr.reshape((count//factor, factor), order="C")
         return arr.astype("float64")
 
     def _initialize_index(self, data_file, regions):
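
The reshape hunk above swaps / for //: under Python 3 (or with true division imported), count/factor is a float, and numpy refuses float dimensions. The same fix appears in the RAMSES hunk further down. A two-line demonstration:

    import numpy as np

    arr = np.arange(12, dtype="float64")
    count, factor = arr.size, 3
    # arr.reshape((count / factor, factor)) raises TypeError on Python 3:
    # 'float' object cannot be interpreted as an integer (4.0, not 4)
    arr = arr.reshape((count // factor, factor), order="C")
    print(arr.shape)   # -> (4, 3)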

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -14,15 +14,21 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from yt.fields.field_info_container import \
+    FieldInfoContainer
 from yt.fields.particle_fields import \
     add_volume_weighted_smoothed_field
 from yt.fields.species_fields import \
-    add_species_field_by_density
+    add_species_field_by_density, \
+    setup_species_fields
 from yt.frontends.gadget.fields import \
     GadgetFieldInfo
 from yt.frontends.sph.fields import \
     SPHFieldInfo
 
+metal_elements = ["He", "C", "N", "O", "Ne",
+                  "Mg", "Si", "S", "Ca", "Fe"]
+
 class GizmoFieldInfo(GadgetFieldInfo):
     known_particle_fields = (
         ("Mass", ("code_mass", ["particle_mass"], None)),
@@ -58,12 +64,17 @@
     def __init__(self, *args, **kwargs):
         super(SPHFieldInfo, self).__init__(*args, **kwargs)
         if ("PartType0", "Metallicity_00") in self.field_list:
-            self.nuclei_names = ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
-                                 "Ca", "Fe"]
+            self.nuclei_names = metal_elements
+            self.species_names = ["H", "H_p1"] + metal_elements
+
+    def setup_particle_fields(self, ptype):
+        FieldInfoContainer.setup_particle_fields(self, ptype)
+        if ptype in ("PartType0",):
+            self.setup_gas_particle_fields(ptype)
+            setup_species_fields(self, ptype)
 
     def setup_gas_particle_fields(self, ptype):
         super(GizmoFieldInfo, self).setup_gas_particle_fields(ptype)
-        self.alias((ptype, "temperature"), (ptype, "Temperature"))
 
         def _h_density(field, data):
             x_H = 1.0 - data[(ptype, "He_metallicity")] - \
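
The _h_density helper derives the hydrogen mass fraction by subtraction: whatever mass is not helium or heavier metals is hydrogen. A back-of-the-envelope check, assuming the truncated line above goes on to subtract the total metallicity field (the frontend may fold in further factors, such as the neutral fraction, before producing a density):

    He_metallicity = 0.27   # illustrative mass fractions, not real data
    metallicity = 0.02      # total metal mass fraction
    density = 1.0e-24       # g/cm**3, again illustrative

    x_H = 1.0 - He_metallicity - metallicity
    print(x_H, x_H * density)   # -> 0.71 7.1e-25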

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/owls/definitions.py
--- a/yt/frontends/owls/definitions.py
+++ b/yt/frontends/owls/definitions.py
@@ -13,6 +13,3 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-
-ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
-                 "PartType4", "PartType5")

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -15,198 +15,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-from yt.utilities.lib.geometry_utils import \
-    compute_morton
-
-from .definitions import \
-    ghdf5_ptypes
-
-CHUNKSIZE = 10000000
-
-def _get_h5_handle(fn):
-    try:
-        f = h5py.File(fn, "r")
-    except IOError:
-        print("ERROR OPENING %s" % (fn))
-        if os.path.exists(fn):
-            print("FILENAME EXISTS")
-        else:
-            print("FILENAME DOES NOT EXIST")
-        raise
-    return f
-
-class IOHandlerOWLS(BaseIOHandler):
-    _dataset_type = "OWLS"
-    _vector_fields = ("Coordinates", "Velocity", "Velocities")
-    _known_ptypes = ghdf5_ptypes
-    _var_mass = None
-    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
-                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
-
-
-    @property
-    def var_mass(self):
-        if self._var_mass is None:
-            vm = []
-            for i, v in enumerate(self.ds["Massarr"]):
-                if v == 0:
-                    vm.append(self._known_ptypes[i])
-            self._var_mass = tuple(vm)
-        return self._var_mass
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        raise NotImplementedError
-
-    def _read_particle_coords(self, chunks, ptf):
-        # This will read chunks and yield the results.
-        chunks = list(chunks)
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
-            # This double-reads
-            for ptype, field_list in sorted(ptf.items()):
-                if data_file.total_particles[ptype] == 0:
-                    continue
-                x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
-                y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
-                z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
-                yield ptype, (x, y, z)
-            f.close()
-
-    def _read_particle_fields(self, chunks, ptf, selector):
-        # Now we have all the sizes, and we can allocate
-        data_files = set([])
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files, key=lambda x: x.filename):
-            f = _get_h5_handle(data_file.filename)
-            for ptype, field_list in sorted(ptf.items()):
-                if data_file.total_particles[ptype] == 0:
-                    continue
-                g = f["/%s" % ptype]
-                coords = g["Coordinates"][:].astype("float64")
-                mask = selector.select_points(
-                            coords[:,0], coords[:,1], coords[:,2], 0.0)
-                del coords
-                if mask is None: continue
-                for field in field_list:
-
-                    if field in ("Mass", "Masses") and \
-                        ptype not in self.var_mass:
-                        data = np.empty(mask.sum(), dtype="float64")
-                        ind = self._known_ptypes.index(ptype)
-                        data[:] = self.ds["Massarr"][ind]
+from yt.frontends.gadget.io import \
+    IOHandlerGadgetHDF5
 
-                    elif field in self._element_names:
-                        rfield = 'ElementAbundance/' + field
-                        data = g[rfield][:][mask,...]
-                    elif field.startswith("Metallicity_"):
-                        col = int(field.rsplit("_", 1)[-1])
-                        data = g["Metallicity"][:,col][mask]
-                    elif field.startswith("Chemistry_"):
-                        col = int(field.rsplit("_", 1)[-1])
-                        data = g["ChemistryAbundances"][:,col][mask]
-                    else:
-                        data = g[field][:][mask,...]
-
-                    yield (ptype, field), data
-            f.close()
-
-    def _initialize_index(self, data_file, regions):
-        index_ptype = self.index_ptype
-        f = _get_h5_handle(data_file.filename)
-        if index_ptype == "all":
-            pcount = f["/Header"].attrs["NumPart_ThisFile"][:].sum()
-            keys = f.keys()
-        else:
-            pt = int(index_ptype[-1])
-            pcount = f["/Header"].attrs["NumPart_ThisFile"][pt]
-            keys = [index_ptype]
-        morton = np.empty(pcount, dtype='uint64')
-        ind = 0
-        for key in keys:
-            if not key.startswith("PartType"): continue
-            if "Coordinates" not in f[key]: continue
-            ds = f[key]["Coordinates"]
-            dt = ds.dtype.newbyteorder("N") # Native
-            pos = np.empty(ds.shape, dtype=dt)
-            pos[:] = ds
-            regions.add_data_file(pos, data_file.file_id,
-                                  data_file.ds.filter_bbox)
-            morton[ind:ind+pos.shape[0]] = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.ds.domain_left_edge,
-                data_file.ds.domain_right_edge,
-                data_file.ds.filter_bbox)
-            ind += pos.shape[0]
-        f.close()
-        return morton
-
-    def _count_particles(self, data_file):
-        f = _get_h5_handle(data_file.filename)
-        pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
-        f.close()
-        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
-        return npart
-
-
-    def _identify_fields(self, data_file):
-        f = _get_h5_handle(data_file.filename)
-        fields = []
-        cname = self.ds._particle_coordinates_name  # Coordinates
-        mname = self.ds._particle_mass_name  # Mass
-
-        # loop over all keys in OWLS hdf5 file
-        #--------------------------------------------------
-        for key in f.keys():
-
-            # only want particle data
-            #--------------------------------------
-            if not key.startswith("PartType"): continue
-
-            # particle data group
-            #--------------------------------------
-            g = f[key]
-            if cname not in g: continue
-
-            # note str => not unicode!
-
-            #ptype = int(key[8:])
-            ptype = str(key)
-            if ptype not in self.var_mass:
-                fields.append((ptype, mname))
-
-            # loop over all keys in PartTypeX group
-            #----------------------------------------
-            for k in g.keys():
-
-                if k == 'ElementAbundance':
-                    gp = g[k]
-                    for j in gp.keys():
-                        kk = j
-                        fields.append((ptype, str(kk)))
-                elif k == 'Metallicity' and len(g[k].shape) > 1:
-                    # Vector of metallicity
-                    for i in range(g[k].shape[1]):
-                        fields.append((ptype, "Metallicity_%02i" % i))
-                elif k == "ChemistryAbundances" and len(g[k].shape)>1:
-                    for i in range(g[k].shape[1]):
-                        fields.append((ptype, "Chemistry_%03i" % i))
-                else:
-                    kk = k
-                    if not hasattr(g[kk], "shape"): continue
-                    fields.append((ptype, str(kk)))
-
-
-        f.close()
-        return fields, {}
+class IOHandlerOWLS(IOHandlerGadgetHDF5):
+    _dataset_type = "OWLS"

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -654,7 +654,7 @@
 
             age = self.parameters['time']
             iage = 1 + int(10.*age/self.dtau)
-            iage = np.min([iage,self.n_frw/2 + (iage - self.n_frw/2)/10])
+            iage = np.min([iage,self.n_frw//2 + (iage - self.n_frw//2)//10])
 
             self.time_simu = self.t_frw[iage  ]*(age-self.tau_frw[iage-1])/(self.tau_frw[iage]-self.tau_frw[iage-1])+ \
                              self.t_frw[iage-1]*(age-self.tau_frw[iage  ])/(self.tau_frw[iage-1]-self.tau_frw[iage])

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -165,7 +165,7 @@
                             aux_fh[afield].seek(0, os.SEEK_SET)
                             sh = aux_fields_offsets[afield][ptype][0] + total
                             sf = aux_fields_offsets[afield][ptype][1] + \
-                                tp[ptype] - count
+                                tp[ptype] - count - total
                             if tp[ptype] > 0:
                                 aux = np.genfromtxt(
                                     aux_fh[afield], skip_header=sh,
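
The tipsy fix subtracts the running total from the footer offset: genfromtxt reads the slice between skip_header and skip_footer, so both bounds have to track how many rows earlier chunks already consumed. A small windowed-read sketch:

    import io
    import numpy as np

    lines = io.StringIO("\n".join(str(v) for v in range(10)))
    nrows, sh, count, total = 10, 2, 3, 0   # file rows, start, chunk, rows done
    sf = nrows - sh - count - total         # rows left after this chunk
    aux = np.genfromtxt(lines, skip_header=sh, skip_footer=sf)
    print(aux)                              # -> [2. 3. 4.]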

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -218,13 +218,58 @@
             cont_type = parse_h5_attr(f, "container_type")
             if data_type is None:
                 return False
-            if data_type in ["yt_light_ray"]:
-                return True
             if data_type == "yt_data_container" and \
                 cont_type not in _grid_data_containers:
                 return True
         return False
 
+class YTDataLightRayDataset(YTDataContainerDataset):
+    """Dataset for saved LightRay objects."""
+
+    def _parse_parameter_file(self):
+        super(YTDataLightRayDataset, self)._parse_parameter_file()
+        self._restore_light_ray_solution()
+
+    def _restore_light_ray_solution(self):
+        """
+        Restore all information associated with the light ray solution
+        to its original form.
+        """
+        key = "light_ray_solution"
+        self.light_ray_solution = []
+        lrs_fields = [par for par in self.parameters \
+                      if key in par and not par.endswith("_units")]
+        if len(lrs_fields) == 0:
+            return
+        self.light_ray_solution = \
+          [{} for val in self.parameters[lrs_fields[0]]]
+        for sp3 in ["unique_identifier", "filename"]:
+            ksp3 = "%s_%s" % (key, sp3)
+            if ksp3 not in lrs_fields:
+                continue
+            self.parameters[ksp3] = self.parameters[ksp3].astype(str)
+        for field in lrs_fields:
+            field_name = field[len(key)+1:]
+            for i in range(self.parameters[field].shape[0]):
+                self.light_ray_solution[i][field_name] = self.parameters[field][i]
+                if "%s_units" % field in self.parameters:
+                    if len(self.parameters[field].shape) > 1:
+                        to_val = self.arr
+                    else:
+                        to_val = self.quan
+                    self.light_ray_solution[i][field_name] = \
+                      to_val(self.light_ray_solution[i][field_name],
+                             self.parameters["%s_units" % field])
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = parse_h5_attr(f, "data_type")
+            if data_type in ["yt_light_ray"]:
+                return True
+        return False
+
 class YTSpatialPlotDataset(YTDataContainerDataset):
     """Dataset for saved slices and projections."""
     _field_info_class = YTGridFieldInfo
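
yt.load settles on a frontend by calling each registered Dataset class's _is_valid until one claims the file; the new subclass claims HDF5 files whose top-level data_type attribute is yt_light_ray, which is why that check could be dropped from the parent class. A sketch of the same handshake as a standalone predicate:

    import h5py

    def is_light_ray(fn):
        """Mimics YTDataLightRayDataset._is_valid for a path argument."""
        if not fn.endswith(".h5"):
            return False
        with h5py.File(fn, "r") as f:
            # attrs may come back as bytes; normalize before comparing
            data_type = f.attrs.get("data_type")
            if isinstance(data_type, bytes):
                data_type = data_type.decode("utf8")
            return data_type == "yt_light_ray"

    # yt.load("light_ray.h5") effectively walks all registered candidates
    # until one such predicate returns True ("light_ray.h5" is made up).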

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -232,5 +232,5 @@
     if iterable(val):
         val = np.array(val)
         if val.dtype.kind == 'U':
-            val = val.astype('|S40')
+            val = val.astype('|S')
     fh.attrs[str(attr)] = val
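
The utilities change fixes a silent truncation: casting unicode attributes to the fixed-width |S40 clips anything past 40 bytes, while a bare |S lets numpy size the bytes dtype to the longest element. For instance:

    import numpy as np

    val = np.array([u"x" * 50])           # dtype kind 'U'
    print(val.astype("|S40").dtype)       # |S40 -- value silently truncated
    print(val.astype("|S").dtype)         # |S50 -- sized to fit the data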

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -519,33 +519,34 @@
         print("Try: pip install python-hglib")
         return -1
     f = open(os.path.join(path, "yt_updater.log"), "a")
-    repo = hglib.open(path)
-    repo.pull()
-    ident = repo.identify().decode("utf-8")
-    if "+" in ident:
-        print("Can't rebuild modules by myself.")
-        print("You will have to do this yourself.  Here's a sample commands:")
-        print("")
-        print("    $ cd %s" % (path))
-        print("    $ hg up")
-        print("    $ %s setup.py develop" % (sys.executable))
-        return 1
-    print("Updating the repository")
-    f.write("Updating the repository\n\n")
-    repo.update(check=True)
-    f.write("Updated from %s to %s\n\n" % (ident, repo.identify()))
-    if skip_rebuild: return
-    f.write("Rebuilding modules\n\n")
-    p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"], cwd=path,
-                        stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
-    stdout, stderr = p.communicate()
-    f.write(stdout.decode('utf-8'))
-    f.write("\n\n")
-    if p.returncode:
-        print("BROKEN: See %s" % (os.path.join(path, "yt_updater.log")))
-        sys.exit(1)
-    f.write("Successful!\n")
-    print("Updated successfully.")
+    with hglib.open(path) as repo:
+        repo.pull()
+        ident = repo.identify().decode("utf-8")
+        if "+" in ident:
+            print("Can't rebuild modules by myself.")
+            print("You will have to do this yourself.  Here's a sample commands:")
+            print("")
+            print("    $ cd %s" % (path))
+            print("    $ hg up")
+            print("    $ %s setup.py develop" % (sys.executable))
+            return 1
+        print("Updating the repository")
+        f.write("Updating the repository\n\n")
+        repo.update(check=True)
+        f.write("Updated from %s to %s\n\n" % (ident, repo.identify()))
+        if skip_rebuild: return
+        f.write("Rebuilding modules\n\n")
+        p = subprocess.Popen([sys.executable, "setup.py", "build_ext", "-i"],
+                             cwd=path, stdout = subprocess.PIPE,
+                             stderr = subprocess.STDOUT)
+        stdout, stderr = p.communicate()
+        f.write(stdout.decode('utf-8'))
+        f.write("\n\n")
+        if p.returncode:
+            print("BROKEN: See %s" % (os.path.join(path, "yt_updater.log")))
+            sys.exit(1)
+        f.write("Successful!\n")
+        print("Updated successfully.")
 
 def get_hg_version(path):
     try:
@@ -556,8 +557,8 @@
         print("Try: pip install python-hglib")
         return None
     try:
-        repo = hglib.open(path)
-        return repo.identify()
+        with hglib.open(path) as repo:
+            return repo.identify()
     except hglib.error.ServerError:
         # path is not an hg repository
         return None
@@ -1035,3 +1036,7 @@
         return val.decode('utf8')
     else:
         return val
+
+def issue_deprecation_warning(msg):
+    from numpy import VisibleDeprecationWarning
+    warnings.warn(msg, VisibleDeprecationWarning, stacklevel=3)
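
Besides wrapping the hglib handles in with blocks so they are always closed, this hunk adds an issue_deprecation_warning helper; stacklevel=3 makes the warning point at the user's call site rather than at the helper itself. A usage sketch with a made-up deprecated function:

    import warnings
    from numpy import VisibleDeprecationWarning

    def issue_deprecation_warning(msg):
        warnings.warn(msg, VisibleDeprecationWarning, stacklevel=3)

    def old_api():   # hypothetical deprecated entry point
        issue_deprecation_warning("old_api is deprecated; use new_api instead.")

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old_api()
        print(caught[0].category.__name__)   # -> VisibleDeprecationWarning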

diff -r 9719806bb4279736c4e39721324f5ca58b5f898c -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -309,8 +309,8 @@
               axis, width, depth)
         elif name == self.radial_axis:
             rax = self.radial_axis
-            width = [self.ds.domain_width[self.y_axis[rax]],
-                     self.ds.domain_width[self.x_axis[rax]]]
+            width = [self.ds.domain_width[self.x_axis[rax]],
+                     self.ds.domain_width[self.y_axis[rax]]]
         elif name == 'latitude':
             ri = self.axis_id[self.radial_axis]
             # Remember, in spherical coordinates when we cut in theta,

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/078774898c0b/
Changeset:   078774898c0b
Branch:      yt
User:        ngoldbaum
Date:        2016-12-08 23:55:02+00:00
Summary:     fix issue in docs
Affected #:  1 file

diff -r e902d814f979c5a07acb5bbf6dda622c2f2929d9 -r 078774898c0b14002775f250e6c4dc7930d3b7b0 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -679,9 +679,11 @@
 
    import yt
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   slc = yt.SlicePlot(ds, 'z', 'x-velocity', width=(30,'kpc'))
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(1.5, 'Mpc'))
    slc.set_background_color('density')
-   slc.save()
+   slc.save('bottom_colormap_background')
+   slc.set_background_color('density', color='black')
+   slc.save('black_background')
 
 Lastly, the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_zlim`
 function makes it possible to set a custom colormap range.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


