[yt-svn] commit/yt: 9 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Feb 9 06:49:48 PST 2017


9 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/c6882a33750a/
Changeset:   c6882a33750a
Branch:      yt
User:        jzuhone
Date:        2016-12-05 17:46:09+00:00
Summary:     Move particle trajectories to data_objects
Affected #:  6 files

diff -r 2668f56ec734f8600cfd4c72b06cc035bdd42d3a -r c6882a33750aa88143e300869590f616c592f24b doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The `particle_trajectories` analysis module enables the construction of particle trajectories from a time series of datasets for a specified list of particles identified by their unique indices. "
+    "One can create particle trajectories from a `DatasetSeries` object for a specified list of particles identified by their unique indices using the `particle_trajectories` method. "
    ]
   },
   {
@@ -18,10 +18,10 @@
     "%matplotlib inline\n",
     "import yt\n",
     "import glob\n",
-    "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
     "from yt.config import ytcfg\n",
     "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
-    "import matplotlib.pyplot as plt"
+    "import matplotlib.pyplot as plt\n",
+    "from mpl_toolkits.mplot3d import Axes3D"
    ]
   },
   {
@@ -86,7 +86,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "which is what we expected them to be. Now we're ready to create a `ParticleTrajectories` object:"
+    "which is what we expected them to be. Now we're ready to create a `DatasetSeries` object and use it to create particle trajectories: "
    ]
   },
   {
@@ -97,7 +97,9 @@
    },
    "outputs": [],
    "source": [
-    "trajs = ParticleTrajectories(my_fns, indices, fields=fields)"
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "# suppress_logging=True cuts down on a lot of noise\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
    ]
   },
   {
@@ -134,8 +136,9 @@
    },
    "outputs": [],
    "source": [
-    "plt.plot(trajs[\"particle_position_x\"][0].ndarray_view(), trajs[\"particle_position_y\"][0].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_position_x\"][1].ndarray_view(), trajs[\"particle_position_y\"][1].ndarray_view())"
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
+    "plt.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
    ]
   },
   {
@@ -153,8 +156,9 @@
    },
    "outputs": [],
    "source": [
-    "plt.plot(trajs[\"particle_velocity_x\"][0].ndarray_view(), trajs[\"particle_velocity_y\"][0].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_velocity_x\"][1].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
    ]
   },
   {
@@ -172,8 +176,9 @@
    },
    "outputs": [],
    "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_x\"][1].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
    ]
   },
   {
@@ -192,8 +197,9 @@
    "outputs": [],
    "source": [
     "particle1 = trajs.trajectory_from_index(1)\n",
-    "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_x\"].ndarray_view())\n",
-    "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_y\"].ndarray_view())"
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
    ]
   },
   {
@@ -252,7 +258,8 @@
    "source": [
     "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
     "my_fns.sort()\n",
-    "trajs = ParticleTrajectories(my_fns, indices)"
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
    ]
   },
   {
@@ -270,16 +277,11 @@
    },
    "outputs": [],
    "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from mpl_toolkits.mplot3d import Axes3D\n",
     "fig = plt.figure(figsize=(8.0, 8.0))\n",
     "ax = fig.add_subplot(111, projection='3d')\n",
-    "ax.plot(trajs[\"particle_position_x\"][100].ndarray_view(), trajs[\"particle_position_z\"][100].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][100].ndarray_view())\n",
-    "ax.plot(trajs[\"particle_position_x\"][8].ndarray_view(), trajs[\"particle_position_z\"][8].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][8].ndarray_view())\n",
-    "ax.plot(trajs[\"particle_position_x\"][25].ndarray_view(), trajs[\"particle_position_z\"][25].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][25].ndarray_view())"
+    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
+    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
+    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
    ]
   },
   {
@@ -297,9 +299,10 @@
    },
    "outputs": [],
    "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][100].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][8].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][25].ndarray_view())"
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
    ]
   },
   {
@@ -335,9 +338,10 @@
    },
    "outputs": [],
    "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][100].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][8].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][25].ndarray_view())\n",
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][25])\n",
     "plt.yscale(\"log\")"
    ]
   },
@@ -362,8 +366,9 @@
   }
  ],
  "metadata": {
+  "anaconda-cloud": {},
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python [default]",
    "language": "python",
    "name": "python3"
   },
@@ -377,7 +382,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.5.1"
+   "version": "3.5.2"
   }
  },
  "nbformat": 4,

diff -r 2668f56ec734f8600cfd4c72b06cc035bdd42d3a -r c6882a33750aa88143e300869590f616c592f24b yt/analysis_modules/particle_trajectories/api.py
--- a/yt/analysis_modules/particle_trajectories/api.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""
-API for particle_trajectories
-"""
-from __future__ import absolute_import
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .particle_trajectories import ParticleTrajectories

diff -r 2668f56ec734f8600cfd4c72b06cc035bdd42d3a -r c6882a33750aa88143e300869590f616c592f24b yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Particle trajectories
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_data import YTFieldData
-from yt.data_objects.time_series import DatasetSeries
-from yt.utilities.lib.particle_mesh_operations import CICSample_3
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_root_only
-from yt.funcs import mylog, get_pbar
-from yt.units.yt_array import array_like_field
-from yt.config import ytcfg
-from collections import OrderedDict
-
-import numpy as np
-from yt.utilities.on_demand_imports import _h5py as h5py
-
-class ParticleTrajectories(object):
-    r"""A collection of particle trajectories in time over a series of
-    datasets. 
-
-    The ParticleTrajectories object contains a collection of
-    particle trajectories for a specified set of particle indices. 
-    
-    Parameters
-    ----------
-    outputs : `yt.data_objects.time_series.DatasetSeries` or list of strings
-        DatasetSeries object, or a time-sorted list of filenames to
-        construct a new DatasetSeries object.
-    indices : array_like
-        An integer array of particle indices whose trajectories we
-        want to track. If they are not sorted they will be sorted.
-    fields : list of strings, optional
-        A set of fields that is retrieved when the trajectory
-        collection is instantiated.
-        Default : None (will default to the fields 'particle_position_x',
-        'particle_position_y', 'particle_position_z')
-    suppress_logging : boolean
-        Suppress yt's logging when iterating over the simulation time
-        series.
-        Default : False
-
-    Examples
-    ________
-    >>> from yt.mods import *
-    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
-    >>> my_fns.sort()
-    >>> fields = ["particle_position_x", "particle_position_y",
-    >>>           "particle_position_z", "particle_velocity_x",
-    >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> ds = load(my_fns[0])
-    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
-    >>> indices = init_sphere["particle_index"].astype("int")
-    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
-    >>> for t in trajs :
-    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-    """
-    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
-
-        indices.sort() # Just in case the caller wasn't careful
-        self.field_data = YTFieldData()
-        if isinstance(outputs, DatasetSeries):
-            self.data_series = outputs
-        else:
-            self.data_series = DatasetSeries(outputs)
-        self.masks = []
-        self.sorts = []
-        self.array_indices = []
-        self.indices = indices
-        self.num_indices = len(indices)
-        self.num_steps = len(outputs)
-        self.times = []
-        self.suppress_logging = suppress_logging
-
-        if fields is None: fields = []
-        fields = list(OrderedDict.fromkeys(fields))
-
-        if self.suppress_logging:
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
-        
-        fds = {}
-        ds_first = self.data_series[0]
-        dd_first = ds_first.all_data()
-        idx_field = dd_first._determine_fields("particle_index")[0]
-        for field in ("particle_position_%s" % ax for ax in "xyz"):
-            fds[field] = dd_first._determine_fields(field)[0]
-
-        my_storage = {}
-        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
-        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-            dd = ds.all_data()
-            newtags = dd[idx_field].d.astype("int64")
-            mask = np.in1d(newtags, indices, assume_unique=True)
-            sort = np.argsort(newtags[mask])
-            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
-            self.array_indices.append(array_indices)
-            self.masks.append(mask)
-            self.sorts.append(sort)
-
-            pfields = {}
-            for field in ("particle_position_%s" % ax for ax in "xyz"):
-                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
-
-            sto.result_id = ds.parameter_filename
-            sto.result = (ds.current_time, array_indices, pfields)
-            pbar.update(i)
-        pbar.finish()
-
-        if self.suppress_logging:
-            mylog.setLevel(old_level)
-
-        times = []
-        for fn, (time, indices, pfields) in sorted(my_storage.items()):
-            times.append(time)
-        self.times = self.data_series[0].arr([time for time in times], times[0].units)
-
-        self.particle_fields = []
-        output_field = np.empty((self.num_indices, self.num_steps))
-        output_field.fill(np.nan)
-        for field in ("particle_position_%s" % ax for ax in "xyz"):
-            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
-                output_field[indices, i] = pfields[field]
-            self.field_data[field] = array_like_field(
-                dd_first, output_field.copy(), fds[field])
-            self.particle_fields.append(field)
-
-        # Instantiate fields the caller requested
-        self._get_data(fields)
-
-    def has_key(self, key):
-        return (key in self.field_data)
-    
-    def keys(self):
-        return self.field_data.keys()
-
-    def __getitem__(self, key):
-        """
-        Get the field associated with key.
-        """
-        if key == "particle_time":
-            return self.times
-        if key not in self.field_data:
-            self._get_data([key])
-        return self.field_data[key]
-    
-    def __setitem__(self, key, val):
-        """
-        Sets a field to be some other value.
-        """
-        self.field_data[key] = val
-                        
-    def __delitem__(self, key):
-        """
-        Delete the field from the trajectory
-        """
-        del self.field_data[key]
-
-    def __iter__(self):
-        """
-        This iterates over the trajectories for
-        the different particles, returning dicts
-        of fields for each trajectory
-        """
-        for idx in range(self.num_indices):
-            traj = {}
-            traj["particle_index"] = self.indices[idx]
-            traj["particle_time"] = self.times
-            for field in self.field_data.keys():
-                traj[field] = self[field][idx,:]
-            yield traj
-            
-    def __len__(self):
-        """
-        The number of individual trajectories
-        """
-        return self.num_indices
-
-    def add_fields(self, fields):
-        """
-        Add a list of fields to an existing trajectory
-
-        Parameters
-        ----------
-        fields : list of strings
-            A list of fields to be added to the current trajectory
-            collection.
-
-        Examples
-        ________
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
-        """
-        self._get_data(fields)
-                
-    def _get_data(self, fields):
-        """
-        Get a list of fields to include in the trajectory collection.
-        The trajectory collection itself is a dict of 2D numpy arrays,
-        with shape (num_indices, num_steps)
-        """
-
-        missing_fields = [field for field in fields
-                          if field not in self.field_data]
-        if not missing_fields:
-            return
-
-        if self.suppress_logging:
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
-        ds_first = self.data_series[0]
-        dd_first = ds_first.all_data()
-
-        fds = {}
-        new_particle_fields = []
-        for field in missing_fields:
-            fds[field] = dd_first._determine_fields(field)[0]
-            if field not in self.particle_fields:
-                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
-                    self.particle_fields.append(field)
-                    new_particle_fields.append(field)
-                    
-
-        grid_fields = [field for field in missing_fields
-                       if field not in self.particle_fields]
-        step = int(0)
-        pbar = get_pbar("Generating [%s] fields in trajectories." %
-                        ", ".join(missing_fields), self.num_steps)
-        my_storage = {}
-        
-        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-            mask = self.masks[i]
-            sort = self.sorts[i]
-            pfield = {}
-
-            if new_particle_fields:  # there's at least one particle field
-                dd = ds.all_data()
-                for field in new_particle_fields:
-                    # This is easy... just get the particle fields
-                    pfield[field] = dd[fds[field]].d[mask][sort]
-
-            if grid_fields:
-                # This is hard... must loop over grids
-                for field in grid_fields:
-                    pfield[field] = np.zeros(self.num_indices)
-                x = self["particle_position_x"][:,step].d
-                y = self["particle_position_y"][:,step].d
-                z = self["particle_position_z"][:,step].d
-                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
-
-                # This will fail for non-grid index objects
-                for grid in particle_grids:
-                    cube = grid.retrieve_ghost_zones(1, grid_fields)
-                    for field in grid_fields:
-                        CICSample_3(x, y, z, pfield[field],
-                                    self.num_indices,
-                                    cube[fds[field]],
-                                    np.array(grid.LeftEdge).astype(np.float64),
-                                    np.array(grid.ActiveDimensions).astype(np.int32),
-                                    grid.dds[0])
-            sto.result_id = ds.parameter_filename
-            sto.result = (self.array_indices[i], pfield)
-            pbar.update(step)
-            step += 1
-        pbar.finish()
-
-        output_field = np.empty((self.num_indices,self.num_steps))
-        output_field.fill(np.nan)
-        for field in missing_fields:
-            fd = fds[field]
-            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
-                output_field[indices, i] = pfield[field]
-            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
-
-        if self.suppress_logging:
-            mylog.setLevel(old_level)
-
-    def trajectory_from_index(self, index):
-        """
-        Retrieve a single trajectory corresponding to a specific particle
-        index
-
-        Parameters
-        ----------
-        index : int
-            This defines which particle trajectory from the
-            ParticleTrajectories object will be returned.
-
-        Returns
-        -------
-        A dictionary corresponding to the particle's trajectory and the
-        fields along that trajectory
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> import matplotlib.pylab as pl
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> traj = trajs.trajectory_from_index(indices[0])
-        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
-        >>> pl.savefig("orbit")
-        """
-        mask = np.in1d(self.indices, (index,), assume_unique=True)
-        if not np.any(mask):
-            print("The particle index %d is not in the list!" % (index))
-            raise IndexError
-        fields = [field for field in sorted(self.field_data.keys())]
-        traj = {}
-        traj["particle_time"] = self.times
-        traj["particle_index"] = index
-        for field in fields:
-            traj[field] = self[field][mask,:][0]
-        return traj
-
-    @parallel_root_only
-    def write_out(self, filename_base):
-        """
-        Write out particle trajectories to tab-separated ASCII files (one
-        for each trajectory) with the field names in the file header. Each
-        file is named with a basename and the index number.
-
-        Parameters
-        ----------
-        filename_base : string
-            The prefix for the outputted ASCII files.
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.write_out("orbit_trajectory")       
-        """
-        fields = [field for field in sorted(self.field_data.keys())]
-        num_fields = len(fields)
-        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
-        template_str = "%g\t"*num_fields+"%g\n"
-        for ix in range(self.num_indices):
-            outlines = [first_str]
-            for it in range(self.num_steps):
-                outlines.append(template_str %
-                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
-            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
-            fid.writelines(outlines)
-            fid.close()
-            del fid
-
-    @parallel_root_only
-    def write_out_h5(self, filename):
-        """
-        Write out all the particle trajectories to a single HDF5 file
-        that contains the indices, the times, and the 2D array for each
-        field individually
-
-        Parameters
-        ----------
-
-        filename : string
-            The output filename for the HDF5 file
-
-        Examples
-        --------
-
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.write_out_h5("orbit_trajectories")                
-        """
-        fid = h5py.File(filename, "w")
-        fid.create_dataset("particle_indices", dtype=np.int64,
-                           data=self.indices)
-        fid.close()
-        self.times.write_hdf5(filename, dataset_name="particle_times")
-        fields = [field for field in sorted(self.field_data.keys())]
-        for field in fields:
-            self[field].write_hdf5(filename, dataset_name="%s" % field)

diff -r 2668f56ec734f8600cfd4c72b06cc035bdd42d3a -r c6882a33750aa88143e300869590f616c592f24b yt/data_objects/particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/particle_trajectories.py
@@ -0,0 +1,371 @@
+"""
+Particle trajectories
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_data import YTFieldData
+from yt.utilities.lib.particle_mesh_operations import CICSample_3
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.funcs import mylog, get_pbar
+from yt.units.yt_array import array_like_field
+from yt.config import ytcfg
+from collections import OrderedDict
+
+import numpy as np
+from yt.utilities.on_demand_imports import _h5py as h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    datasets. 
+
+    Parameters
+    ----------
+    outputs : `yt.data_objects.time_series.DatasetSeries`
+        DatasetSeries object from which to draw the particles.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated. Default: None (will default
+        to the fields 'particle_position_x', 'particle_position_y',
+        'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series. Default: False
+
+    Examples
+    --------
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> ds = load(my_fns[0])
+    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> ts = DatasetSeries(my_fns)
+    >>> trajs = ts.particle_trajectories(indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+    """
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
+
+        indices.sort() # Just in case the caller wasn't careful
+        self.field_data = YTFieldData()
+        self.data_series = outputs
+        self.masks = []
+        self.sorts = []
+        self.array_indices = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(outputs)
+        self.times = []
+        self.suppress_logging = suppress_logging
+
+        if fields is None: fields = []
+        fields = list(OrderedDict.fromkeys(fields))
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        
+        fds = {}
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+        idx_field = dd_first._determine_fields("particle_index")[0]
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            fds[field] = dd_first._determine_fields(field)[0]
+
+        my_storage = {}
+        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            dd = ds.all_data()
+            newtags = dd[idx_field].d.astype("int64")
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sort = np.argsort(newtags[mask])
+            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
+            self.array_indices.append(array_indices)
+            self.masks.append(mask)
+            self.sorts.append(sort)
+
+            pfields = {}
+            for field in ("particle_position_%s" % ax for ax in "xyz"):
+                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
+            sto.result_id = ds.parameter_filename
+            sto.result = (ds.current_time, array_indices, pfields)
+            pbar.update(i)
+        pbar.finish()
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
+
+        times = []
+        for fn, (time, indices, pfields) in sorted(my_storage.items()):
+            times.append(time)
+        self.times = self.data_series[0].arr([time for time in times], times[0].units)
+
+        self.particle_fields = []
+        output_field = np.empty((self.num_indices, self.num_steps))
+        output_field.fill(np.nan)
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfields[field]
+            self.field_data[field] = array_like_field(
+                dd_first, output_field.copy(), fds[field])
+            self.particle_fields.append(field)
+
+        # Instantiate fields the caller requested
+        self._get_data(fields)
+
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key.
+        """
+        if key == "particle_time":
+            return self.times
+        if key not in self.field_data:
+            self._get_data([key])
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in range(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        self._get_data(fields)
+                
+    def _get_data(self, fields):
+        """
+        Get a list of fields to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+
+        missing_fields = [field for field in fields
+                          if field not in self.field_data]
+        if not missing_fields:
+            return
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+
+        fds = {}
+        new_particle_fields = []
+        for field in missing_fields:
+            fds[field] = dd_first._determine_fields(field)[0]
+            if field not in self.particle_fields:
+                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
+                    self.particle_fields.append(field)
+                    new_particle_fields.append(field)
+                    
+
+        grid_fields = [field for field in missing_fields
+                       if field not in self.particle_fields]
+        step = int(0)
+        pbar = get_pbar("Generating [%s] fields in trajectories" %
+                        ", ".join(missing_fields), self.num_steps)
+        my_storage = {}
+        
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            mask = self.masks[i]
+            sort = self.sorts[i]
+            pfield = {}
+
+            if new_particle_fields:  # there's at least one particle field
+                dd = ds.all_data()
+                for field in new_particle_fields:
+                    # This is easy... just get the particle fields
+                    pfield[field] = dd[fds[field]].d[mask][sort]
+
+            if grid_fields:
+                # This is hard... must loop over grids
+                for field in grid_fields:
+                    pfield[field] = np.zeros(self.num_indices)
+                x = self["particle_position_x"][:,step].d
+                y = self["particle_position_y"][:,step].d
+                z = self["particle_position_z"][:,step].d
+                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
+
+                # This will fail for non-grid index objects
+                for grid in particle_grids:
+                    cube = grid.retrieve_ghost_zones(1, grid_fields)
+                    for field in grid_fields:
+                        CICSample_3(x, y, z, pfield[field],
+                                    self.num_indices,
+                                    cube[fds[field]],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    grid.dds[0])
+            sto.result_id = ds.parameter_filename
+            sto.result = (self.array_indices[i], pfield)
+            pbar.update(step)
+            step += 1
+        pbar.finish()
+
+        output_field = np.empty((self.num_indices,self.num_steps))
+        output_field.fill(np.nan)
+        for field in missing_fields:
+            fd = fds[field]
+            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfield[field]
+            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print("The particle index %d is not in the list!" % (index))
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    @parallel_root_only
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the outputted ASCII files.
+
+        Examples
+        --------
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in range(self.num_indices):
+            outlines = [first_str]
+            for it in range(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+
+    @parallel_root_only
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fid.create_dataset("particle_indices", dtype=np.int64,
+                           data=self.indices)
+        fid.close()
+        self.times.write_hdf5(filename, dataset_name="particle_times")
+        fields = [field for field in sorted(self.field_data.keys())]
+        for field in fields:
+            self[field].write_hdf5(filename, dataset_name="%s" % field)
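
For reference, the tab-separated files produced by write_out() can be read back with plain NumPy. A minimal sketch (the filename orbit_trajectory_1.dat is hypothetical; it assumes write_out("orbit_trajectory") was called and one tracked particle has index 1):

    import numpy as np

    # np.loadtxt skips the "# particle_time\t<field>..." header automatically;
    # each remaining row is one timestep: column 0 is particle_time, the rest
    # follow the alphabetically sorted field names in the header line.
    data = np.loadtxt("orbit_trajectory_1.dat")
    times = data[:, 0]
    print(data.shape)  # (num_steps, num_fields + 1)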

diff -r 2668f56ec734f8600cfd4c72b06cc035bdd42d3a -r c6882a33750aa88143e300869590f616c592f24b yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -32,6 +32,8 @@
     create_quantity_proxy, \
     analysis_task_registry, \
     AnalysisTask
+from yt.data_objects.particle_trajectories import \
+    ParticleTrajectories
 from yt.funcs import \
     iterable, \
     ensure_list, \
@@ -399,6 +401,41 @@
         self._dataset_cls = ds.__class__
         return ds
 
+    def particle_trajectories(self, indices, fields=None, suppress_logging=False):
+        r"""Create a collection of particle trajectories in time over a series of
+        datasets.
+
+        Parameters
+        ----------
+        indices : array_like
+            An integer array of particle indices whose trajectories we
+            want to track. If they are not sorted they will be sorted.
+        fields : list of strings, optional
+            A set of fields that is retrieved when the trajectory
+            collection is instantiated. Default: None (will default
+            to the fields 'particle_position_x', 'particle_position_y',
+            'particle_position_z')
+        suppress_logging : boolean
+            Suppress yt's logging when iterating over the simulation time
+            series. Default: False
+
+        Examples
+        --------
+        >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+        >>> my_fns.sort()
+        >>> fields = ["particle_position_x", "particle_position_y",
+        >>>           "particle_position_z", "particle_velocity_x",
+        >>>           "particle_velocity_y", "particle_velocity_z"]
+        >>> ds = load(my_fns[0])
+        >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
+        >>> indices = init_sphere["particle_index"].astype("int")
+        >>> ts = DatasetSeries(my_fns)
+        >>> trajs = ts.particle_trajectories(indices, fields=fields)
+        >>> for t in trajs :
+        >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+        """
+        return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging)
+
 class TimeSeriesQuantitiesContainer(object):
     def __init__(self, data_object, quantities):
         self.data_object = data_object
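
With this method in place, migrating an existing script is mechanical. A sketch, assuming my_fns, indices, and fields are defined as in the docstring example above:

    import yt

    # before:
    #   from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories
    #   trajs = ParticleTrajectories(my_fns, indices, fields=fields)

    # after: build the DatasetSeries first, then ask it for the trajectories
    ts = yt.DatasetSeries(my_fns)
    trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)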


https://bitbucket.org/yt_analysis/yt/commits/4990b229fe2f/
Changeset:   4990b229fe2f
Branch:      yt
User:        jzuhone
Date:        2016-12-05 19:34:32+00:00
Summary:     Add something for backwards-compat
Affected #:  2 files

diff -r c6882a33750aa88143e300869590f616c592f24b -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,6 @@
+from yt.funcs import issue_deprecation_warning
+
+issue_deprecation_warning("Particle trajectories are now available from DatasetSeries "
+                          "objects as ts.particle_trajectories. The ParticleTrajectories "
+                          "analysis module is deprecated.")
+from yt.data_objects.particle_trajectories import ParticleTrajectories
\ No newline at end of file
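
The shim keeps the old import path working while steering users to the new location. One way to see it fire (a sketch; it assumes a fresh interpreter, since Python caches module imports and the warning is issued only the first time the api module is loaded):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # old import path, now a thin re-export of
        # yt.data_objects.particle_trajectories.ParticleTrajectories
        from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    print(ParticleTrajectories.__module__)  # yt.data_objects.particle_trajectories
    print(caught[0].message)                # the deprecation text above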


https://bitbucket.org/yt_analysis/yt/commits/64895da9daac/
Changeset:   64895da9daac
Branch:      yt
User:        jzuhone
Date:        2017-01-18 14:47:20+00:00
Summary:     Move particle trajectories docs
Affected #:  6 files

diff -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 -r 64895da9daac301c362c1cad5c03c81319df8c30 doc/source/analyzing/Particle_Trajectories.ipynb
--- /dev/null
+++ b/doc/source/analyzing/Particle_Trajectories.ipynb
@@ -0,0 +1,390 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "One can create particle trajectories from a `DatasetSeries` object for a specified list of particles identified by their unique indices using the `particle_trajectories` method. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "%matplotlib inline\n",
+    "import yt\n",
+    "import glob\n",
+    "from yt.config import ytcfg\n",
+    "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+    "import matplotlib.pyplot as plt\n",
+    "from mpl_toolkits.mplot3d import Axes3D"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, let's start off with a FLASH dataset containing only two particles in a mutual circular orbit. We can get the list of filenames this way:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "my_fns = glob.glob(path+\"/Orbit/orbit_hdf5_chk_00[0-9][0-9]\")\n",
+    "my_fns.sort()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And let's define a list of fields that we want to include in the trajectories. The position fields will be included by default, so let's just ask for the velocity fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "fields = [\"particle_velocity_x\", \"particle_velocity_y\", \"particle_velocity_z\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "There are only two particles, but for consistency's sake let's grab their indices from the dataset itself:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds = yt.load(my_fns[0])\n",
+    "dd = ds.all_data()\n",
+    "indices = dd[\"particle_index\"].astype(\"int\")\n",
+    "print (indices)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "which is what we expected them to be. Now we're ready to create a `DatasetSeries` object and use it to create particle trajectories: "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "# suppress_logging=True cuts down on a lot of noise\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `ParticleTrajectories` object `trajs` is essentially a dictionary-like container for the particle fields along the trajectory, and can be accessed as such:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (trajs[\"particle_position_x\"])\n",
+    "print (trajs[\"particle_position_x\"].shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that each field is a 2D NumPy array with the different particle indices along the first dimension and the times along the second dimension. As such, we can access them individually by indexing the field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
+    "plt.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And we can plot the velocity fields as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If we want to access the time along the trajectory, we use the key `\"particle_time\"`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Alternatively, if we know the particle index we'd like to examine, we can get an individual trajectory corresponding to that index:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "particle1 = trajs.trajectory_from_index(1)\n",
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now let's look at a more complicated (and fun!) example. We'll use an Enzo cosmology dataset. First, we'll find the maximum density in the domain, and obtain the indices of the particles within some radius of the center. First, let's have a look at what we're getting:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+    "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "So far, so good--it looks like we've centered on a galaxy cluster. Let's grab all of the dark matter particles within a sphere of 0.5 Mpc (identified by `\"particle_type == 1\"`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
+    "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we'll get the list of datasets we want, and create trajectories for these particles:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
+    "my_fns.sort()\n",
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Matplotlib can make 3D plots, so let's pick three particle trajectories at random and look at them in the volume:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "fig = plt.figure(figsize=(8.0, 8.0))\n",
+    "ax = fig.add_subplot(111, projection='3d')\n",
+    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
+    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
+    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "It looks like these three different particles fell into the cluster along different filaments. We can also look at their x-positions only as a function of time:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "trajs.add_fields([\"density\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][25])\n",
+    "plt.yscale(\"log\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, the particle trajectories can be written to disk. Two options are provided: ASCII text files with a column for each field and the time, and HDF5 files:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
+    "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

diff -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 -r 64895da9daac301c362c1cad5c03c81319df8c30 doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ /dev/null
@@ -1,390 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "One can create particle trajectories from a `DatasetSeries` object for a specified list of particles identified by their unique indices using the `particle_trajectories` method. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "%matplotlib inline\n",
-    "import yt\n",
-    "import glob\n",
-    "from yt.config import ytcfg\n",
-    "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
-    "import matplotlib.pyplot as plt\n",
-    "from mpl_toolkits.mplot3d import Axes3D"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, let's start off with a FLASH dataset containing only two particles in a mutual circular orbit. We can get the list of filenames this way:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "my_fns = glob.glob(path+\"/Orbit/orbit_hdf5_chk_00[0-9][0-9]\")\n",
-    "my_fns.sort()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "And let's define a list of fields that we want to include in the trajectories. The position fields will be included by default, so let's just ask for the velocity fields:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "fields = [\"particle_velocity_x\", \"particle_velocity_y\", \"particle_velocity_z\"]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "There are only two particles, but for consistency's sake let's grab their indices from the dataset itself:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ds = yt.load(my_fns[0])\n",
-    "dd = ds.all_data()\n",
-    "indices = dd[\"particle_index\"].astype(\"int\")\n",
-    "print (indices)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "which is what we expected them to be. Now we're ready to create a `DatasetSeries` object and use it to create particle trajectories: "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ts = yt.DatasetSeries(my_fns)\n",
-    "# suppress_logging=True cuts down on a lot of noise\n",
-    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The `ParticleTrajectories` object `trajs` is essentially a dictionary-like container for the particle fields along the trajectory, and can be accessed as such:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "print (trajs[\"particle_position_x\"])\n",
-    "print (trajs[\"particle_position_x\"].shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Note that each field is a 2D NumPy array with the different particle indices along the first dimension and the times along the second dimension. As such, we can access them individually by indexing the field:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.figure(figsize=(6, 6))\n",
-    "plt.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
-    "plt.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "And we can plot the velocity fields as well:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.figure(figsize=(6, 6))\n",
-    "plt.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
-    "plt.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "If we want to access the time along the trajectory, we use the key `\"particle_time\"`:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.figure(figsize=(6, 6))\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Alternatively, if we know the particle index we'd like to examine, we can get an individual trajectory corresponding to that index:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "particle1 = trajs.trajectory_from_index(1)\n",
-    "plt.figure(figsize=(6, 6))\n",
-    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
-    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now let's look at a more complicated (and fun!) example. We'll use an Enzo cosmology dataset. First, we'll find the maximum density in the domain, and obtain the indices of the particles within some radius of the center. First, let's have a look at what we're getting:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-    "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
-    "slc.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "So far, so good--it looks like we've centered on a galaxy cluster. Let's grab all of the dark matter particles within a sphere of 0.5 Mpc (identified by `\"particle_type == 1\"`):"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
-    "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next we'll get the list of datasets we want, and create trajectories for these particles:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
-    "my_fns.sort()\n",
-    "ts = yt.DatasetSeries(my_fns)\n",
-    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Matplotlib can make 3D plots, so let's pick three particle trajectories at random and look at them in the volume:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "fig = plt.figure(figsize=(8.0, 8.0))\n",
-    "ax = fig.add_subplot(111, projection='3d')\n",
-    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
-    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
-    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "It looks like these three different particles fell into the cluster along different filaments. We can also look at their x-positions only as a function of time:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.figure(figsize=(6,6))\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs.add_fields([\"density\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.figure(figsize=(6,6))\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][100])\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][8])\n",
-    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][25])\n",
-    "plt.yscale(\"log\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, the particle trajectories can be written to disk. Two options are provided: ASCII text files with a column for each field and the time, and HDF5 files:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
-    "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
-   ]
-  }
- ],
- "metadata": {
-  "anaconda-cloud": {},
-  "kernelspec": {
-   "display_name": "Python [default]",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.2"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
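
For reference, the workflow the notebook above walks through condenses to a
short script against the new API (a sketch, assuming the FLASH "Orbit"
sample dataset and the field names used in the notebook):

    import glob
    import yt

    # build an ordered time series of datasets
    my_fns = sorted(glob.glob("Orbit/orbit_hdf5_chk_00[0-9][0-9]"))
    ts = yt.DatasetSeries(my_fns)

    # velocity fields to track; positions are always included
    fields = ["particle_velocity_x", "particle_velocity_y",
              "particle_velocity_z"]
    trajs = ts.particle_trajectories([1, 2], fields=fields,
                                     suppress_logging=True)

    # each field comes back as a 2D array indexed as (particle, time)
    print(trajs["particle_position_x"][0])

    # grid fields are interpolated to the particle positions
    trajs.add_fields(["density"])

    # one ASCII file per trajectory, or all trajectories in one HDF5 file
    trajs.write_out("orbit_trajectories")
    trajs.write_out_h5("orbit_trajectories.h5")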

diff -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 -r 64895da9daac301c362c1cad5c03c81319df8c30 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -18,4 +18,3 @@
    exporting
    two_point_functions
    clump_finding
-   particle_trajectories

diff -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 -r 64895da9daac301c362c1cad5c03c81319df8c30 doc/source/analyzing/analysis_modules/particle_trajectories.rst
--- a/doc/source/analyzing/analysis_modules/particle_trajectories.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _particle-trajectories:
-
-Particle Trajectories
----------------------
-
-.. notebook:: Particle_Trajectories.ipynb

diff -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 -r 64895da9daac301c362c1cad5c03c81319df8c30 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -22,4 +22,5 @@
    generating_processed_data
    saving_data
    time_series_analysis
+   particle_trajectories
    parallel_computation

diff -r 4990b229fe2f8ac63de21b393cabf01c88bdb462 -r 64895da9daac301c362c1cad5c03c81319df8c30 doc/source/analyzing/particle_trajectories.rst
--- /dev/null
+++ b/doc/source/analyzing/particle_trajectories.rst
@@ -0,0 +1,6 @@
+.. _particle-trajectories:
+
+Particle Trajectories
+---------------------
+
+.. notebook:: Particle_Trajectories.ipynb


https://bitbucket.org/yt_analysis/yt/commits/0f0ccc306d21/
Changeset:   0f0ccc306d21
Branch:      yt
User:        jzuhone
Date:        2017-01-18 16:07:22+00:00
Summary:     Starting work on tests for particle_trajectories
Affected #:  1 file

diff -r 64895da9daac301c362c1cad5c03c81319df8c30 -r 0f0ccc306d211b202f3cbf13df519b1a7dba37f5 yt/data_objects/tests/test_particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/tests/test_particle_trajectories.py
@@ -0,0 +1,28 @@
+import glob
+
+import os
+
+from yt.data_objects.time_series import DatasetSeries
+from yt.config import ytcfg
+
+def setup():
+    ytcfg["yt","__withintesting"] = "True"
+
+data_path = ytcfg.get("yt", "test_data_dir")
+
+def test_orbit_traj():
+    fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+    my_fns = glob.glob(os.path.join(data_path, "Orbit/orbit_hdf5_chk_00[0-9][0-9]"))
+    my_fns.sort()
+    ts = DatasetSeries(my_fns)
+    traj = ts.particle_trajectories([1, 2], fields=fields, suppress_logging=True)
+
+def test_etc_traj():
+    fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+    my_fns = glob.glob(os.path.join(data_path, "enzo_tiny_cosmology/DD*/*.hierarchy"))
+    my_fns.sort()
+    ts = DatasetSeries(my_fns)
+    sp = ts[0].sphere("max", (0.5, "Mpc"))
+    indices = sp["particle_index"][sp["particle_type"] == 1]
+    traj = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)
+    traj.add_fields(["density"])
\ No newline at end of file
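
Assuming the Orbit and enzo_tiny_cosmology sample datasets are present under
the configured "test_data_dir", these tests can be exercised directly with
nose:

    $ nosetests yt/data_objects/tests/test_particle_trajectories.py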


https://bitbucket.org/yt_analysis/yt/commits/6204277adfc8/
Changeset:   6204277adfc8
Branch:      yt
User:        jzuhone
Date:        2017-01-18 19:37:58+00:00
Summary:     These should be answer tests
Affected #:  2 files

diff -r 0f0ccc306d211b202f3cbf13df519b1a7dba37f5 -r 6204277adfc8d170d35166d85775c2715bd22055 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -79,6 +79,9 @@
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
 
+  local_particle_trajectory_000:
+    - yt/data_objects/tests/test_particle_trajectories.py
+
 other_tests:
   unittests:
      - '-v'

diff -r 0f0ccc306d211b202f3cbf13df519b1a7dba37f5 -r 6204277adfc8d170d35166d85775c2715bd22055 yt/data_objects/tests/test_particle_trajectories.py
--- a/yt/data_objects/tests/test_particle_trajectories.py
+++ b/yt/data_objects/tests/test_particle_trajectories.py
@@ -1,28 +1,45 @@
 import glob
-
 import os
 
+from yt.config import ytcfg
 from yt.data_objects.time_series import DatasetSeries
-from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    GenericArrayTest
 
 def setup():
     ytcfg["yt","__withintesting"] = "True"
 
 data_path = ytcfg.get("yt", "test_data_dir")
 
+pfields = ["particle_position_x", "particle_position_y", "particle_position_z"]
+vfields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+
+ at requires_ds("Orbit/orbit_hdf5_chk_0000")
 def test_orbit_traj():
     fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
     my_fns = glob.glob(os.path.join(data_path, "Orbit/orbit_hdf5_chk_00[0-9][0-9]"))
     my_fns.sort()
     ts = DatasetSeries(my_fns)
+    ds = ts[0]
     traj = ts.particle_trajectories([1, 2], fields=fields, suppress_logging=True)
+    for field in pfields+vfields:
+        def field_func(name):
+            # read the field named in args; closing over the loop variable
+            # would late-bind every yielded test to the last field
+            return traj[name]
+        yield GenericArrayTest(ds, field_func, args=[field])
 
+ at requires_ds("enzo_tiny_cosmology/DD0000/DD0000")
 def test_etc_traj():
     fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
-    my_fns = glob.glob(os.path.join(data_path, "enzo_tiny_cosmology/DD*/*.hierarchy"))
+    my_fns = glob.glob(os.path.join(data_path, "enzo_tiny_cosmology/DD000[0-9]/*.hierarchy"))
     my_fns.sort()
     ts = DatasetSeries(my_fns)
-    sp = ts[0].sphere("max", (0.5, "Mpc"))
-    indices = sp["particle_index"][sp["particle_type"] == 1]
+    ds = ts[0]
+    sp = ds.sphere("max", (0.5, "Mpc"))
+    indices = sp["particle_index"][sp["particle_type"] == 1][:5]
     traj = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)
-    traj.add_fields(["density"])
\ No newline at end of file
+    traj.add_fields(["density"])
+    for field in pfields+vfields+["density"]:
+        def field_func(name):
+            # as above, read the field named in args rather than closing
+            # over the loop variable
+            return traj[name]
+        yield GenericArrayTest(ds, field_func, args=[field])


https://bitbucket.org/yt_analysis/yt/commits/d548f42dcc23/
Changeset:   d548f42dcc23
Branch:      yt
User:        jzuhone
Date:        2017-01-19 17:12:04+00:00
Summary:     Merge
Affected #:  135 files

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -27,6 +27,7 @@
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/allocation_container.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/basic_octree.c

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1505,7 +1505,8 @@
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg -R ${YT_DIR} up -C ${BRANCH}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/XrayEmissionFields.ipynb
--- /dev/null
+++ b/doc/source/analyzing/XrayEmissionFields.ipynb
@@ -0,0 +1,220 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> Note: If you came here trying to figure out how to create simulated X-ray photons and observations,\n",
+    "  you should go [here](analysis_modules/photon_simulator.html) instead."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This functionality provides the ability to create metallicity-dependent X-ray luminosity, emissivity, and photon emissivity fields for a given photon energy range.  This works by interpolating from emission tables created from the photoionization code [Cloudy](http://nublado.org/) or the collisional ionization database [AtomDB](http://www.atomdb.org). These can be downloaded from http://yt-project.org/data from the command line like so:\n",
+    "\n",
+    "`# Put the data in a directory you specify`  \n",
+    "`yt download cloudy_emissivity_v2.h5 /path/to/data`\n",
+    "\n",
+    "`# Put the data in the location set by \"supp_data_dir\"`  \n",
+    "`yt download apec_emissivity_v2.h5 supp_data_dir`\n",
+    "\n",
+    "The data path can be a directory on disk, or it can be \"supp_data_dir\", which will download the data to the directory specified by the `\"supp_data_dir\"` yt configuration entry. It is easiest to put these files in the directory from which you will be running yt or `\"supp_data_dir\"`, but see the note below about putting them in alternate locations."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Emission fields can be made for any energy interval between 0.1 keV and 100 keV, and will always be created for luminosity $(\\rm{erg~s^{-1}})$, emissivity $\\rm{(erg~s^{-1}~cm^{-3})}$, and photon emissivity $\\rm{(photons~s^{-1}~cm^{-3})}$.  The only required arguments are the\n",
+    "dataset object, and the minimum and maximum energies of the energy band. However, typically one needs to decide what will be used for the metallicity. This can either be a floating-point value representing a spatially constant metallicity, or a prescription for a metallicity field, e.g. `(\"gas\", \"metallicity\")`. For this first example, where the dataset has no metallicity field, we'll just assume $Z = 0.3~Z_\\odot$ everywhere:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "import yt\n",
+    "\n",
+    "ds = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\")\n",
+    "\n",
+    "xray_fields = yt.add_xray_emissivity_field(ds, 0.5, 7.0, table_type='apec', metallicity=0.3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> Note: If you place the HDF5 emissivity tables in a location other than the current working directory or the location \n",
+    "  specified by the \"supp_data_dir\" configuration value, you will need to specify it in the call to \n",
+    "  `add_xray_emissivity_field`:  \n",
+    "  `xray_fields = yt.add_xray_emissivity_field(ds, 0.5, 7.0, data_dir=\"/path/to/data\", table_type='apec', metallicity=0.3)`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Having made the fields, one can see which fields were made:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (xray_fields)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The luminosity field is useful for summing up in regions like this:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "sp = ds.sphere(\"c\", (2.0, \"Mpc\"))\n",
+    "print (sp.quantities.total_quantity(\"xray_luminosity_0.5_7.0_keV\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Whereas the emissivity fields may be useful in derived fields or for plotting:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds, 'z', ['xray_emissivity_0.5_7.0_keV','xray_photon_emissivity_0.5_7.0_keV'],\n",
+    "                   width=(0.75, \"Mpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The emissivity and the luminosity fields take the values one would see in the frame of the source. However, if one wishes to make projections of the X-ray emission from a cosmologically distant object, the energy band will be redshifted. For this case, one can supply a `redshift` parameter and a `Cosmology` object (either from the dataset or one made on your own) to compute X-ray intensity fields along with the emissivity and luminosity fields.\n",
+    "\n",
+    "This example shows how to do that, Where we also use a spatially dependent metallicity field and the Cloudy tables instead of the APEC tables we used previously:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "ds2 = yt.load(\"D9p_500/10MpcBox_HartGal_csf_a0.500.d\")\n",
+    "\n",
+    "# In this case, use the redshift and cosmology from the dataset, \n",
+    "# but in theory you could put in something different\n",
+    "xray_fields2 = yt.add_xray_emissivity_field(ds2, 0.5, 2.0, redshift=ds2.current_redshift, cosmology=ds2.cosmology,\n",
+    "                                            metallicity=(\"gas\", \"metallicity\"), table_type='cloudy')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, one can see that two new fields have been added, corresponding to X-ray intensity / surface brightness when projected:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (xray_fields2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note also that the energy range now corresponds to the *observer* frame, whereas in the source frame the energy range is between `emin*(1+redshift)` and `emax*(1+redshift)`. Let's zoom in on a galaxy and make a projection of the intensity fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false,
+    "scrolled": false
+   },
+   "outputs": [],
+   "source": [
+    "prj = yt.ProjectionPlot(ds2, \"x\", [\"xray_intensity_0.5_2.0_keV\", \"xray_photon_intensity_0.5_2.0_keV\"],\n",
+    "                        center=\"max\", width=(40, \"kpc\"))\n",
+    "prj.set_zlim(\"xray_intensity_0.5_2.0_keV\", 1.0e-32, 5.0e-24)\n",
+    "prj.set_zlim(\"xray_photon_intensity_0.5_2.0_keV\", 1.0e-24, 5.0e-16)\n",
+    "prj.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> Warning: The X-ray fields depend on the number density of hydrogen atoms, given by the yt field\n",
+    "  `H_nuclei_density`. In the case of the APEC model, this assumes that all of the hydrogen in your\n",
+    "  dataset is ionized, whereas in the Cloudy model the ionization level is taken into account. If \n",
+    "  this field is not defined (either in the dataset or by the user), it will be constructed using\n",
+    "  abundance information from your dataset. Finally, if your dataset contains no abundance information,\n",
+    "  a primordial hydrogen mass fraction (X = 0.76) will be assumed."
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
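
Condensed, the workflow in the notebook above amounts to the following
(a sketch, assuming the GasSloshing sample dataset and a downloaded
apec_emissivity_v2.h5 table):

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")

    # returns the names of the new emissivity/luminosity/photon fields
    xray_fields = yt.add_xray_emissivity_field(ds, 0.5, 7.0,
                                               table_type='apec',
                                               metallicity=0.3)

    # sum the luminosity field over a sphere
    sp = ds.sphere("c", (2.0, "Mpc"))
    print(sp.quantities.total_quantity("xray_luminosity_0.5_7.0_keV"))

    # the emissivity fields slice and project like any other field
    slc = yt.SlicePlot(ds, 'z', 'xray_emissivity_0.5_7.0_keV',
                       width=(0.75, "Mpc"))
    slc.save()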

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -3,6 +3,14 @@
 Constructing Mock X-ray Observations
 ------------------------------------
 
+.. warning:: 
+
+  The ``photon_simulator`` analysis module has been deprecated; it is
+  no longer being updated, and it will be removed in a future version
+  of yt. Users are encouraged to download and use the
+  `pyXSIM <http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim>`_ package 
+  instead. 
+
 .. note::
 
   If you just want to create derived fields for X-ray emission,

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-.. _xray_emission_fields:
-
-X-ray Emission Fields
-=====================
-.. sectionauthor:: Britton Smith <brittonsmith@gmail.com>, John ZuHone <jzuhone@gmail.com>
-
-.. note::
-
-  If you came here trying to figure out how to create simulated X-ray photons and observations,
-  you should go `here <photon_simulator.html>`_ instead.
-
-This functionality provides the ability to create metallicity-dependent
-X-ray luminosity, emissivity, and photon emissivity fields for a given
-photon energy range.  This works by interpolating from emission tables
-created from the photoionization code `Cloudy <http://nublado.org/>`_ or
-the collisional ionization database `AtomDB <http://www.atomdb.org>`_. If
-you installed yt with the install script, these data files should be located in
-the *data* directory inside the installation directory, or can be downloaded
-from `<http://yt-project.org/data>`_. Emission fields can be made for any
-interval between 0.1 keV and 100 keV.
-
-Adding Emission Fields
-----------------------
-
-Fields will be created for luminosity :math:`{\rm (erg~s^{-1})}`, emissivity :math:`{\rm (erg~s^{-1}~cm^{-3})}`,
-and photon emissivity :math:`{\rm (photons~s^{-1}~cm^{-3})}`.  The only required arguments are the
-dataset object, and the minimum and maximum energies of the energy band.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_emissivity_field
-
-  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0)
-
-Additional keyword arguments are:
-
- * **filename** (*string*): Path to data file containing emissivity values. If None,
-   a file called "cloudy_emissivity.h5" is used, for photoionized plasmas. A second
-   option, for collisionally ionized plasmas, is in the file "apec_emissivity.h5",
-   available at http://yt-project.org/data. These files contain emissivity tables
-   for primordial elements and for metals at solar metallicity for the energy range
-   0.1 to 100 keV. Default: None.
-
- * **with_metals** (*bool*): If True, use the metallicity field to add the
-   contribution from metals.  If False, only the emission from H/He is
-   considered.  Default: True.
-
- * **constant_metallicity** (*float*): If specified, assume a constant
-   metallicity for the emission from metals.  The *with_metals* keyword
-   must be set to False to use this. It should be given in unit of solar metallicity.
-   Default: None.
-
-The resulting fields can be used like all normal fields. The function will return the names of
-the created fields in a Python list.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_emissivity_field
-
-  xray_fields = add_xray_emissivity_field(ds, 0.5, 7.0, filename="apec_emissivity.h5")
-
-  ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-  plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')
-  plot.save()
-  plot = yt.ProjectionPlot(ds, 'x', 'xray_emissivity_0.5_7.0_keV')
-  plot.save()
-  plot = yt.ProjectionPlot(ds, 'x', 'xray_photon_emissivity_0.5_7.0_keV')
-  plot.save()
-
-.. warning::
-
-  The X-ray fields depend on the number density of hydrogen atoms, in the yt field
-  ``H_number_density``. If this field is not defined (either in the dataset or by the user),
-  the primordial hydrogen mass fraction (X = 0.76) will be used to construct it.

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -64,6 +64,24 @@
 You can use this to easily explore available fields, particularly through
 tab-completion in Jupyter/IPython.
 
+It's also possible to iterate over the list of fields associated with each
+field type. For example, to print all of the ``'gas'`` fields, one might do:
+
+.. code-block:: python
+
+   for field in ds.fields.gas:
+       print(field)
+
+You can also check if a given field is associated with a field type using
+standard Python syntax:
+
+.. code-block:: python
+
+   # these examples evaluate to True for a dataset that has ('gas', 'density')
+   'density' in ds.fields.gas
+   ('gas', 'density') in ds.fields.gas
+   ds.fields.gas.density in ds.fields.gas
+
 For a more programmatic method of accessing fields, you can utilize the
 ``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
 information about fields.  The full list of fields available for a dataset can

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -64,6 +64,18 @@
        print("(%f,  %f,  %f)    %f" %
              (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i]))
 
+Data objects can also be cloned; for instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
+   sp_copy = sp.clone()
+
+This can be useful when manually chunking data or when exploring different
+field parameters.
+
 .. _quickly-selecting-data:
 
 Slicing Syntax for Selecting Data
@@ -618,7 +630,7 @@
 ---------------------------------------
 
 A special type of data object is the *boolean* data object, which works with
-three-dimensional data selection.  It is built by relating already existing
+data selection objects of any dimension.  It is built by relating already existing
 data objects with the bitwise operators for AND, OR and XOR, as well as the
 subtraction operator.  These are created by using the operators ``&`` for an
 intersection ("AND"), ``|`` for a union ("OR"), ``^`` for an exclusive or

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -557,6 +557,100 @@
     "print (temp, type(temp))\n",
     "print (ptemp, type(ptemp))"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining New Units"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "yt also provides a way to define your own units. Suppose you wanted to define a new unit for \"miles per hour\", the familiar \"mph\", which is not already in yt. One can do this by calling `yt.define_unit()`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "yt.define_unit(\"mph\", (1.0, \"mile/hr\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once this unit is defined, it can be used in the same way as any other unit:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.units import clight\n",
+    "print (clight.to('mph'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you want to define a new unit which is prefixable (like SI units), you can set `prefixable=True` when defining the unit:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt import YTQuantity\n",
+    "yt.define_unit(\"L\", (1000.0, \"cm**3\"), prefixable=True)\n",
+    "print (YTQuantity(1.0, \"mL\").to(\"cm**3\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`yt.define_unit()` defines new units for all yt operations. However, new units can be defined for particular datasets only as well using `ds.define_unit()`, which has the same signature:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds.define_unit(\"M_star\", (2.0e13, \"Msun\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "dd = ds.all_data()\n",
+    "print(dd.quantities.total_mass().to(\"M_star\"))"
+   ]
   }
  ],
  "metadata": {
@@ -580,4 +674,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
\ No newline at end of file
+}
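
The unit-definition API added above, condensed into one short script (a
sketch; the notebook's dataset is swapped for the IsolatedGalaxy sample
dataset, and "M_star" is the notebook's example unit):

    import yt
    from yt import YTQuantity
    from yt.units import clight

    # a unit available to all yt operations
    yt.define_unit("mph", (1.0, "mile/hr"))
    print(clight.to("mph"))

    # prefixable units accept SI prefixes, e.g. mL for milliliters
    yt.define_unit("L", (1000.0, "cm**3"), prefixable=True)
    print(YTQuantity(1.0, "mL").to("cm**3"))

    # the same signature on a dataset defines a unit local to it
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ds.define_unit("M_star", (2.0e13, "Msun"))
    print(ds.all_data().quantities.total_mass().to("M_star"))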

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/analyzing/xray_emission_fields.rst
--- /dev/null
+++ b/doc/source/analyzing/xray_emission_fields.rst
@@ -0,0 +1,3 @@
+.. _xray_emission_fields:
+
+.. notebook:: XrayEmissionFields.ipynb

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -98,13 +98,13 @@
 
    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
 
-.. _loading_athena_data:
+.. _loading-athena-data:
 
 Athena Data
 -----------
 
-Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+Athena 4.x VTK data is supported and cared for by John ZuHone. Both uniform grid
+and SMR datasets are supported.
 
 .. note:
    yt also recognizes Fargo3D data written to VTK files as
@@ -138,10 +138,14 @@
 which will pick up all of the files in the different ``id*`` directories for
 the entire dataset.
 
-yt works in cgs ("Gaussian") units by default, but Athena data is not
-normally stored in these units. If you would like to convert data to
-cgs units, you may supply conversions for length, time, and mass to ``load`` using
-the ``units_override`` functionality:
+The default unit system in yt is cgs ("Gaussian") units, but Athena data is not
+normally stored in these units, so the code unit system is the default unit
+system for Athena data. This means that answers to field queries from data
+objects and plots of data will be expressed in code units. Note that the default
+conversions from these units will still be in terms of cgs units, e.g. 1
+``code_length`` equals 1 cm, and so on. If you would like to provide different
+conversions, you may supply conversions for length, time, and mass to ``load``
+using the ``units_override`` functionality:
 
 .. code-block:: python
 
@@ -153,14 +157,16 @@
 
    ds = yt.load("id0/cluster_merger.0250.vtk", units_override=units_override)
 
-This means that the yt fields, e.g. ``("gas","density")``, ``("gas","x-velocity")``,
-``("gas","magnetic_field_x")``, will be in cgs units, but the Athena fields, e.g.,
-``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
-in code units.
+This means that the yt fields, e.g. ``("gas","density")``,
+``("gas","velocity_x")``, ``("gas","magnetic_field_x")``, will be in cgs units
+(or whatever unit system was specified), but the Athena fields, e.g.,
+``("athena","density")``, ``("athena","velocity_x")``,
+``("athena","cell_centered_B_x")``, will be in code units.
 
-Some 3D Athena outputs may have large grids (especially parallel datasets subsequently joined with
-the `join_vtk` script), and may benefit from being subdivided into "virtual grids". For this purpose,
-one can pass in the `nprocs` parameter:
+Some 3D Athena outputs may have large grids (especially parallel datasets
+subsequently joined with the ``join_vtk`` script), and may benefit from being
+subdivided into "virtual grids". For this purpose, one can pass in the
+``nprocs`` parameter:
 
 .. code-block:: python
 
@@ -168,43 +174,100 @@
 
    ds = yt.load("sloshing.0000.vtk", nprocs=8)
 
-which will subdivide each original grid into `nprocs` grids.
+which will subdivide each original grid into ``nprocs`` grids.
 
 .. note::
 
     Virtual grids are only supported (and really only necessary) for 3D data.
 
-Alternative values for the following simulation parameters may be specified using a ``parameters``
-dict, accepting the following keys:
+Alternative values for the following simulation parameters may be specified
+using a ``parameters`` dict, accepting the following keys:
 
 * ``Gamma``: ratio of specific heats, Type: Float
-* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or ``"cylindrical"``
-* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values corresponding to each dimension
+* ``geometry``: Geometry type, currently accepts ``"cartesian"`` or
+  ``"cylindrical"``
+* ``periodicity``: Is the domain periodic? Type: Tuple of boolean values
+  corresponding to each dimension
 
 .. code-block:: python
 
    import yt
 
-   parameters = {"gamma":4./3., "geometry":"cylindrical", "periodicity":(False,False,False)}
+   parameters = {"gamma":4./3., "geometry":"cylindrical",
+                 "periodicity":(False,False,False)}
 
    ds = yt.load("relativistic_jet_0000.vtk", parameters=parameters)
 
 .. rubric:: Caveats
 
-* yt primarily works with primitive variables. If the Athena
-  dataset contains conservative variables, the yt primitive fields will be generated from the
+* yt primarily works with primitive variables. If the Athena dataset contains
+  conservative variables, the yt primitive fields will be generated from the
   conserved variables on disk.
-* Special relativistic datasets may be loaded, but are not fully supported. In particular, the relationships between
-  quantities such as pressure and thermal energy will be incorrect, as it is currently assumed that their relationship
-  is that of an ideal a :math:`\gamma`-law equation of state.
+* Special relativistic datasets may be loaded, but at this time not all of
+  their fields are fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is
+  currently assumed that their relationship is that of an ideal
+  :math:`\gamma`-law equation of state. This will be rectified in a future
+  release.
 * Domains may be visualized assuming periodicity.
 * Particle list data is currently unsupported.
 
 .. note::
 
    The old behavior of supplying unit conversions using a ``parameters``
-   dict supplied to ``load`` for Athena datasets is still supported, but is being deprecated in
-   favor of ``units_override``, which provides the same functionality.
+   dict supplied to ``load`` for Athena datasets is still supported, but is
+   being deprecated in favor of ``units_override``, which provides the same
+   functionality.
+
+.. _loading-athena-pp-data:
+
+Athena++ Data
+-------------
+
+Athena++ HDF5 data is supported and cared for by John ZuHone. Uniform-grid, SMR,
+and AMR datasets in Cartesian coordinates are fully supported. Support for
+curvilinear coordinates and logarithmic cell sizes exists, but is preliminary.
+For the latter type of dataset, the data will be loaded in as a semi-structured
+mesh dataset. See :ref:`loading-semi-structured-mesh-data` for more details on
+how this works in yt.
+
+The default unit system in yt is cgs ("Gaussian") units, but Athena++ data is
+not normally stored in these units, so the code unit system is the default unit
+system for Athena++ data. This means that answers to field queries from data
+objects and plots of data will be expressed in code units. Note that the default
+conversions from these units will still be in terms of cgs units, e.g. 1
+``code_length`` equals 1 cm, and so on. If you would like to provide different
+conversions, you may supply conversions for length, time, and mass to ``load``
+using the ``units_override`` functionality:
+
+.. code-block:: python
+
+   import yt
+
+   units_override = {"length_unit":(1.0,"Mpc"),
+                     "time_unit"(1.0,"Myr"),
+                     "mass_unit":(1.0e14,"Msun")}
+
+   ds = yt.load("AM06/AM06.out1.00400.athdf", units_override=units_override)
+
+This means that the yt fields, e.g. ``("gas","density")``,
+``("gas","velocity_x")``, ``("gas","magnetic_field_x")``, will be in cgs units
+(or whatever unit system was specified), but the Athena fields, e.g.,
+``("athena_pp","density")``, ``("athena_pp","vel1")``, ``("athena_pp","Bcc1")``,
+will be in code units.
+
+.. rubric:: Caveats
+
+* yt primarily works with primitive variables. If the Athena++ dataset contains
+  conservative variables, the yt primitive fields will be generated from the
+  conserved variables on disk.
+* Special relativistic datasets may be loaded, but at this time not all of their
+  fields are fully supported. In particular, the relationships between
+  quantities such as pressure and thermal energy will be incorrect, as it is
+  currently assumed that their relationship is that of an ideal
+  :math:`\gamma`-law equation of state. This will be rectified in a future
+  release.
+* Domains may be visualized assuming periodicity.
 
 .. _loading-orion-data:
 
@@ -1182,6 +1245,8 @@
 * Particles may be difficult to integrate.
 * Data must already reside in memory.
 
+.. _loading-semi-structured-mesh-data:
+
 Semi-Structured Grid Data
 -------------------------
 

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -95,22 +95,17 @@
 Running the Install Script
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To get the installation script for the ``stable`` branch of the code,
-download it using the following command:
+You can download the installation script with the following command:
 
 .. code-block:: bash
 
-  $ wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
+  $ wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 If you do not have ``wget``, the following should also work:
 
 .. code-block:: bash
 
-  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
-
-If you wish to install a different version of yt (see :ref:`branches-of-yt`),
-replace ``stable`` with the appropriate branch name (e.g. ``yt``, ``yt-2.x``) in
-the path above to get the correct install script.
+  $ curl -OL http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 By default, the bash install script will create a python environment based on
 the `miniconda Python distribution <http://conda.pydata.org/miniconda.html>`_,

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -155,11 +155,18 @@
 
    yt.data_objects.static_output.Dataset.arr
    yt.data_objects.static_output.Dataset.quan
+   ~yt.units.unit_object.define_unit
    ~yt.units.unit_object.Unit
    ~yt.units.unit_registry.UnitRegistry
    ~yt.units.yt_array.YTArray
    ~yt.units.yt_array.YTQuantity
-
+   ~yt.units.yt_array.uconcatenate
+   ~yt.units.yt_array.uintersect1d
+   ~yt.units.yt_array.uunion1d
+   ~yt.units.yt_array.unorm
+   ~yt.units.yt_array.udot
+   ~yt.units.yt_array.uvstack
+   ~yt.units.yt_array.uhstack
 
 Frontends
 ---------

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -276,6 +276,34 @@
    $ yt hub start
    $ yt hub start /user/xarthisius/Public
 
+download
+~~~~~~~~
+
+This subcommand downloads a file from http://yt-project.org/data. Using ``yt download``, 
+one can download a file to:
+
+* ``"test_data_dir"``: Save the file to the location specified in 
+  the ``"test_data_dir"`` configuration entry for test data.
+* ``"supp_data_dir"``: Save the file to the location specified in 
+  the ``"supp_data_dir"`` configuration entry for supplemental data.
+* Any valid path to a location on disk, e.g. ``/home/jzuhone/data``.
+
+Examples:
+
+.. code-block:: bash
+
+   $ yt download apec_emissivity_v2.h5 supp_data_dir
+
+.. code-block:: bash
+
+   $ yt download GasSloshing.tar.gz test_data_dir
+
+.. code-block:: bash 
+
+   $ yt download ZeldovichPancake.tar.gz /Users/jzuhone/workspace
+
+If the configuration values ``"test_data_dir"`` or ``"supp_data_dir"`` have not
+been set by the user, an error will be thrown. 
 
 Config helper
 ~~~~~~~~~~~~~

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -109,6 +109,8 @@
   to stdout rather than stderr
 * ``skip_dataset_cache`` (default: ``'False'``): If true, automatic caching of datasets
   is turned off.
+* ``supp_data_dir`` (default: ``'/does/not/exist'``): The default path that
+  certain submodules of yt look in for supplemental data files.
 
 .. _plugin-file:
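
For example, this entry can be set from the command line (a sketch, using
the ``yt config`` helper documented in the command-line reference):

    $ yt config set yt supp_data_dir /path/to/supp_data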
 

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/visualizing/FITSImageData.ipynb
--- a/doc/source/visualizing/FITSImageData.ipynb
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -15,8 +15,7 @@
    },
    "outputs": [],
    "source": [
-    "import yt\n",
-    "from yt.utilities.fits_image import FITSImageData, FITSProjection"
+    "import yt"
    ]
   },
   {
@@ -73,7 +72,7 @@
    },
    "outputs": [],
    "source": [
-    "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+    "prj_fits = yt.FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
    ]
   },
   {
@@ -236,7 +235,7 @@
    "source": [
     "slc3 = ds.slice(0, 0.0)\n",
     "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
-    "fid_frb = FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+    "fid_frb = yt.FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
    ]
   },
   {
@@ -255,7 +254,7 @@
    "outputs": [],
    "source": [
     "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
-    "fid_cvg = FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+    "fid_cvg = yt.FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
    ]
   },
   {
@@ -280,7 +279,7 @@
    },
    "outputs": [],
    "source": [
-    "fid = FITSImageData.from_file(\"sloshing.fits\")\n",
+    "fid = yt.FITSImageData.from_file(\"sloshing.fits\")\n",
     "fid.info()"
    ]
   },
@@ -299,8 +298,8 @@
    },
    "outputs": [],
    "source": [
-    "prj_fits2 = FITSProjection(ds, \"z\", [\"density\"])\n",
-    "prj_fits3 = FITSImageData.from_images([prj_fits, prj_fits2])\n",
+    "prj_fits2 = yt.FITSProjection(ds, \"z\", [\"density\"])\n",
+    "prj_fits3 = yt.FITSImageData.from_images([prj_fits, prj_fits2])\n",
     "prj_fits3.info()"
    ]
   },

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -304,8 +304,10 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_halos(self, halo_catalog, circle_args=None, \
-                             width=None, annotate_field=None, text_args=None, \
-                             factor=1.0)
+                             width=None, annotate_field=None, \
+                             radius_field='virial_radius', \
+                             center_field_prefix="particle_position", \
+                             text_args=None, factor=1.0)
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
@@ -321,11 +323,22 @@
    with the annotate_field, which accepts a field contained in the halo catalog
    to add text to the plot near the halo (example: ``annotate_field=
    'particle_mass'`` will write the halo mass next to each halo, whereas
-   ``'particle_identifier'`` shows the halo number).  font_kwargs contains the
-   arguments controlling the text appearance of the annotated field.
-   Factor is the number the virial radius is multiplied by for plotting the
-   circles. Ex: ``factor=2.0`` will plot circles with twice the radius of each
-   halo virial radius.
+   ``'particle_identifier'`` shows the halo number). The size of the circles is
+   found from the field ``radius_field``, which is ``'virial_radius'`` by
+   default. If another radius has been found as part of your halo analysis
+   workflow, you can save that field and use it as the ``radius_field`` to
+   change the size of the halos. The position of each halo is determined using
+   ``center_field_prefix`` in the following way. If ``'particle_position'``
+   is the value of ``center_field_prefix``, as is the default, the x value of
+   the halo position is stored in the field ``'particle_position_x'``, y is
+   ``'particle_position_y'``, and z is ``'particle_position_z'``. If you have
+   stored another set of coordinates for each halo as part of your halo
+   analysis as fields such as ``'halo_position_x'``, you can use these fields
+   to determine halo position by passing ``'halo_position'`` to
+   ``center_field_prefix``. ``text_args`` contains the arguments controlling
+   the text appearance of the annotated field. ``factor`` is the number the
+   virial radius is multiplied by for plotting the circles, e.g. ``factor=2.0``
+   will plot circles with twice the virial radius of each halo.
 
 .. python-script::
 

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -50,6 +50,25 @@
 colorblind/printer/grayscale-friendly plots. For more information, visit
 `http://colorbrewer2.org <http://colorbrewer2.org>`_.
 
+.. _cmocean-cmaps:
+
+Colormaps from cmocean
+~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to ``palettable``, yt will also import colormaps defined in the
+`cmocean <http://matplotlib.org/cmocean>`_ package. These colormaps are
+`perceptually uniform <http://bids.github.io/colormap/>`_ and were originally
+designed for oceanography applications, but can be used for any kind of plot.
+
+Since ``cmocean`` is not installed as a dependency of yt by default, it must be
+installed separately to access the ``cmocean`` colormaps with yt. The easiest
+way to install ``cmocean`` is via ``pip``: ``pip install cmocean``.  To access
+the colormaps in yt, simply specify the name of the ``cmocean`` colormap in any
+context where you would specify a colormap. One caveat is the ``cmocean``
+colormap ``algae``. Since yt already defines a colormap named ``algae``, the
+``cmocean`` version of ``algae`` must be specified with the name
+``algae_cmocean``.
+
 .. _custom-colormaps:
 
 Making and Viewing Custom Colormaps
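
A minimal usage sketch for the cmocean colormaps described above (assuming
cmocean is installed, and using the IsolatedGalaxy sample dataset):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(ds, 'z', 'density')
    # any cmocean name works wherever yt accepts a colormap; cmocean's
    # "algae" must be spelled "algae_cmocean" to avoid yt's own "algae"
    slc.set_cmap('density', 'algae_cmocean')
    slc.save()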

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -670,6 +670,21 @@
    slc.set_log('x-velocity', True, linthresh=1.e1)
    slc.save()
 
+The :meth:`~yt.visualization.plot_container.ImagePlotContainer.set_background_color`
+function accepts a field name and an optional color. If a color is given, the
+plot's background is set to that color; if not, it is set to the bottom value
+of the colormap.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(1.5, 'Mpc'))
+   slc.set_background_color('density')
+   slc.save('bottom_colormap_background')
+   slc.set_background_color('density', color='black')
+   slc.save('black_background')
+
 Lastly, the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_zlim`
 function makes it possible to set a custom colormap range.
 

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,6 +13,6 @@
 #      unused import errors
 #      autogenerated __config__.py files
 #      vendored libraries
-exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
+exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py
 max-line-length=999
 ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c setup.py
--- a/setup.py
+++ b/setup.py
@@ -180,7 +180,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses", "distance_queue"
+    "amr_kdtools", "lenses", "distance_queue", "allocation_container"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(
@@ -298,6 +298,15 @@
                 fobj.write("hg_version = '%s'\n" % changeset)
         _build_py.run(self)
 
+    def get_outputs(self):
+        # http://bitbucket.org/yt_analysis/yt/issues/1296
+        outputs = _build_py.get_outputs(self)
+        outputs.append(
+            os.path.join(self.build_lib, 'yt', '__hg_version__.py')
+        )
+        return outputs
+
+
 class build_ext(_build_ext):
     # subclass setuptools extension builder to avoid importing cython and numpy
     # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
@@ -308,7 +317,12 @@
         _build_ext.finalize_options(self)
         # Prevent numpy from thinking it is still in its setup process
         # see http://stackoverflow.com/a/21621493/1382869
-        __builtins__.__NUMPY_SETUP__ = False
+        if isinstance(__builtins__, dict):
+            # sometimes this is a dict so we need to check for that
+            # https://docs.python.org/3/library/builtins.html
+            __builtins__["__NUMPY_SETUP__"] = False
+        else:
+            __builtins__.__NUMPY_SETUP__ = False
         import numpy
         self.include_dirs.append(numpy.get_include())
 
@@ -325,9 +339,7 @@
 setup(
     name="yt",
     version=VERSION,
-    description="An analysis and visualization toolkit for Astrophysical "
-                + "simulations, focusing on Adaptive Mesh Refinement data "
-                  "from Enzo, Orion, FLASH, and others.",
+    description="An analysis and visualization toolkit for volumetric data",
     classifiers=["Development Status :: 5 - Production/Stable",
                  "Environment :: Console",
                  "Intended Audience :: Science/Research",

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -45,7 +45,7 @@
         
         if exit_code != 0:
             print("Compilation of OpenMP test code failed with the error: ")
-            print(err)
+            print(err.decode('utf8'))
             print("Disabling OpenMP support. ")
 
         # Clean up

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -6,6 +6,8 @@
 from yt.extern.six import StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
+import numpy
+numpy.set_printoptions(threshold=5, edgeitems=1, precision=4)
 
 class NoseWorker(multiprocessing.Process):
 
@@ -67,7 +69,7 @@
                       if DROP_TAG not in line])
     tests = yaml.load(data)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v',
+    base_argv = ['--local-dir=%s' % answers_dir,
                  '--with-answer-testing', '--answer-big-data', '--local']
     args = []
 

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -1,45 +1,48 @@
 answer_tests:
-  local_artio_000:
+  local_artio_001:
     - yt/frontends/artio/tests/test_outputs.py
 
-  local_athena_001:
+  local_athena_003:
     - yt/frontends/athena
 
-  local_chombo_001:
+  local_athena_pp_000:
+    - yt/frontends/athena_pp
+
+  local_chombo_002:
     - yt/frontends/chombo/tests/test_outputs.py
 
-  local_enzo_002:
+  local_enzo_003:
     - yt/frontends/enzo
 
-  local_fits_000:
+  local_fits_001:
     - yt/frontends/fits/tests/test_outputs.py
 
-  local_flash_003:
+  local_flash_004:
     - yt/frontends/flash/tests/test_outputs.py
 
-  local_gadget_000:
+  local_gadget_001:
     - yt/frontends/gadget/tests/test_outputs.py
 
-  local_gamer_001:
+  local_gamer_002:
     - yt/frontends/gamer/tests/test_outputs.py
 
-  local_gdf_000:
+  local_gdf_001:
     - yt/frontends/gdf/tests/test_outputs.py
 
-  local_gizmo_001:
+  local_gizmo_002:
     - yt/frontends/gizmo/tests/test_outputs.py
 
-  local_halos_000:
+  local_halos_001:
     - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py  # [py2]
     - yt/analysis_modules/halo_finding/tests/test_rockstar.py  # [py2]
     - yt/frontends/owls_subfind/tests/test_outputs.py
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
 
-  local_owls_000:
+  local_owls_001:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_011:
+  local_pw_012:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -47,10 +50,10 @@
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
     - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
 
-  local_tipsy_001:
+  local_tipsy_002:
     - yt/frontends/tipsy/tests/test_outputs.py
 
-  local_varia_006:
+  local_varia_008:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -58,14 +61,15 @@
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
     - yt/visualization/tests/test_mesh_slices.py:test_tri2
+    - yt/fields/tests/test_xray_fields.py
 
-  local_orion_000:
+  local_orion_001:
     - yt/frontends/boxlib/tests/test_orion.py
 
-  local_ramses_000:
+  local_ramses_001:
     - yt/frontends/ramses/tests/test_outputs.py
 
-  local_ytdata_001:
+  local_ytdata_002:
     - yt/frontends/ytdata
 
   local_absorption_spectrum_005:
@@ -84,8 +88,6 @@
 
 other_tests:
   unittests:
-     - '-v'
      - '--exclude=test_mesh_slices'  # disable randomly failing test
   cookbook:
-     - '-v'
      - 'doc/source/cookbook/tests/test_cookbook.py'

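For context on the tests.yaml churn: in yt's answer testing the numeric suffix on each name (local_enzo_003 and so on) versions the stored reference answers, so bumping it invalidates the cached answers and forces them to be regenerated on the next run. The removal of the '-v' flags here and in nose_runner.py presumably pairs with the numpy print-option change above to keep test logs short.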
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -99,12 +99,18 @@
 
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
+from yt.units.unit_object import define_unit
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity, \
     uconcatenate, \
+    ucross, \
     uintersect1d, \
     uunion1d, \
+    unorm, \
+    udot, \
+    uvstack, \
+    uhstack, \
     loadtxt, \
     savetxt
 
@@ -119,7 +125,8 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
-    derived_field
+    derived_field, \
+    add_xray_emissivity_field
 
 from yt.data_objects.api import \
     DatasetSeries, ImageArray, \
@@ -156,7 +163,9 @@
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps, add_cmap, make_colormap, \
     ProfilePlot, PhasePlot, ParticlePhasePlot, \
-    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot
+    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot, \
+    FITSImageData, FITSSlice, FITSProjection, FITSOffAxisSlice, \
+    FITSOffAxisProjection
 
 from yt.visualization.volume_rendering.api import \
     volume_render, create_scene, ColorTransferFunction, TransferFunction, \

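For reference, the newly exported u* helpers (ucross, unorm, udot, uvstack, uhstack) are unit-aware wrappers around their NumPy namesakes; the plain NumPy versions can drop the units from YTArray inputs. A minimal usage sketch, assuming the yt of this changeset:

    from yt.units.yt_array import YTArray, unorm, uvstack

    v = YTArray([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], "km/s")
    print(unorm(v[0]))             # sqrt(14) km/s, units preserved
    print(uvstack([v[0], v[1]]))   # 2x3 YTArray, still in km/s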
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -495,8 +495,8 @@
                 window_width_in_bins = 2
 
                 while True:
-                    left_index = (center_index[i] - window_width_in_bins/2)
-                    right_index = (center_index[i] + window_width_in_bins/2)
+                    left_index = (center_index[i] - window_width_in_bins//2)
+                    right_index = (center_index[i] + window_width_in_bins//2)
                     n_vbins = (right_index - left_index) * n_vbins_per_bin[i]
 
                     # the array of virtual bins in lambda space

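The absorption_spectrum hunk is a Python 3 porting fix: between ints, / returns a float on Python 3, which is not a valid array index, while // keeps floor-division semantics on both Python 2 and 3:

    print(5 / 2)    # 2 on Python 2, 2.5 on Python 3
    print(5 // 2)   # 2 on both, safe for indexing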
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -655,24 +655,42 @@
 
         Write light ray data to hdf5 file.
         """
+
+        extra_attrs = {"data_type": "yt_light_ray"}
         if self.simulation_type is None:
             ds = self.ds
         else:
             ds = {}
-            ds["dimensionality"] = self.simulation.dimensionality
-            ds["domain_left_edge"] = self.simulation.domain_left_edge
-            ds["domain_right_edge"] = self.simulation.domain_right_edge
-            ds["cosmological_simulation"] = self.simulation.cosmological_simulation
             ds["periodicity"] = (True, True, True)
             ds["current_redshift"] = self.near_redshift
-            for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
-                ds[attr] = getattr(self.cosmology, attr)
+            for attr in ["dimensionality", "cosmological_simulation",
+                         "domain_left_edge", "domain_right_edge",
+                         "length_unit", "time_unit"]:
+                ds[attr] = getattr(self.simulation, attr)
+            if self.simulation.cosmological_simulation:
+                for attr in ["omega_lambda", "omega_matter",
+                             "hubble_constant"]:
+                    ds[attr] = getattr(self.cosmology, attr)
             ds["current_time"] = \
               self.cosmology.t_from_z(ds["current_redshift"])
             if isinstance(ds["hubble_constant"], YTArray):
                 ds["hubble_constant"] = \
                   ds["hubble_constant"].to("100*km/(Mpc*s)").d
-        extra_attrs = {"data_type": "yt_light_ray"}
+            extra_attrs["unit_registry_json"] = \
+              self.simulation.unit_registry.to_json()
+
+        # save the light ray solution
+        if len(self.light_ray_solution) > 0:
+            for key in self.light_ray_solution[0]:
+                if key in ["next", "previous", "index"]:
+                    continue
+                lrsa = [sol[key] for sol in self.light_ray_solution]
+                if isinstance(lrsa[-1], YTArray):
+                    to_arr = YTArray
+                else:
+                    to_arr = np.array
+                extra_attrs["light_ray_solution_%s" % key] = to_arr(lrsa)
+
         field_types = dict([(field, "grid") for field in data.keys()])
 
         # Only return LightRay elements with non-zero density

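With the light_ray.py hunk above, the per-segment solution arrays are stored as HDF5 attributes named light_ray_solution_<key> and reconstructed when the file is reloaded, so a round trip looks like this (file name taken from the tests below):

    import yt

    ds = yt.load("lightray.h5")
    # same list-of-dicts structure as LightRay.light_ray_solution
    print(ds.light_ray_solution[0].keys())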
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -12,7 +12,10 @@
 
 import numpy as np
 
+from yt.convenience import \
+    load
 from yt.testing import \
+    assert_array_equal, \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay
 import os
@@ -23,6 +26,19 @@
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 
+def compare_light_ray_solutions(lr1, lr2):
+    assert len(lr1.light_ray_solution) == len(lr2.light_ray_solution)
+    if len(lr1.light_ray_solution) == 0:
+        return
+    for s1, s2 in zip(lr1.light_ray_solution, lr2.light_ray_solution):
+        for field in s1:
+            if field in ["next", "previous"]:
+                continue
+            if isinstance(s1[field], np.ndarray):
+                assert_array_equal(s1[field], s2[field])
+            else:
+                assert s1[field] == s2[field]
+
 @requires_file(COSMO_PLUS)
 def test_light_ray_cosmo():
     """
@@ -39,6 +55,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -62,6 +81,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -82,6 +104,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -105,6 +130,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
@@ -130,6 +158,9 @@
                       fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
+    ds = load('lightray.h5')
+    compare_light_ray_solutions(lr, ds)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -75,8 +75,13 @@
     
     rh = RockstarHaloFinder(ds, **finder_kwargs)
     rh.run()
+    
+    if 'outbase' in finder_kwargs:
+        outbase = finder_kwargs['outbase']
+    else:
+        outbase = "rockstar_halos"
 
-    halos_ds = RockstarDataset("rockstar_halos/halos_0.0.bin")
+    halos_ds = RockstarDataset(outbase + "/halos_0.0.bin")
     try:
         halos_ds.create_field_info()
     except ValueError:

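A small style aside on the rockstar hunk: the explicit 'outbase' in finder_kwargs check could equivalently use dict.get, the more idiomatic spelling of the same fallback:

    outbase = finder_kwargs.get('outbase', 'rockstar_halos')
    halos_ds = RockstarDataset(outbase + "/halos_0.0.bin")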
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/photon_simulator/api.py
--- a/yt/analysis_modules/photon_simulator/api.py
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -10,12 +10,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from numpy import VisibleDeprecationWarning
+from yt.funcs import issue_deprecation_warning
 
-import warnings
-warnings.warn("The photon_simulator module is deprecated. "
-              "Please use pyXSIM (http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim) instead.",
-              VisibleDeprecationWarning, stacklevel=2)
+issue_deprecation_warning("The photon_simulator module is deprecated. Please use pyXSIM "
+                          "(http://hea-www.cfa.harvard.edu/~jzuhone/pyxsim) instead.")
 
 from .photon_models import \
      PhotonModel, \

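The photon_simulator change swaps the inline warnings.warn for yt's shared helper, which centralizes the warning category and stacklevel. A sketch of what such a helper looks like (the exact body of yt.funcs.issue_deprecation_warning is assumed here, not quoted from the source):

    import warnings
    from numpy import VisibleDeprecationWarning

    def issue_deprecation_warning(msg):
        warnings.warn(msg, VisibleDeprecationWarning, stacklevel=3)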
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -31,7 +31,7 @@
 from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import assert_same_wcs
+from yt.visualization.fits_image import assert_same_wcs
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     communication_system, parallel_root_only, get_mpi_type, \
     parallel_capable

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -13,7 +13,7 @@
 import numpy as np
 from yt.utilities.on_demand_imports import _astropy
 from yt.utilities.orientation import Orientation
-from yt.utilities.fits_image import FITSImageData, sanitize_fits_unit
+from yt.visualization.fits_image import FITSImageData, sanitize_fits_unit
 from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
 from yt.funcs import get_pbar
 from yt.utilities.physical_constants import clight, mh

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -1,18 +1,8 @@
-"""
-API for spectral_integrator
-
-
-
-"""
+from yt.funcs import issue_deprecation_warning
 
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
+issue_deprecation_warning("The spectral_integrator module is deprecated. "
+                          "'add_xray_emissivity_field' can now be imported "
+                          "from the yt module.")
 
-from .spectral_frequency_integrator import \
-    EmissivityIntegrator, \
+from yt.fields.xray_emission_fields import \
     add_xray_emissivity_field
\ No newline at end of file

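Per the deprecation message, the supported entry point is now the top-level export added in the yt/__init__.py hunk earlier in this changeset. The (ds, e_min, e_max) positional form matches the removed implementation; the new implementation's keyword arguments are not shown in this (truncated) diff:

    import yt

    ds = yt.load("my_dataset")               # hypothetical dataset path
    yt.add_xray_emissivity_field(ds, 0.5, 2.0)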
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ /dev/null
@@ -1,279 +0,0 @@
-"""
-Integrator classes to deal with interpolation and integration of input spectral
-bins.  Currently only supports Cloudy and APEC-style data.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.utilities.on_demand_imports import _h5py as h5py
-import numpy as np
-import os
-
-from yt.funcs import \
-     download_file, \
-     mylog, \
-     only_on_root
-
-from yt.utilities.exceptions import YTFieldNotFound
-from yt.utilities.exceptions import YTException
-from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, BilinearFieldInterpolator
-from yt.utilities.physical_constants import \
-    hcgs, mp
-from yt.units.yt_array import YTArray, YTQuantity
-from yt.utilities.physical_ratios import \
-    primordial_H_mass_fraction, erg_per_keV
-
-xray_data_version = 1
-
-def _get_data_file(data_file=None):
-    if data_file is None:
-        data_file = "cloudy_emissivity.h5"
-    data_url = "http://yt-project.org/data"
-    if "YT_DEST" in os.environ and \
-      os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
-        data_dir = os.path.join(os.environ["YT_DEST"], "data")
-    else:
-        data_dir = "."
-    data_path = os.path.join(data_dir, data_file)
-    if not os.path.exists(data_path):
-        mylog.info("Attempting to download supplementary data from %s to %s." % 
-                   (data_url, data_dir))
-        fn = download_file(os.path.join(data_url, data_file), data_path)
-        if fn != data_path:
-            raise RuntimeError("Failed to download supplementary data.")
-    return data_path
-
-class EnergyBoundsException(YTException):
-    def __init__(self, lower, upper):
-        self.lower = lower
-        self.upper = upper
-
-    def __str__(self):
-        return "Energy bounds are %e to %e keV." % \
-          (self.lower, self.upper)
-
-class ObsoleteDataException(YTException):
-    def __str__(self):
-        return "X-ray emissivity data is out of date.\n" + \
-               "Download the latest data from http://yt-project.org/data/cloudy_emissivity.h5 and move it to %s." % \
-          os.path.join(os.environ["YT_DEST"], "data", "cloudy_emissivity.h5")
-          
-class EmissivityIntegrator(object):
-    r"""Class for making X-ray emissivity fields with hdf5 data tables 
-    from Cloudy.
-    
-    Initialize an EmissivityIntegrator object.
-
-    Parameters
-    ----------
-    filename: string, default None
-        Path to data file containing emissivity values.  If None,
-        a file called "cloudy_emissivity.h5" is used, for photoionized
-        plasmas. A second option, for collisionally ionized plasmas, is
-        in the file "apec_emissivity.h5", available at http://yt-project.org/data.
-        These files contain emissivity tables for primordial elements and
-        for metals at solar metallicity for the energy range 0.1 to 100 keV.
-        Default: None.
-        
-    """
-    def __init__(self, filename=None):
-
-        default_filename = False
-        if filename is None:
-            filename = _get_data_file()
-            default_filename = True
-
-        if not os.path.exists(filename):
-            mylog.warning("File %s does not exist, will attempt to find it." % filename)
-            filename = _get_data_file(data_file=filename)
-        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
-        in_file = h5py.File(filename, "r")
-        if "info" in in_file.attrs:
-            only_on_root(mylog.info, in_file.attrs["info"])
-        if default_filename and \
-          in_file.attrs["version"] < xray_data_version:
-            raise ObsoleteDataException()
-        else:
-            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
-                         in_file.attrs["version"])
-
-        for field in ["emissivity_primordial", "emissivity_metals",
-                      "log_nH", "log_T", "log_E"]:
-            if field in in_file:
-                setattr(self, field, in_file[field][:])
-        in_file.close()
-
-        E_diff = np.diff(self.log_E)
-        self.E_bins = \
-                  YTArray(np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
-                                                      [self.log_E[-1] - 0.5 * E_diff[-1],
-                                                       self.log_E[-1] + 0.5 * E_diff[-1]]])),
-                          "keV")
-        self.dnu = (np.diff(self.E_bins)/hcgs).in_units("Hz")
-
-    def get_interpolator(self, data, e_min, e_max):
-        e_min = YTQuantity(e_min, "keV")
-        e_max = YTQuantity(e_max, "keV")
-        if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
-          (e_max - self.E_bins[-1]) / e_max > 1e-3:
-            raise EnergyBoundsException(self.E_bins[0], self.E_bins[-1])
-        e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
-        e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
-        e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
-
-        my_dnu = self.dnu[e_is: e_ie].copy()
-        # clip edge bins if the requested range is smaller
-        my_dnu[0] -= ((e_min - self.E_bins[e_is])/hcgs).in_units("Hz")
-        my_dnu[-1] -= ((self.E_bins[e_ie] - e_max)/hcgs).in_units("Hz")
-
-        interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
-        if len(data.shape) == 2:
-            emiss = UnilinearFieldInterpolator(np.log10(interp_data),
-                                               [self.log_T[0],  self.log_T[-1]],
-                                               "log_T", truncate=True)
-        else:
-            emiss = BilinearFieldInterpolator(np.log10(interp_data),
-                                              [self.log_nH[0], self.log_nH[-1],
-                                               self.log_T[0],  self.log_T[-1]],
-                                              ["log_nH", "log_T"], truncate=True)
-
-        return emiss
-
-def add_xray_emissivity_field(ds, e_min, e_max,
-                              filename=None,
-                              with_metals=True,
-                              constant_metallicity=None):
-    r"""Create X-ray emissivity fields for a given energy range.
-
-    Parameters
-    ----------
-    e_min: float
-        the minimum energy in keV for the energy band.
-    e_min: float
-        the maximum energy in keV for the energy band.
-    filename: string, optional
-        Path to data file containing emissivity values.  If None,
-        a file called "cloudy_emissivity.h5" is used, for photoionized
-        plasmas. A second option, for collisionally ionized plasmas, is
-        in the file "apec_emissivity.h5", available at http://yt-project.org/data.
-        These files contain emissivity tables for primordial elements and
-        for metals at solar metallicity for the energy range 0.1 to 100 keV.
-        Default: None.
-    with_metals: bool, optional
-        If True, use the metallicity field to add the contribution from 
-        metals.  If False, only the emission from H/He is considered.
-        Default: True.
-    constant_metallicity: float, optional
-        If specified, assume a constant metallicity for the emission 
-        from metals.  The *with_metals* keyword must be set to False 
-        to use this. It should be given in unit of solar metallicity.
-        Default: None.
-
-    This will create three fields:
-
-    "xray_emissivity_{e_min}_{e_max}_keV" (erg s^-1 cm^-3)
-    "xray_luminosity_{e_min}_{e_max}_keV" (erg s^-1)
-    "xray_photon_emissivity_{e_min}_{e_max}_keV" (photons s^-1 cm^-3)
-
-    Examples
-    --------
-
-    >>> from yt.mods import *
-    >>> from yt.analysis_modules.spectral_integrator.api import *
-    >>> ds = load(dataset)
-    >>> add_xray_emissivity_field(ds, 0.5, 2)
-    >>> p = ProjectionPlot(ds, 'x', "xray_emissivity_0.5_2_keV")
-    >>> p.save()
-
-    """
-
-    if with_metals:
-        try:
-            ds._get_field_info("metal_density")
-        except YTFieldNotFound:
-            raise RuntimeError("Your dataset does not have a \"metal_density\" field! " +
-                               "Perhaps you should specify a constant metallicity?")
-
-    my_si = EmissivityIntegrator(filename=filename)
-
-    em_0 = my_si.get_interpolator(my_si.emissivity_primordial, e_min, e_max)
-    em_Z = None
-    if with_metals or constant_metallicity is not None:
-        em_Z = my_si.get_interpolator(my_si.emissivity_metals, e_min, e_max)
-
-    energy_erg = np.power(10, my_si.log_E) * erg_per_keV
-    emp_0 = my_si.get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
-                                   e_min, e_max)
-    emp_Z = None
-    if with_metals or constant_metallicity is not None:
-        emp_Z = my_si.get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
-                                       e_min, e_max)
-
-    try:
-        ds._get_field_info("H_number_density")
-    except YTFieldNotFound:
-        mylog.warning("Could not find a field for \"H_number_density\". Assuming primordial H " +
-                      "mass fraction.")
-        def _nh(field, data):
-            return primordial_H_mass_fraction*data["gas","density"]/mp
-        ds.add_field(("gas", "H_number_density"), function=_nh, units="cm**-3")
-
-    def _emissivity_field(field, data):
-        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
-              "log_T"   : np.log10(data["gas","temperature"])}
-
-        my_emissivity = np.power(10, em_0(dd))
-        if em_Z is not None:
-            if with_metals:
-                my_Z = data["gas","metallicity"]
-            elif constant_metallicity is not None:
-                my_Z = constant_metallicity
-            my_emissivity += my_Z * np.power(10, em_Z(dd))
-
-        return data["gas","H_number_density"]**2 * \
-            YTArray(my_emissivity, "erg*cm**3/s")
-
-    emiss_name = "xray_emissivity_%s_%s_keV" % (e_min, e_max)
-    ds.add_field(("gas", emiss_name), function=_emissivity_field,
-                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
-                 units="erg/cm**3/s")
-
-    def _luminosity_field(field, data):
-        return data[emiss_name] * data["cell_volume"]
-
-    lum_name = "xray_luminosity_%s_%s_keV" % (e_min, e_max)
-    ds.add_field(("gas", lum_name), function=_luminosity_field,
-                 display_name=r"\rm{L}_{X} (%s-%s keV)" % (e_min, e_max),
-                 units="erg/s")
-
-    def _photon_emissivity_field(field, data):
-        dd = {"log_nH" : np.log10(data["gas","H_number_density"]),
-              "log_T"   : np.log10(data["gas","temperature"])}
-
-        my_emissivity = np.power(10, emp_0(dd))
-        if emp_Z is not None:
-            if with_metals:
-                my_Z = data["gas","metallicity"]
-            elif constant_metallicity is not None:
-                my_Z = constant_metallicity
-            my_emissivity += my_Z * np.power(10, emp_Z(dd))
-
-        return data["gas","H_number_density"]**2 * \
-            YTArray(my_emissivity, "photons*cm**3/s")
-
-    phot_name = "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)
-    ds.add_field(("gas", phot_name), function=_photon_emissivity_field,
-                 display_name=r"\epsilon_{X} (%s-%s keV)" % (e_min, e_max),
-                 units="photons/cm**3/s")
-
-    return emiss_name, lum_name, phot_name

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -375,7 +375,7 @@
         >>> sky_center = (30., 45., "deg")
         >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        from yt.utilities.fits_image import FITSImageData
+        from yt.visualization.fits_image import FITSImageData
 
         dx = self.dx.in_units("kpc")
         dy = dx

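The photon_simulator, ppv_cube, and sunyaev_zeldovich hunks all track the same relocation of fits_image from yt.utilities to yt.visualization; combined with the yt/__init__.py hunk above, the classes are also importable from the top level:

    from yt.visualization.fits_image import FITSImageData   # new home
    from yt import FITSImageData                            # also works now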
diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -67,6 +67,7 @@
     ignore_invalid_unit_operation_errors = 'False',
     chunk_size = '1000',
     xray_data_dir = '/does/not/exist',
+    supp_data_dir = '/does/not/exist',
     default_colormap = 'arbre',
     ray_tracing_engine = 'embree',
     )

diff -r 6204277adfc8d170d35166d85775c2715bd22055 -r d548f42dcc231e6228c6e466aee4697f13f85a1c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -56,6 +56,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, parallel_root_only, communication_system
 from yt.units.unit_object import Unit
+from yt.units.yt_array import uconcatenate
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
 from yt.fields.field_exceptions import \
@@ -411,9 +412,11 @@
                 path_length_unit = self.ds.field_info[path_element_name].units
                 path_length_unit = Unit(path_length_unit,
                                         registry=self.ds.unit_registry)
-                # Only convert to CGS for path elements that aren't angles
+                # Only convert to appropriate unit system for path
+                # elements that aren't angles
                 if not path_length_unit.is_dimensionless:
-                    path_length_unit = path_length_unit.get_cgs_equivalent()
+                    path_length_unit = path_length_unit.get_base_equivalent(
+                        unit_system=self.ds.unit_system)
             if self.weight_field is None:
                 self._projected_units[field] = field_unit*path_length_unit
             else:
@@ -663,13 +666,17 @@
                          for field in fields]
         domain_dims = self.ds.domain_dimensions.astype("int64") \
                     * self.ds.relative_refinement(0, self.level)
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.array(refine_by, dtype="i8")
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
             # NOTE: This usage of "refine_by" is actually *okay*, because it's
             # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
-                        domain_dims, self.ds.refine_by)
+                        domain_dims, refine_by)
         for name, v in zip(fields, output_fields):
             fi = self.ds._get_field_info(*name)
             self[name] = self.ds.arr(v, fi.units)
@@ -937,6 +944,12 @@
         if len(fields) == 0: return
         ls = self._initialize_level_state(fields)
         min_level = self._compute_minimum_level()
+        # NOTE: This usage of "refine_by" is actually *okay*, because it's
+        # being used with respect to iref, which is *already* scaled!
+        refine_by = self.ds.refine_by
+        if not iterable(self.ds.refine_by):
+            refine_by = [refine_by, refine_by, refine_by]
+        refine_by = np.array(refine_by, dtype="i8")
         for level in range(self.level + 1):
             if level < min_level:
                 self._update_level_state(ls)
@@ -951,11 +964,9 @@
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
-                # NOTE: This usage of "refine_by" is actually *okay*, because it's
-                # being used with respect to iref, which is *already* scaled!
                 tot -= fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
-                            chunk.ires, domain_dims, self.ds.refine_by)
+                            chunk.ires, domain_dims, refine_by)
             if level == 0 and tot != 0:
                 raise RuntimeError
             self._update_level_state(ls)
@@ -1118,9 +1129,11 @@
                 verts.append(my_verts)
         verts = np.concatenate(verts).transpose()
         verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
-        self.vertices = verts
+        # verts is an ndarray here and will always be in code units, so we
+        # expose it in the public API as a YTArray
+        self.vertices = self.ds.arr(verts, 'code_length')
         if fields is not None:
-            samples = np.concatenate(samples)
+            samples = uconcatenate(samples)
             samples = self.comm.par_combine_object(samples, op='cat',
                                 datatype='array')
             if sample_type == "face":
@@ -1222,17 +1235,24 @@
             ff = np.ones_like(vc_data[self.surface_field], dtype="float64")
         else:
             ff = vc_data[fluxing_field]
-
-        return march_cubes_grid_flux(
-            self.field_value, vc_data[self.surface_field], vc_data[field_x],
-            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
-            grid.dds)
+        surf_vals = vc_data[self.surface_field]
+        field_x_vals = vc_data[field_x]
+        field_y_vals = vc_data[field_y]
+        field_z_vals = vc_data[field_z]
+        ret = march_cubes_grid_flux(
+            self.field_value, surf_vals, field_x_vals, field_y_vals, field_z_vals,
+            ff, mask, grid.LeftEdge, grid.dds)
+        # assumes all the fluxing fields have the same units
+        ret_units = field_x_vals.units * ff.units * grid.dds.units**2
+        ret = self.ds.arr(ret, ret_units)
+        ret.convert_to_units(self.ds.unit_system[ret_units.dimensions])
+        return ret
 
     @property
     def triangles(self):
         if self.vertices is None:
             self.get_data()
-        vv = np.empty((self.vertices.shape[1]/3, 3, 3), dtype="float64")
+        vv = np.empty((self.vertices.shape[1]//3, 3, 3), dtype="float64")
         for i in range(3):
             for j in range(3):
                 vv[:,i,j] = self.vertices[j,i::3]
@@ -1314,7 +1334,8 @@
         >>> def _Emissivity(field, data):
         ...     return (data['density']*data['density'] *
         ...             np.sqrt(data['temperature']))
-        >>> ds.add_field("emissivity", function=_Emissivity, units=r"g*K/cm**6")
+        >>> ds.add_field("emissivity", function=_Emissivity,
+        ...              sampling_type='cell', units=r"g**2*sqrt(K)/cm**6")
         >>> for i, r in enumerate(rhos):
         ...     surf = ds.surface(sp,'density',r)
         ...     surf.export_obj("my_galaxy", transparency=trans[i],
@@ -1429,22 +1450,22 @@
         vtype = [("x","float"),("y","float"), ("z","float")]
         if plot_index == 0:
             fobj.write("# yt OBJ file\n")
-            fobj.write("# www.yt-project.com\n")
+            fobj.write("# www.yt-project.org\n")
             fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
             fmtl.write("# yt MLT file\n")
-            fmtl.write("# www.yt-project.com\n\n")
+            fmtl.write("# www.yt-project.org\n\n")
         #(0) formulate vertices
         nv = self.vertices.shape[1] # number of groups of vertices
-        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        f = np.empty(nv//self.vertices.shape[0], dtype=ftype) # store sets of face colors
         v = np.empty(nv, dtype=vtype) # stores vertices
         if color_field is not None:
             cs = self[color_field]
         else:
-            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+            cs = np.empty(self.vertices.shape[1]//self.vertices.shape[0])
         if emit_field is not None:
             em = self[emit_field]
         else:
-            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+            em = np.empty(self.vertices.shape[1]//self.vertices.shape[0])
         self._color_samples_obj(cs, em, color_log, emit_log, color_map, f,
                                 color_field_max, color_field_min,  color_field,
                                 emit_field_max, emit_field_min, emit_field) # map color values to color scheme
@@ -1748,7 +1769,7 @@
             arr = np.empty(cs.shape[0], dtype=np.dtype(fs))
             self._color_samples(cs, color_log, color_map, arr)
         else:
-            arr = np.empty(nv/3, np.dtype(fs[:-3]))
+            arr = np.empty(nv//3, np.dtype(fs[:-3]))
         for i, ax in enumerate("xyz"):
             # Do the bounds first since we cast to f32
             tmp = self.vertices[i,:]
@@ -1761,7 +1782,7 @@
         v.tofile(f)
         arr["ni"][:] = 3
         vi = np.arange(nv, dtype="<i")
-        vi.shape = (nv/3, 3)
+        vi.shape = (nv//3, 3)
         arr["v1"][:] = vi[:,0]
         arr["v2"][:] = vi[:,1]
         arr["v3"][:] = vi[:,2]

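One recurring pattern in the construction_data_containers hunks: refine_by may be reported by a frontend as a scalar or as one value per axis, so it is normalized to a 3-vector of int64 before being passed to fill_region. In isolation (using np.iterable in place of yt's iterable helper):

    import numpy as np

    refine_by = 2                       # scalar form
    if not np.iterable(refine_by):
        refine_by = [refine_by] * 3
    refine_by = np.array(refine_by, dtype="i8")
    print(refine_by)                    # [2 2 2]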
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/d93179148e64/
Changeset:   d93179148e64
Branch:      yt
User:        jzuhone
Date:        2017-01-27 15:49:00+00:00
Summary:     Merge
Affected #:  12 files

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e doc/source/analyzing/Particle_Trajectories.ipynb
--- /dev/null
+++ b/doc/source/analyzing/Particle_Trajectories.ipynb
@@ -0,0 +1,390 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "One can create particle trajectories from a `DatasetSeries` object for a specified list of particles identified by their unique indices using the `particle_trajectories` method. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "%matplotlib inline\n",
+    "import yt\n",
+    "import glob\n",
+    "from yt.config import ytcfg\n",
+    "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+    "import matplotlib.pyplot as plt\n",
+    "from mpl_toolkits.mplot3d import Axes3D"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, let's start off with a FLASH dataset containing only two particles in a mutual circular orbit. We can get the list of filenames this way:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "my_fns = glob.glob(path+\"/Orbit/orbit_hdf5_chk_00[0-9][0-9]\")\n",
+    "my_fns.sort()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And let's define a list of fields that we want to include in the trajectories. The position fields will be included by default, so let's just ask for the velocity fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "fields = [\"particle_velocity_x\", \"particle_velocity_y\", \"particle_velocity_z\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "There are only two particles, but for consistency's sake let's grab their indices from the dataset itself:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds = yt.load(my_fns[0])\n",
+    "dd = ds.all_data()\n",
+    "indices = dd[\"particle_index\"].astype(\"int\")\n",
+    "print (indices)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "which is what we expected them to be. Now we're ready to create a `DatasetSeries` object and use it to create particle trajectories: "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "# suppress_logging=True cuts down on a lot of noise\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `ParticleTrajectories` object `trajs` is essentially a dictionary-like container for the particle fields along the trajectory, and can be accessed as such:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (trajs[\"particle_position_x\"])\n",
+    "print (trajs[\"particle_position_x\"].shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that each field is a 2D NumPy array with the different particle indices along the first dimension and the times along the second dimension. As such, we can access them individually by indexing the field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
+    "plt.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And we can plot the velocity fields as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If we want to access the time along the trajectory, we use the key `\"particle_time\"`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Alternatively, if we know the particle index we'd like to examine, we can get an individual trajectory corresponding to that index:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "particle1 = trajs.trajectory_from_index(1)\n",
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now let's look at a more complicated (and fun!) example. We'll use an Enzo cosmology dataset. First, we'll find the maximum density in the domain, and obtain the indices of the particles within some radius of the center. First, let's have a look at what we're getting:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+    "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "So far, so good--it looks like we've centered on a galaxy cluster. Let's grab all of the dark matter particles within a sphere of 0.5 Mpc (identified by `\"particle_type == 1\"`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
+    "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we'll get the list of datasets we want, and create trajectories for these particles:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
+    "my_fns.sort()\n",
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Matplotlib can make 3D plots, so let's pick three particle trajectories at random and look at them in the volume:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "fig = plt.figure(figsize=(8.0, 8.0))\n",
+    "ax = fig.add_subplot(111, projection='3d')\n",
+    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
+    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
+    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "It looks like these three different particles fell into the cluster along different filaments. We can also look at their x-positions only as a function of time:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "trajs.add_fields([\"density\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][25])\n",
+    "plt.yscale(\"log\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, the particle trajectories can be written to disk. Two options are provided: ASCII text files with a column for each field and the time, and HDF5 files:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
+    "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ /dev/null
@@ -1,385 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The `particle_trajectories` analysis module enables the construction of particle trajectories from a time series of datasets for a specified list of particles identified by their unique indices. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "%matplotlib inline\n",
-    "import yt\n",
-    "import glob\n",
-    "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
-    "from yt.config import ytcfg\n",
-    "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
-    "import matplotlib.pyplot as plt"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, let's start off with a FLASH dataset containing only two particles in a mutual circular orbit. We can get the list of filenames this way:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "my_fns = glob.glob(path+\"/Orbit/orbit_hdf5_chk_00[0-9][0-9]\")\n",
-    "my_fns.sort()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "And let's define a list of fields that we want to include in the trajectories. The position fields will be included by default, so let's just ask for the velocity fields:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "fields = [\"particle_velocity_x\", \"particle_velocity_y\", \"particle_velocity_z\"]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "There are only two particles, but for consistency's sake let's grab their indices from the dataset itself:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ds = yt.load(my_fns[0])\n",
-    "dd = ds.all_data()\n",
-    "indices = dd[\"particle_index\"].astype(\"int\")\n",
-    "print (indices)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "which is what we expected them to be. Now we're ready to create a `ParticleTrajectories` object:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs = ParticleTrajectories(my_fns, indices, fields=fields)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The `ParticleTrajectories` object `trajs` is essentially a dictionary-like container for the particle fields along the trajectory, and can be accessed as such:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "print (trajs[\"particle_position_x\"])\n",
-    "print (trajs[\"particle_position_x\"].shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Note that each field is a 2D NumPy array with the different particle indices along the first dimension and the times along the second dimension. As such, we can access them individually by indexing the field:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_position_x\"][0].ndarray_view(), trajs[\"particle_position_y\"][0].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_position_x\"][1].ndarray_view(), trajs[\"particle_position_y\"][1].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "And we can plot the velocity fields as well:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_velocity_x\"][0].ndarray_view(), trajs[\"particle_velocity_y\"][0].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_velocity_x\"][1].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "If we want to access the time along the trajectory, we use the key `\"particle_time\"`:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_x\"][1].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Alternatively, if we know the particle index we'd like to examine, we can get an individual trajectory corresponding to that index:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "particle1 = trajs.trajectory_from_index(1)\n",
-    "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_x\"].ndarray_view())\n",
-    "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_y\"].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now let's look at a more complicated (and fun!) example. We'll use an Enzo cosmology dataset. First, we'll find the maximum density in the domain, and obtain the indices of the particles within some radius of the center. First, let's have a look at what we're getting:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-    "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
-    "slc.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "So far, so good--it looks like we've centered on a galaxy cluster. Let's grab all of the dark matter particles within a sphere of 0.5 Mpc (identified by `\"particle_type == 1\"`):"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
-    "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next we'll get the list of datasets we want, and create trajectories for these particles:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
-    "my_fns.sort()\n",
-    "trajs = ParticleTrajectories(my_fns, indices)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Matplotlib can make 3D plots, so let's pick three particle trajectories at random and look at them in the volume:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from mpl_toolkits.mplot3d import Axes3D\n",
-    "fig = plt.figure(figsize=(8.0, 8.0))\n",
-    "ax = fig.add_subplot(111, projection='3d')\n",
-    "ax.plot(trajs[\"particle_position_x\"][100].ndarray_view(), trajs[\"particle_position_z\"][100].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][100].ndarray_view())\n",
-    "ax.plot(trajs[\"particle_position_x\"][8].ndarray_view(), trajs[\"particle_position_z\"][8].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][8].ndarray_view())\n",
-    "ax.plot(trajs[\"particle_position_x\"][25].ndarray_view(), trajs[\"particle_position_z\"][25].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][25].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "It looks like these three different particles fell into the cluster along different filaments. We can also look at their x-positions only as a function of time:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][100].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][8].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][25].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs.add_fields([\"density\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][100].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][8].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][25].ndarray_view())\n",
-    "plt.yscale(\"log\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, the particle trajectories can be written to disk. Two options are provided: ASCII text files with a column for each field and the time, and HDF5 files:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
-    "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.1"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -18,4 +18,3 @@
    exporting
    two_point_functions
    clump_finding
-   particle_trajectories

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e doc/source/analyzing/analysis_modules/particle_trajectories.rst
--- a/doc/source/analyzing/analysis_modules/particle_trajectories.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _particle-trajectories:
-
-Particle Trajectories
----------------------
-
-.. notebook:: Particle_Trajectories.ipynb

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -22,4 +22,5 @@
    generating_processed_data
    saving_data
    time_series_analysis
+   particle_trajectories
    parallel_computation

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e doc/source/analyzing/particle_trajectories.rst
--- /dev/null
+++ b/doc/source/analyzing/particle_trajectories.rst
@@ -0,0 +1,6 @@
+.. _particle-trajectories:
+
+Particle Trajectories
+---------------------
+
+.. notebook:: Particle_Trajectories.ipynb

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -101,6 +101,9 @@
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
 
+  local_particle_trajectory_000:
+    - yt/data_objects/tests/test_particle_trajectories.py
+
 other_tests:
   unittests:
      - '--exclude=test_mesh_slices'  # disable randomly failing test

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e yt/analysis_modules/particle_trajectories/api.py
--- a/yt/analysis_modules/particle_trajectories/api.py
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -1,13 +1,6 @@
-"""
-API for particle_trajectories
-"""
-from __future__ import absolute_import
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
+from yt.funcs import issue_deprecation_warning
 
-from .particle_trajectories import ParticleTrajectories
+issue_deprecation_warning("Particle trajectories are now available from DatasetSeries "
+                          "objects as ts.particle_trajectories. The ParticleTrajectories "
+                          "analysis module is deprecated.")
+from yt.data_objects.particle_trajectories import ParticleTrajectories
\ No newline at end of file
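
For readers migrating, the shim above keeps the old import path working (with a
warning), but the relocated class now expects a DatasetSeries rather than a
list of filenames. A minimal before/after sketch, assuming a time-sorted
filename list `my_fns` and an integer index array `indices` as in the notebook:

    # Deprecated path -- still importable, but issues a deprecation warning:
    from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    # New path -- build a DatasetSeries and call the method on it:
    import yt
    ts = yt.DatasetSeries(my_fns)
    trajs = ts.particle_trajectories(indices, fields=None, suppress_logging=False)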

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Particle trajectories
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_data import YTFieldData
-from yt.data_objects.time_series import DatasetSeries
-from yt.utilities.lib.particle_mesh_operations import CICSample_3
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_root_only
-from yt.funcs import mylog, get_pbar
-from yt.units.yt_array import array_like_field
-from yt.config import ytcfg
-from collections import OrderedDict
-
-import numpy as np
-from yt.utilities.on_demand_imports import _h5py as h5py
-
-class ParticleTrajectories(object):
-    r"""A collection of particle trajectories in time over a series of
-    datasets. 
-
-    The ParticleTrajectories object contains a collection of
-    particle trajectories for a specified set of particle indices. 
-    
-    Parameters
-    ----------
-    outputs : `yt.data_objects.time_series.DatasetSeries` or list of strings
-        DatasetSeries object, or a time-sorted list of filenames to
-        construct a new DatasetSeries object.
-    indices : array_like
-        An integer array of particle indices whose trajectories we
-        want to track. If they are not sorted they will be sorted.
-    fields : list of strings, optional
-        A set of fields that is retrieved when the trajectory
-        collection is instantiated.
-        Default : None (will default to the fields 'particle_position_x',
-        'particle_position_y', 'particle_position_z')
-    suppress_logging : boolean
-        Suppress yt's logging when iterating over the simulation time
-        series.
-        Default : False
-
-    Examples
-    ________
-    >>> from yt.mods import *
-    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
-    >>> my_fns.sort()
-    >>> fields = ["particle_position_x", "particle_position_y",
-    >>>           "particle_position_z", "particle_velocity_x",
-    >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> ds = load(my_fns[0])
-    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
-    >>> indices = init_sphere["particle_index"].astype("int")
-    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
-    >>> for t in trajs :
-    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-    """
-    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
-
-        indices.sort() # Just in case the caller wasn't careful
-        self.field_data = YTFieldData()
-        if isinstance(outputs, DatasetSeries):
-            self.data_series = outputs
-        else:
-            self.data_series = DatasetSeries(outputs)
-        self.masks = []
-        self.sorts = []
-        self.array_indices = []
-        self.indices = indices
-        self.num_indices = len(indices)
-        self.num_steps = len(outputs)
-        self.times = []
-        self.suppress_logging = suppress_logging
-
-        if fields is None: fields = []
-        fields = list(OrderedDict.fromkeys(fields))
-
-        if self.suppress_logging:
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
-        
-        fds = {}
-        ds_first = self.data_series[0]
-        dd_first = ds_first.all_data()
-        idx_field = dd_first._determine_fields("particle_index")[0]
-        for field in ("particle_position_%s" % ax for ax in "xyz"):
-            fds[field] = dd_first._determine_fields(field)[0]
-
-        my_storage = {}
-        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
-        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-            dd = ds.all_data()
-            newtags = dd[idx_field].d.astype("int64")
-            mask = np.in1d(newtags, indices, assume_unique=True)
-            sort = np.argsort(newtags[mask])
-            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
-            self.array_indices.append(array_indices)
-            self.masks.append(mask)
-            self.sorts.append(sort)
-
-            pfields = {}
-            for field in ("particle_position_%s" % ax for ax in "xyz"):
-                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
-
-            sto.result_id = ds.parameter_filename
-            sto.result = (ds.current_time, array_indices, pfields)
-            pbar.update(i)
-        pbar.finish()
-
-        if self.suppress_logging:
-            mylog.setLevel(old_level)
-
-        times = []
-        for fn, (time, indices, pfields) in sorted(my_storage.items()):
-            times.append(time)
-        self.times = self.data_series[0].arr([time for time in times], times[0].units)
-
-        self.particle_fields = []
-        output_field = np.empty((self.num_indices, self.num_steps))
-        output_field.fill(np.nan)
-        for field in ("particle_position_%s" % ax for ax in "xyz"):
-            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
-                output_field[indices, i] = pfields[field]
-            self.field_data[field] = array_like_field(
-                dd_first, output_field.copy(), fds[field])
-            self.particle_fields.append(field)
-
-        # Instantiate fields the caller requested
-        self._get_data(fields)
-
-    def has_key(self, key):
-        return (key in self.field_data)
-    
-    def keys(self):
-        return self.field_data.keys()
-
-    def __getitem__(self, key):
-        """
-        Get the field associated with key.
-        """
-        if key == "particle_time":
-            return self.times
-        if key not in self.field_data:
-            self._get_data([key])
-        return self.field_data[key]
-    
-    def __setitem__(self, key, val):
-        """
-        Sets a field to be some other value.
-        """
-        self.field_data[key] = val
-                        
-    def __delitem__(self, key):
-        """
-        Delete the field from the trajectory
-        """
-        del self.field_data[key]
-
-    def __iter__(self):
-        """
-        This iterates over the trajectories for
-        the different particles, returning dicts
-        of fields for each trajectory
-        """
-        for idx in range(self.num_indices):
-            traj = {}
-            traj["particle_index"] = self.indices[idx]
-            traj["particle_time"] = self.times
-            for field in self.field_data.keys():
-                traj[field] = self[field][idx,:]
-            yield traj
-            
-    def __len__(self):
-        """
-        The number of individual trajectories
-        """
-        return self.num_indices
-
-    def add_fields(self, fields):
-        """
-        Add a list of fields to an existing trajectory
-
-        Parameters
-        ----------
-        fields : list of strings
-            A list of fields to be added to the current trajectory
-            collection.
-
-        Examples
-        ________
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
-        """
-        self._get_data(fields)
-                
-    def _get_data(self, fields):
-        """
-        Get a list of fields to include in the trajectory collection.
-        The trajectory collection itself is a dict of 2D numpy arrays,
-        with shape (num_indices, num_steps)
-        """
-
-        missing_fields = [field for field in fields
-                          if field not in self.field_data]
-        if not missing_fields:
-            return
-
-        if self.suppress_logging:
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
-        ds_first = self.data_series[0]
-        dd_first = ds_first.all_data()
-
-        fds = {}
-        new_particle_fields = []
-        for field in missing_fields:
-            fds[field] = dd_first._determine_fields(field)[0]
-            if field not in self.particle_fields:
-                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
-                    self.particle_fields.append(field)
-                    new_particle_fields.append(field)
-                    
-
-        grid_fields = [field for field in missing_fields
-                       if field not in self.particle_fields]
-        step = int(0)
-        pbar = get_pbar("Generating [%s] fields in trajectories." %
-                        ", ".join(missing_fields), self.num_steps)
-        my_storage = {}
-        
-        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-            mask = self.masks[i]
-            sort = self.sorts[i]
-            pfield = {}
-
-            if new_particle_fields:  # there's at least one particle field
-                dd = ds.all_data()
-                for field in new_particle_fields:
-                    # This is easy... just get the particle fields
-                    pfield[field] = dd[fds[field]].d[mask][sort]
-
-            if grid_fields:
-                # This is hard... must loop over grids
-                for field in grid_fields:
-                    pfield[field] = np.zeros(self.num_indices)
-                x = self["particle_position_x"][:,step].d
-                y = self["particle_position_y"][:,step].d
-                z = self["particle_position_z"][:,step].d
-                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
-
-                # This will fail for non-grid index objects
-                for grid in particle_grids:
-                    cube = grid.retrieve_ghost_zones(1, grid_fields)
-                    for field in grid_fields:
-                        CICSample_3(x, y, z, pfield[field],
-                                    self.num_indices,
-                                    cube[fds[field]],
-                                    np.array(grid.LeftEdge).astype(np.float64),
-                                    np.array(grid.ActiveDimensions).astype(np.int32),
-                                    grid.dds[0])
-            sto.result_id = ds.parameter_filename
-            sto.result = (self.array_indices[i], pfield)
-            pbar.update(step)
-            step += 1
-        pbar.finish()
-
-        output_field = np.empty((self.num_indices,self.num_steps))
-        output_field.fill(np.nan)
-        for field in missing_fields:
-            fd = fds[field]
-            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
-                output_field[indices, i] = pfield[field]
-            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
-
-        if self.suppress_logging:
-            mylog.setLevel(old_level)
-
-    def trajectory_from_index(self, index):
-        """
-        Retrieve a single trajectory corresponding to a specific particle
-        index
-
-        Parameters
-        ----------
-        index : int
-            This defines which particle trajectory from the
-            ParticleTrajectories object will be returned.
-
-        Returns
-        -------
-        A dictionary corresponding to the particle's trajectory and the
-        fields along that trajectory
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> import matplotlib.pylab as pl
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> traj = trajs.trajectory_from_index(indices[0])
-        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
-        >>> pl.savefig("orbit")
-        """
-        mask = np.in1d(self.indices, (index,), assume_unique=True)
-        if not np.any(mask):
-            print("The particle index %d is not in the list!" % (index))
-            raise IndexError
-        fields = [field for field in sorted(self.field_data.keys())]
-        traj = {}
-        traj["particle_time"] = self.times
-        traj["particle_index"] = index
-        for field in fields:
-            traj[field] = self[field][mask,:][0]
-        return traj
-
-    @parallel_root_only
-    def write_out(self, filename_base):
-        """
-        Write out particle trajectories to tab-separated ASCII files (one
-        for each trajectory) with the field names in the file header. Each
-        file is named with a basename and the index number.
-
-        Parameters
-        ----------
-        filename_base : string
-            The prefix for the outputted ASCII files.
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.write_out("orbit_trajectory")       
-        """
-        fields = [field for field in sorted(self.field_data.keys())]
-        num_fields = len(fields)
-        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
-        template_str = "%g\t"*num_fields+"%g\n"
-        for ix in range(self.num_indices):
-            outlines = [first_str]
-            for it in range(self.num_steps):
-                outlines.append(template_str %
-                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
-            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
-            fid.writelines(outlines)
-            fid.close()
-            del fid
-
-    @parallel_root_only
-    def write_out_h5(self, filename):
-        """
-        Write out all the particle trajectories to a single HDF5 file
-        that contains the indices, the times, and the 2D array for each
-        field individually
-
-        Parameters
-        ----------
-
-        filename : string
-            The output filename for the HDF5 file
-
-        Examples
-        --------
-
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.write_out_h5("orbit_trajectories")                
-        """
-        fid = h5py.File(filename, "w")
-        fid.create_dataset("particle_indices", dtype=np.int64,
-                           data=self.indices)
-        fid.close()
-        self.times.write_hdf5(filename, dataset_name="particle_times")
-        fields = [field for field in sorted(self.field_data.keys())]
-        for field in fields:
-            self[field].write_hdf5(filename, dataset_name="%s" % field)

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e yt/data_objects/particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/particle_trajectories.py
@@ -0,0 +1,371 @@
+"""
+Particle trajectories
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_data import YTFieldData
+from yt.utilities.lib.particle_mesh_operations import CICSample_3
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.funcs import mylog, get_pbar
+from yt.units.yt_array import array_like_field
+from yt.config import ytcfg
+from collections import OrderedDict
+
+import numpy as np
+from yt.utilities.on_demand_imports import _h5py as h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    datasets. 
+
+    Parameters
+    ----------
+    outputs : `yt.data_objects.time_series.DatasetSeries`
+        DatasetSeries object from which to draw the particles.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated. Default: None (will default
+        to the fields 'particle_position_x', 'particle_position_y',
+        'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series. Default: False
+
+    Examples
+    --------
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> ds = yt.load(my_fns[0])
+    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> ts = yt.DatasetSeries(my_fns)
+    >>> trajs = ts.particle_trajectories(indices, fields=fields)
+    >>> for t in trajs:
+    >>>     print(t["particle_velocity_x"].max(), t["particle_velocity_x"].min())
+    """
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
+
+        indices.sort() # Just in case the caller wasn't careful
+        self.field_data = YTFieldData()
+        self.data_series = outputs
+        self.masks = []
+        self.sorts = []
+        self.array_indices = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(outputs)
+        self.times = []
+        self.suppress_logging = suppress_logging
+
+        if fields is None: fields = []
+        fields = list(OrderedDict.fromkeys(fields))
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        
+        fds = {}
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+        idx_field = dd_first._determine_fields("particle_index")[0]
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            fds[field] = dd_first._determine_fields(field)[0]
+
+        my_storage = {}
+        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            dd = ds.all_data()
+            newtags = dd[idx_field].d.astype("int64")
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sort = np.argsort(newtags[mask])
+            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
+            self.array_indices.append(array_indices)
+            self.masks.append(mask)
+            self.sorts.append(sort)
+
+            pfields = {}
+            for field in ("particle_position_%s" % ax for ax in "xyz"):
+                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
+            sto.result_id = ds.parameter_filename
+            sto.result = (ds.current_time, array_indices, pfields)
+            pbar.update(i)
+        pbar.finish()
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
+
+        times = []
+        for fn, (time, indices, pfields) in sorted(my_storage.items()):
+            times.append(time)
+        self.times = self.data_series[0].arr(times, times[0].units)
+
+        self.particle_fields = []
+        output_field = np.empty((self.num_indices, self.num_steps))
+        output_field.fill(np.nan)
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfields[field]
+            self.field_data[field] = array_like_field(
+                dd_first, output_field.copy(), fds[field])
+            self.particle_fields.append(field)
+
+        # Instantiate fields the caller requested
+        self._get_data(fields)
+
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key.
+        """
+        if key == "particle_time":
+            return self.times
+        if key not in self.field_data:
+            self._get_data([key])
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in range(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        self._get_data(fields)
+                
+    def _get_data(self, fields):
+        """
+        Get a list of fields to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+
+        missing_fields = [field for field in fields
+                          if field not in self.field_data]
+        if not missing_fields:
+            return
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+
+        fds = {}
+        new_particle_fields = []
+        for field in missing_fields:
+            fds[field] = dd_first._determine_fields(field)[0]
+            if field not in self.particle_fields:
+                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
+                    self.particle_fields.append(field)
+                    new_particle_fields.append(field)
+                    
+
+        grid_fields = [field for field in missing_fields
+                       if field not in self.particle_fields]
+        step = int(0)
+        pbar = get_pbar("Generating [%s] fields in trajectories" %
+                        ", ".join(missing_fields), self.num_steps)
+        my_storage = {}
+        
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            mask = self.masks[i]
+            sort = self.sorts[i]
+            pfield = {}
+
+            if new_particle_fields:  # there's at least one particle field
+                dd = ds.all_data()
+                for field in new_particle_fields:
+                    # This is easy... just get the particle fields
+                    pfield[field] = dd[fds[field]].d[mask][sort]
+
+            if grid_fields:
+                # This is hard... must loop over grids
+                for field in grid_fields:
+                    pfield[field] = np.zeros(self.num_indices)
+                x = self["particle_position_x"][:,step].d
+                y = self["particle_position_y"][:,step].d
+                z = self["particle_position_z"][:,step].d
+                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
+
+                # This will fail for non-grid index objects
+                for grid in particle_grids:
+                    cube = grid.retrieve_ghost_zones(1, grid_fields)
+                    for field in grid_fields:
+                        CICSample_3(x, y, z, pfield[field],
+                                    self.num_indices,
+                                    cube[fds[field]],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    grid.dds[0])
+            sto.result_id = ds.parameter_filename
+            sto.result = (self.array_indices[i], pfield)
+            pbar.update(step)
+            step += 1
+        pbar.finish()
+
+        output_field = np.empty((self.num_indices,self.num_steps))
+        output_field.fill(np.nan)
+        for field in missing_fields:
+            fd = fds[field]
+            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfield[field]
+            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> import matplotlib.pylab as pl
+        >>> ts = yt.DatasetSeries(my_fns)
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print("The particle index %d is not in the list!" % (index))
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    @parallel_root_only
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in range(self.num_indices):
+            outlines = [first_str]
+            for it in range(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+
+    @parallel_root_only
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+        >>> trajs = ts.particle_trajectories(indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fid.create_dataset("particle_indices", dtype=np.int64,
+                           data=self.indices)
+        fid.close()
+        self.times.write_hdf5(filename, dataset_name="particle_times")
+        fields = [field for field in sorted(self.field_data.keys())]
+        for field in fields:
+            self[field].write_hdf5(filename, dataset_name="%s" % field)
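
As the `_get_data` docstring above notes, every field in the collection is a
2D array of shape (num_indices, num_steps), and `__iter__` slices it one
particle at a time. A short sketch of both access patterns, assuming `trajs`
was built as in the class docstring:

    # Whole-collection access: rows are particles, columns are outputs.
    x_all = trajs["particle_position_x"]        # shape (num_indices, num_steps)
    x_one = trajs["particle_position_x"][0]     # one particle's x(t)

    # Per-particle access: iteration yields a dict per trajectory.
    for traj in trajs:
        print(traj["particle_index"], traj["particle_position_x"].max())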

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e yt/data_objects/tests/test_particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/tests/test_particle_trajectories.py
@@ -0,0 +1,45 @@
+import glob
+import os
+
+from yt.config import ytcfg
+from yt.data_objects.time_series import DatasetSeries
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    GenericArrayTest
+
+def setup():
+    ytcfg["yt","__withintesting"] = "True"
+
+data_path = ytcfg.get("yt", "test_data_dir")
+
+pfields = ["particle_position_x", "particle_position_y", "particle_position_z"]
+vfields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+
+ at requires_ds("Orbit/orbit_hdf5_chk_0000")
+def test_orbit_traj():
+    fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+    my_fns = glob.glob(os.path.join(data_path, "Orbit/orbit_hdf5_chk_00[0-9][0-9]"))
+    my_fns.sort()
+    ts = DatasetSeries(my_fns)
+    ds = ts[0]
+    traj = ts.particle_trajectories([1, 2], fields=fields, suppress_logging=True)
+    for field in pfields+vfields:
+        def field_func(name):
+            return traj[name]
+        yield GenericArrayTest(ds, field_func, args=[field])
+
+ at requires_ds("enzo_tiny_cosmology/DD0000/DD0000")
+def test_etc_traj():
+    fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+    my_fns = glob.glob(os.path.join(data_path, "enzo_tiny_cosmology/DD000[0-9]/*.hierarchy"))
+    my_fns.sort()
+    ts = DatasetSeries(my_fns)
+    ds = ts[0]
+    sp = ds.sphere("max", (0.5, "Mpc"))
+    indices = sp["particle_index"][sp["particle_type"] == 1][:5]
+    traj = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)
+    traj.add_fields(["density"])
+    for field in pfields+vfields+["density"]:
+        def field_func(name):
+            return traj[name]
+        yield GenericArrayTest(ds, field_func, args=[field])

diff -r 25d834d0f599e24d3689c8c5af08934edbc12136 -r d93179148e643078781e7dde1a538ece18d9b23e yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -32,6 +32,8 @@
     create_quantity_proxy, \
     analysis_task_registry, \
     AnalysisTask
+from yt.data_objects.particle_trajectories import \
+    ParticleTrajectories
 from yt.funcs import \
     iterable, \
     ensure_list, \
@@ -399,6 +401,41 @@
         self._dataset_cls = ds.__class__
         return ds
 
+    def particle_trajectories(self, indices, fields=None, suppress_logging=False):
+        r"""Create a collection of particle trajectories in time over a series of
+        datasets.
+
+        Parameters
+        ----------
+        indices : array_like
+            An integer array of particle indices whose trajectories we
+            want to track. If they are not sorted they will be sorted.
+        fields : list of strings, optional
+            A set of fields that is retrieved when the trajectory
+            collection is instantiated. Default: None (will default
+            to the fields 'particle_position_x', 'particle_position_y',
+            'particle_position_z')
+        suppress_logging : boolean
+            Suppress yt's logging when iterating over the simulation time
+            series. Default: False
+
+        Examples
+        --------
+        >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+        >>> my_fns.sort()
+        >>> fields = ["particle_position_x", "particle_position_y",
+        >>>           "particle_position_z", "particle_velocity_x",
+        >>>           "particle_velocity_y", "particle_velocity_z"]
+        >>> ds = yt.load(my_fns[0])
+        >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
+        >>> indices = init_sphere["particle_index"].astype("int")
+        >>> ts = DatasetSeries(my_fns)
+        >>> trajs = ts.particle_trajectories(indices, fields=fields)
+        >>> for t in trajs:
+        >>>     print(t["particle_velocity_x"].max(), t["particle_velocity_x"].min())
+        """
+        return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging)
+
 class TimeSeriesQuantitiesContainer(object):
     def __init__(self, data_object, quantities):
         self.data_object = data_object
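
Note that `suppress_logging` only quiets yt's logger for the duration of the
trajectory construction: the implementation saves the configured level and
restores it afterwards. A sketch of the equivalent manual pattern around one's
own time-series loop (the try/finally is an addition for safety, not part of
the code above):

    from yt.config import ytcfg
    from yt.funcs import mylog

    old_level = int(ytcfg.get("yt", "loglevel"))
    mylog.setLevel(40)  # 40 == logging.ERROR, so INFO chatter is hidden
    try:
        pass  # ... iterate over the DatasetSeries here ...
    finally:
        mylog.setLevel(old_level)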


https://bitbucket.org/yt_analysis/yt/commits/8cb97eec7284/
Changeset:   8cb97eec7284
Branch:      yt
User:        jzuhone
Date:        2017-01-29 18:43:22+00:00
Summary:     Add ParticleTrajectories to API docs
Affected #:  1 file

diff -r d93179148e643078781e7dde1a538ece18d9b23e -r 8cb97eec72840e4137bcbb7ccb7f2879ba0fba3c doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -130,6 +130,7 @@
    ~yt.data_objects.time_series.DatasetSeriesObject
    ~yt.data_objects.time_series.TimeSeriesQuantitiesContainer
    ~yt.data_objects.time_series.AnalysisTaskProxy
+   ~yt.data_objects.particle_trajectories.ParticleTrajectories
 
 Geometry Handlers
 -----------------


https://bitbucket.org/yt_analysis/yt/commits/441ff5e2dcd3/
Changeset:   441ff5e2dcd3
Branch:      yt
User:        xarthisius
Date:        2017-02-09 14:49:38+00:00
Summary:     Merged in jzuhone/yt (pull request #2496)

Move particle trajectories to data_objects and make them a DatasetSeries method.

Approved-by: Nathan Goldbaum
Approved-by: Andrew Myers
Approved-by: Kacper Kowalik
Affected #:  13 files

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/analyzing/Particle_Trajectories.ipynb
--- /dev/null
+++ b/doc/source/analyzing/Particle_Trajectories.ipynb
@@ -0,0 +1,390 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "One can create particle trajectories from a `DatasetSeries` object for a specified list of particles identified by their unique indices using the `particle_trajectories` method. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "%matplotlib inline\n",
+    "import yt\n",
+    "import glob\n",
+    "from yt.config import ytcfg\n",
+    "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+    "import matplotlib.pyplot as plt\n",
+    "from mpl_toolkits.mplot3d import Axes3D"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, let's start off with a FLASH dataset containing only two particles in a mutual circular orbit. We can get the list of filenames this way:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "my_fns = glob.glob(path+\"/Orbit/orbit_hdf5_chk_00[0-9][0-9]\")\n",
+    "my_fns.sort()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And let's define a list of fields that we want to include in the trajectories. The position fields will be included by default, so let's just ask for the velocity fields:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "fields = [\"particle_velocity_x\", \"particle_velocity_y\", \"particle_velocity_z\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "There are only two particles, but for consistency's sake let's grab their indices from the dataset itself:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds = yt.load(my_fns[0])\n",
+    "dd = ds.all_data()\n",
+    "indices = dd[\"particle_index\"].astype(\"int\")\n",
+    "print (indices)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "which is what we expected them to be. Now we're ready to create a `DatasetSeries` object and use it to create particle trajectories: "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "# suppress_logging=True cuts down on a lot of noise\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `ParticleTrajectories` object `trajs` is essentially a dictionary-like container for the particle fields along the trajectory, and can be accessed as such:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (trajs[\"particle_position_x\"])\n",
+    "print (trajs[\"particle_position_x\"].shape)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that each field is a 2D NumPy array with the different particle indices along the first dimension and the times along the second dimension. As such, we can access them individually by indexing the field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
+    "plt.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "And we can plot the velocity fields as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
+    "plt.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If we want to access the time along the trajectory, we use the key `\"particle_time\"`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Alternatively, if we know the particle index we'd like to examine, we can get an individual trajectory corresponding to that index:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "particle1 = trajs.trajectory_from_index(1)\n",
+    "plt.figure(figsize=(6, 6))\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
+    "plt.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now let's look at a more complicated (and fun!) example. We'll use an Enzo cosmology dataset. First, we'll find the maximum density in the domain, and obtain the indices of the particles within some radius of the center. First, let's have a look at what we're getting:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+    "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "So far, so good--it looks like we've centered on a galaxy cluster. Let's grab all of the dark matter particles within a sphere of 0.5 Mpc (identified by `\"particle_type == 1\"`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
+    "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next we'll get the list of datasets we want, and create trajectories for these particles:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
+    "my_fns.sort()\n",
+    "ts = yt.DatasetSeries(my_fns)\n",
+    "trajs = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Matplotlib can make 3D plots, so let's pick three particle trajectories at random and look at them in the volume:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "fig = plt.figure(figsize=(8.0, 8.0))\n",
+    "ax = fig.add_subplot(111, projection='3d')\n",
+    "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
+    "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
+    "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "It looks like these three different particles fell into the cluster along different filaments. We can also look at their x-positions only as a function of time:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "trajs.add_fields([\"density\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "plt.figure(figsize=(6,6))\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][100])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][8])\n",
+    "plt.plot(trajs[\"particle_time\"], trajs[\"density\"][25])\n",
+    "plt.yscale(\"log\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, the particle trajectories can be written to disk. Two options are provided: ASCII text files with a column for each field and the time, and HDF5 files:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
+    "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [default]",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ /dev/null
@@ -1,385 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The `particle_trajectories` analysis module enables the construction of particle trajectories from a time series of datasets for a specified list of particles identified by their unique indices. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "%matplotlib inline\n",
-    "import yt\n",
-    "import glob\n",
-    "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
-    "from yt.config import ytcfg\n",
-    "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
-    "import matplotlib.pyplot as plt"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, let's start off with a FLASH dataset containing only two particles in a mutual circular orbit. We can get the list of filenames this way:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "my_fns = glob.glob(path+\"/Orbit/orbit_hdf5_chk_00[0-9][0-9]\")\n",
-    "my_fns.sort()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "And let's define a list of fields that we want to include in the trajectories. The position fields will be included by default, so let's just ask for the velocity fields:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "fields = [\"particle_velocity_x\", \"particle_velocity_y\", \"particle_velocity_z\"]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "There are only two particles, but for consistency's sake let's grab their indices from the dataset itself:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ds = yt.load(my_fns[0])\n",
-    "dd = ds.all_data()\n",
-    "indices = dd[\"particle_index\"].astype(\"int\")\n",
-    "print (indices)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "which is what we expected them to be. Now we're ready to create a `ParticleTrajectories` object:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs = ParticleTrajectories(my_fns, indices, fields=fields)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The `ParticleTrajectories` object `trajs` is essentially a dictionary-like container for the particle fields along the trajectory, and can be accessed as such:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "print (trajs[\"particle_position_x\"])\n",
-    "print (trajs[\"particle_position_x\"].shape)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Note that each field is a 2D NumPy array with the different particle indices along the first dimension and the times along the second dimension. As such, we can access them individually by indexing the field:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_position_x\"][0].ndarray_view(), trajs[\"particle_position_y\"][0].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_position_x\"][1].ndarray_view(), trajs[\"particle_position_y\"][1].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "And we can plot the velocity fields as well:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_velocity_x\"][0].ndarray_view(), trajs[\"particle_velocity_y\"][0].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_velocity_x\"][1].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "If we want to access the time along the trajectory, we use the key `\"particle_time\"`:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_x\"][1].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Alternatively, if we know the particle index we'd like to examine, we can get an individual trajectory corresponding to that index:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "particle1 = trajs.trajectory_from_index(1)\n",
-    "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_x\"].ndarray_view())\n",
-    "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_y\"].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now let's look at a more complicated (and fun!) example. We'll use an Enzo cosmology dataset. First, we'll find the maximum density in the domain, and obtain the indices of the particles within some radius of the center. First, let's have a look at what we're getting:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-    "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
-    "slc.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "So far, so good--it looks like we've centered on a galaxy cluster. Let's grab all of the dark matter particles within a sphere of 0.5 Mpc (identified by `\"particle_type == 1\"`):"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
-    "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next we'll get the list of datasets we want, and create trajectories for these particles:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
-    "my_fns.sort()\n",
-    "trajs = ParticleTrajectories(my_fns, indices)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Matplotlib can make 3D plots, so let's pick three particle trajectories at random and look at them in the volume:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from mpl_toolkits.mplot3d import Axes3D\n",
-    "fig = plt.figure(figsize=(8.0, 8.0))\n",
-    "ax = fig.add_subplot(111, projection='3d')\n",
-    "ax.plot(trajs[\"particle_position_x\"][100].ndarray_view(), trajs[\"particle_position_z\"][100].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][100].ndarray_view())\n",
-    "ax.plot(trajs[\"particle_position_x\"][8].ndarray_view(), trajs[\"particle_position_z\"][8].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][8].ndarray_view())\n",
-    "ax.plot(trajs[\"particle_position_x\"][25].ndarray_view(), trajs[\"particle_position_z\"][25].ndarray_view(), \n",
-    "        trajs[\"particle_position_z\"][25].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "It looks like these three different particles fell into the cluster along different filaments. We can also look at their x-positions only as a function of time:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][100].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][8].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][25].ndarray_view())"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs.add_fields([\"density\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][100].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][8].ndarray_view())\n",
-    "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][25].ndarray_view())\n",
-    "plt.yscale(\"log\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, the particle trajectories can be written to disk. Two options are provided: ASCII text files with a column for each field and the time, and HDF5 files:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
-    "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.5.1"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

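For reference, the relocated notebook boils down to the following workflow under the new API. A minimal sketch, assuming the Orbit test data from yt's test_data_dir (paths are illustrative):

    import glob
    import yt

    # Time-sorted list of FLASH checkpoint files (illustrative path).
    my_fns = sorted(glob.glob("Orbit/orbit_hdf5_chk_00[0-9][0-9]"))

    # Pull the particle indices from the first output.
    ds = yt.load(my_fns[0])
    indices = ds.all_data()["particle_index"].astype("int")

    # Trajectories now hang off DatasetSeries rather than the analysis module.
    ts = yt.DatasetSeries(my_fns)
    trajs = ts.particle_trajectories(
        indices,
        fields=["particle_velocity_x", "particle_velocity_y"],
        suppress_logging=True,  # cuts down on per-dataset log noise
    )

    # Each field is a 2D array with shape (num_particles, num_steps).
    print(trajs["particle_position_x"].shape)
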
diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -18,4 +18,3 @@
    exporting
    two_point_functions
    clump_finding
-   particle_trajectories

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/analyzing/analysis_modules/particle_trajectories.rst
--- a/doc/source/analyzing/analysis_modules/particle_trajectories.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _particle-trajectories:
-
-Particle Trajectories
----------------------
-
-.. notebook:: Particle_Trajectories.ipynb

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -22,4 +22,5 @@
    generating_processed_data
    saving_data
    time_series_analysis
+   particle_trajectories
    parallel_computation

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/analyzing/particle_trajectories.rst
--- /dev/null
+++ b/doc/source/analyzing/particle_trajectories.rst
@@ -0,0 +1,6 @@
+.. _particle-trajectories:
+
+Particle Trajectories
+---------------------
+
+.. notebook:: Particle_Trajectories.ipynb

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -130,6 +130,7 @@
    ~yt.data_objects.time_series.DatasetSeriesObject
    ~yt.data_objects.time_series.TimeSeriesQuantitiesContainer
    ~yt.data_objects.time_series.AnalysisTaskProxy
+   ~yt.data_objects.particle_trajectories.ParticleTrajectories
 
 Geometry Handlers
 -----------------

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -104,6 +104,9 @@
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
 
+  local_particle_trajectory_000:
+    - yt/data_objects/tests/test_particle_trajectories.py
+
 other_tests:
   unittests:
      - '--exclude=test_mesh_slices'  # disable randomly failing test

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b yt/analysis_modules/particle_trajectories/api.py
--- a/yt/analysis_modules/particle_trajectories/api.py
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -1,13 +1,6 @@
-"""
-API for particle_trajectories
-"""
-from __future__ import absolute_import
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
+from yt.funcs import issue_deprecation_warning
 
-from .particle_trajectories import ParticleTrajectories
+issue_deprecation_warning("Particle trajectories are now available from DatasetSeries "
+                          "objects as ts.particle_trajectories. The ParticleTrajectories "
+                          "analysis module is deprecated.")
+from yt.data_objects.particle_trajectories import ParticleTrajectories
\ No newline at end of file

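The shim above keeps the old import path alive while steering users toward the new one. A rough sketch of the transition, as seen from user code (my_fns and indices as in the notebook):

    # Old path: still importable, but now emits the deprecation warning above.
    from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    # New home of the class:
    from yt.data_objects.particle_trajectories import ParticleTrajectories

    # Preferred spelling going forward:
    import yt
    ts = yt.DatasetSeries(my_fns)
    trajs = ts.particle_trajectories(indices)
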
diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Particle trajectories
-"""
-from __future__ import print_function
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_data import YTFieldData
-from yt.data_objects.time_series import DatasetSeries
-from yt.utilities.lib.particle_mesh_operations import CICSample_3
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_root_only
-from yt.funcs import mylog, get_pbar
-from yt.units.yt_array import array_like_field
-from yt.config import ytcfg
-from collections import OrderedDict
-
-import numpy as np
-from yt.utilities.on_demand_imports import _h5py as h5py
-
-class ParticleTrajectories(object):
-    r"""A collection of particle trajectories in time over a series of
-    datasets. 
-
-    The ParticleTrajectories object contains a collection of
-    particle trajectories for a specified set of particle indices. 
-    
-    Parameters
-    ----------
-    outputs : `yt.data_objects.time_series.DatasetSeries` or list of strings
-        DatasetSeries object, or a time-sorted list of filenames to
-        construct a new DatasetSeries object.
-    indices : array_like
-        An integer array of particle indices whose trajectories we
-        want to track. If they are not sorted they will be sorted.
-    fields : list of strings, optional
-        A set of fields that is retrieved when the trajectory
-        collection is instantiated.
-        Default : None (will default to the fields 'particle_position_x',
-        'particle_position_y', 'particle_position_z')
-    suppress_logging : boolean
-        Suppress yt's logging when iterating over the simulation time
-        series.
-        Default : False
-
-    Examples
-    --------
-    >>> from yt.mods import *
-    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
-    >>> my_fns.sort()
-    >>> fields = ["particle_position_x", "particle_position_y",
-    >>>           "particle_position_z", "particle_velocity_x",
-    >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> ds = load(my_fns[0])
-    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
-    >>> indices = init_sphere["particle_index"].astype("int")
-    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
-    >>> for t in trajs :
-    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-    """
-    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
-
-        indices.sort() # Just in case the caller wasn't careful
-        self.field_data = YTFieldData()
-        if isinstance(outputs, DatasetSeries):
-            self.data_series = outputs
-        else:
-            self.data_series = DatasetSeries(outputs)
-        self.masks = []
-        self.sorts = []
-        self.array_indices = []
-        self.indices = indices
-        self.num_indices = len(indices)
-        self.num_steps = len(outputs)
-        self.times = []
-        self.suppress_logging = suppress_logging
-
-        if fields is None: fields = []
-        fields = list(OrderedDict.fromkeys(fields))
-
-        if self.suppress_logging:
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
-        
-        fds = {}
-        ds_first = self.data_series[0]
-        dd_first = ds_first.all_data()
-        idx_field = dd_first._determine_fields("particle_index")[0]
-        for field in ("particle_position_%s" % ax for ax in "xyz"):
-            fds[field] = dd_first._determine_fields(field)[0]
-
-        my_storage = {}
-        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
-        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-            dd = ds.all_data()
-            newtags = dd[idx_field].d.astype("int64")
-            mask = np.in1d(newtags, indices, assume_unique=True)
-            sort = np.argsort(newtags[mask])
-            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
-            self.array_indices.append(array_indices)
-            self.masks.append(mask)
-            self.sorts.append(sort)
-
-            pfields = {}
-            for field in ("particle_position_%s" % ax for ax in "xyz"):
-                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
-
-            sto.result_id = ds.parameter_filename
-            sto.result = (ds.current_time, array_indices, pfields)
-            pbar.update(i)
-        pbar.finish()
-
-        if self.suppress_logging:
-            mylog.setLevel(old_level)
-
-        times = []
-        for fn, (time, indices, pfields) in sorted(my_storage.items()):
-            times.append(time)
-        self.times = self.data_series[0].arr([time for time in times], times[0].units)
-
-        self.particle_fields = []
-        output_field = np.empty((self.num_indices, self.num_steps))
-        output_field.fill(np.nan)
-        for field in ("particle_position_%s" % ax for ax in "xyz"):
-            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
-                output_field[indices, i] = pfields[field]
-            self.field_data[field] = array_like_field(
-                dd_first, output_field.copy(), fds[field])
-            self.particle_fields.append(field)
-
-        # Instantiate fields the caller requested
-        self._get_data(fields)
-
-    def has_key(self, key):
-        return (key in self.field_data)
-    
-    def keys(self):
-        return self.field_data.keys()
-
-    def __getitem__(self, key):
-        """
-        Get the field associated with key.
-        """
-        if key == "particle_time":
-            return self.times
-        if key not in self.field_data:
-            self._get_data([key])
-        return self.field_data[key]
-    
-    def __setitem__(self, key, val):
-        """
-        Sets a field to be some other value.
-        """
-        self.field_data[key] = val
-                        
-    def __delitem__(self, key):
-        """
-        Delete the field from the trajectory
-        """
-        del self.field_data[key]
-
-    def __iter__(self):
-        """
-        This iterates over the trajectories for
-        the different particles, returning dicts
-        of fields for each trajectory
-        """
-        for idx in range(self.num_indices):
-            traj = {}
-            traj["particle_index"] = self.indices[idx]
-            traj["particle_time"] = self.times
-            for field in self.field_data.keys():
-                traj[field] = self[field][idx,:]
-            yield traj
-            
-    def __len__(self):
-        """
-        The number of individual trajectories
-        """
-        return self.num_indices
-
-    def add_fields(self, fields):
-        """
-        Add a list of fields to an existing trajectory
-
-        Parameters
-        ----------
-        fields : list of strings
-            A list of fields to be added to the current trajectory
-            collection.
-
-        Examples
-        --------
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
-        """
-        self._get_data(fields)
-                
-    def _get_data(self, fields):
-        """
-        Get a list of fields to include in the trajectory collection.
-        The trajectory collection itself is a dict of 2D numpy arrays,
-        with shape (num_indices, num_steps)
-        """
-
-        missing_fields = [field for field in fields
-                          if field not in self.field_data]
-        if not missing_fields:
-            return
-
-        if self.suppress_logging:
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
-        ds_first = self.data_series[0]
-        dd_first = ds_first.all_data()
-
-        fds = {}
-        new_particle_fields = []
-        for field in missing_fields:
-            fds[field] = dd_first._determine_fields(field)[0]
-            if field not in self.particle_fields:
-                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
-                    self.particle_fields.append(field)
-                    new_particle_fields.append(field)
-                    
-
-        grid_fields = [field for field in missing_fields
-                       if field not in self.particle_fields]
-        step = int(0)
-        pbar = get_pbar("Generating [%s] fields in trajectories." %
-                        ", ".join(missing_fields), self.num_steps)
-        my_storage = {}
-        
-        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-            mask = self.masks[i]
-            sort = self.sorts[i]
-            pfield = {}
-
-            if new_particle_fields:  # there's at least one particle field
-                dd = ds.all_data()
-                for field in new_particle_fields:
-                    # This is easy... just get the particle fields
-                    pfield[field] = dd[fds[field]].d[mask][sort]
-
-            if grid_fields:
-                # This is hard... must loop over grids
-                for field in grid_fields:
-                    pfield[field] = np.zeros(self.num_indices)
-                x = self["particle_position_x"][:,step].d
-                y = self["particle_position_y"][:,step].d
-                z = self["particle_position_z"][:,step].d
-                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
-
-                # This will fail for non-grid index objects
-                for grid in particle_grids:
-                    cube = grid.retrieve_ghost_zones(1, grid_fields)
-                    for field in grid_fields:
-                        CICSample_3(x, y, z, pfield[field],
-                                    self.num_indices,
-                                    cube[fds[field]],
-                                    np.array(grid.LeftEdge).astype(np.float64),
-                                    np.array(grid.ActiveDimensions).astype(np.int32),
-                                    grid.dds[0])
-            sto.result_id = ds.parameter_filename
-            sto.result = (self.array_indices[i], pfield)
-            pbar.update(step)
-            step += 1
-        pbar.finish()
-
-        output_field = np.empty((self.num_indices,self.num_steps))
-        output_field.fill(np.nan)
-        for field in missing_fields:
-            fd = fds[field]
-            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
-                output_field[indices, i] = pfield[field]
-            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
-
-        if self.suppress_logging:
-            mylog.setLevel(old_level)
-
-    def trajectory_from_index(self, index):
-        """
-        Retrieve a single trajectory corresponding to a specific particle
-        index
-
-        Parameters
-        ----------
-        index : int
-            This defines which particle trajectory from the
-            ParticleTrajectories object will be returned.
-
-        Returns
-        -------
-        A dictionary corresponding to the particle's trajectory and the
-        fields along that trajectory
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> import matplotlib.pylab as pl
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> traj = trajs.trajectory_from_index(indices[0])
-        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
-        >>> pl.savefig("orbit")
-        """
-        mask = np.in1d(self.indices, (index,), assume_unique=True)
-        if not np.any(mask):
-            print("The particle index %d is not in the list!" % (index))
-            raise IndexError
-        fields = [field for field in sorted(self.field_data.keys())]
-        traj = {}
-        traj["particle_time"] = self.times
-        traj["particle_index"] = index
-        for field in fields:
-            traj[field] = self[field][mask,:][0]
-        return traj
-
-    @parallel_root_only
-    def write_out(self, filename_base):
-        """
-        Write out particle trajectories to tab-separated ASCII files (one
-        for each trajectory) with the field names in the file header. Each
-        file is named with a basename and the index number.
-
-        Parameters
-        ----------
-        filename_base : string
-            The prefix for the output ASCII files.
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.write_out("orbit_trajectory")       
-        """
-        fields = [field for field in sorted(self.field_data.keys())]
-        num_fields = len(fields)
-        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
-        template_str = "%g\t"*num_fields+"%g\n"
-        for ix in range(self.num_indices):
-            outlines = [first_str]
-            for it in range(self.num_steps):
-                outlines.append(template_str %
-                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
-            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
-            fid.writelines(outlines)
-            fid.close()
-            del fid
-
-    @parallel_root_only
-    def write_out_h5(self, filename):
-        """
-        Write out all the particle trajectories to a single HDF5 file
-        that contains the indices, the times, and the 2D array for each
-        field individually
-
-        Parameters
-        ----------
-
-        filename : string
-            The output filename for the HDF5 file
-
-        Examples
-        --------
-
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectories(my_fns, indices)
-        >>> trajs.write_out_h5("orbit_trajectories")                
-        """
-        fid = h5py.File(filename, "w")
-        fid.create_dataset("particle_indices", dtype=np.int64,
-                           data=self.indices)
-        fid.close()
-        self.times.write_hdf5(filename, dataset_name="particle_times")
-        fields = [field for field in sorted(self.field_data.keys())]
-        for field in fields:
-            self[field].write_hdf5(filename, dataset_name="%s" % field)

diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b yt/data_objects/particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/particle_trajectories.py
@@ -0,0 +1,371 @@
+"""
+Particle trajectories
+"""
+from __future__ import print_function
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_data import YTFieldData
+from yt.utilities.lib.particle_mesh_operations import CICSample_3
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.funcs import mylog, get_pbar
+from yt.units.yt_array import array_like_field
+from yt.config import ytcfg
+from collections import OrderedDict
+
+import numpy as np
+from yt.utilities.on_demand_imports import _h5py as h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    datasets. 
+
+    Parameters
+    ----------
+    outputs : `yt.data_objects.time_series.DatasetSeries`
+        DatasetSeries object from which to draw the particles.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated. Default: None (will default
+        to the fields 'particle_position_x', 'particle_position_y',
+        'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series. Default: False
+
+    Examples
+    --------
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> ds = load(my_fns[0])
+    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> ts = DatasetSeries(my_fns)
+    >>> trajs = ts.particle_trajectories(indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+    """
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
+
+        indices.sort() # Just in case the caller wasn't careful
+        self.field_data = YTFieldData()
+        self.data_series = outputs
+        self.masks = []
+        self.sorts = []
+        self.array_indices = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(outputs)
+        self.times = []
+        self.suppress_logging = suppress_logging
+
+        if fields is None: fields = []
+        fields = list(OrderedDict.fromkeys(fields))
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        
+        fds = {}
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+        idx_field = dd_first._determine_fields("particle_index")[0]
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            fds[field] = dd_first._determine_fields(field)[0]
+
+        my_storage = {}
+        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            dd = ds.all_data()
+            newtags = dd[idx_field].d.astype("int64")
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sort = np.argsort(newtags[mask])
+            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
+            self.array_indices.append(array_indices)
+            self.masks.append(mask)
+            self.sorts.append(sort)
+
+            pfields = {}
+            for field in ("particle_position_%s" % ax for ax in "xyz"):
+                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
+            sto.result_id = ds.parameter_filename
+            sto.result = (ds.current_time, array_indices, pfields)
+            pbar.update(i)
+        pbar.finish()
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
+
+        times = []
+        for fn, (time, indices, pfields) in sorted(my_storage.items()):
+            times.append(time)
+        self.times = self.data_series[0].arr([time for time in times], times[0].units)
+
+        self.particle_fields = []
+        output_field = np.empty((self.num_indices, self.num_steps))
+        output_field.fill(np.nan)
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfields[field]
+            self.field_data[field] = array_like_field(
+                dd_first, output_field.copy(), fds[field])
+            self.particle_fields.append(field)
+
+        # Instantiate fields the caller requested
+        self._get_data(fields)
+
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key.
+        """
+        if key == "particle_time":
+            return self.times
+        if key not in self.field_data:
+            self._get_data([key])
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in range(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        self._get_data(fields)
+                
+    def _get_data(self, fields):
+        """
+        Get a list of fields to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+
+        missing_fields = [field for field in fields
+                          if field not in self.field_data]
+        if not missing_fields:
+            return
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+
+        fds = {}
+        new_particle_fields = []
+        for field in missing_fields:
+            fds[field] = dd_first._determine_fields(field)[0]
+            if field not in self.particle_fields:
+                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
+                    self.particle_fields.append(field)
+                    new_particle_fields.append(field)
+                    
+
+        grid_fields = [field for field in missing_fields
+                       if field not in self.particle_fields]
+        step = int(0)
+        pbar = get_pbar("Generating [%s] fields in trajectories" %
+                        ", ".join(missing_fields), self.num_steps)
+        my_storage = {}
+        
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            mask = self.masks[i]
+            sort = self.sorts[i]
+            pfield = {}
+
+            if new_particle_fields:  # there's at least one particle field
+                dd = ds.all_data()
+                for field in new_particle_fields:
+                    # This is easy... just get the particle fields
+                    pfield[field] = dd[fds[field]].d[mask][sort]
+
+            if grid_fields:
+                # This is hard... must loop over grids
+                for field in grid_fields:
+                    pfield[field] = np.zeros(self.num_indices)
+                x = self["particle_position_x"][:,step].d
+                y = self["particle_position_y"][:,step].d
+                z = self["particle_position_z"][:,step].d
+                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
+
+                # This will fail for non-grid index objects
+                for grid in particle_grids:
+                    cube = grid.retrieve_ghost_zones(1, grid_fields)
+                    for field in grid_fields:
+                        CICSample_3(x, y, z, pfield[field],
+                                    self.num_indices,
+                                    cube[fds[field]],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    grid.dds[0])
+            sto.result_id = ds.parameter_filename
+            sto.result = (self.array_indices[i], pfield)
+            pbar.update(step)
+            step += 1
+        pbar.finish()
+
+        output_field = np.empty((self.num_indices,self.num_steps))
+        output_field.fill(np.nan)
+        for field in missing_fields:
+            fd = fds[field]
+            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfield[field]
+            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print("The particle index %d is not in the list!" % (index))
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    @parallel_root_only
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in range(self.num_indices):
+            outlines = [first_str]
+            for it in range(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+
+    @parallel_root_only
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fid.create_dataset("particle_indices", dtype=np.int64,
+                           data=self.indices)
+        fid.close()
+        self.times.write_hdf5(filename, dataset_name="particle_times")
+        fields = [field for field in sorted(self.field_data.keys())]
+        for field in fields:
+            self[field].write_hdf5(filename, dataset_name="%s" % field)

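Since the class implements __getitem__, __iter__, and __len__, a trajectory collection behaves like a read-only dict of (num_indices, num_steps) arrays. A short sketch, assuming trajs was built as in the docstring example and that the datasets carry a gas "density" field:

    print(len(trajs))                          # number of particle trajectories
    print(trajs["particle_time"])              # 1D array of output times
    print(trajs["particle_position_x"].shape)  # (num_indices, num_steps)

    # Fields not requested up front are fetched lazily on first access;
    # grid fields are CIC-interpolated to the particle positions.
    rho = trajs["density"]

    # Iteration yields one dict per particle, keyed by field name.
    for traj in trajs:
        print(traj["particle_index"], traj["density"].max())
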
diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b yt/data_objects/tests/test_particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/tests/test_particle_trajectories.py
@@ -0,0 +1,45 @@
+import glob
+import os
+
+from yt.config import ytcfg
+from yt.data_objects.time_series import DatasetSeries
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    GenericArrayTest
+
+def setup():
+    ytcfg["yt","__withintesting"] = "True"
+
+data_path = ytcfg.get("yt", "test_data_dir")
+
+pfields = ["particle_position_x", "particle_position_y", "particle_position_z"]
+vfields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+
+@requires_ds("Orbit/orbit_hdf5_chk_0000")
+def test_orbit_traj():
+    fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+    my_fns = glob.glob(os.path.join(data_path, "Orbit/orbit_hdf5_chk_00[0-9][0-9]"))
+    my_fns.sort()
+    ts = DatasetSeries(my_fns)
+    ds = ts[0]
+    traj = ts.particle_trajectories([1, 2], fields=fields, suppress_logging=True)
+    for field in pfields+vfields:
+        def field_func(name):
+            return traj[name]
+        yield GenericArrayTest(ds, field_func, args=[field])
+
+@requires_ds("enzo_tiny_cosmology/DD0000/DD0000")
+def test_etc_traj():
+    fields = ["particle_velocity_x", "particle_velocity_y", "particle_velocity_z"]
+    my_fns = glob.glob(os.path.join(data_path, "enzo_tiny_cosmology/DD000[0-9]/*.hierarchy"))
+    my_fns.sort()
+    ts = DatasetSeries(my_fns)
+    ds = ts[0]
+    sp = ds.sphere("max", (0.5, "Mpc"))
+    indices = sp["particle_index"][sp["particle_type"] == 1][:5]
+    traj = ts.particle_trajectories(indices, fields=fields, suppress_logging=True)
+    traj.add_fields(["density"])
+    for field in pfields+vfields+["density"]:
+        def field_func(name):
+            return traj[name]
+        yield GenericArrayTest(ds, field_func, args=[field])

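Outside the answer-testing framework, the invariant these tests exercise can be sketched as a plain shape check (assuming the Orbit data is present, with my_fns, pfields, and vfields as above):

    ts = DatasetSeries(my_fns)
    traj = ts.particle_trajectories([1, 2], fields=vfields, suppress_logging=True)
    for field in pfields + vfields:
        # Two tracked particles, one column per dataset in the series.
        assert traj[field].shape == (2, len(my_fns))
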
diff -r 13c1ee22f924f988c6dfc68c9b64d6e9acdebfd2 -r 441ff5e2dcd342526152309d260e912300fdcd3b yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -32,6 +32,8 @@
     create_quantity_proxy, \
     analysis_task_registry, \
     AnalysisTask
+from yt.data_objects.particle_trajectories import \
+    ParticleTrajectories
 from yt.funcs import \
     iterable, \
     ensure_list, \
@@ -399,6 +401,41 @@
         self._dataset_cls = ds.__class__
         return ds
 
+    def particle_trajectories(self, indices, fields=None, suppress_logging=False):
+        r"""Create a collection of particle trajectories in time over a series of
+        datasets.
+
+        Parameters
+        ----------
+        indices : array_like
+            An integer array of particle indices whose trajectories we
+            want to track. If they are not sorted they will be sorted.
+        fields : list of strings, optional
+            A set of fields that is retrieved when the trajectory
+            collection is instantiated. Default: None (will default
+            to the fields 'particle_position_x', 'particle_position_y',
+            'particle_position_z')
+        suppress_logging : boolean
+            Suppress yt's logging when iterating over the simulation time
+            series. Default: False
+
+        Examples
+        --------
+        >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+        >>> my_fns.sort()
+        >>> fields = ["particle_position_x", "particle_position_y",
+        >>>           "particle_position_z", "particle_velocity_x",
+        >>>           "particle_velocity_y", "particle_velocity_z"]
+        >>> ds = load(my_fns[0])
+        >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
+        >>> indices = init_sphere["particle_index"].astype("int")
+        >>> ts = DatasetSeries(my_fns)
+        >>> trajs = ts.particle_trajectories(indices, fields=fields)
+        >>> for t in trajs :
+        >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+        """
+        return ParticleTrajectories(self, indices, fields=fields, suppress_logging=suppress_logging)
+
 class TimeSeriesQuantitiesContainer(object):
     def __init__(self, data_object, quantities):
         self.data_object = data_object

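To round out the picture, the two write-out paths defined on ParticleTrajectories, plus reading the HDF5 file back with h5py; a sketch, assuming trajs exists (dataset names follow the write_out_h5 code above):

    trajs.write_out("halo_trajectories")        # one ASCII file per particle index
    trajs.write_out_h5("halo_trajectories.h5")  # all trajectories in one HDF5 file

    import h5py
    with h5py.File("halo_trajectories.h5", "r") as f:
        # 'particle_indices', 'particle_times', plus one dataset per field.
        print(list(f.keys()))
        times = f["particle_times"][:]
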
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
