[yt-svn] commit/yt: 5 new changesets

commits-noreply at bitbucket.org
Thu Oct 31 04:56:41 PDT 2013


5 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3f2e610678d3/
Changeset:   3f2e610678d3
Branch:      yt
User:        jzuhone
Date:        2013-10-30 06:54:19
Summary:     Moving particle_trajectories to analysis_modules.
Affected #:  7 files

diff -r c144fe080f8c3011e7e49d8e2d82557467bd459e -r 3f2e610678d3fa7321ec26ce387a654759f7c0db yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,5 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectoryColleciton

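The hunk above re-exports the new class from the top-level analysis_modules API, but misspells it: the class defined in this changeset is ParticleTrajectoryCollection, so importing yt.analysis_modules.api would raise an ImportError until the next changeset renames everything to ParticleTrajectories. As intended here, usage would look like the sketch below (not part of the commit):

    >>> from yt.analysis_modules.api import ParticleTrajectoryCollection
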
diff -r c144fe080f8c3011e7e49d8e2d82557467bd459e -r 3f2e610678d3fa7321ec26ce387a654759f7c0db yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectoryCollection

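Note that this api.py uses a Python 2 implicit relative import. The explicit dotted form already used by analysis_modules/api.py above would work equally well and survives a later move to absolute imports — a sketch, not part of the commit:

    from .particle_trajectories import ParticleTrajectoryCollection
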
diff -r c144fe080f8c3011e7e49d8e2d82557467bd459e -r 3f2e610678d3fa7321ec26ce387a654759f7c0db yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,324 @@
+"""
+Particle trajectories
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import sample_field_at_positions
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectoryCollection(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectoryCollection object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a member must end
+    at or before the particle's last timestep. This is a limitation we hope to
+    lift at some point in the future.     
+    """
+    def __init__(self, filenames, indices, fields=None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                print "Not all requested particle ids contained in this dataset!"
+                raise IndexError
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first copy the field over to the particle positions
+        # and then return the field. 
+
+        self.derived_field_list = self.pfs[0].h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if self.pfs[0].field_info[field].particle_type]
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if not self.field_data.has_key(key) :
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if not self.field_data.has_key(field):
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if not self.field_data.has_key(field):
+            particles = np.empty((0))
+            step = int(0)
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    leaf_grids = [g for g in pf.h.grids if len(g.Children) == 0]
+                    for grid in leaf_grids:
+                        pfield += sample_field_at_positions(grid[field],
+                                                            grid.LeftEdge,
+                                                            grid.RightEdge,
+                                                            x, y, z)
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectoryCollection object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = [field for field in sorted(self.field_data.keys())]
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset("%s" % field, data=self[field])
+        fid.close()

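As the _get_data docstring notes, each stored field is a 2D array: the per-step samples are appended in time order, reshaped to (num_steps, num_indices), and transposed, so self[field] has shape (num_indices, num_steps) and __iter__ hands each particle its own row. A schematic of the resulting indexing, assuming three tracked particles and four parameter files:

    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
    >>> trajs["particle_position_x"].shape  # (num_indices, num_steps)
    (3, 4)
    >>> trajs["particle_position_x"][0, :]  # first particle through time
    >>> trajs["particle_position_x"][:, -1] # all particles at the final output
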
diff -r c144fe080f8c3011e7e49d8e2d82557467bd459e -r 3f2e610678d3fa7321ec26ce387a654759f7c0db yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

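configuration() above only defines the subpackage; it takes effect when the parent package registers it at build time. A minimal sketch of the corresponding line (assumed here, not shown in this diff) in yt/analysis_modules/setup.py:

    config.add_subpackage("particle_trajectories")
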
diff -r c144fe080f8c3011e7e49d8e2d82557467bd459e -r 3f2e610678d3fa7321ec26ce387a654759f7c0db yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -1,8 +1,5 @@
 """
 API for yt.data_objects
-
-
-
 """
 
 #-----------------------------------------------------------------------------
@@ -72,5 +69,3 @@
     add_grad, \
     derived_field
 
-from particle_trajectories import \
-    ParticleTrajectoryCollection

diff -r c144fe080f8c3011e7e49d8e2d82557467bd459e -r 3f2e610678d3fa7321ec26ce387a654759f7c0db yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ /dev/null
@@ -1,380 +0,0 @@
-"""
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.data_containers import YTFieldData
-from yt.data_objects.time_series import TimeSeriesData
-from yt.utilities.lib import sample_field_at_positions
-from yt.funcs import *
-
-import numpy as np
-import h5py
-
-class ParticleTrajectoryCollection(object) :
-
-    r"""A collection of particle trajectories in time over a series of
-    parameter files. 
-
-    The ParticleTrajectoryCollection object contains a collection of
-    particle trajectories for a specified set of particle indices. 
-    
-    Parameters
-    ----------
-    filenames : list of strings
-        A time-sorted list of filenames to construct the TimeSeriesData
-        object.
-    indices : array_like
-        An integer array of particle indices whose trajectories we
-        want to track. If they are not sorted they will be sorted.
-    fields : list of strings, optional
-        A set of fields that is retrieved when the trajectory
-        collection is instantiated.
-        Default : None (will default to the fields 'particle_position_x',
-        'particle_position_y', 'particle_position_z')
-
-    Examples
-    --------
-    >>> from yt.mods import *
-    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
-    >>> my_fns.sort()
-    >>> fields = ["particle_position_x", "particle_position_y",
-    >>>           "particle_position_z", "particle_velocity_x",
-    >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> pf = load(my_fns[0])
-    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
-    >>> indices = init_sphere["particle_index"].astype("int")
-    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
-    >>> for t in trajs :
-    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-
-    Notes
-    -----
-    As of this time only particle trajectories that are complete over the
-    set of specified parameter files are supported. If any particle's history
-    ends for some reason (e.g. leaving the simulation domain or being actively
-    destroyed), the whole trajectory collection of which it is a member must end
-    at or before the particle's last timestep. This is a limitation we hope to
-    lift at some point in the future.     
-    """
-    def __init__(self, filenames, indices, fields = None) :
-
-        indices.sort() # Just in case the caller wasn't careful
-        
-        self.field_data = YTFieldData()
-        self.pfs = TimeSeriesData.from_filenames(filenames)
-        self.masks = []
-        self.sorts = []
-        self.indices = indices
-        self.num_indices = len(indices)
-        self.num_steps = len(filenames)
-        self.times = []
-
-        # Default fields 
-        
-        if fields is None : fields = []
-
-        # Must ALWAYS have these fields
-        
-        fields = fields + ["particle_position_x",
-                           "particle_position_y",
-                           "particle_position_z"]
-
-        """
-        The following loops through the parameter files
-        and performs two tasks. The first is to isolate
-        the particles with the correct indices, and the
-        second is to create a sorted list of these particles.
-        We also make a list of the current time from each file. 
-        Right now, the code assumes (and checks for) the
-        particle indices existing in each file, a limitation I
-        would like to lift at some point since some codes
-        (e.g., FLASH) destroy particles leaving the domain.
-        """
-        
-        for pf in self.pfs :
-            dd = pf.h.all_data()
-            newtags = dd["particle_index"].astype("int")
-            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
-                print "Not all requested particle ids contained in this file!"
-                raise IndexError
-            mask = np.in1d(newtags, indices, assume_unique=True)
-            sorts = np.argsort(newtags[mask])
-            self.masks.append(mask)            
-            self.sorts.append(sorts)
-            self.times.append(pf.current_time)
-
-        self.times = np.array(self.times)
-
-        # Set up the derived field list and the particle field list
-        # so that if the requested field is a particle field, we'll
-        # just copy the field over, but if the field is a grid field,
-        # we will first copy the field over to the particle positions
-        # and then return the field. 
-
-        self.derived_field_list = self.pfs[0].h.derived_field_list
-        self.particle_fields = [field for field in self.derived_field_list
-                                if self.pfs[0].field_info[field].particle_type]
-
-        # Now instantiate the requested fields 
-        for field in fields :
-
-            self._get_data(field)
-            
-    def has_key(self, key) :
-
-        return (key in self.field_data)
-    
-    def keys(self) :
-
-        return self.field_data.keys()
-
-    def __getitem__(self, key) :
-        """
-        Get the field associated with key,
-        checking to make sure it is a particle field.
-        """
-
-        if not self.field_data.has_key(key) :
-
-            self._get_data(key)
-
-        return self.field_data[key]
-    
-    def __setitem__(self, key, val):
-        """
-        Sets a field to be some other value.
-        """
-        self.field_data[key] = val
-                        
-    def __delitem__(self, key) :
-        """
-        Delete the field from the trajectory
-        """
-        del self.field_data[key]
-
-    def __iter__(self) :
-
-        """
-        This iterates over the trajectories for
-        the different particles, returning dicts
-        of fields for each trajectory
-        """
-        for idx in xrange(self.num_indices) :
-            traj = {}
-            traj["particle_index"] = self.indices[idx]
-            traj["particle_time"] = self.times
-            for field in self.field_data.keys() :
-                traj[field] = self[field][idx,:]
-            yield traj
-            
-    def __len__(self) :
-
-        """
-        The number of individual trajectories
-        """
-        return self.num_indices
-
-    def add_fields(self, fields) :
-
-        """
-        Add a list of fields to an existing trajectory
-
-        Parameters
-        ----------
-        fields : list of strings
-            A list of fields to be added to the current trajectory
-            collection.
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
-        """
-        
-        for field in fields :
-
-            if not self.field_data.has_key(field):
-
-                self._get_data(field)
-                
-    def _get_data(self, field) :
-
-        """
-        Get a field to include in the trajectory collection.
-        The trajectory collection itself is a dict of 2D numpy arrays,
-        with shape (num_indices, num_steps)
-        """
-        
-        if not self.field_data.has_key(field):
-            
-            particles = np.empty((0))
-
-            step = int(0)
-                
-            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts) :
-                                    
-                if field in self.particle_fields :
-
-                    # This is easy... just get the particle fields
-
-                    dd = pf.h.all_data()
-                    pfield = dd[field][mask]
-                    particles = np.append(particles, pfield[sort])
-
-                else :
-
-                    # This is hard... must loop over grids
-
-                    pfield = np.zeros((self.num_indices))
-                    x = self["particle_position_x"][:,step]
-                    y = self["particle_position_y"][:,step]
-                    z = self["particle_position_z"][:,step]
-
-                    leaf_grids = [g for g in pf.h.grids if len(g.Children) == 0]
-                        
-                    for grid in leaf_grids :
-
-                        pfield += sample_field_at_positions(grid[field],
-                                                            grid.LeftEdge,
-                                                            grid.RightEdge,
-                                                            x, y, z)
-
-                    particles = np.append(particles, pfield)
-
-                step += 1
-                
-            self[field] = particles.reshape(self.num_steps,
-                                            self.num_indices).transpose()
-
-        return self.field_data[field]
-
-    def trajectory_from_index(self, index) :
-
-        """
-        Retrieve a single trajectory corresponding to a specific particle
-        index
-
-        Parameters
-        ----------
-        index : int
-            This defines which particle trajectory from the
-            ParticleTrajectoryCollection object will be returned.
-
-        Returns
-        -------
-        A dictionary corresponding to the particle's trajectory and the
-        fields along that trajectory
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> import matplotlib.pylab as pl
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> traj = trajs.trajectory_from_index(indices[0])
-        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
-        >>> pl.savefig("orbit")
-        """
-        
-        mask = np.in1d(self.indices, (index,), assume_unique=True)
-
-        if not np.any(mask) :
-            print "The particle index %d is not in the list!" % (index)
-            raise IndexError
-
-        fields = [field for field in sorted(self.field_data.keys())]
-                                
-        traj = {}
-
-        traj["particle_time"] = self.times
-        traj["particle_index"] = index
-        
-        for field in fields :
-
-            traj[field] = self[field][mask,:][0]
-
-        return traj
-
-    def write_out(self, filename_base) :
-
-        """
-        Write out particle trajectories to tab-separated ASCII files (one
-        for each trajectory) with the field names in the file header. Each
-        file is named with a basename and the index number.
-
-        Parameters
-        ----------
-        filename_base : string
-            The prefix for the output ASCII files.
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> trajs.write_out("orbit_trajectory")       
-        """
-        
-        fields = [field for field in sorted(self.field_data.keys())]
-
-        num_fields = len(fields)
-
-        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
-        
-        template_str = "%g\t"*num_fields+"%g\n"
-        
-        for ix in xrange(self.num_indices) :
-
-            outlines = [first_str]
-
-            for it in xrange(self.num_steps) :
-                outlines.append(template_str %
-                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
-            
-            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
-            fid.writelines(outlines)
-            fid.close()
-            del fid
-            
-    def write_out_h5(self, filename) :
-
-        """
-        Write out all the particle trajectories to a single HDF5 file
-        that contains the indices, the times, and the 2D array for each
-        field individually
-
-        Parameters
-        ----------
-
-        filename : string
-            The output filename for the HDF5 file
-
-        Examples
-        --------
-
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> trajs.write_out_h5("orbit_trajectories")                
-        """
-        
-        fid = h5py.File(filename, "w")
-
-        fields = [field for field in sorted(self.field_data.keys())]
-        
-        fid.create_dataset("particle_indices", dtype=np.int32,
-                           data=self.indices)
-        fid.create_dataset("particle_time", data=self.times)
-        
-        for field in fields :
-
-            fid.create_dataset("%s" % field, data=self[field])
-                        
-        fid.close()


https://bitbucket.org/yt_analysis/yt/commits/c413bba27789/
Changeset:   c413bba27789
Branch:      yt
User:        jzuhone
Date:        2013-10-30 14:05:04
Summary:     Renaming some stuff and getting rid of now-defunct imports.
Affected #:  4 files

diff -r 3f2e610678d3fa7321ec26ce387a654759f7c0db -r c413bba277891fb726558a1053cf71726e460225 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -109,4 +109,4 @@
     RadMC3DWriter
 
 from .particle_trajectories.api import \
-    ParticleTrajectoryColleciton
+    ParticleTrajectories

diff -r 3f2e610678d3fa7321ec26ce387a654759f7c0db -r c413bba277891fb726558a1053cf71726e460225 yt/analysis_modules/particle_trajectories/api.py
--- a/yt/analysis_modules/particle_trajectories/api.py
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -9,4 +9,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from particle_trajectories import ParticleTrajectoryCollection
+from particle_trajectories import ParticleTrajectories

diff -r 3f2e610678d3fa7321ec26ce387a654759f7c0db -r c413bba277891fb726558a1053cf71726e460225 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -1,6 +1,5 @@
 """
 Particle trajectories
-
 """
 
 #-----------------------------------------------------------------------------
@@ -19,11 +18,11 @@
 import numpy as np
 import h5py
 
-class ParticleTrajectoryCollection(object):
+class ParticleTrajectories(object):
     r"""A collection of particle trajectories in time over a series of
     parameter files. 
 
-    The ParticleTrajectoryCollection object contains a collection of
+    The ParticleTrajectories object contains a collection of
     particle trajectories for a specified set of particle indices. 
     
     Parameters
@@ -51,7 +50,7 @@
     >>> pf = load(my_fns[0])
     >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
     >>> indices = init_sphere["particle_index"].astype("int")
-    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
 
@@ -187,7 +186,7 @@
         Examples
         --------
         >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.add_fields(["particle_mass", "particle_gpot"])
         """
         for field in fields:
@@ -236,7 +235,7 @@
         ----------
         index : int
             This defines which particle trajectory from the
-            ParticleTrajectoryCollection object will be returned.
+            ParticleTrajectories object will be returned.
 
         Returns
         -------
@@ -247,7 +246,7 @@
         --------
         >>> from yt.mods import *
         >>> import matplotlib.pylab as pl
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> traj = trajs.trajectory_from_index(indices[0])
         >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
         >>> pl.savefig("orbit")
@@ -278,7 +277,7 @@
         Examples
         --------
         >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.write_out("orbit_trajectory")       
         """
         fields = [field for field in sorted(self.field_data.keys())]
@@ -311,7 +310,7 @@
         --------
 
         >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.write_out_h5("orbit_trajectories")                
         """
         fid = h5py.File(filename, "w")

diff -r 3f2e610678d3fa7321ec26ce387a654759f7c0db -r c413bba277891fb726558a1053cf71726e460225 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection, ImageArray
+    ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info

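After this changeset the trajectory machinery is no longer pulled in by yt.mods; the renamed class is reached through the analysis_modules API instead — a usage sketch:

    >>> from yt.analysis_modules.api import ParticleTrajectories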

https://bitbucket.org/yt_analysis/yt/commits/d12737531589/
Changeset:   d12737531589
Branch:      yt
User:        jzuhone
Date:        2013-10-30 17:14:55
Summary:     Making particle trajectories use CICSample_3 for interpolation of grid fields.
Affected #:  2 files

diff -r c413bba277891fb726558a1053cf71726e460225 -r d127375315894f9c038d2d80ede31bd5d12bd8b0 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -12,7 +12,7 @@
 
 from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.time_series import TimeSeriesData
-from yt.utilities.lib import sample_field_at_positions
+from yt.utilities.lib import CICSample_3
 from yt.funcs import *
 
 import numpy as np
@@ -86,6 +86,17 @@
                            "particle_position_y",
                            "particle_position_z"]
 
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
         """
         The following loops through the parameter files
         and performs two tasks. The first is to isolate
@@ -112,16 +123,6 @@
 
         self.times = np.array(self.times)
 
-        # Set up the derived field list and the particle field list
-        # so that if the requested field is a particle field, we'll
-        # just copy the field over, but if the field is a grid field,
-        # we will first copy the field over to the particle positions
-        # and then return the field. 
-
-        self.derived_field_list = self.pfs[0].h.derived_field_list
-        self.particle_fields = [field for field in self.derived_field_list
-                                if self.pfs[0].field_info[field].particle_type]
-
         # Now instantiate the requested fields 
         for field in fields:
             self._get_data(field)
@@ -137,7 +138,9 @@
         Get the field associated with key,
         checking to make sure it is a particle field.
         """
-        if not self.field_data.has_key(key) :
+        if key == "particle_time":
+            return self.times
+        if not self.field_data.has_key(key):
             self._get_data(key)
         return self.field_data[key]
     
@@ -214,12 +217,15 @@
                     x = self["particle_position_x"][:,step]
                     y = self["particle_position_y"][:,step]
                     z = self["particle_position_z"][:,step]
-                    leaf_grids = [g for g in pf.h.grids if len(g.Children) == 0]
-                    for grid in leaf_grids:
-                        pfield += sample_field_at_positions(grid[field],
-                                                            grid.LeftEdge,
-                                                            grid.RightEdge,
-                                                            x, y, z)
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
                     particles = np.append(particles, pfield)
                 step += 1
             self[field] = particles.reshape(self.num_steps,

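The sampling loop now asks the hierarchy which grid holds each particle (pf.h.find_points), pads that grid with one layer of ghost zones so the linear stencil can read just past a grid edge, and has CICSample_3 accumulate the cloud-in-cell interpolated values into pfield. A schematic 1D analogue of the CIC weighting (a sketch of the idea, not the actual Cython routine), assuming cell-centered values f on a grid with left edge le and spacing dx:

    import numpy as np

    def cic_sample_1d(f, le, dx, x):
        # Map x into cell-center coordinates: s == i at the center of cell i.
        s = (x - le) / dx - 0.5
        i = int(np.floor(s))  # left bracketing cell
        w = s - i             # fractional distance toward the right neighbor
        # Linear (cloud-in-cell) weighting between the two bracketing centers.
        # Near a grid edge this reads f[i + 1] one cell out, which is why the
        # real routine samples from a cube that includes ghost zones.
        return (1.0 - w) * f[i] + w * f[i + 1]
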
diff -r c413bba277891fb726558a1053cf71726e460225 -r d127375315894f9c038d2d80ede31bd5d12bd8b0 yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -131,12 +131,21 @@
     le2 = leftEdge[2] 
                                                     
     for n in range(npositions):
+        
+        # Compute the position of the central cell
 
-        # Compute the position of the central cell
-        xpos = fclip((posx[n] - le0)*fact, 0.5001, edge0)
-        ypos = fclip((posy[n] - le1)*fact, 0.5001, edge1)
-        zpos = fclip((posz[n] - le2)*fact, 0.5001, edge2)
+        xpos = (posx[n]-le0)*fact
+        ypos = (posy[n]-le1)*fact
+        zpos = (posz[n]-le2)*fact
+        
+        if (xpos < -1 or ypos < -1 or zpos < -1 or
+            xpos >= edge0+1.5001 or ypos >= edge1+1.5001 or zpos >= edge2+1.5001):
+            continue
 
+        xpos = fclip(xpos, 0.5001, edge0)
+        ypos = fclip(ypos, 0.5001, edge1)
+        zpos = fclip(zpos, 0.5001, edge2)
+        
         i1  = <int> (xpos + 0.5)
         j1  = <int> (ypos + 0.5)
         k1  = <int> (zpos + 0.5)

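The CICDeposit.pyx hunk above tightens the boundary handling: previously every position was clamped into the grid with fclip, so a particle far outside the grid was still sampled as if it sat just inside the edge; now positions more than about one cell outside the grid are skipped outright, and only the near-edge survivors are clamped. A self-contained restatement of the new acceptance test (in cell units; a sketch, not the Cython code):

    def in_stencil_range(pos, edge):
        # Mirrors the new test above: keep positions within ~1 cell of the
        # grid; anything farther out contributes nothing.
        return -1 <= pos < edge + 1.5001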

https://bitbucket.org/yt_analysis/yt/commits/c9bd63af62a4/
Changeset:   c9bd63af62a4
Branch:      yt
User:        jzuhone
Date:        2013-10-30 17:21:53
Summary:     Merged yt_analysis/yt into yt
Affected #:  27 files

diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms.  It's written in 
+python and heavily leverages both NumPy and Matplotlib for fast arrays and 
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/__init__.py
--- a/yt/frontends/art/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.art
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/api.py
--- a/yt/frontends/art/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.art
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      ARTGrid, \
-      ARTHierarchy, \
-      ARTStaticOutput
-
-from .fields import \
-      ARTFieldInfo, \
-      add_art_field
-
-from .io import \
-      IOHandlerART

diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ /dev/null
@@ -1,672 +0,0 @@
-"""
-ART-specific data structures
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import numpy as np
-import os.path
-import glob
-import stat
-import weakref
-import cStringIO
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-      AMRGridPatch
-from yt.data_objects.hierarchy import \
-      AMRHierarchy
-from yt.data_objects.static_output import \
-      StaticOutput
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-from .fields import \
-    ARTFieldInfo, add_art_field, KnownARTFields
-from yt.utilities.definitions import \
-    mpc_conversion
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.lib import \
-    get_box_grids_level
-import yt.utilities.lib as amr_utils
-
-from .definitions import *
-from io import _read_child_mask_level
-from io import read_particles
-from io import read_stars
-from io import spread_ages
-from io import _count_art_octs
-from io import _read_art_level_info
-from io import _read_art_child
-from io import _skip_record
-from io import _read_record
-from io import _read_frecord
-from io import _read_record_size
-from io import _read_struct
-from io import b2t
-
-
-#import yt.frontends.ramses._ramses_reader as _ramses_reader
-
-from .fields import ARTFieldInfo, KnownARTFields
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.lib import \
-    get_box_grids_level
-from yt.utilities.io_handler import \
-    io_registry
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-from yt.utilities.physical_constants import \
-    mass_hydrogen_cgs, sec_per_Gyr
-
-class ARTGrid(AMRGridPatch):
-    _id_offset = 0
-
-    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
-            child_mask=None,nop=0):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
-                              hierarchy = hierarchy)
-        start_index =start_index 
-        self.Level = level
-        self.Parent = []
-        self.Children = []
-        self.locations = locations
-        self.start_index = start_index.copy()
-        
-        self.LeftEdge = le
-        self.RightEdge = re
-        self.ActiveDimensions = gd
-        self.NumberOfParticles=nop
-        for particle_field in particle_fields:
-            setattr(self,particle_field,np.array([]))
-
-    def _setup_dx(self):
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
-                = self.dds
-
-    def get_global_startindex(self):
-        """
-        Return the integer starting index for each dimension at the current
-        level.
-        """
-        if self.start_index != None:
-            return self.start_index
-        if len(self.Parent) == 0:
-            start_index = self.LeftEdge / self.dds
-            return np.rint(start_index).astype('int64').ravel()
-        pdx = self.Parent[0].dds
-        start_index = (self.Parent[0].get_global_startindex()) + \
-                       np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by)\
-                           .astype('int64').ravel()
-        return self.start_index
-
-    def __repr__(self):
-        return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-
-class ARTHierarchy(AMRHierarchy):
-    grid = ARTGrid
-    _handle = None
-    
-    def __init__(self, pf, data_style='art'):
-        self.data_style = data_style
-        self.parameter_file = weakref.proxy(pf)
-        self.max_level = pf.max_level
-        self.hierarchy_filename = self.parameter_file.parameter_filename
-        self.directory = os.path.dirname(self.hierarchy_filename)
-        self.float_type = np.float64
-        AMRHierarchy.__init__(self,pf,data_style)
-        self._setup_field_list()
-
-    def _initialize_data_storage(self):
-        pass
-    
-    def _detect_fields(self):
-        self.field_list = []
-        self.field_list += fluid_fields
-        self.field_list += particle_fields
-        
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
-            
-    def _count_grids(self):
-        LEVEL_OF_EDGE = 7
-        MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        min_eff = 0.30
-        vol_max = 128**3
-        with open(self.pf.parameter_filename,'rb') as f:
-            (self.pf.nhydro_vars, self.pf.level_info,
-            self.pf.level_oct_offsets, 
-            self.pf.level_child_offsets) = \
-                             _count_art_octs(f, 
-                              self.pf.child_grid_offset,
-                              self.pf.min_level, self.pf.max_level)
-            self.pf.level_info[0]=self.pf.ncell
-            self.pf.level_info = np.array(self.pf.level_info)
-            self.pf.level_offsets = self.pf.level_child_offsets
-            self.pf.level_offsets = np.array(self.pf.level_offsets, 
-                                             dtype='int64')
-            self.pf.level_offsets[0] = self.pf.root_grid_offset
-            self.pf.level_art_child_masks = {}
-            cm = self.pf.root_iOctCh>0
-            cm_shape = (1,)+cm.shape 
-            self.pf.level_art_child_masks[0] = \
-                    cm.reshape(cm_shape).astype('uint8')        
-            del cm
-            root_psg = _ramses_reader.ProtoSubgrid(
-                            np.zeros(3, dtype='int64'), # left index of PSG
-                            self.pf.domain_dimensions, # dim of PSG
-                            np.zeros((1,3), dtype='int64'),# left edges of grids
-                            np.zeros((1,6), dtype='int64') # empty
-                            )
-            self.proto_grids = [[root_psg],]
-            for level in xrange(1, len(self.pf.level_info)):
-                if self.pf.level_info[level] == 0:
-                    self.proto_grids.append([])
-                    continue
-                psgs = []
-                effs,sizes = [], []
-                if self.pf.limit_level:
-                    if level > self.pf.limit_level : continue
-                #refers to the left index for the art octgrid
-                left_index, fl, nocts,root_level = _read_art_level_info(f, 
-                        self.pf.level_oct_offsets,level,
-                        coarse_grid=self.pf.domain_dimensions[0])
-                if level>1:
-                    assert root_level == last_root_level
-                last_root_level = root_level
-                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-                #read in the child masks for this level and save them
-                idc, art_child_mask = _read_child_mask_level(f, 
-                        self.pf.level_child_offsets,
-                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-                self.pf.level_art_child_masks[level]=art_child_mask
-                #child_mask is zero where child grids exist and
-                #thus where higher resolution data is available
-                #compute the hilbert indices up to a certain level
-                #the indices will associate an oct grid to the nearest
-                #hilbert index?
-                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                                  np.log10(2))
-                hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                        level + base_level, left_index)
-                #print base_level, hilbert_indices.max(),
-                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-                #print hilbert_indices.max()
-                # Strictly speaking, we don't care about the index of any
-                # individual oct at this point.  So we can then split them up.
-                unique_indices = np.unique(hilbert_indices)
-                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                            level, unique_indices.size, hilbert_indices.size)
-                #use the hilbert indices to order oct grids so that consecutive
-                #items on a list are spatially near each other
-                #this is useful because we will define grid patches over these
-                #octs, which are more efficient if the octs are spatially close
-                #split into list of lists, with domains containing 
-                #lists of sub octgrid left indices and an index
-                #referring to the domain on which they live
-                pbar = get_pbar("Calc Hilbert Indices ",1)
-                locs, lefts = _ramses_reader.get_array_indices_lists(
-                            hilbert_indices, unique_indices, left_index, fl)
-                pbar.finish()
-                #iterate over the domains    
-                step=0
-                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-                psg_eff = []
-                for ddleft_index, ddfl in zip(lefts, locs):
-                    #iterate over just the unique octs
-                    #why would we ever have non-unique octs?
-                    #perhaps the hilbert ordering may visit the same
-                    #oct multiple times - review only unique octs 
-                    #for idomain in np.unique(ddfl[:,1]):
-                    #dom_ind = ddfl[:,1] == idomain
-                    #dleft_index = ddleft_index[dom_ind,:]
-                    #dfl = ddfl[dom_ind,:]
-                    dleft_index = ddleft_index
-                    dfl = ddfl
-                    initial_left = np.min(dleft_index, axis=0)
-                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
-                    idims +=2
-                    #this creates a grid patch that doesn't cover the whole leve
-                    #necessarily, but with other patches covers all the regions
-                    #with octs. This object automatically shrinks its size
-                    #to barely encompass the octs inside of it.
-                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                    dleft_index, dfl)
-                    if psg.efficiency <= 0: continue
-                    #because grid patches maybe mostly empty, and with octs
-                    #that only partially fill the grid, it may be more efficient
-                    #to split large patches into smaller patches. We split
-                    #if less than 10% the volume of a patch is covered with octs
-                    if idims.prod() > vol_max or psg.efficiency < min_eff:
-                        psg_split = _ramses_reader.recursive_patch_splitting(
-                            psg, idims, initial_left, 
-                            dleft_index, dfl,min_eff=min_eff,use_center=True,
-                            split_on_vol=vol_max)
-                        psgs.extend(psg_split)
-                        psg_eff += [x.efficiency for x in psg_split] 
-                    else:
-                        psgs.append(psg)
-                        psg_eff =  [psg.efficiency,]
-                    tol = 1.00001
-                    step+=1
-                    pbar.update(step)
-                eff_mean = np.mean(psg_eff)
-                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-                eff_nall = len(psg_eff)
-                mylog.info("Average subgrid efficiency %02.1f %%",
-                            eff_mean*100.0)
-                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-                
-                mylog.info("Done with level % 2i; max LE %i", level,
-                           np.max(left_index))
-                pbar.finish()
-                self.proto_grids.append(psgs)
-                if len(self.proto_grids[level]) == 1: continue
-        self.num_grids = sum(len(l) for l in self.proto_grids)
-        
-    def _parse_hierarchy(self):
-        grids = []
-        gi = 0
-        dd=self.pf.domain_dimensions
-        for level, grid_list in enumerate(self.proto_grids):
-            dds = ((2**level) * dd).astype("float64")
-            for g in grid_list:
-                fl = g.grid_file_locations
-                props = g.get_properties()
-                start_index = props[0,:]
-                le = props[0,:].astype('float64')/dds
-                re = props[1,:].astype('float64')/dds
-                gd = props[2,:].astype('int64')
-                if level==0:
-                    le = np.zeros(3,dtype='float64')
-                    re = np.ones(3,dtype='float64')
-                    gd = dd
-                self.grid_left_edge[gi,:] = le
-                self.grid_right_edge[gi,:] = re
-                self.grid_dimensions[gi,:] = gd
-                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
-                self.grid_levels[gi,:] = level
-                child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,start_index,
-                    self.pf.level_art_child_masks[level],
-                    child_mask)
-                grids.append(self.grid(gi, self, level, fl, 
-                    start_index,le,re,gd))
-                gi += 1
-        self.grids = np.empty(len(grids), dtype='object')
-        if not self.pf.skip_particles and self.pf.file_particle_data:
-            lspecies = self.pf.parameters['lspecies']
-            wspecies = self.pf.parameters['wspecies']
-            self.pf.particle_position,self.pf.particle_velocity= \
-                read_particles(self.pf.file_particle_data,
-                        self.pf.parameters['Nrow'])
-            nparticles = lspecies[-1]
-            if not np.all(self.pf.particle_position[nparticles:]==0.0):
-                mylog.info('WARNING: unused particles discovered from lspecies')
-            self.pf.particle_position = self.pf.particle_position[:nparticles]
-            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
-            self.pf.particle_position  /= self.pf.domain_dimensions 
-            self.pf.particle_type = np.zeros(nparticles,dtype='int')
-            self.pf.particle_mass = np.zeros(nparticles,dtype='float64')
-            self.pf.particle_star_index = len(wspecies)-1
-            a=0
-            for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if i == self.pf.particle_star_index:
-                    assert m==0.0
-                    sa,sb = a,b
-                else:
-                    assert m>0.0
-                self.pf.particle_type[a:b] = i #particle type
-                self.pf.particle_mass[a:b] = m #mass in code units
-                a=b
-            if not self.pf.skip_stars and self.pf.file_particle_stars: 
-                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
-                        ws_old,ws_oldi,tdum,adum \
-                     = read_stars(self.pf.file_particle_stars)
-                self.pf.nstars_rs = nstars_rs     
-                self.pf.nstars_pa = b-a
-                star_mask = self.pf.particle_type==self.pf.particle_star_index
-                if not nstars_rs==np.sum(star_mask):
-                    mylog.info('WARNING!: nstars is inconsistent!')
-                del star_mask
-                if nstars_rs > 0 :
-                    n=min(1e2,len(tbirth))
-                    birthtimes= b2t(tbirth,n=n)
-                    birthtimes = birthtimes.astype('float64')
-                    assert birthtimes.shape == tbirth.shape    
-                    birthtimes*= 1.0e9 #from Gyr to yr
-                    birthtimes*= 365*24*3600 #to seconds
-                    ages = self.pf.current_time-birthtimes
-                    spread = self.pf.spread_age
-                    if isinstance(spread, float):
-                        ages = spread_ages(ages,spread=spread)
-                    elif spread:
-                        ages = spread_ages(ages)
-                    idx = self.pf.particle_type == self.pf.particle_star_index
-                    for psf in particle_star_fields:
-                        if getattr(self.pf,psf,None) is None:
-                            setattr(self.pf,psf,
-                                    np.zeros(nparticles,dtype='float64'))
-                    self.pf.particle_age[sa:sb] = ages
-                    self.pf.particle_mass[sa:sb] = mass
-                    self.pf.particle_mass_initial[sa:sb] = imass
-                    self.pf.particle_creation_time[sa:sb] = birthtimes
-                    self.pf.particle_metallicity1[sa:sb] = metallicity1
-                    self.pf.particle_metallicity2[sa:sb] = metallicity2
-                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
-                                                          + metallicity2
-        for gi,g in enumerate(grids):    
-            self.grids[gi]=g
-                    
-    def _get_grid_parents(self, grid, LE, RE):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(LE, RE)
-        mask[grid_ind] = True
-        mask = np.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
-        return self.grids[mask]
-
-    def _populate_grid_objects(self):
-        mask = np.empty(self.grids.size, dtype='int32')
-        pb = get_pbar("Populating grids", len(self.grids))
-        for gi,g in enumerate(self.grids):
-            pb.update(gi)
-            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-                                self.grid_right_edge[gi,:],
-                                g.Level - 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            parents = self.grids[mask.astype("bool")]
-            if len(parents) > 0:
-                g.Parent.extend((p for p in parents.tolist()
-                        if p.locations[0,0] == g.locations[0,0]))
-                for p in parents: p.Children.append(g)
-            #Now we do overlapping siblings; note that one has to "win" with
-            #siblings, so we assume the lower ID one will "win"
-            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-                                self.grid_right_edge[gi,:],
-                                g.Level,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask, gi)
-            mask[gi] = False
-            siblings = self.grids[mask.astype("bool")]
-            if len(siblings) > 0:
-                g.OverlappingSiblings = siblings.tolist()
-            g._prepare_grid()
-            g._setup_dx()
-            #instead of gridding particles assign them all to the root grid
-            if gi==0:
-                for particle_field in particle_fields:
-                    source = getattr(self.pf,particle_field,None)
-                    if source is None:
-                        for i,ax in enumerate('xyz'):
-                            base_field = particle_field.replace('_%s'%ax,'')
-                            source = getattr(self.pf,base_field,None)
-                            if source is not None:
-                                source = source[:,i]
-                                break
-                    if source is not None:
-                        mylog.info("Attaching %s to the root grid",
-                                    particle_field)
-                        g.NumberOfParticles = source.shape[0]
-                        setattr(g,particle_field,source)
-                g.particle_index = np.arange(g.NumberOfParticles)
-        pb.finish()
-        self.max_level = self.grid_levels.max()
-
-    def _setup_field_list(self):
-        if not self.parameter_file.skip_particles:
-            # We know which particle fields will exist -- pending further
-            # changes in the future.
-            for field in particle_fields:
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-                # Note that we call add_field on the field_info directly.  This
-                # will allow the same field detection mechanism to work for 1D,
-                # 2D and 3D fields.
-                self.pf.field_info.add_field(field, NullFunc,
-                                             convert_function=cf,
-                                             take_log=True, particle_type=True)
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](
-            self.pf.parameter_filename,
-            self.pf.nhydro_vars,
-            self.pf.level_info,
-            self.pf.level_offsets)
-
-class ARTStaticOutput(StaticOutput):
-    _hierarchy_class = ARTHierarchy
-    _fieldinfo_fallback = ARTFieldInfo
-    _fieldinfo_known = KnownARTFields
-    
-    def __init__(self, file_amr, storage_filename = None,
-            skip_particles=False,skip_stars=False,limit_level=None,
-            spread_age=True,data_style='art'):
-        self.data_style = data_style
-        self._find_files(file_amr)
-        self.skip_particles = skip_particles
-        self.skip_stars = skip_stars
-        self.file_amr = file_amr
-        self.parameter_filename = file_amr
-        self.limit_level = limit_level
-        self.spread_age = spread_age
-        self.domain_left_edge  = np.zeros(3,dtype='float64')
-        self.domain_right_edge = np.ones(3,dtype='float64') 
-        StaticOutput.__init__(self, file_amr, data_style)
-        self.storage_filename = storage_filename
-
-    def _find_files(self,file_amr):
-        """
-        Given the AMR base filename, attempt to find the
-        particle header, star files, etc.
-        """
-        prefix,suffix = filename_pattern['amr'].split('%s')
-        affix = os.path.basename(file_amr).replace(prefix,'')
-        affix = affix.replace(suffix,'')
-        affix = affix.replace('_','')
-        affix = affix[1:-1]
-        dirname = os.path.dirname(file_amr)
-        for filetype, pattern in filename_pattern.items():
-            #sometimes the affix is surrounded by an extraneous _
-            #so check for an extra character on either side
-            check_filename = dirname+'/'+pattern%('?%s?'%affix)
-            filenames = glob.glob(check_filename)
-            if len(filenames)==1:
-                setattr(self,"file_"+filetype,filenames[0])
-                mylog.info('discovered %s',filetype)
-            elif len(filenames)>1:
-                setattr(self,"file_"+filetype,None)
-                mylog.info("Ambiguous number of files found for %s",
-                        check_filename)
-            else:
-                setattr(self,"file_"+filetype,None)
-
-    def __repr__(self):
-        return self.basename.rsplit(".", 1)[0]
-        
-    def _set_units(self):
-        """
-        Generates the conversion to various physical units based
-        on the parameters from the header.
-        """
-        self.units = {}
-        self.time_units = {}
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0
-
-        #spatial units
-        z   = self.current_redshift
-        h   = self.hubble_constant
-        boxcm_cal = self.parameters["boxh"]
-        boxcm_uncal = boxcm_cal / h
-        box_proper = boxcm_uncal/(1+z)
-        aexpn = self["aexpn"]
-        for unit in mpc_conversion:
-            self.units[unit] = mpc_conversion[unit] * box_proper
-            self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
-            self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
-            self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-
-        #all other units
-        wmu = self.parameters["wmu"]
-        Om0 = self.parameters['Om0']
-        ng  = self.parameters['ng']
-        boxh   = self.parameters['boxh'] 
-        aexpn  = self.parameters["aexpn"]
-        hubble = self.parameters['hubble']
-
-        cf = defaultdict(lambda: 1.0)
-        r0 = boxh/ng
-        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        S_0 = 52.077 * wmu**(5.0/3.0)
-        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        #v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        v0 = 50.0*r0*np.sqrt(Om0)
-        t0 = r0/v0
-        #rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        rho0 = 2.776e11 * hubble**2.0 * Om0
-        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        cf['r0']=r0
-        cf['P0']=P0
-        cf['T_0']=T_0
-        cf['S_0']=S_0
-        cf['v0']=v0
-        cf['t0']=t0
-        cf['rho0']=rho0
-        cf['tr']=tr
-        cf['aM0']=aM0
-
-        #factors to multiply the native code units to CGS
-        cf['Pressure'] = P0 #already cgs
-        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
-        cf["Mass"] = aM0 * 1.98892e33
-        cf["Density"] = rho0*(aexpn**-3.0)
-        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
-        cf["Potential"] = 1.0
-        cf["Entropy"] = S_0
-        cf["Temperature"] = tr
-        self.cosmological_simulation = True
-        self.conversion_factors = cf
-        
-        for particle_field in particle_fields:
-            self.conversion_factors[particle_field] =  1.0
-        for ax in 'xyz':
-            self.conversion_factors["%s-velocity" % ax] = 1.0
-            self.conversion_factors["particle_velocity_%s"%ax] = cf['Velocity']
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
-        self.conversion_factors['particle_mass'] = cf['Mass']
-        self.conversion_factors['particle_creation_time'] =  31556926.0
-        self.conversion_factors['Msun'] = 5.027e-34 
-
-    def _parse_parameter_file(self):
-        """
-        Get the various simulation parameters & constants.
-        """
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.periodicity = (True, True, True)
-        self.cosmological_simulation = True
-        self.parameters = {}
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters.update(constants)
-        with open(self.file_amr,'rb') as f:
-            amr_header_vals = _read_struct(f,amr_header_struct)
-            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-                _skip_record(f)
-            (self.ncell,) = struct.unpack('>l', _read_record(f))
-            # Try to figure out the root grid dimensions
-            est = int(np.rint(self.ncell**(1.0/3.0)))
-            # Note here: this is the number of *cells* on the root grid.
-            # This is not the same as the number of Octs.
-            self.domain_dimensions = np.ones(3, dtype='int64')*est 
-            self.root_grid_mask_offset = f.tell()
-            root_cells = self.domain_dimensions.prod()
-            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
-                 order='F')
-            self.root_grid_offset = f.tell()
-            _skip_record(f) # hvar
-            _skip_record(f) # var
-            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-            self.child_grid_offset = f.tell()
-        self.parameters.update(amr_header_vals)
-        if not self.skip_particles and self.file_particle_header:
-            with open(self.file_particle_header,"rb") as fh:
-                particle_header_vals = _read_struct(fh,particle_header_struct)
-                fh.seek(seek_extras)
-                n = particle_header_vals['Nspecies']
-                wspecies = np.fromfile(fh,dtype='>f',count=10)
-                lspecies = np.fromfile(fh,dtype='>i',count=10)
-            self.parameters['wspecies'] = wspecies[:n]
-            self.parameters['lspecies'] = lspecies[:n]
-            ls_nonzero = np.diff(lspecies)[:n-1]
-            mylog.info("Discovered %i species of particles",len(ls_nonzero))
-            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
-                *ls_nonzero)
-            for k,v in particle_header_vals.items():
-                if k in self.parameters.keys():
-                    if not self.parameters[k] == v:
-                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e",k,v,
-                                   self.parameters[k])
-                else:
-                    self.parameters[k]=v
-            self.parameters_particles = particle_header_vals
-    
-        #setup standard simulation yt expects to see
-        self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.omega_lambda = amr_header_vals['Oml0']
-        self.omega_matter = amr_header_vals['Om0']
-        self.hubble_constant = amr_header_vals['hubble']
-        self.min_level = amr_header_vals['min_level']
-        self.max_level = amr_header_vals['max_level']
-        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        """
-        Defined for the NMSU file naming scheme.
-        This could differ for other formats.
-        """
-        fn = os.path.basename(args[0])
-        f = "%s" % args[0]
-        if fn.endswith(".d") and fn.startswith('10Mpc') and \
-                os.path.exists(f):
-            return True
-        return False
-

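The _set_units method deleted above condenses the NMSU-ART unit system into a handful of scalings. As a rough standalone sketch of the same arithmetic (the header values below are hypothetical placeholders, not taken from any real dataset):

    import numpy as np

    # Hypothetical header values, for illustration only
    boxh, Om0, hubble, ng, aexpn = 10.0, 0.27, 0.70, 128, 1.0

    r0 = boxh / ng                        # length scale: one root-grid cell
    v0 = 50.0 * r0 * np.sqrt(Om0)         # velocity scale
    t0 = r0 / v0                          # time scale
    rho0 = 2.776e11 * hubble**2.0 * Om0   # density scale
    aM0 = rho0 * (boxh / hubble)**3.0 / ng**3.0  # mass scale

    # Factors that take native code units to CGS, mirroring _set_units:
    velocity_cgs = v0 / aexpn * 1.0e5     # proper cm/s
    density_cgs = rho0 * aexpn**-3.0
    mass_cgs = aM0 * 1.98892e33           # grams

Everything downstream (Pressure, GasEnergy, Entropy, Temperature) is built from these few scalings in the same way.
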
diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-Definitions specific to ART
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-fluid_fields= [ 
-    'Density',
-    'TotalEnergy',
-    'XMomentumDensity',
-    'YMomentumDensity',
-    'ZMomentumDensity',
-    'Pressure',
-    'Gamma',
-    'GasEnergy',
-    'MetalDensitySNII',
-    'MetalDensitySNIa',
-    'PotentialNew',
-    'PotentialOld'
-]
-
-particle_fields= [
-    'particle_age',
-    'particle_index',
-    'particle_mass',
-    'particle_mass_initial',
-    'particle_creation_time',
-    'particle_metallicity1',
-    'particle_metallicity2',
-    'particle_metallicity',
-    'particle_position_x',
-    'particle_position_y',
-    'particle_position_z',
-    'particle_velocity_x',
-    'particle_velocity_y',
-    'particle_velocity_z',
-    'particle_type'
-]
-
-particle_star_fields = [
-    'particle_age',
-    'particle_mass',
-    'particle_mass_initial',
-    'particle_creation_time',
-    'particle_metallicity1',
-    'particle_metallicity2',
-    'particle_metallicity',
-]
-
-filename_pattern = {				
-	'amr':'10MpcBox_csf512_%s.d',
-	'particle_header':'PMcrd%s.DAT',
-	'particle_data':'PMcrs0%s.DAT',
-	'particle_stars':'stars_%s.dat'
-}
-
-amr_header_struct = [
-    ('>i','pad byte'),
-    ('>256s','jname'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','istep'),
-    ('>d','t'),
-    ('>d','dt'),
-    ('>f','aexpn'),
-    ('>f','ainit'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','boxh'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','Omb0'),
-    ('>f','hubble'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','nextras'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','extra1'),
-    ('>f','extra2'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>256s','lextra'),
-    ('>256s','lextra'),
-    ('>i','pad byte'),
-    ('>i', 'pad byte'),
-    ('>i', 'min_level'),
-    ('>i', 'max_level'),
-    ('>i', 'pad byte'),
-]
-
-particle_header_struct =[
-    ('>i','pad'),
-    ('45s','header'), 
-    ('>f','aexpn'),
-    ('>f','aexp0'),
-    ('>f','amplt'),
-    ('>f','astep'),
-    ('>i','istep'),
-    ('>f','partw'),
-    ('>f','tintg'),
-    ('>f','Ekin'),
-    ('>f','Ekin1'),
-    ('>f','Ekin2'),
-    ('>f','au0'),
-    ('>f','aeu0'),
-    ('>i','Nrow'),
-    ('>i','Ngridc'),
-    ('>i','Nspecies'),
-    ('>i','Nseed'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','hubble'),
-    ('>f','Wp5'),
-    ('>f','Ocurv'),
-    ('>f','Omb0'),
-    ('>%ds'%(396),'extras'),
-    ('>f','unknown'),
-    ('>i','pad')
-]
-
-constants = {
-    "Y_p":0.245,
-    "gamma":5./3.,
-    "T_CMB0":2.726,
-    "T_min":300.,
-    "ng":128,
-    "wmu":4.0/(8.0-5.0*0.245)
-}
-
-seek_extras = 137

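The header tables above pair a big-endian struct format with a field name, and the frontend walks the list unpacking one value at a time (see _read_struct in io.py below). A minimal sketch of that pattern, using a hypothetical two-field structure:

    import struct

    def read_struct(f, structure):
        # Unpack one (format, name) pair at a time from an open binary file
        vals = {}
        for fmt, name in structure:
            size = struct.calcsize(fmt)
            (val,) = struct.unpack(fmt, f.read(size))
            vals[name] = val
        return vals

    # e.g. read_struct(fh, [('>i', 'istep'), ('>d', 't')])

Repeated names such as 'pad byte' simply overwrite one another in the dict, which is harmless for padding.
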
diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ /dev/null
@@ -1,295 +0,0 @@
-"""
-ART-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    TranslationFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-import yt.utilities.lib as amr_utils
-
-KnownARTFields = FieldInfoContainer()
-add_art_field = KnownARTFields.add_field
-
-ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = ARTFieldInfo.add_field
-
-import numpy as np
-
-#these are just the hydro fields
-known_art_fields = [ 'Density','TotalEnergy',
-                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-                     'Pressure','Gamma','GasEnergy',
-                     'MetalDensitySNII', 'MetalDensitySNIa',
-                     'PotentialNew','PotentialOld']
-
-#Add the fields, then later we'll individually defined units and names
-for f in known_art_fields:
-    add_art_field(f, function=NullFunc, take_log=True,
-              validators = [ValidateDataField(f)])
-
-#Hydro Fields that are verified to be OK unit-wise:
-#Density
-#Temperature
-#metallicities
-#MetalDensity SNII + SNia
-
-#Hydro Fields that need to be tested:
-#TotalEnergy
-#XYZMomentum
-#Pressure
-#Gamma
-#GasEnergy
-#Potentials
-#xyzvelocity
-
-#Particle fields that are tested:
-#particle_position_xyz
-#particle_type
-#particle_index
-#particle_mass
-#particle_mass_initial
-#particle_age
-#particle_velocity
-#particle_metallicity12
-
-#Particle fields that are untested:
-#NONE
-
-#Other checks:
-#CellMassMsun == Density * CellVolume
-
-def _convertDensity(data):
-    return data.convert("Density")
-KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["Density"]._convert_function=_convertDensity
-
-def _convertTotalEnergy(data):
-    return data.convert("GasEnergy")
-KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
-KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
-
-def _convertXMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
-    tr *= (data.convert("Density")/data.convert("Mass"))
-    return tr
-KnownARTFields["XMomentumDensity"]._units = r"\rm{g}/\rm{cm}^2/\rm{s}"
-KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{g}/\rm{cm}/\rm{s}"
-KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
-
-def _convertYMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
-    tr *= (data.convert("Density")/data.convert("Mass"))
-    return tr
-KnownARTFields["YMomentumDensity"]._units = r"\rm{g}/\rm{cm}^2/\rm{s}"
-KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{g}/\rm{cm}/\rm{s}"
-KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
-
-def _convertZMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
-    tr *= (data.convert("Density")/data.convert("Mass"))
-    return tr
-KnownARTFields["ZMomentumDensity"]._units = r"\rm{g}/\rm{cm}^2/\rm{s}"
-KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{g}/\rm{cm}/\rm{s}"
-KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
-
-def _convertPressure(data):
-    return data.convert("Pressure")
-KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
-KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
-KnownARTFields["Pressure"]._convert_function=_convertPressure
-
-def _convertGamma(data):
-    return 1.0
-KnownARTFields["Gamma"]._units = r""
-KnownARTFields["Gamma"]._projected_units = r""
-KnownARTFields["Gamma"]._convert_function=_convertGamma
-
-def _convertGasEnergy(data):
-    return data.convert("GasEnergy")
-KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._projected_units = r""
-KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
-
-def _convertMetalDensitySNII(data):
-    return data.convert('Density')
-KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
-
-def _convertMetalDensitySNIa(data):
-    return data.convert('Density')
-KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
-
-def _convertPotentialNew(data):
-    return data.convert("Potential")
-KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
-
-def _convertPotentialOld(data):
-    return data.convert("Potential")
-KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
-
-####### Derived fields
-
-def _temperature(field, data):
-    dg = data["GasEnergy"].astype('float64')
-    dg /= data.pf.conversion_factors["GasEnergy"]
-    dd = data["Density"].astype('float64')
-    dd /= data.pf.conversion_factors["Density"]
-    tr = dg/dd*data.pf.tr
-    #ghost cells have zero density?
-    tr[np.isnan(tr)] = 0.0
-    return tr
-def _converttemperature(data):
-    #x = data.pf.conversion_factors["Temperature"]
-    x = 1.0
-    return x
-add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
-
-def _metallicity_snII(field, data):
-    tr  = data["MetalDensitySNII"] / data["Density"]
-    return tr
-add_field("Metallicity_SNII", function=_metallicity_snII, units = r"",take_log=True)
-ARTFieldInfo["Metallicity_SNII"]._units = r""
-ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
-
-def _metallicity_snIa(field, data):
-    tr  = data["MetalDensitySNIa"] / data["Density"]
-    return tr
-add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"",take_log=True)
-ARTFieldInfo["Metallicity_SNIa"]._units = r""
-ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
-
-def _metallicity(field, data):
-    tr  = data["Metal_Density"] / data["Density"]
-    return tr
-add_field("Metallicity", function=_metallicity, units = r"",take_log=True)
-ARTFieldInfo["Metallicity"]._units = r""
-ARTFieldInfo["Metallicity"]._projected_units = r""
-
-def _x_velocity(field,data):
-    tr  = data["XMomentumDensity"]/data["Density"]
-    return tr
-add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
-
-def _y_velocity(field,data):
-    tr  = data["YMomentumDensity"]/data["Density"]
-    return tr
-add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
-
-def _z_velocity(field,data):
-    tr  = data["ZMomentumDensity"]/data["Density"]
-    return tr
-add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
-
-def _metal_density(field, data):
-    tr  = data["MetalDensitySNIa"]
-    tr += data["MetalDensitySNII"]
-    return tr
-add_field("Metal_Density", function=_metal_density, units = r"\rm{g}/\rm{cm}^3",take_log=True)
-ARTFieldInfo["Metal_Density"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-
-
-#Particle fields
-
-def ParticleMass(field,data):
-    return data['particle_mass']
-add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
-
-
-#Derived particle fields
-
-def ParticleMassMsun(field,data):
-    return data['particle_mass']*data.pf['Msun']
-add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{M_{\odot}}",particle_type=True)
-
-def _creation_time(field,data):
-    pa = data["particle_age"]
-    tr = np.zeros(pa.shape,dtype='float')-1.0
-    tr[pa>0] = pa[pa>0]
-    return tr
-add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
-
-def mass_dm(field, data):
-    tr = np.ones(data.ActiveDimensions, dtype='float32')
-    idx = data["particle_type"]<5
-    #make a dumb assumption that the mass is evenly spread out in the grid
-    #must return an array the shape of the grid cells
-    if np.sum(idx)>0:
-        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #multiply by total contained mass
-        return tr
-    else:
-        return tr*1e-9
-
-add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
-        validators=[ValidateSpatial(0)],        
-        take_log=False,
-        projection_conversion="1")
-
-def _spdensity(field, data):
-    grid_mass = np.zeros(data.ActiveDimensions, dtype='float64')
-    if data.star_mass.shape[0] ==0 : return grid_mass 
-    amr_utils.CICDeposit_3(data.star_position_x,
-                           data.star_position_y,
-                           data.star_position_z,
-                           data.star_mass,
-                           data.star_mass.shape[0],
-                           grid_mass, 
-                           np.array(data.LeftEdge).astype(np.float64),
-                           np.array(data.ActiveDimensions).astype(np.int32), 
-                           np.float64(data['dx']))
-    return grid_mass 
-
-#add_field("star_density", function=_spdensity,
-#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
-
-def _simple_density(field,data):
-    mass = np.sum(data.star_mass)
-    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
-    return mass/volume
-
-add_field("star_density", function=_simple_density,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)

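Most of the removed field definitions follow one pattern: a derived field is a plain function of other fields, registered via add_field. A schematic sketch of the velocity and metallicity definitions above, with a plain dict standing in for a real yt data object:

    import numpy as np

    def x_velocity(data):
        # velocity is momentum density divided by mass density
        return data["XMomentumDensity"] / data["Density"]

    def metallicity(data):
        # total metal mass fraction from the two supernova channels
        metals = data["MetalDensitySNIa"] + data["MetalDensitySNII"]
        return metals / data["Density"]

    data = {"XMomentumDensity": np.array([2.0]),
            "Density": np.array([4.0]),
            "MetalDensitySNIa": np.array([0.01]),
            "MetalDensitySNII": np.array([0.03])}
    print x_velocity(data), metallicity(data)  # [ 0.5] [ 0.01]
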
diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ /dev/null
@@ -1,468 +0,0 @@
-"""
-ART-specific IO
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-import struct
-
-import os
-import os.path
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-
-import yt.utilities.lib as au
-
-from yt.frontends.art.definitions import *
-
-class IOHandlerART(BaseIOHandler):
-    _data_style = "art"
-
-    def __init__(self, filename, nhydro_vars, level_info, level_offsets,
-                 *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.filename = filename
-        self.nhydro_vars = nhydro_vars
-        self.level_info = level_info
-        self.level_offsets = level_offsets
-        self.level_data = {}
-
-    def preload_level(self, level,field=None):
-        """ Reads in the full ART tree. From the ART source:
-            iOctLv :    >0   - level of an oct
-            iOctPr :         - parent of an oct
-            iOctCh :    >0   - pointer to an oct of children
-                        0   - there are no children; the cell is a leaf
-            iOctNb :    >0   - pointers to neighbouring cells 
-            iOctPs :         - coordinates of Oct centers
-            
-            iOctLL1:         - doubly linked list of octs
-            iOctLL2:         - doubly linked list of octs
-            
-            tl - current  time moment for level L
-            tlold - previous time moment for level L
-            dtl - dtime0/2**iTimeBin
-            dtlold -  previous time step for level L
-            iSO - sweep order
-            
-            hvar(1,*) - gas density 
-            hvar(2,*) - gas energy 
-            hvar(3,*) - x-momentum 
-            hvar(4,*) - y-momentum
-            hvar(5,*) - z-momentum
-            hvar(6,*) - pressure
-            hvar(7,*) - Gamma
-            hvar(8,*) - internal energy 
-
-            var (1,*) - total density 
-            var (2,*) - potential (new)
-            var (3,*) - potential (old)
-            
-            
-            
-        """
-        
-        if level in self.level_data: return
-        if level == 0:
-            self.preload_root_level()
-            return
-        f = open(self.filename, 'rb')
-        f.seek(self.level_offsets[level])
-        ncells = 8*self.level_info[level]
-        nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
-        arr = np.fromfile(f, dtype='>f', count=nvals)
-        arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
-        arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
-        #the field argument is unused here; both branches stored the same array
-        self.level_data[level] = arr.astype('float32')
-        del arr
-
-    def preload_root_level(self):
-        f = open(self.filename, 'rb')
-        f.seek(self.level_offsets[0] + 4) # Ditch the header
-        ncells = self.level_info[0]
-        nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = np.fromfile(f, dtype='>f', count=nhvals).astype("float32")
-        hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
-        np.fromfile(f,dtype='>i',count=2) #throw away the pads
-        nvars = ncells * (2) # 0 vars, 0 pads
-        var = np.fromfile(f, dtype='>f', count=nvars).astype("float32")
-        var = var.reshape((2, ncells), order="F")
-        arr = np.concatenate((hvar,var))
-        self.level_data[0] = arr
-
-    def clear_level(self, level):
-        self.level_data.pop(level, None)
-
-    def _read_particle_field(self, grid, field):
-        dat = getattr(grid,field,None)
-        if dat is not None: 
-            return dat
-        starfield = field.replace('star','particle')
-        dat = getattr(grid,starfield,None)
-        if dat is not None:
-            psi = grid.pf.particle_star_index
-            idx = grid.particle_type==psi
-            return dat[idx]
-        raise KeyError
-        
-    def _read_data(self, grid, field):
-        if field in particle_fields:
-            return self._read_particle_field(grid, field)
-        pf = grid.pf
-        field_id = grid.pf.h.field_list.index(field)
-        if grid.Level == 0: # We only have one root grid
-            self.preload_level(0)
-            tr = self.level_data[0][field_id,:].reshape(
-                    pf.domain_dimensions, order="F").copy()
-            return tr.swapaxes(0, 2).astype("float64")
-        tr = np.zeros(grid.ActiveDimensions, dtype='float32')
-        grids = [grid]
-        l_delta = 0
-        filled = np.zeros(grid.ActiveDimensions, dtype='uint8')
-        to_fill = grid.ActiveDimensions.prod()
-        while to_fill > 0 and len(grids) > 0:
-            next_grids = []
-            for g in grids:
-                self.preload_level(g.Level,field=field_id)
-                #print "Filling %s from %s (%s)" % (grid, g, g.Level)
-                to_fill -= au.read_art_grid(field_id, 
-                        grid.get_global_startindex(), grid.ActiveDimensions,
-                        tr, filled, self.level_data[g.Level],
-                        g.Level, 2**l_delta, g.locations)
-                next_grids += g.Parent
-            grids = next_grids
-            l_delta += 1
-        return tr.astype("float64")
-
-def _count_art_octs(f, offset, 
-                   MinLev, MaxLevelNow):
-    level_oct_offsets= [0,]
-    level_child_offsets= [0,]
-    f.seek(offset)
-    nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
-    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
-        level_oct_offsets.append(f.tell())
-
-        #Get the info for this level, skip the rest
-        #print "Reading oct tree data for level", Lev
-        #print 'offset:',f.tell()
-        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-           '>iii', _read_record(f))
-        #print 'Level %i : '%Lev, iNOLL
-        #print 'offset after level record:',f.tell()
-        iOct = iHOLL[Lev] - 1
-        nLevel = iNOLL[Lev]
-        nLevCells = nLevel * nchild
-        ntot = ntot + nLevel
-
-        #Skip all the oct hierarchy data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel)
-
-        level_child_offsets.append(f.tell())
-        #Skip the child vars data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel*nchild)
-
-        #find nhydrovars
-        nhydrovars = 8+2
-    f.seek(offset)
-    return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
-
-def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
-    pos = f.tell()
-    f.seek(level_oct_offsets[level])
-    #Get the info for this level, skip the rest
-    junk, nLevel, iOct = struct.unpack(
-       '>iii', _read_record(f))
-    
-    #fortran indices start at 1
-    
-    #Skip all the oct hierarchy data
-    le     = np.zeros((nLevel,3),dtype='int64')
-    fl     = np.ones((nLevel,6),dtype='int64')
-    iocts  = np.zeros(nLevel+1,dtype='int64')
-    idxa,idxb = 0,0
-    chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
-    left = nLevel
-    while left > 0 :
-        this_chunk = min(chunk,left)
-        idxb=idxa+this_chunk
-        data = np.fromfile(f,dtype='>i',count=this_chunk*15)
-        data=data.reshape(this_chunk,15)
-        left-=this_chunk
-        le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = np.arange(idxa,idxb)
-        #pad byte is last, LL2, then ioct right before it
-        iocts[idxa:idxb] = data[:,-3] 
-        idxa=idxa+this_chunk
-    del data
-    
-    #ioct always represents the index of the next variable
-    #not the current, so shift forward one index
-    #the last index isn't used
-    ioctso = iocts.copy()
-    iocts[1:]=iocts[:-1] #shift
-    iocts = iocts[:nLevel] #chop off the last index
-    iocts[0]=iOct #starting value
-
-    #now correct iocts for fortran indices start @ 1
-    iocts = iocts-1
-
-    assert np.unique(iocts).shape[0] == nLevel
-    
-    #ioct tries to access arrays much larger than le & fl
-    #just make sure they appear in the right order, skipping
-    #the empty space in between
-    idx = np.argsort(iocts)
-    
-    #now rearrange le & fl in order of the ioct
-    le = le[idx]
-    fl = fl[idx]
-
-
-    #left edges are expressed as if they were on 
-    #level 15, so no matter what level max(le)=2**15 
-    #correct to the yt convention
-    #le = le/2**(root_level-1-level)-1
-
-    #try to find the root_level first
-    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
-    root_level = root_level.astype('int64')
-
-    #try without the -1
-    le = le/2**(root_level+1-level)-1
-
-    #now read the hvars and vars arrays
-    #we are looking for iOctCh
-    #we record if iOctCh is >0, in which it is subdivided
-    iOctCh  = np.zeros((nLevel+1,8),dtype='bool')
-    
-    
-    
-    f.seek(pos)
-    return le,fl,nLevel,root_level
-
-
-def read_particles(file,Nrow):
-    words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
-    real_size = 4 # for file_particle_data; not always true?
-    np_per_page = Nrow**2 # defined in ART a_setup.h
-    num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
-
-    f = np.fromfile(file, dtype='>f4').astype('float32') # direct access
-    pages = np.vsplit(np.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
-    return data[:,0:3],data[:,3:]
-
-def read_stars(file):
-    fh = open(file,'rb')
-    tdum,adum   = _read_frecord(fh,'>d')
-    nstars      = _read_frecord(fh,'>i')
-    ws_old, ws_oldi = _read_frecord(fh,'>d')
-    mass    = _read_frecord(fh,'>f') 
-    imass   = _read_frecord(fh,'>f') 
-    tbirth  = _read_frecord(fh,'>f')
-    metallicity1 = metallicity2 = 0.0 #defaults if the file lacks metallicity records
-    if fh.tell() < os.path.getsize(file):
-        metallicity1 = _read_frecord(fh,'>f') 
-    if fh.tell() < os.path.getsize(file):
-        metallicity2 = _read_frecord(fh,'>f')     
-    assert fh.tell() == os.path.getsize(file)
-    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
-            ws_old,ws_oldi,tdum,adum
-
-def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
-    f.seek(level_child_offsets[level])
-    nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
-    ioctch = np.zeros(nLevel,dtype='uint8')
-    idc = np.zeros(nLevel,dtype='int32')
-    
-    chunk = long(1e6)
-    left = nLevel
-    width = nhydro_vars+6
-    a,b=0,0
-    while left > 0:
-        chunk = min(chunk,left)
-        b += chunk
-        arr = np.fromfile(f, dtype='>i', count=chunk*width)
-        arr = arr.reshape((width, chunk), order="F")
-        assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
-        idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined available
-        #zero in the mask means there is refinement available
-        a=b
-        left -= chunk
-    assert left==0
-    return idc,ioctch
-    
-nchem=8+2
-dtyp = np.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
-                ",>%sf4"%(2)+",>i4")
-def _read_art_child(f, level_child_offsets,level,nLevel,field):
-    pos=f.tell()
-    f.seek(level_child_offsets[level])
-    arr = np.fromfile(f, dtype='>f', count=nLevel * 16)
-    arr = arr.reshape((16, nLevel), order="F")
-    arr = arr[3:-1,:].astype("float64")
-    f.seek(pos)
-    return arr[field,:]
-
-def _skip_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(s[0], 1)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-
-def _read_frecord(f,fmt):
-    s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    count = s1/np.dtype(fmt).itemsize
-    ss = np.fromfile(f,fmt,count=count)
-    s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    assert s1==s2
-    return ss
-
-
-def _read_record(f,fmt=None):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    ss = f.read(s)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    if fmt is not None:
-        return struct.unpack(fmt, ss)
-    return ss
-
-def _read_record_size(f):
-    pos = f.tell()
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(pos)
-    return s[0]
-
-def _read_struct(f,structure,verbose=False):
-    vals = {}
-    for format,name in structure:
-        size = struct.calcsize(format)
-        (val,) = struct.unpack(format,f.read(size))
-        vals[name] = val
-        if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
-    return vals
-
-
-
-#All of these functions are to convert from hydro time var to 
-#proper time
-sqrt = np.sqrt
-sign = np.sign
-
-def find_root(f,a,b,tol=1e-6):
-    c = (a+b)/2.0
-    last = -np.inf
-    assert(sign(f(a)) != sign(f(b)))  
-    while np.abs(f(c)-last) > tol:
-        last=f(c)
-        if sign(last)==sign(f(b)):
-            b=c
-        else:
-            a=c
-        c = (a+b)/2.0
-    return c
-
-def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = np.logspace(np.log10(xmin),np.log10(xmax),n)
-    integrand_arr = fintegrand(spacings)
-    val = np.trapz(integrand_arr,dx=np.diff(spacings))
-    return val
-
-def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
-    def f_a2b(x):
-        val = 0.5*sqrt(Om0) / x**3.0
-        val /= sqrt(Om0/x**3.0 +Oml0 +(1.0 - Om0-Oml0)/x**2.0)
-        return val
-    #val, err = si.quad(f_a2b,1,at)
-    val = quad(f_a2b,1,at)
-    return val
-
-def b2a(bt,**kwargs):
-    #converts code time into expansion factor 
-    #if Om0 ==1and OmL == 0 then b2a is (1 / (1-td))**2
-    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
-    f_b2a = lambda at: a2b(at,**kwargs)-bt
-    return find_root(f_b2a,1e-4,1.1)
-    #return so.brenth(f_b2a,1e-4,1.1)
-    #return brent.brent(f_b2a)
-
-def a2t(at,Om0=0.27,Oml0=0.73,h=0.700):
-    integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
-    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
-    current_time = quad(integrand,1e-4,at)
-    #spacings = np.logspace(-5,np.log10(at),1e5)
-    #integrand_arr = integrand(spacings)
-    #current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-    current_time *= 9.779/h
-    return current_time
-
-def b2t(tb,n = 1e2,logger=None,**kwargs):
-    tb = np.array(tb)
-    #after np.array, a scalar input shows up as a 0-d array
-    if tb.shape == ():
-        return a2t(b2a(tb))
-    if len(tb) < n: n= len(tb)
-    age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
-    age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*np.logspace(np.log10(-tb.min()),
-                          np.log10(-tb.max()),n)
-    ages = []
-    for i,tbi in enumerate(tbs):
-        ages.append(a2t(b2a(tbi)))
-        if logger: logger(i)
-    ages = np.array(ages)
-    fb2t = np.interp(tb,tbs,ages)
-    #fb2t = interp1d(tbs,ages)
-    return fb2t
-
-def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
-    #stars are formed in lumps; spread out the ages linearly
-    da= np.diff(ages)
-    assert np.all(da<=0)
-    #ages should always be decreasing, and ordered so
-    agesd = np.zeros(ages.shape)
-    idx, = np.where(da<0)
-    idx+=1 #mark the right edges
-    #spread this age evenly out to the next age
-    lidx=0
-    lage=0
-    for i in idx:
-        n = i-lidx #n stars affected
-        rage = ages[i]
-        lage = max(rage-spread,0.0)
-        agesd[lidx:i]=np.linspace(lage,rage,n)
-        lidx=i
-        #lage=rage
-        if logger: logger(i)
-    #we didn't get the last iter
-    i=ages.shape[0]-1
-    n = i-lidx #n stars affected
-    rage = ages[i]
-    lage = max(rage-spread,0.0)
-    agesd[lidx:i]=np.linspace(lage,rage,n)
-    return agesd

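Several of the helpers above (_skip_record, _read_frecord, _read_record, _read_record_size) deal with Fortran unformatted records, in which every payload is framed by a 4-byte length marker before and after it. A minimal sketch of reading one such record, assuming big-endian markers as in the ART files:

    import struct

    def read_fortran_record(f):
        # Layout of one record: [int32 length][payload][int32 length]
        (n1,) = struct.unpack('>i', f.read(4))
        payload = f.read(n1)
        (n2,) = struct.unpack('>i', f.read(4))
        assert n1 == n2  # the leading and trailing markers must agree
        return payload

Skipping a record is the same dance without keeping the payload, which is exactly what _skip_record does with f.seek.
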
diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os, sys, os.path
-
-def configuration(parent_package='',top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('art',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
-    return config

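The a2b/b2a/a2t/b2t helpers removed from io.py above convert the code's hydro time variable to proper time by numerically inverting an expansion-factor integral, and the inversion is a plain bisection. A toy sketch of that root-finding step, using a simple monotonic function in place of the cosmological integrand:

    import numpy as np

    def find_root(f, a, b, tol=1e-6):
        # Bisection on [a, b], stopping when f(c) changes by less than tol
        assert np.sign(f(a)) != np.sign(f(b))
        c = (a + b) / 2.0
        last = -np.inf
        while np.abs(f(c) - last) > tol:
            last = f(c)
            if np.sign(last) == np.sign(f(b)):
                b = c
            else:
                a = c
            c = (a + b) / 2.0
        return c

    print find_root(lambda x: x**3 - 2.0, 0.0, 2.0)  # ~1.2599, the cube root of 2
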
diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/ramses/__init__.py
--- a/yt/frontends/ramses/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.ramses
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r d127375315894f9c038d2d80ede31bd5d12bd8b0 -r c9bd63af62a44512661a8f7f0761507013783266 yt/frontends/ramses/api.py
--- a/yt/frontends/ramses/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.ramses
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      RAMSESGrid, \
-      RAMSESHierarchy, \
-      RAMSESStaticOutput
-
-from .fields import \
-      RAMSESFieldInfo, \
-      add_ramses_field
-
-from .io import \
-      IOHandlerRAMSES

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/4aede64dc503/
Changeset:   4aede64dc503
Branch:      yt
User:        MatthewTurk
Date:        2013-10-31 12:56:36
Summary:     Merged in jzuhone/yt (pull request #628)

Moving particle_trajectories to analysis_modules
Affected #:  9 files

diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,5 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectories

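With this merge the class is exposed through the top-level analysis_modules API under its new name. A minimal usage sketch in the style of the docstring examples (my_fns and indices are placeholders for a time-sorted filename list and an integer particle-index array):

>>> from yt.analysis_modules.api import ParticleTrajectories
>>> trajs = ParticleTrajectories(my_fns, indices)
>>> for traj in trajs:
>>>     print traj["particle_index"], traj["particle_time"][-1]
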
diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories

diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectories object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time, only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g., leaving the simulation domain or being
+    actively destroyed), the whole trajectory collection of which it is a
+    member must end at or before that particle's last timestep. This is a
+    limitation we hope to lift at some point in the future.
+    """
+    def __init__(self, filenames, indices, fields=None):
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                raise IndexError("Not all requested particle ids are "
+                                 "contained in this dataset!")
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if key == "particle_time":
+            return self.times
+        if not self.field_data.has_key(key):
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if field not in self.field_data:
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if field not in self.field_data:
+            particles = np.empty(0)
+            step = 0
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros(self.num_indices)
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+        fields = sorted(self.field_data.keys())
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = sorted(self.field_data.keys())
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields) + "\n"
+        template_str = "%g\t" * num_fields + "%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            with open(filename_base + "_%d.dat" % self.indices[ix], "w") as fid:
+                fid.writelines(outlines)
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and one 2D array per
+        field.
+
+        Parameters
+        ----------
+        filename : string
+            The output filename for the HDF5 file.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = sorted(self.field_data.keys())
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset(field, data=self[field])
+        fid.close()
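
The masking and sorting step in __init__ above is easiest to see with a
toy example. A minimal sketch, assuming unique integer particle ids (the
ids and field values here are made up):

    import numpy as np

    indices = np.array([2, 5, 9])               # particles to track
    newtags = np.array([9, 1, 5, 2, 7])         # ids in one output
    vals = np.array([90., 10., 50., 20., 70.])  # a field, same order as ids

    mask = np.in1d(newtags, indices, assume_unique=True)
    sorts = np.argsort(newtags[mask])
    print newtags[mask][sorts]   # [2 5 9], i.e. sorted(indices)
    print vals[mask][sorts]      # [ 20.  50.  90.]

Since the same mask/sort recipe is applied to every output, row i of
every field array refers to particle indices[i] at every timestep.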

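On the output side, write_out produces one <filename_base>_<index>.dat
file per particle (a "# particle_time\t<fields...>" header line followed
by one tab-separated row per timestep), while write_out_h5 puts
everything in a single file. A short sketch of reading that HDF5 file
back, assuming the default position fields were loaded; the filename
follows the docstring example:

    import h5py

    f = h5py.File("orbit_trajectories", "r")
    indices = f["particle_indices"][:]    # shape (num_indices,)
    times = f["particle_time"][:]         # shape (num_steps,)
    x = f["particle_position_x"][:]       # shape (num_indices, num_steps)
    f.close()
    # Row i of x is the x-trajectory of particle indices[i],
    # sampled at the times above.
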
diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config
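
For this subpackage to be built, the parent yt/analysis_modules/setup.py
presumably gains a matching add_subpackage call; that edit is not shown
here, so the line below is only a guess at the usual numpy.distutils
convention:

    # Hypothetical addition to yt/analysis_modules/setup.py
    config.add_subpackage("particle_trajectories")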

diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -1,8 +1,5 @@
 """
 API for yt.data_objects
-
-
-
 """
 
 #-----------------------------------------------------------------------------
@@ -72,5 +69,3 @@
     add_grad, \
     derived_field
 
-from particle_trajectories import \
-    ParticleTrajectoryCollection
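
With this re-export gone from yt.data_objects.api, user scripts would
import the class from its new analysis-modules location instead, along
the lines of (import path inferred from the file layout in this commit):

    from yt.analysis_modules.particle_trajectories.api import \
        ParticleTrajectoryCollection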

diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/data_objects/particle_trajectories.py
--- a/yt/data_objects/particle_trajectories.py
+++ /dev/null
@@ -1,380 +0,0 @@
-"""
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.data_containers import YTFieldData
-from yt.data_objects.time_series import TimeSeriesData
-from yt.utilities.lib import sample_field_at_positions
-from yt.funcs import *
-
-import numpy as np
-import h5py
-
-class ParticleTrajectoryCollection(object) :
-
-    r"""A collection of particle trajectories in time over a series of
-    parameter files. 
-
-    The ParticleTrajectoryCollection object contains a collection of
-    particle trajectories for a specified set of particle indices. 
-    
-    Parameters
-    ----------
-    filenames : list of strings
-        A time-sorted list of filenames to construct the TimeSeriesData
-        object.
-    indices : array_like
-        An integer array of particle indices whose trajectories we
-        want to track. If they are not sorted they will be sorted.
-    fields : list of strings, optional
-        A set of fields that is retrieved when the trajectory
-        collection is instantiated.
-        Default : None (will default to the fields 'particle_position_x',
-        'particle_position_y', 'particle_position_z')
-
-    Examples
-    ________
-    >>> from yt.mods import *
-    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
-    >>> my_fns.sort()
-    >>> fields = ["particle_position_x", "particle_position_y",
-    >>>           "particle_position_z", "particle_velocity_x",
-    >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> pf = load(my_fns[0])
-    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
-    >>> indices = init_sphere["particle_index"].astype("int")
-    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
-    >>> for t in trajs :
-    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-
-    Notes
-    -----
-    As of this time only particle trajectories that are complete over the
-    set of specified parameter files are supported. If any particle's history
-    ends for some reason (e.g. leaving the simulation domain or being actively
-    destroyed), the whole trajectory collection of which it is a set must end
-    at or before the particle's last timestep. This is a limitation we hope to
-    lift at some point in the future.     
-    """
-    def __init__(self, filenames, indices, fields = None) :
-
-        indices.sort() # Just in case the caller wasn't careful
-        
-        self.field_data = YTFieldData()
-        self.pfs = TimeSeriesData.from_filenames(filenames)
-        self.masks = []
-        self.sorts = []
-        self.indices = indices
-        self.num_indices = len(indices)
-        self.num_steps = len(filenames)
-        self.times = []
-
-        # Default fields 
-        
-        if fields is None : fields = []
-
-        # Must ALWAYS have these fields
-        
-        fields = fields + ["particle_position_x",
-                           "particle_position_y",
-                           "particle_position_z"]
-
-        """
-        The following loops through the parameter files
-        and performs two tasks. The first is to isolate
-        the particles with the correct indices, and the
-        second is to create a sorted list of these particles.
-        We also make a list of the current time from each file. 
-        Right now, the code assumes (and checks for) the
-        particle indices existing in each file, a limitation I
-        would like to lift at some point since some codes
-        (e.g., FLASH) destroy particles leaving the domain.
-        """
-        
-        for pf in self.pfs :
-            dd = pf.h.all_data()
-            newtags = dd["particle_index"].astype("int")
-            if not np.all(np.in1d(indices, newtags, assume_unique=True)) :
-                print "Not all requested particle ids contained in this file!"
-                raise IndexError
-            mask = np.in1d(newtags, indices, assume_unique=True)
-            sorts = np.argsort(newtags[mask])
-            self.masks.append(mask)            
-            self.sorts.append(sorts)
-            self.times.append(pf.current_time)
-
-        self.times = np.array(self.times)
-
-        # Set up the derived field list and the particle field list
-        # so that if the requested field is a particle field, we'll
-        # just copy the field over, but if the field is a grid field,
-        # we will first copy the field over to the particle positions
-        # and then return the field. 
-
-        self.derived_field_list = self.pfs[0].h.derived_field_list
-        self.particle_fields = [field for field in self.derived_field_list
-                                if self.pfs[0].field_info[field].particle_type]
-
-        # Now instantiate the requested fields 
-        for field in fields :
-
-            self._get_data(field)
-            
-    def has_key(self, key) :
-
-        return (key in self.field_data)
-    
-    def keys(self) :
-
-        return self.field_data.keys()
-
-    def __getitem__(self, key) :
-        """
-        Get the field associated with key,
-        checking to make sure it is a particle field.
-        """
-
-        if not self.field_data.has_key(key) :
-
-            self._get_data(key)
-
-        return self.field_data[key]
-    
-    def __setitem__(self, key, val):
-        """
-        Sets a field to be some other value.
-        """
-        self.field_data[key] = val
-                        
-    def __delitem__(self, key) :
-        """
-        Delete the field from the trajectory
-        """
-        del self.field_data[key]
-
-    def __iter__(self) :
-
-        """
-        This iterates over the trajectories for
-        the different particles, returning dicts
-        of fields for each trajectory
-        """
-        for idx in xrange(self.num_indices) :
-            traj = {}
-            traj["particle_index"] = self.indices[idx]
-            traj["particle_time"] = self.times
-            for field in self.field_data.keys() :
-                traj[field] = self[field][idx,:]
-            yield traj
-            
-    def __len__(self) :
-
-        """
-        The number of individual trajectories
-        """
-        return self.num_indices
-
-    def add_fields(self, fields) :
-
-        """
-        Add a list of fields to an existing trajectory
-
-        Parameters
-        ----------
-        fields : list of strings
-            A list of fields to be added to the current trajectory
-            collection.
-
-        Examples
-        ________
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
-        """
-        
-        for field in fields :
-
-            if not self.field_data.has_key(field):
-
-                self._get_data(field)
-                
-    def _get_data(self, field) :
-
-        """
-        Get a field to include in the trajectory collection.
-        The trajectory collection itself is a dict of 2D numpy arrays,
-        with shape (num_indices, num_steps)
-        """
-        
-        if not self.field_data.has_key(field):
-            
-            particles = np.empty((0))
-
-            step = int(0)
-                
-            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts) :
-                                    
-                if field in self.particle_fields :
-
-                    # This is easy... just get the particle fields
-
-                    dd = pf.h.all_data()
-                    pfield = dd[field][mask]
-                    particles = np.append(particles, pfield[sort])
-
-                else :
-
-                    # This is hard... must loop over grids
-
-                    pfield = np.zeros((self.num_indices))
-                    x = self["particle_position_x"][:,step]
-                    y = self["particle_position_y"][:,step]
-                    z = self["particle_position_z"][:,step]
-
-                    leaf_grids = [g for g in pf.h.grids if len(g.Children) == 0]
-                        
-                    for grid in leaf_grids :
-
-                        pfield += sample_field_at_positions(grid[field],
-                                                            grid.LeftEdge,
-                                                            grid.RightEdge,
-                                                            x, y, z)
-
-                    particles = np.append(particles, pfield)
-
-                step += 1
-                
-            self[field] = particles.reshape(self.num_steps,
-                                            self.num_indices).transpose()
-
-        return self.field_data[field]
-
-    def trajectory_from_index(self, index) :
-
-        """
-        Retrieve a single trajectory corresponding to a specific particle
-        index
-
-        Parameters
-        ----------
-        index : int
-            This defines which particle trajectory from the
-            ParticleTrajectoryCollection object will be returned.
-
-        Returns
-        -------
-        A dictionary corresponding to the particle's trajectory and the
-        fields along that trajectory
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> import matplotlib.pylab as pl
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> traj = trajs.trajectory_from_index(indices[0])
-        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
-        >>> pl.savefig("orbit")
-        """
-        
-        mask = np.in1d(self.indices, (index,), assume_unique=True)
-
-        if not np.any(mask) :
-            print "The particle index %d is not in the list!" % (index)
-            raise IndexError
-
-        fields = [field for field in sorted(self.field_data.keys())]
-                                
-        traj = {}
-
-        traj["particle_time"] = self.times
-        traj["particle_index"] = index
-        
-        for field in fields :
-
-            traj[field] = self[field][mask,:][0]
-
-        return traj
-
-    def write_out(self, filename_base) :
-
-        """
-        Write out particle trajectories to tab-separated ASCII files (one
-        for each trajectory) with the field names in the file header. Each
-        file is named with a basename and the index number.
-
-        Parameters
-        ----------
-        filename_base : string
-            The prefix for the outputted ASCII files.
-
-        Examples
-        --------
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> trajs.write_out("orbit_trajectory")       
-        """
-        
-        fields = [field for field in sorted(self.field_data.keys())]
-
-        num_fields = len(fields)
-
-        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
-        
-        template_str = "%g\t"*num_fields+"%g\n"
-        
-        for ix in xrange(self.num_indices) :
-
-            outlines = [first_str]
-
-            for it in xrange(self.num_steps) :
-                outlines.append(template_str %
-                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
-            
-            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
-            fid.writelines(outlines)
-            fid.close()
-            del fid
-            
-    def write_out_h5(self, filename) :
-
-        """
-        Write out all the particle trajectories to a single HDF5 file
-        that contains the indices, the times, and the 2D array for each
-        field individually
-
-        Parameters
-        ----------
-
-        filename : string
-            The output filename for the HDF5 file
-
-        Examples
-        --------
-
-        >>> from yt.mods import *
-        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
-        >>> trajs.write_out_h5("orbit_trajectories")                
-        """
-        
-        fid = h5py.File(filename, "w")
-
-        fields = [field for field in sorted(self.field_data.keys())]
-        
-        fid.create_dataset("particle_indices", dtype=np.int32,
-                           data=self.indices)
-        fid.create_dataset("particle_time", data=self.times)
-        
-        for field in fields :
-
-            fid.create_dataset("%s" % field, data=self[field])
-                        
-        fid.close()
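
For reference, the deleted grid-field path above summed per-grid
contributions from sample_field_at_positions over all leaf grids. A
rough pure-NumPy stand-in for what such a routine plausibly does
(nearest-cell sampling, zero outside the grid; the real Cython
implementation in yt.utilities.lib may differ):

    import numpy as np

    def sample_at_positions(data, left_edge, right_edge, x, y, z):
        # Nearest-cell lookup; positions outside this grid contribute
        # zero, which is why summing over leaf grids covers the domain.
        dims = np.array(data.shape)
        dx = (right_edge - left_edge) / dims
        pos = np.column_stack([x, y, z])
        out = np.zeros(len(x))
        inside = np.all((pos >= left_edge) & (pos < right_edge), axis=1)
        ijk = ((pos[inside] - left_edge) / dx).astype("int")
        out[inside] = data[ijk[:, 0], ijk[:, 1], ijk[:, 2]]
        return out

The new module instead calls CICSample_3 with one layer of ghost zones,
so grid fields are cloud-in-cell interpolated to the particle positions
rather than taken from the nearest cell.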

diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ParticleTrajectoryCollection, ImageArray
+    ImageArray
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info

diff -r 6953b5aab5c9524c6c2979a05fcba3b1445444e8 -r 4aede64dc503166025f302a8419bcaa19837ccfe yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -131,12 +131,21 @@
     le2 = leftEdge[2] 
                                                     
     for n in range(npositions):
+        
+        # Compute the position of the central cell
 
-        # Compute the position of the central cell
-        xpos = fclip((posx[n] - le0)*fact, 0.5001, edge0)
-        ypos = fclip((posy[n] - le1)*fact, 0.5001, edge1)
-        zpos = fclip((posz[n] - le2)*fact, 0.5001, edge2)
+        xpos = (posx[n]-le0)*fact
+        ypos = (posy[n]-le1)*fact
+        zpos = (posz[n]-le2)*fact
+        
+        if (xpos < -1 or ypos < -1 or zpos < -1 or
+            xpos >= edge0+1.5001 or ypos >= edge1+1.5001 or zpos >= edge2+1.5001):
+            continue
 
+        xpos = fclip(xpos, 0.5001, edge0)
+        ypos = fclip(ypos, 0.5001, edge1)
+        zpos = fclip(zpos, 0.5001, edge2)
+        
         i1  = <int> (xpos + 0.5)
         j1  = <int> (ypos + 0.5)
         k1  = <int> (zpos + 0.5)
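
The net effect of the new check: positions more than about one cell
outside the grid are skipped outright instead of being clipped onto the
boundary, so stray particles no longer pick up edge-cell values. Per
coordinate the logic is roughly (a plain-Python sketch; fclip is the
clamp used above):

    def cic_coord(pos, left_edge, fact, edge):
        xpos = (pos - left_edge) * fact        # position in cell units
        if xpos < -1 or xpos >= edge + 1.5001:
            return None                        # reject this particle
        return min(max(xpos, 0.5001), edge)    # fclip(xpos, 0.5001, edge)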

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


