[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Mon Sep 29 07:33:27 PDT 2014


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/fbc488db5168/
Changeset:   fbc488db5168
Branch:      yt
User:        drudd
Date:        2014-09-19 21:49:30+00:00
Summary:     Add to the long list of ways to check for valid fields
Affected #:  1 file

diff -r b3c9e119a5b61c46aad9467107d27dc0fb6b5b50 -r fbc488db516810854aa7f76d1dacc146d90db808 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -496,7 +496,8 @@
 
                 # really ugly check to ensure that this field really does exist somewhere,
                 # in some naming convention, before returning it as a possible field type
-                if (ftype,fname) not in self.ds.field_list and \
+                if (ftype,fname) not in self.ds.field_info and \
+                        (ftype,fname) not in self.ds.field_list and \
                         fname not in self.ds.field_list and \
                         (ftype,fname) not in self.ds.derived_field_list and \
                         fname not in self.ds.derived_field_list and \
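
The guard above now consults ds.field_info in addition to the field lists. A minimal sketch of the full membership test, assuming any loaded yt dataset ds (the helper name is illustrative, not part of the commit):

    # Sketch only: mirrors the (negated) validity check in
    # data_containers.py; True means the field is known somewhere.
    def field_exists(ds, ftype, fname):
        return ((ftype, fname) in ds.field_info or        # newly added check
                (ftype, fname) in ds.field_list or
                fname in ds.field_list or
                (ftype, fname) in ds.derived_field_list or
                fname in ds.derived_field_list)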


https://bitbucket.org/yt_analysis/yt/commits/d4c59a2dda36/
Changeset:   d4c59a2dda36
Branch:      yt
User:        drudd
Date:        2014-09-24 19:06:16+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  29 files

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -72,6 +72,8 @@
 * Quantities
 * Callbacks
 
+A list of all available filters, quantities, and callbacks can be found in 
+:ref:`halo_analysis_ref`.  
 All interaction with this analysis can be performed by importing from 
 halo_analysis.
 
@@ -161,6 +163,18 @@
    # ... Later on in your script
    hc.add_quantity("my_quantity") 
 
+This quantity will then be accessible for functions called later via the 
+*quantities* dictionary that is associated with the halo object.
+
+.. code-block:: python
+
+   def my_new_function(halo):
+       print halo.quantities["my_quantity"]
+   add_callback("print_quantity", my_new_function)
+
+   # ... Anywhere after "my_quantity" has been called
+   hc.add_callback("print_quantity")
+
 Callbacks
 ^^^^^^^^^
 
@@ -178,10 +192,10 @@
    hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may 
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
 be added by using the syntax shown below. If you think that your 
 callback may be of use to the general community, add it to 
-halo_callbacks.py and issue a pull request
+halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
 

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -18,8 +18,8 @@
 
 # use the sphere to calculate radial profiles of gas density
 # weighted by cell volume in terms of the virial radius
-hc.add_callback("profile", x_field="radius",
-                y_fields=[("gas", "overdensity")],
+hc.add_callback("profile", ["radius"],
+                [("gas", "overdensity")],
                 weight_field="cell_volume",
                 accumulation=False,
                 storage="virial_quantities_profiles")
@@ -32,7 +32,8 @@
 field_params = dict(virial_radius=('quantity', 'radius_200'))
 hc.add_callback('sphere', radius_field='radius_200', factor=5,
                 field_parameters=field_params)
-hc.add_callback('profile', 'virial_radius', [('gas', 'temperature')],
+hc.add_callback('profile', ['virial_radius'], 
+                [('gas', 'temperature')],
                 storage='virial_profiles',
                 weight_field='cell_mass',
                 accumulation=False, output_dir='profiles')
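
Both hunks above adapt the recipe to the profile callback's new positional signature, which takes a list of binning fields instead of the old x_field/y_fields keywords. Since the binning fields are now a list, multi-dimensional profiles become possible; a hypothetical 2D call, assuming hc is the HaloCatalog built earlier in this recipe:

    # Illustrative only: bin in radius and temperature simultaneously.
    hc.add_callback("profile", ["radius", ("gas", "temperature")],
                    [("gas", "cell_mass")],
                    weight_field=None, storage="mass_maps")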

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -407,6 +407,8 @@
    ~yt.data_objects.profiles.Profile3D
    ~yt.data_objects.profiles.create_profile
 
+.. _halo_analysis_ref:
+
 Halo Analysis
 ^^^^^^^^^^^^^
 
@@ -419,21 +421,22 @@
    ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog
    ~yt.analysis_modules.halo_analysis.halo_finding_methods.HaloFindingMethod
    ~yt.analysis_modules.halo_analysis.halo_callbacks.HaloCallback
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.delete_attribute
    ~yt.analysis_modules.halo_analysis.halo_callbacks.halo_sphere
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_field_max_recenter
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_bulk_velocity
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.iterative_center_of_mass
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.load_profiles
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.phase_plot
    ~yt.analysis_modules.halo_analysis.halo_callbacks.profile
    ~yt.analysis_modules.halo_analysis.halo_callbacks.save_profiles
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.load_profiles
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_bulk_velocity
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_field_max_recenter
    ~yt.analysis_modules.halo_analysis.halo_callbacks.virial_quantities
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.phase_plot
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.delete_attribute
    ~yt.analysis_modules.halo_analysis.halo_filters.HaloFilter
+   ~yt.analysis_modules.halo_analysis.halo_filters.not_subhalo
    ~yt.analysis_modules.halo_analysis.halo_filters.quantity_value
-   ~yt.analysis_modules.halo_analysis.halo_filters.not_subhalo
    ~yt.analysis_modules.halo_analysis.halo_quantities.HaloQuantity
+   ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
    ~yt.analysis_modules.halo_analysis.halo_quantities.center_of_mass
-   ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
 
 Halo Finding
 ^^^^^^^^^^^^

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -24,7 +24,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Chombo                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
+| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -18,7 +18,7 @@
 import os
 
 from yt.data_objects.profiles import \
-     Profile1D
+     create_profile
 from yt.units.yt_array import \
      YTArray, YTQuantity
 from yt.utilities.exceptions import \
@@ -147,11 +147,11 @@
 
 add_callback("sphere_bulk_velocity", sphere_bulk_velocity)
 
-def profile(halo, x_field, y_fields, x_bins=32, x_range=None, x_log=True,
-            weight_field="cell_mass", accumulation=False, storage="profiles",
-            output_dir="."):
+def profile(halo, bin_fields, profile_fields, n_bins=32, extrema=None, logs=None,
+            weight_field="cell_mass", accumulation=False, fractional=False,
+            storage="profiles", output_dir="."):
     r"""
-    Create 1d profiles.
+    Create 1D, 2D, or 3D profiles of a halo.
 
     Store profile data in a dictionary associated with the halo object.
 
@@ -159,26 +159,37 @@
     ----------
     halo : Halo object
         The Halo object to be provided by the HaloCatalog.
-    x_field : string
-        The binning field for the profile.
-    y_fields : string or list of strings
+    bin_fields : list of strings
+        The binning fields for the profile.
+    profile_fields : string or list of strings
         The fields to be profiled.
-    x_bins : int
-        The number of bins in the profile.
-        Default: 32
-    x_range : (float, float)
-        The range of the x_field.  If None, the extrema are used.
-        Default: None
-    x_log : bool
-        Flag for logarithmmic binning.
-        Default: True
+    n_bins : int or list of ints
+        The number of bins in each dimension.  If None, 32 bins are
+        used for each bin field.
+        Default: 32.
+    extrema : dict of min, max tuples
+        Minimum and maximum values of the bin_fields for the profiles.
+        The keys correspond to the field names. Defaults to the extrema
+        of the bin_fields of the dataset. If a units dict is provided, extrema
+        are understood to be in the units specified in the dictionary.
+    logs : dict of boolean values
+        Whether or not to log the bin_fields for the profiles.
+        The keys correspond to the field names. Defaults to the take_log
+        attribute of the field.
     weight_field : string
         Weight field for profiling.
         Default : "cell_mass"
-    accumulation : bool
-        If True, profile data is a cumulative sum.
-        Default : False
+    accumulation : bool or list of bools
+        If True, the profile values for a bin n are the cumulative sum of
+        all the values from bin 0 to n.  If -True, the sum is reversed so
+        that the value for bin n is the cumulative sum from bin N (total bins)
+        to n.  If the profile is 2D or 3D, a list of values can be given to
+        control the summation in each dimension independently.
+        Default: False.
+    fractional : bool
+        If True, the profile values are divided by the sum of all the profile
+        data, such that the profile represents a probability distribution function.
     storage : string
         Name of the dictionary to store profiles.
         Default: "profiles"
@@ -209,37 +220,18 @@
         output_dir = storage
     output_dir = os.path.join(halo.halo_catalog.output_dir, output_dir)
     
-    if x_range is None:
-        x_range = list(halo.data_object.quantities.extrema(x_field, non_zero=True))
-
-    my_profile = Profile1D(halo.data_object, x_field, x_bins, 
-                           x_range[0], x_range[1], x_log, 
-                           weight_field=weight_field)
-    my_profile.add_fields(ensure_list(y_fields))
-
-    # temporary fix since profiles do not have units at the moment
-    for field in my_profile.field_data:
-        my_profile.field_data[field] = dds.arr(my_profile[field],
-                                               dds.field_info[field].units)
-
-    # accumulate, if necessary
-    if accumulation:
-        used = my_profile.used
-        for field in my_profile.field_data:
-            if weight_field is None:
-                my_profile.field_data[field][used] = \
-                    np.cumsum(my_profile.field_data[field][used])
-            else:
-                my_weight = my_profile.weight
-                my_profile.field_data[field][used] = \
-                  np.cumsum(my_profile.field_data[field][used] * my_weight[used]) / \
-                  np.cumsum(my_weight[used])
+    bin_fields = ensure_list(bin_fields)
+    my_profile = create_profile(halo.data_object, bin_fields, profile_fields, n_bins=n_bins,
+                                extrema=extrema, logs=logs, weight_field=weight_field,
+                                accumulation=accumulation, fractional=fractional)
                   
-    # create storage dictionary
     prof_store = dict([(field, my_profile[field]) \
                        for field in my_profile.field_data])
     prof_store[my_profile.x_field] = my_profile.x
-
+    if len(bin_fields) > 1:
+        prof_store[my_profile.y_field] = my_profile.y
+    if len(bin_fields) > 2:
+        prof_store[my_profile.z_field] = my_profile.z
     if hasattr(halo, storage):
         halo_store = getattr(halo, storage)
         if "used" in halo_store:
@@ -249,6 +241,17 @@
         setattr(halo, storage, halo_store)
     halo_store.update(prof_store)
 
+    if hasattr(my_profile, "variance"):
+        variance_store = dict([(field, my_profile.variance[field]) \
+                           for field in my_profile.variance])
+        variance_storage = "%s_variance" % storage
+        if hasattr(halo, variance_storage):
+            halo_variance_store = getattr(halo, variance_storage)
+        else:
+            halo_variance_store = {}
+            setattr(halo, variance_storage, halo_variance_store)
+        halo_variance_store.update(variance_store)
+
 add_callback("profile", profile)
 
 @parallel_root_only
@@ -287,18 +290,24 @@
     mylog.info("Saving halo %d profile data to %s." %
                (halo.quantities["particle_identifier"], output_file))
 
-    out_file = h5py.File(output_file, "w")
+    fh = h5py.File(output_file, "w")
     my_profile = getattr(halo, storage)
+    profile_group = fh.create_group("profiles")
     for field in my_profile:
         # Don't write code units because we might not know those later.
         if isinstance(my_profile[field], YTArray):
             my_profile[field].convert_to_cgs()
-        dataset = out_file.create_dataset(str(field), data=my_profile[field])
-        units = ""
-        if isinstance(my_profile[field], YTArray):
-            units = str(my_profile[field].units)
-        dataset.attrs["units"] = units
-    out_file.close()
+        _yt_array_hdf5(profile_group, str(field), my_profile[field])
+    variance_storage = "%s_variance" % storage
+    if hasattr(halo, variance_storage):
+        my_profile = getattr(halo, variance_storage)
+        variance_group = fh.create_group("variance")
+        for field in my_profile:
+            # Don't write code units because we might not know those later.
+            if isinstance(my_profile[field], YTArray):
+                my_profile[field].convert_to_cgs()
+            _yt_array_hdf5(variance_group, str(field), my_profile[field])
+    fh.close()
 
 add_callback("save_profiles", save_profiles)
 
@@ -339,20 +348,33 @@
     mylog.info("Loading halo %d profile data from %s." %
                (halo.quantities["particle_identifier"], output_file))
 
-    out_file = h5py.File(output_file, "r")
+    fh = h5py.File(output_file, "r")
+    if fields is None:
+        profile_fields = fh["profiles"].keys()
+    else:
+        profile_fields = fields
     my_profile = {}
-    if fields is None:
-        fields = out_file.keys()
-    for field in fields:
-        if field not in out_file:
+    my_group = fh["profiles"]
+    for field in profile_fields:
+        if field not in my_group:
             raise RuntimeError("%s field not present in %s." % (field, output_file))
-        units = ""
-        if "units" in out_file[field].attrs:
-            units = out_file[field].attrs["units"]
-        if units == "dimensionless": units = ""
-        my_profile[field] = halo.halo_catalog.halos_ds.arr(out_file[field].value, units)
-    out_file.close()
+        my_profile[field] = _hdf5_yt_array(my_group, field,
+                                           ds=halo.halo_catalog.halos_ds)
     setattr(halo, storage, my_profile)
+    
+    if "variance" in fh:
+        my_variance = {}
+        my_group = fh["variance"]
+        if fields is None:
+            profile_fields = my_group.keys()
+        for field in profile_fields:
+            if field not in my_group:
+                raise RuntimeError("%s field not present in %s." % (field, output_file))
+            my_variance[field] = _hdf5_yt_array(my_group, field,
+                                                ds=halo.halo_catalog.halos_ds)
+        setattr(halo, "%s_variance" % storage, my_variance)
+        
+    fh.close()
 
 add_callback("load_profiles", load_profiles)
 
@@ -382,6 +404,8 @@
                halo.quantities["particle_identifier"])
 
     fields = ensure_list(fields)
+    fields = [halo.halo_catalog.data_source._determine_fields(field)[0]
+              for field in fields]
     
     dds = halo.halo_catalog.data_ds
     profile_data = getattr(halo, profile_storage)
@@ -497,3 +521,74 @@
         delattr(halo, attribute)
 
 add_callback("delete_attribute", delete_attribute)
+
+def iterative_center_of_mass(halo, radius_field="virial_radius", inner_ratio=0.1, step_ratio=0.9,
+                             units="pc"):
+    r"""
+    Adjust halo position by iteratively recalculating the center of mass while 
+    decreasing the radius.
+
+    Parameters
+    ----------
+    halo : Halo object
+        The Halo object to be provided by the HaloCatalog.
+    radius_field : string
+        The halo quantity to be used as the radius for the sphere.
+        Default: "virial_radius"
+    inner_ratio : float
+        The ratio of the smallest sphere radius used for calculating the center of 
+        mass to the initial radius.  The sphere radius is reduced and center of mass 
+        recalculated until the sphere has reached this size.
+        Default: 0.1
+    step_ratio : float
+        The multiplicative factor used to reduce the radius of the sphere after the 
+        center of mass is calculated.
+        Default: 0.9
+    units : str
+        The units for printing out the distance between the initial and final centers.
+        Default : "pc"
+        
+    """
+    if inner_ratio <= 0.0 or inner_ratio >= 1.0:
+        raise RuntimeError("iterative_center_of_mass: inner_ratio must be between 0 and 1.")
+    if step_ratio <= 0.0 or step_ratio >= 1.0:
+        raise RuntimeError("iterative_center_of_mass: step_ratio must be between 0 and 1.")
+
+    center_orig = halo.halo_catalog.data_ds.arr([halo.quantities["particle_position_%s" % axis]
+                                                 for axis in "xyz"])
+    sphere = halo.halo_catalog.data_ds.sphere(center_orig, halo.quantities[radius_field])
+
+    while sphere.radius > inner_ratio * halo.quantities[radius_field]:
+        new_center = sphere.quantities.center_of_mass(use_gas=True, use_particles=True)
+        sphere = sphere.ds.sphere(new_center, step_ratio * sphere.radius)
+
+    distance = periodic_distance(center_orig.in_units("code_length").to_ndarray(),
+                                 new_center.in_units("code_length").to_ndarray())
+    distance = halo.halo_catalog.data_ds.quan(distance, "code_length")
+    mylog.info("Recentering halo %d %f %s away." %
+               (halo.quantities["particle_identifier"],
+                distance.in_units(units), units))
+
+    for i, axis in enumerate("xyz"):
+        halo.quantities["particle_position_%s" % axis] = sphere.center[i]
+    del sphere
+    
+add_callback("iterative_center_of_mass", iterative_center_of_mass)
+
+def _yt_array_hdf5(fh, fieldname, data):
+    dataset = fh.create_dataset(fieldname, data=data)
+    units = ""
+    if isinstance(data, YTArray):
+        units = str(data.units)
+    dataset.attrs["units"] = units
+
+def _hdf5_yt_array(fh, fieldname, ds=None):
+    if ds is None:
+        new_arr = YTArray
+    else:
+        new_arr = ds.arr
+    units = ""
+    if "units" in fh[fieldname].attrs:
+        units = fh[fieldname].attrs["units"]
+    if units == "dimensionless": units = ""
+    return new_arr(fh[fieldname].value, units)
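
The iterative_center_of_mass callback registered above recenters a halo by repeatedly shrinking a sphere and recomputing its center of mass. An illustrative attachment, assuming a HaloCatalog hc whose halo quantities include "virial_radius" (the parameter values are arbitrary examples within the required (0, 1) ranges):

    # Shrink to 5% of the virial radius in 10% steps; report the
    # recentering distance in kpc.
    hc.add_callback("iterative_center_of_mass",
                    radius_field="virial_radius",
                    inner_ratio=0.05, step_ratio=0.9, units="kpc")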

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -364,7 +364,6 @@
         if self.halos_ds is None:
             # Find the halos and make a dataset of them
             self.halos_ds = self.finder_method(self.data_ds)
-            self.halos_ds.index
             if self.halos_ds is None:
                 mylog.warning('No halos were found for {0}'.format(\
                         self.data_ds.basename))
@@ -373,6 +372,7 @@
                     self.save_catalog()
                     self.halos_ds = None
                 return
+            self.halos_ds.index
 
             # Assign ds and data sources appropriately
             self.data_source = self.halos_ds.all_data()

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -799,12 +799,14 @@
     def _get_pw(self, fields, center, width, origin, plot_type):
         from yt.visualization.plot_window import \
             get_window_parameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import FixedResolutionBuffer as frb
+        from yt.visualization.fixed_resolution import \
+            FixedResolutionBuffer as frb
         axis = self.axis
         skip = self._key_fields
         skip += list(set(frb._exclude_fields).difference(set(self._key_fields)))
-        self.fields = ensure_list(fields) + \
-            [k for k in self.field_data if k not in skip]
+        self.fields = [k for k in self.field_data if k not in skip]
+        if fields is not None:
+            self.fields = ensure_list(fields) + self.fields
         (bounds, center, display_center) = \
             get_window_parameters(axis, center, width, self.ds)
         pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,
@@ -1190,16 +1192,15 @@
         return self._particle_handler
 
 
-    def volume(self, unit = "unitary"):
+    def volume(self):
         """
-        Return the volume of the data container in units *unit*.
+        Return the volume of the data container.
         This is found by adding up the volume of the cells with centers
         in the container, rather than using the geometric shape of
         the container, so this may vary very slightly
         from what might be expected from the geometric volume.
         """
-        return self.quantities["TotalQuantity"]("CellVolume")[0] * \
-            (self.ds[unit] / self.ds['cm']) ** 3.0
+        return self.quantities.total_quantity(("index", "cell_volume"))
 
 # Many of these items are set up specifically to ensure that
 # we are not breaking old pickle files.  This means we must only call the
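
Callers of volume() should note that it now returns a unit-aware YTQuantity rather than a float scaled by the removed unit keyword. A hedged migration sketch, assuming sp is any yt data container such as a sphere:

    # Old (removed): vol = sp.volume(unit="mpc")   # plain float
    # New: convert the returned YTQuantity explicitly.
    vol = sp.volume()
    print vol.in_units("Mpc**3")   # Python 2 print, as elsewhere in yt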

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -955,7 +955,7 @@
     x_min : float
         The minimum value of the x profile field.
     x_max : float
-        The maximum value of hte x profile field.
+        The maximum value of the x profile field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -54,12 +54,13 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
-                pw = proj.to_pw(fields='density')
-                for p in pw.plots.values():
-                    tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
-                    os.close(tmpfd)
-                    p.save(name=tmpname)
-                    fns.append(tmpname)
+                plots = [proj.to_pw(fields='density'), proj.to_pw()]
+                for pw in plots:
+                    for p in pw.plots.values():
+                        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                        os.close(tmpfd)
+                        p.save(name=tmpname)
+                        fns.append(tmpname)
                 frb = proj.to_frb((1.0, 'unitary'), 64)
                 for proj_field in ['ones', 'density']:
                     fi = ds._get_field_info(proj_field)

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -195,7 +195,7 @@
 
     def get_label(self, projected=False):
         """
-        Return a data label for the given field, inluding units.
+        Return a data label for the given field, including units.
         """
         name = self.name[1]
         if self.display_name is not None:

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -314,6 +314,7 @@
                 if type(e) != YTFieldNotFound:
                     mylog.debug("Raises %s during field %s detection.",
                                 str(type(e)), field)
+                self.pop(field)
                 continue
             # This next bit checks that we can't somehow generate everything.
             # We also manually update the 'requested' attribute

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -43,18 +43,14 @@
                            ftype = "gas", slice_info = None,
                            validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _magnitude(field, data):
-        mag  = data[xn] * data[xn]
-        mag += data[yn] * data[yn]
-        mag += data[zn] * data[zn]
+        fn = field_components[0]
+        mag = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            mag += data[fn] * data[fn]
         return np.sqrt(mag)
 
     registry.add_field((ftype, "%s_magnitude" % basename),
@@ -65,18 +61,14 @@
                          ftype = "gas", slice_info = None,
                          validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _squared(field, data):
-        squared  = data[xn] * data[xn]
-        squared += data[yn] * data[yn]
-        squared += data[zn] * data[zn]
+        fn = field_components[0]
+        squared  = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            squared += data[fn] * data[fn]
         return squared
 
     registry.add_field((ftype, "%s_squared" % basename),
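
Both refactors above replace the zero-padded y/z component lookups with a loop bounded by registry.ds.dimensionality, so 1D and 2D datasets never request components that do not exist. The arithmetic is unchanged; a plain-NumPy sketch of the accumulation pattern, with made-up arrays standing in for the component fields:

    import numpy as np

    # Stand-ins for data[(ftype, "velocity_x")] etc. on a 2D dataset.
    components = [np.array([3.0]), np.array([4.0])]
    mag = components[0] * components[0]
    for c in components[1:]:           # loop runs over the dimensionality
        mag += c * c
    mag = np.sqrt(mag)                 # -> array([ 5.])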

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -638,6 +638,29 @@
         self._read_particles()
         # self.io = IOHandlerOrion
 
+    def _detect_output_fields(self):
+        # This is all done in _parse_header_file
+        self.field_list = [("boxlib", f) for f in
+                           self.dataset._field_list]
+        self.field_indexes = dict((f[1], i)
+                                  for i, f in enumerate(self.field_list))
+        # There are times when field_list may change.  We copy it here to
+        # avoid that possibility.
+        self.field_order = [f for f in self.field_list]
+
+        # look for particle fields
+        self.particle_filename = None
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.ds.output_dir, particle_filename)
+            if os.path.exists(fn):
+                self.particle_filename = fn
+
+        if self.particle_filename is None:
+            return
+
+        pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
+        self.field_list.extend(pfield_list)
+
     def _read_particles(self):
         """
         reads in particles and assigns them to grids. Will search for
@@ -649,9 +672,8 @@
         """
         self.grid_particle_count = np.zeros(len(self.grids))
 
-        for particle_filename in ["StarParticles", "SinkParticles"]:
-            fn = os.path.join(self.ds.output_dir, particle_filename)
-            if os.path.exists(fn): self._read_particle_file(fn)
+        if self.particle_filename is not None:
+            self._read_particle_file(self.particle_filename)
 
     def _read_particle_file(self, fn):
         """actually reads the orion particle data file itself.
@@ -670,8 +692,8 @@
                 # copied from object_finding_mixin.py
                 mask = np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                    np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
                 ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
@@ -915,7 +937,7 @@
         if v in ("F", "T"):
             pcast = bool
     else:
-        syms = (".", "D+", "D-", "E+", "E-")
+        syms = (".", "D+", "D-", "E+", "E-", "E", "D")
         if any(sym in v.upper() for sym in syms for v in vals.split()):
             pcast = float
         else:
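
Adding bare "E" and "D" to syms lets Fortran-style exponents without an explicit sign (e.g. "1E5" or "2D0") be detected as floats; previously only signed exponents or values containing "." matched. A standalone sketch of the detection rule from the hunk, with hypothetical parameter values:

    syms = (".", "D+", "D-", "E+", "E-", "E", "D")
    vals = "1E5 2D0"
    is_float = any(sym in v.upper() for sym in syms
                   for v in vals.split())
    # is_float -> True; with the old syms tuple this was False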

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -21,6 +21,7 @@
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.funcs import mylog, defaultdict
+from yt.frontends.chombo.io import parse_orion_sinks
 
 class IOHandlerBoxlib(BaseIOHandler):
 
@@ -82,69 +83,59 @@
 class IOHandlerOrion(IOHandlerBoxlib):
     _dataset_type = "orion_native"
 
+    _particle_filename = None
+    @property
+    def particle_filename(self):
+        fn = self.ds.output_dir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = self.ds.output_dir + "/SinkParticles"
+        self._particle_filename = fn
+        return self._particle_filename
+
+    _particle_field_index = None
+    @property
+    def particle_field_index(self):
+
+        index = parse_orion_sinks(self.particle_filename)
+
+        self._particle_field_index = index
+        return self._particle_field_index
+
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+
+            grid = chunks[0].objs[0]
+
+            for ftype, fname in fields:
+                rv[ftype, fname] = self._read_particles(grid, fname)
+
+            return rv
+
+        rv = {f: np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
+
     def _read_particles(self, grid, field): 
         """
         parses the Orion Star Particle text files
-        
+
         """
 
-        fn = grid.ds.fullplotdir + "/StarParticles"
-        if not os.path.exists(fn):
-            fn = grid.ds.fullplotdir + "/SinkParticles"
-
-        # Figure out the format of the particle file
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-        line = lines[1]
-        
-        # The basic fields that all sink particles have
-        index = {'particle_mass': 0,
-                 'particle_position_x': 1,
-                 'particle_position_y': 2,
-                 'particle_position_z': 3,
-                 'particle_momentum_x': 4,
-                 'particle_momentum_y': 5,
-                 'particle_momentum_z': 6,
-                 'particle_angmomen_x': 7,
-                 'particle_angmomen_y': 8,
-                 'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
-
-        if len(line.strip().split()) == 11:
-            # these are vanilla sinks, do nothing
-            pass  
-
-        elif len(line.strip().split()) == 17:
-            # these are old-style stars, add stellar model parameters
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14,
-            index['particle_burnstate'] = 15
-
-        elif len(line.strip().split()) == 18:
-            # these are the newer style, add luminosity as well
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14,
-            index['particle_burnstate'] = 15,
-            index['particle_luminosity']= 16
-
-        else:
-            # give a warning if none of the above apply:
-            mylog.warning('Warning - could not figure out particle output file')
-            mylog.warning('These results could be nonsense!')
+        fn = self.particle_filename
 
         def read(line, field):
-            return float(line.strip().split(' ')[index[field]])
+            entry = line.strip().split(' ')[self.particle_field_index[field]]
+            return np.float(entry)
 
         with open(fn, 'r') as f:
             lines = f.readlines()
@@ -154,11 +145,12 @@
                     coord = read(line, "particle_position_x"), \
                             read(line, "particle_position_y"), \
                             read(line, "particle_position_z") 
-                    if ( (grid.LeftEdge < coord).all() and 
+                    if ( (grid.LeftEdge <= coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
         return np.array(particles)
 
+
 class IOHandlerCastro(IOHandlerBoxlib):
     _dataset_type = "castro_native"
 

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -14,14 +14,24 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-      ChomboGrid, \
-      ChomboHierarchy, \
-      ChomboDataset, \
-      Orion2Hierarchy, \
-      Orion2Dataset
+    ChomboGrid, \
+    ChomboHierarchy, \
+    ChomboDataset, \
+    Orion2Hierarchy, \
+    Orion2Dataset, \
+    ChomboPICHierarchy, \
+    ChomboPICDataset, \
+    PlutoHierarchy, \
+    PlutoDataset
 
 from .fields import \
-      ChomboFieldInfo
+    ChomboFieldInfo, \
+    Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, \
+    ChomboPICFieldInfo2D, \
+    ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
 
 from .io import \
-      IOHandlerChomboHDF5
+    IOHandlerChomboHDF5,\
+    IOHandlerPlutoHDF5

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -18,31 +18,31 @@
 import os
 import weakref
 import numpy as np
+import six
 
-from collections import \
-     defaultdict
 from stat import \
-     ST_CTIME
+    ST_CTIME
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
-     AMRGridPatch
+    AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
-     GridIndex
+    GridIndex
 from yt.data_objects.static_output import \
-     Dataset
+    Dataset
 from yt.utilities.definitions import \
-     mpc_conversion, sec_conversion
+    mpc_conversion, sec_conversion
 from yt.utilities.file_handler import \
     HDF5FileHandler
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_root_only
+    parallel_root_only
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
-from yt.utilities.io_handler import \
-    io_registry
 
-from .fields import ChomboFieldInfo, Orion2FieldInfo
+from .fields import ChomboFieldInfo, Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, ChomboPICFieldInfo2D, ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
+
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -89,12 +89,13 @@
         return [self.index.grids[cid - self._id_offset]
                 for cid in self._children_ids]
 
+
 class ChomboHierarchy(GridIndex):
 
     grid = ChomboGrid
     _data_file = None
 
-    def __init__(self,ds,dataset_type='chombo_hdf5'):
+    def __init__(self, ds, dataset_type='chombo_hdf5'):
         self.domain_left_edge = ds.domain_left_edge
         self.domain_right_edge = ds.domain_right_edge
         self.dataset_type = dataset_type
@@ -102,7 +103,7 @@
         if ds.dimensionality == 1:
             self.dataset_type = "chombo1d_hdf5"
         if ds.dimensionality == 2:
-            self.dataset_type = "chombo2d_hdf5"        
+            self.dataset_type = "chombo2d_hdf5"
 
         self.field_indexes = {}
         self.dataset = weakref.proxy(ds)
@@ -114,7 +115,7 @@
 
         self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
         self._levels = [key for key in self._handle.keys() if key.startswith('level')]
-        GridIndex.__init__(self,ds,dataset_type)
+        GridIndex.__init__(self, ds, dataset_type)
 
         self._read_particles()
 
@@ -123,7 +124,7 @@
         # only do anything if the dataset contains particles
         if not any([f[1].startswith('particle_') for f in self.field_list]):
             return
-        
+
         self.num_particles = 0
         particles_per_grid = []
         for key, val in self._handle.items():
@@ -189,7 +190,7 @@
             for level_id, box in enumerate(boxes):
                 si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
                 ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
-                
+
                 if D == 1:
                     si = np.concatenate((si, [0.0, 0.0]))
                     ei = np.concatenate((ei, [0.0, 0.0]))
@@ -236,6 +237,7 @@
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
+
 class ChomboDataset(Dataset):
     _index_class = ChomboHierarchy
     _field_info_class = ChomboFieldInfo
@@ -244,6 +246,7 @@
                  storage_filename = None, ini_filename = None):
         self.fluid_types += ("chombo",)
         self._handle = HDF5FileHandler(filename)
+        self.dataset_type = dataset_type
 
         # look up the dimensionality of the dataset
         D = self._handle['Chombo_global/'].attrs['SpaceDim']
@@ -251,15 +254,18 @@
             self.dataset_type = 'chombo1d_hdf5'
         if D == 2:
             self.dataset_type = 'chombo2d_hdf5'
-        if D == 3:
-            self.dataset_type = 'chombo_hdf5'
 
-        # some datasets will not be time-dependent, make
+        # some datasets will not be time-dependent, and to make
+        # matters worse, the simulation time is not always
+        # stored in the same place in the hdf file! Make
         # sure we handle that here.
         try:
             self.current_time = self._handle.attrs['time']
         except KeyError:
-            self.current_time = 0.0
+            try:
+                self.current_time = self._handle['/level_0'].attrs['time']
+            except KeyError:
+                self.current_time = 0.0
 
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
@@ -274,10 +280,13 @@
         self.parameters["EOSType"] = -1 # default
 
     def _set_code_unit_attributes(self):
-        self.length_unit = YTQuantity(1.0, "cm")
-        self.mass_unit = YTQuantity(1.0, "g")
-        self.time_unit = YTQuantity(1.0, "s")
-        self.velocity_unit = YTQuantity(1.0, "cm/s")
+        mylog.warning("Setting code length to be 1.0 cm")
+        mylog.warning("Setting code mass to be 1.0 g")
+        mylog.warning("Setting code time to be 1.0 s")
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.time_unit = self.quan(1.0, "s")
+        self.velocity_unit = self.length_unit / self.time_unit
 
     def _localize(self, f, default):
         if f is None:
@@ -285,7 +294,7 @@
         return f
 
     def _parse_parameter_file(self):
-        
+
         self.unique_identifier = \
                                int(os.stat(self.parameter_filename)[ST_CTIME])
         self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
@@ -303,9 +312,19 @@
             self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
-        
+
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True, True, True)
+        self._determine_periodic()
+
+    def _determine_periodic(self):
+        # we default to true unless the HDF5 file says otherwise
+        is_periodic = np.array([True, True, True])
+        for dir in range(self.dimensionality):
+            try:
+                is_periodic[dir] = self._handle['/level_0'].attrs['is_periodic_%d' % dir]
+            except KeyError:
+                is_periodic[dir] = True
+        self.periodicity = tuple(is_periodic)
 
     def _calc_left_edge(self):
         fileh = self._handle
@@ -331,22 +350,23 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
-        if type(args[0]) == type(""):
+        if isinstance(args[0], six.string_types): 
             dir_name = os.path.dirname(os.path.abspath(args[0]))
             pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
 
-        if not (pluto_ini_file_exists and orion2_ini_file_exists):
+        if not (pluto_ini_file_exists or orion2_ini_file_exists):
             try:
                 fileh = h5py.File(args[0],'r')
                 valid = "Chombo_global" in fileh["/"]
                 # ORION2 simulations should always have this:
                 valid = valid and not ('CeilVA_mass' in fileh.attrs.keys())
+                valid = valid and not ('Charm_global' in fileh.keys())
                 fileh.close()
                 return valid
             except:
@@ -363,13 +383,159 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+
+class PlutoHierarchy(ChomboHierarchy):
+
+    def __init__(self, ds, dataset_type="pluto_chombo_native"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
+
+    def _parse_index(self):
+        f = self._handle # shortcut
+        self.max_level = f.attrs['num_levels'] - 1
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.dataset.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_levels[i] = level_number
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)+self.domain_left_edge.value
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)+self.domain_left_edge.value
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+
+class PlutoDataset(ChomboDataset):
+
+    _index_class = PlutoHierarchy
+    _field_info_class = PlutoFieldInfo
+
+    def __init__(self, filename, dataset_type='pluto_chombo_native',
+                 storage_filename = None, ini_filename = None):
+
+        ChomboDataset.__init__(self, filename, dataset_type, 
+                    storage_filename, ini_filename)
+
+    def _parse_parameter_file(self):
+        """
+        Check to see whether a 'pluto.ini' file
+        exists in the plot file directory. If one does, attempt to parse it.
+        Otherwise grab the dimensions from the hdf5 file.
+        """
+
+        pluto_ini_file_exists = False
+        dir_name = os.path.dirname(os.path.abspath(self.fullplotdir))
+        pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+        pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_dimensions = self._calc_domain_dimensions()
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+
+        if pluto_ini_file_exists:
+            lines=[line.strip() for line in open(pluto_ini_filename)]
+            self.domain_left_edge = np.zeros(self.dimensionality)
+            self.domain_right_edge = np.zeros(self.dimensionality)
+            for il,ll in enumerate(lines[lines.index('[Grid]')+2:lines.index('[Grid]')+2+self.dimensionality]):
+                self.domain_left_edge[il] = float(ll.split()[2])
+                self.domain_right_edge[il] = float(ll.split()[-1])
+            self.periodicity = [0]*3
+            for il,ll in enumerate(lines[lines.index('[Boundary]')+2:lines.index('[Boundary]')+2+6:2]):
+                self.periodicity[il] = (ll.split()[1] == 'periodic')
+            self.periodicity = tuple(self.periodicity)
+            for il,ll in enumerate(lines[lines.index('[Parameters]')+2:]):
+                if (ll.split()[0] == 'GAMMA'):
+                    self.gamma = float(ll.split()[1])
+        else:
+            self.domain_left_edge = self._calc_left_edge()
+            self.domain_right_edge = self._calc_right_edge()
+            self.periodicity = (True, True, True)
+
+        # if a lower-dimensional dataset, set up pseudo-3D stuff here.
+        if self.dimensionality == 1:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists = False
+
+        if isinstance(args[0], six.string_types):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+
+        if pluto_ini_file_exists:
+            return True
+
+        return False
+
+
 class Orion2Hierarchy(ChomboHierarchy):
 
     def __init__(self, ds, dataset_type="orion_chombo_native"):
         ChomboHierarchy.__init__(self, ds, dataset_type)
 
+    def _detect_output_fields(self):
+
+        # look for fluid fields
+        output_fields = []
+        for key, val in self._handle.attrs.items():
+            if key.startswith("component"):
+                output_fields.append(val)
+        self.field_list = [("chombo", c) for c in output_fields]
+
+        # look for particle fields
+        self.particle_filename = self.index_filename[:-4] + 'sink'
+        if not os.path.exists(self.particle_filename): return
+        pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
+        self.field_list.extend(pfield_list)
+
     def _read_particles(self):
-        self.particle_filename = self.index_filename[:-4] + 'sink'
         if not os.path.exists(self.particle_filename): return
         with open(self.particle_filename, 'r') as f:
             lines = f.readlines()
@@ -383,8 +549,8 @@
                 # copied from object_finding_mixin.py
                 mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                    np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
                 ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
@@ -396,6 +562,7 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
+
 class Orion2Dataset(ChomboDataset):
 
     _index_class = Orion2Hierarchy
@@ -404,7 +571,7 @@
     def __init__(self, filename, dataset_type='orion_chombo_native',
                  storage_filename = None, ini_filename = None):
 
-        ChomboDataset.__init__(self, filename, dataset_type, 
+        ChomboDataset.__init__(self, filename, dataset_type,
                     storage_filename, ini_filename)
 
     def _parse_parameter_file(self):
@@ -427,7 +594,7 @@
         self.domain_right_edge = self._calc_right_edge()
         self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True, True, True)
+        self._determine_periodic()
 
     def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)
@@ -449,7 +616,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
-        pluto_ini_file_exists  = False
+        pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
         if type(args[0]) == type(""):
@@ -458,18 +625,70 @@
             orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
             pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
             orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
-        
+
         if orion2_ini_file_exists:
             return True
 
         if not pluto_ini_file_exists:
             try:
                 fileh = h5py.File(args[0],'r')
-                valid = "Chombo_global" in fileh["/"]
                 valid = 'CeilVA_mass' in fileh.attrs.keys()
+                valid = "Chombo_global" in fileh["/"] and "Charm_global" not in fileh["/"]
+                valid = valid and 'CeilVA_mass' in fileh.attrs.keys()
                 fileh.close()
                 return valid
             except:
                 pass
         return False
 
+
+class ChomboPICHierarchy(ChomboHierarchy):
+
+    def __init__(self, ds, dataset_type="chombo_hdf5"):
+        ChomboHierarchy.__init__(self, ds, dataset_type)
+
+
+class ChomboPICDataset(ChomboDataset):
+
+    _index_class = ChomboPICHierarchy
+    _field_info_class = ChomboPICFieldInfo3D
+
+    def __init__(self, filename, dataset_type='chombo_hdf5',
+                 storage_filename=None, ini_filename=None):
+
+        ChomboDataset.__init__(self, filename, dataset_type,
+                               storage_filename, ini_filename)
+
+        if self.dimensionality == 1:
+            self._field_info_class = ChomboPICFieldInfo1D
+
+        if self.dimensionality == 2:
+            self._field_info_class = ChomboPICFieldInfo2D
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+
+        pluto_ini_file_exists = False
+        orion2_ini_file_exists = False
+
+        if isinstance(args[0], six.string_types):
+            dir_name = os.path.dirname(os.path.abspath(args[0]))
+            pluto_ini_filename = os.path.join(dir_name, "pluto.ini")
+            orion2_ini_filename = os.path.join(dir_name, "orion2.ini")
+            pluto_ini_file_exists = os.path.isfile(pluto_ini_filename)
+            orion2_ini_file_exists = os.path.isfile(orion2_ini_filename)
+
+        if orion2_ini_file_exists:
+            return False
+
+        if pluto_ini_file_exists:
+            return False
+
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
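
With PlutoDataset._is_valid defined above, frontend detection hinges entirely on a pluto.ini file sitting in the same directory as the plot file. An illustrative load under that assumption (file names are hypothetical):

    import yt

    # ./run/pluto.ini must exist next to the HDF5 output for
    # PlutoDataset to claim the file.
    ds = yt.load("run/data.0010.hdf5")
    print ds.periodicity   # parsed from the [Boundary] block when present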

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -14,8 +14,13 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+from yt.units.unit_object import Unit
 from yt.fields.field_info_container import \
-    FieldInfoContainer
+    FieldInfoContainer, \
+    particle_deposition_functions, \
+    particle_vector_functions, \
+    standard_particle_fields
+
 from yt.frontends.boxlib.fields import \
     rho_units, \
     mom_units, \
@@ -27,12 +32,16 @@
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
+vel_units = "code_length / code_time"
+b_units = "code_magnetic"
+
 
 # Chombo does not have any known fields by itself.
 class ChomboFieldInfo(FieldInfoContainer):
     known_other_fields = ()
     known_particle_fields = ()
 
+
 # Orion 2 Fields
 # We duplicate everything here from Boxlib, because we want to be able to
 # subclass it and that can be somewhat tricky.
@@ -88,3 +97,180 @@
                        units = "erg/cm**3")
         self.add_field("temperature", function=_temperature,
                        units="K")
+
+
+class ChomboPICFieldInfo3D(FieldInfoContainer):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_z", ("code_length / code_time**2", [], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+        ("particle_velocity_z", ("code_length / code_time", [], None)),
+    )
+
+    # I am re-implementing this here to override a few default behaviors:
+    # I don't want to skip output units for code_length and I do want
+    # particle_fields to default to take_log = False.
+    def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
+        skip_output_units = ()
+        for f, (units, aliases, dn) in sorted(self.known_particle_fields):
+            units = self.ds.field_units.get((ptype, f), units)
+            if (f in aliases or ptype not in self.ds.particle_types_raw) and \
+                units not in skip_output_units:
+                u = Unit(units, registry = self.ds.unit_registry)
+                output_units = str(u.get_cgs_equivalent())
+            else:
+                output_units = units
+            if (ptype, f) not in self.field_list:
+                continue
+            self.add_output_field((ptype, f),
+                units = units, particle_type = True,
+                display_name = dn, output_units = output_units, take_log=False)
+            for alias in aliases:
+                self.alias((ptype, alias), (ptype, f), units = output_units)
+
+        # We'll either have particle_position or particle_position_[xyz]
+        if (ptype, "particle_position") in self.field_list or \
+           (ptype, "particle_position") in self.field_aliases:
+            particle_scalar_functions(ptype,
+                   "particle_position", "particle_velocity",
+                   self)
+        else:
+            # We need to check to make sure that there's a "known field" that
+            # overlaps with one of the vector fields.  For instance, if we are
+            # in the Stream frontend, and we have a set of scalar position
+            # fields, they will overlap with -- and be overridden by -- the
+            # "known" vector field that the frontend creates.  So the easiest
+            # thing to do is to simply remove the on-disk field (which doesn't
+            # exist) and replace it with a derived field.
+            if (ptype, "particle_position") in self and \
+                 self[ptype, "particle_position"]._function == NullFunc:
+                self.pop((ptype, "particle_position"))
+            particle_vector_functions(ptype,
+                    ["particle_position_%s" % ax for ax in 'xyz'],
+                    ["particle_velocity_%s" % ax for ax in 'xyz'],
+                    self)
+        particle_deposition_functions(ptype, "particle_position",
+            "particle_mass", self)
+        standard_particle_fields(self, ptype)
+        # Now we check for any leftover particle fields
+        for field in sorted(self.field_list):
+            if field in self: continue
+            if not isinstance(field, tuple):
+                raise RuntimeError
+            if field[0] not in self.ds.particle_types:
+                continue
+            self.add_output_field(field, 
+                                  units = self.ds.field_units.get(field, ""),
+                                  particle_type = True)
+        self.setup_smoothed_fields(ptype, 
+                                   num_neighbors=num_neighbors,
+                                   ftype=ftype)
+
+
+def _dummy_position(field, data):
+    return 0.5*np.ones_like(data['particle_position_x'])
+
+
+def _dummy_velocity(field, data):
+    return np.zeros_like(data['particle_velocity_x'])
+
+
+def _dummy_field(field, data):
+    return 0.0 * data['gravitational_field_x']
+
+fluid_field_types = ['chombo', 'gas']
+particle_field_types = ['io', 'all']
+
+
+class ChomboPICFieldInfo2D(ChomboPICFieldInfo3D):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+        ("gravitational_field_y", ("code_length / code_time**2", [], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_position_y", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+        ("particle_velocity_y", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, ds, field_list):
+        super(ChomboPICFieldInfo2D, self).__init__(ds, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:                
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+
+class ChomboPICFieldInfo1D(ChomboPICFieldInfo3D):
+    known_other_fields = (
+        ("density", (rho_units, ["density", "Density"], None)),
+        ("potential", ("code_length**2 / code_time**2", ["potential", "Potential"], None)),
+        ("gravitational_field_x", ("code_length / code_time**2", [], None)),
+    )
+    known_particle_fields = (
+        ("particle_mass", ("code_mass", [], None)),
+        ("particle_position_x", ("code_length", [], None)),
+        ("particle_velocity_x", ("code_length / code_time", [], None)),
+    )
+
+    def __init__(self, ds, field_list):
+        super(ChomboPICFieldInfo1D, self).__init__(ds, field_list)
+
+        for ftype in fluid_field_types:
+            self.add_field((ftype, 'gravitational_field_y'), function = _dummy_field, 
+                            units = "code_length / code_time**2")
+
+            self.add_field((ftype, 'gravitational_field_z'), function = _dummy_field, 
+                    units = "code_length / code_time**2")
+
+        for ptype in particle_field_types:
+            self.add_field((ptype, "particle_position_y"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_position_z"), function = _dummy_position,
+                           particle_type = True,
+                           units = "code_length")
+            self.add_field((ptype, "particle_velocity_y"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+            self.add_field((ptype, "particle_velocity_z"), function = _dummy_velocity,
+                           particle_type = True,
+                           units = "code_length / code_time")
+
+
+class PlutoFieldInfo(ChomboFieldInfo):
+    known_other_fields = (
+        ("rho", (rho_units, ["density"], None)),
+        ("prs", ("code_mass / (code_length * code_time**2)", ["pressure"], None)),
+        ("vx1", (vel_units, ["velocity_x"], None)),
+        ("vx2", (vel_units, ["velocity_y"], None)),
+        ("vx3", (vel_units, ["velocity_z"], None)),
+        ("bx1", (b_units, ["magnetic_field_x"], None)),
+        ("bx2", (b_units, ["magnetic_field_y"], None)),
+        ("bx3", (b_units, ["magnetic_field_z"], None)),
+    )
+
+    known_particle_fields = ()
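
For reference, each entry in the known_other_fields and known_particle_fields
tuples above has the form (on-disk name, (units, list of yt aliases, display
name)); PlutoFieldInfo, for instance, maps the on-disk "rho" to the yt-level
"density" alias. A minimal sketch of the same layout for a hypothetical
frontend:

    from yt.fields.field_info_container import FieldInfoContainer

    class ExampleFieldInfo(FieldInfoContainer):
        # (on-disk name, (units, [yt aliases], display name or None))
        known_other_fields = (
            ("rho", ("code_mass / code_length**3", ["density"], None)),
            ("vx1", ("code_length / code_time", ["velocity_x"], None)),
        )
        known_particle_fields = (
            ("particle_mass", ("code_mass", [], None)),
        )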

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -12,14 +12,14 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import h5py
-import os
+
 import re
 import numpy as np
 from yt.utilities.logger import ytLogger as mylog
 
 from yt.utilities.io_handler import \
-           BaseIOHandler
+    BaseIOHandler
+
 
 class IOHandlerChomboHDF5(BaseIOHandler):
     _dataset_type = "chombo_hdf5"
@@ -30,6 +30,18 @@
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
         self._handle = ds._handle
+        self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self._read_ghost_info()
+
+    def _read_ghost_info(self):
+        try:
+            self.ghost = tuple(self._handle['level_0/data_attributes'].attrs['outputGhost'])
+            # pad with zeros if the dataset is low-dimensional
+            self.ghost += (3 - self.dim)*(0,)
+            self.ghost = np.array(self.ghost)
+        except KeyError:
+            # assume zero ghost zones if 'outputGhost' is not present
+            self.ghost = np.zeros(self.dim)
 
     _field_dict = None
     @property
@@ -55,25 +67,27 @@
                 comp_number = int(re.match('particle_component_(\d)', key).groups()[0])
                 field_dict[val] = comp_number
         self._particle_field_index = field_dict
-        return self._particle_field_index        
-        
+        return self._particle_field_index
+
     def _read_field_names(self,grid):
         ncomp = int(self._handle.attrs['num_components'])
         fns = [c[1] for c in self._handle.attrs.items()[-ncomp-1:-1]]
-    
+
     def _read_data(self,grid,field):
-
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
-        boxsize = dims.prod()
-        
+        shape = grid.ActiveDimensions + 2*self.ghost
+        boxsize = shape.prod()
+
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
+        data_no_ghost = data.reshape(shape, order='F')
+        ghost_slice = [slice(g, d+g, None) for g, d in zip(self.ghost, grid.ActiveDimensions)]
+        ghost_slice = ghost_slice[0:self.dim]
+        return data_no_ghost[ghost_slice]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -124,7 +138,7 @@
 
             return rv
 
-        rv = {f:np.array([]) for f in fields}
+        rv = {f: np.array([]) for f in fields}
         for chunk in chunks:
             for grid in chunk.objs:
                 for ftype, fname in fields:
@@ -147,7 +161,7 @@
 
         # convert between the global grid id and the id on this level            
         grid_levels = np.array([g.Level for g in self.ds.index.grids])
-        grid_ids    = np.array([g.id    for g in self.ds.index.grids])
+        grid_ids = np.array([g.id    for g in self.ds.index.grids])
         grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
         lo = grid.id - grid_level_offset
         hi = lo + 1
@@ -159,6 +173,7 @@
         data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
         return np.asarray(data[field_index::items_per_particle], dtype=np.float64, order='F')
 
+
 class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo2d_hdf5"
     _offset_string = 'data:offsets=0'
@@ -168,6 +183,9 @@
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
         self._handle = ds._handle
+        self.dim = 2
+        self._read_ghost_info()
+
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
@@ -177,67 +195,103 @@
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
-        self._handle = ds._handle   
+        self.dim = 1
+        self._handle = ds._handle
+        self._read_ghost_info()
+
+
+class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
+    _dataset_type = "pluto_chombo_native"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, ds, *args, **kwargs):
+        BaseIOHandler.__init__(self, ds, *args, **kwargs)
+        self.ds = ds
+        self._handle = ds._handle
+
+
+def parse_orion_sinks(fn):
+    '''
+
+    Orion sink particles are stored in text files. This function
+    is for figuring what particle fields are present based on the
+    number of entries per line in the *.sink file.
+
+    '''
+
+    # Figure out the format of the particle file
+    with open(fn, 'r') as f:
+        lines = f.readlines()
+    line = lines[1]
+
+    # The basic fields that all sink particles have
+    index = {'particle_mass': 0,
+             'particle_position_x': 1,
+             'particle_position_y': 2,
+             'particle_position_z': 3,
+             'particle_momentum_x': 4,
+             'particle_momentum_y': 5,
+             'particle_momentum_z': 6,
+             'particle_angmomen_x': 7,
+             'particle_angmomen_y': 8,
+             'particle_angmomen_z': 9,
+             'particle_id': -1}
+
+    if len(line.strip().split()) == 11:
+        # these are vanilla sinks, do nothing
+        pass
+
+    elif len(line.strip().split()) == 17:
+        # these are old-style stars, add stellar model parameters
+        index['particle_mlast']     = 10
+        index['particle_r']         = 11
+        index['particle_mdeut']     = 12
+        index['particle_n']         = 13
+        index['particle_mdot']      = 14
+        index['particle_burnstate'] = 15
+
+    elif len(line.strip().split()) == 18:
+        # these are the newer style, add luminosity as well
+        index['particle_mlast']     = 10
+        index['particle_r']         = 11
+        index['particle_mdeut']     = 12
+        index['particle_n']         = 13
+        index['particle_mdot']      = 14
+        index['particle_burnstate'] = 15
+        index['particle_luminosity'] = 16
+
+    else:
+        # give a warning if none of the above apply:
+        mylog.warning('Warning - could not figure out particle output file')
+        mylog.warning('These results could be nonsense!')
+
+    return index
+
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"
 
+    _particle_field_index = None
+    @property
+    def particle_field_index(self):
+
+        fn = self.ds.fullplotdir[:-4] + "sink"
+
+        index = parse_orion_sinks(fn)
+
+        self._particle_field_index = index
+        return self._particle_field_index
+
     def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files
-             
+
         """
 
-        fn = grid.ds.fullplotdir[:-4] + "sink"
-
-        # Figure out the format of the particle file
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-        line = lines[1]
-
-        # The basic fields that all sink particles have
-        index = {'particle_mass': 0,
-                 'particle_position_x': 1,
-                 'particle_position_y': 2,
-                 'particle_position_z': 3,
-                 'particle_momentum_x': 4,
-                 'particle_momentum_y': 5,
-                 'particle_momentum_z': 6,
-                 'particle_angmomen_x': 7,
-                 'particle_angmomen_y': 8,
-                 'particle_angmomen_z': 9,
-                 'particle_id': -1}
-
-        if len(line.strip().split()) == 11:
-            # these are vanilla sinks, do nothing
-            pass  
-
-        elif len(line.strip().split()) == 17:
-            # these are old-style stars, add stellar model parameters
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14,
-            index['particle_burnstate'] = 15
-
-        elif len(line.strip().split()) == 18:
-            # these are the newer style, add luminosity as well
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14,
-            index['particle_burnstate'] = 15,
-            index['particle_luminosity']= 16
-
-        else:
-            # give a warning if none of the above apply:
-            mylog.warning('Warning - could not figure out particle output file')
-            mylog.warning('These results could be nonsense!')
-            
         def read(line, field):
-            return float(line.strip().split(' ')[index[field]])
+            entry = line.strip().split(' ')[self.particle_field_index[field]]
+            return np.float(entry)
 
         fn = grid.ds.fullplotdir[:-4] + "sink"
         with open(fn, 'r') as f:
@@ -246,9 +300,9 @@
             for line in lines[1:]:
                 if grid.NumberOfParticles > 0:
                     coord = read(line, "particle_position_x"), \
-                            read(line, "particle_position_y"), \
-                            read(line, "particle_position_z")
-                    if ( (grid.LeftEdge < coord).all() and
-                         (coord <= grid.RightEdge).all() ):
+                        read(line, "particle_position_y"), \
+                        read(line, "particle_position_z")
+                    if ((grid.LeftEdge <= coord).all() and
+                       (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
         return np.array(particles)
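
The key change in _read_data above is ghost-zone handling: each on-disk box
is padded by the level's outputGhost count on every side, so a field with
active shape (nx, ny, nz) is stored as (nx+2g, ny+2g, nz+2g) and the interior
has to be sliced back out. A self-contained illustration of that arithmetic
(the sizes here are made up):

    import numpy as np

    dims = np.array([8, 8, 8])    # grid.ActiveDimensions
    ghost = np.array([2, 2, 2])   # level_0/data_attributes:outputGhost
    shape = dims + 2 * ghost      # shape of the box actually on disk

    data = np.arange(shape.prod(), dtype="float64")
    data_with_ghost = data.reshape(shape, order='F')

    # keep indices [g, d+g) in each dimension -> active region of length d
    interior = tuple(slice(g, d + g) for g, d in zip(ghost, dims))
    assert data_with_ghost[interior].shape == tuple(dims)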

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -22,7 +22,8 @@
     data_dir_load
 from yt.frontends.chombo.api import \
     ChomboDataset, \
-    Orion2Dataset
+    Orion2Dataset, \
+    PlutoDataset
 
 _fields = ("density", "velocity_magnitude",  # "velocity_divergence",
            "magnetic_field_x")
@@ -57,6 +58,14 @@
         test_tb.__name__ = test.description
         yield test
 
+kho = "KelvinHelmholtz/data.0004.hdf5"
+@requires_ds(kho)
+def test_kho():
+    ds = data_dir_load(kho)
+    yield assert_equal, str(ds), "data.0004.hdf5"
+    for test in small_patch_amr(kho, _fields):
+        test_kho.__name__ = test.description
+        yield test
 
 @requires_file(zp)
 def test_ChomboDataset():
@@ -68,6 +77,6 @@
     assert isinstance(data_dir_load(gc), Orion2Dataset)
 
 
-#@requires_file(kho)
-#def test_PlutoDataset():
-#    assert isinstance(data_dir_load(kho), PlutoDataset)
+@requires_file(kho)
+def test_PlutoDataset():
+    assert isinstance(data_dir_load(kho), PlutoDataset)
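
With detection re-enabled, the KelvinHelmholtz sample is picked up as a
PlutoDataset end to end. A short usage sketch (the path assumes the standard
yt test-data layout):

    import yt
    from yt.frontends.chombo.api import PlutoDataset

    ds = yt.load("KelvinHelmholtz/data.0004.hdf5")
    assert isinstance(ds, PlutoDataset)
    ad = ds.all_data()
    print(ad.quantities.extrema(("gas", "density")))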

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/fits/tests/test_outputs.py
--- a/yt/frontends/fits/tests/test_outputs.py
+++ b/yt/frontends/fits/tests/test_outputs.py
@@ -33,7 +33,7 @@
 
 _fields_vels = ("velocity_x","velocity_y","velocity_z")
 
-vf = "UniformGrid/velocity_field_20.fits"
+vf = "UnigridData/velocity_field_20.fits"
 @requires_ds(vf)
 def test_velocity_field():
     ds = data_dir_load(vf, cls=FITSDataset)

diff -r fbc488db516810854aa7f76d1dacc146d90db808 -r d4c59a2dda36eb48e03a03395b862c05c4867ccf yt/frontends/sph/api.py
--- a/yt/frontends/sph/api.py
+++ b/yt/frontends/sph/api.py
@@ -19,7 +19,8 @@
       GadgetDataset, \
       GadgetHDF5Dataset, \
       TipsyDataset,\
-      EagleNetworkDataset
+      EagleNetworkDataset, \
+      EagleDataset
 
 from .io import \
       IOHandlerOWLS, \

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/3d43afd7ca4b/
Changeset:   3d43afd7ca4b
Branch:      yt
User:        MatthewTurk
Date:        2014-09-29 14:33:16+00:00
Summary:     Merged in drudd/yt (pull request #1217)

[Bugfix] ds.derived_field_list is often out of sync with ds.field_info [Fixes #898]
Affected #:  1 file

diff -r 7848f9288258e1eaf7b0decbf01ac3be39dec62b -r 3d43afd7ca4b56d679f8744bedc554c8b5f5b3a7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -496,7 +496,8 @@
 
                 # really ugly check to ensure that this field really does exist somewhere,
                 # in some naming convention, before returning it as a possible field type
-                if (ftype,fname) not in self.ds.field_list and \
+                if (ftype,fname) not in self.ds.field_info and \
+                        (ftype,fname) not in self.ds.field_list and \
                         fname not in self.ds.field_list and \
                         (ftype,fname) not in self.ds.derived_field_list and \
                         fname not in self.ds.derived_field_list and \
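
The fix is easier to read flattened out: a candidate field type is rejected
only if none of the dataset's registries knows the field, and ds.field_info
was previously missing from that list. Condensed into a helper (hypothetical
name, covering only the clauses visible in this hunk):

    def _known_somewhere(ds, ftype, fname):
        # the field passes if any registry -- field_info, field_list, or
        # derived_field_list -- recognizes it, by tuple or by bare name
        return ((ftype, fname) in ds.field_info or
                (ftype, fname) in ds.field_list or
                fname in ds.field_list or
                (ftype, fname) in ds.derived_field_list or
                fname in ds.derived_field_list)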

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


