[yt-svn] commit/yt: 180 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Nov 2 11:33:01 PST 2015


180 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/e2a9d8c3dff3/
Changeset:   e2a9d8c3dff3
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 10:03:46+00:00
Summary:     Adding convenience function for determining output filenames.
Affected #:  1 file

diff -r c8e5a56dfd3a968b318524fbd31f2550083d646b -r e2a9d8c3dff315e02c58e13ed1af32b2eb07a1cb yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import errno
 from yt.extern.six import string_types
 import time, types, signal, inspect, traceback, sys, pdb, os, re
 import contextlib
@@ -672,6 +673,57 @@
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
 
+def get_output_filename(name, keyword, suffix):
+    r"""Return an appropriate filename for output.
+
+    With a name provided by the user, this will decide how to 
+    appropriately name the output file by the following rules:
+    1. if name is None, the filename will be the keyword plus 
+       the suffix.
+    2. if name ends with "/", assume name is a directory and 
+       the file will be named name/(keyword+suffix).  If the
+       directory does not exist, first try to create it and
+       raise an exception if an error occurs.
+    3. if name does not end in the suffix, add the suffix.
+    
+    Parameters
+    ----------
+    name : str
+        A filename given by the user.
+    keyword : str
+        A default filename prefix if name is None.
+    suffix : str
+        Suffix that must appear at end of the filename.
+        This will be added if not present.
+
+    Examples
+    --------
+
+    >>> print get_output_filename(None, "Projection_x", ".png")
+    Projection_x.png
+    >>> print get_output_filename("my_file", "Projection_x", ".png")
+    my_file.png
+    >>> print get_output_filename("my_file/", "Projection_x", ".png")
+    my_file/Projection_x.png
+    
+    """
+    if name is None:
+        name = keyword
+    name = os.path.expanduser(name)
+    if name[-1] == os.sep and not os.path.isdir(name):
+        try:
+            os.mkdir(name)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+    if os.path.isdir(name):
+        name = os.path.join(name, keyword)
+    if not name.endswith(suffix):
+        name += suffix
+    return name
+
 def ensure_dir_exists(path):
     r"""Create all directories in path recursively in a parallel safe manner"""
     my_dir = os.path.dirname(path)
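
[Editor's note: the helper added above can be exercised directly. A minimal check of the three naming rules, assuming the changeset is applied; only the os/tempfile scaffolding is extra, and the "my_file" path assumes no directory of that name exists.]

    import os
    import tempfile
    from yt.funcs import get_output_filename

    # Rule 1: no name given -> keyword plus suffix.
    assert get_output_filename(None, "Projection_x", ".png") == "Projection_x.png"

    # Rule 3: the suffix is appended only when missing.
    assert get_output_filename("my_file", "Projection_x", ".png") == "my_file.png"

    # Rule 2: a trailing separator marks a directory, created on
    # demand; the keyword then becomes the filename within it.
    d = os.path.join(tempfile.mkdtemp(), "plots") + os.sep
    fn = get_output_filename(d, "Projection_x", ".png")
    assert fn == d + "Projection_x.png"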


https://bitbucket.org/yt_analysis/yt/commits/1b4012bd4e4b/
Changeset:   1b4012bd4e4b
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 11:15:21+00:00
Summary:     Adding some initial utilities for saving data fields as yt loadable datasets.
Affected #:  4 files

diff -r e2a9d8c3dff315e02c58e13ed1af32b2eb07a1cb -r 1b4012bd4e4ba2a55ec371e5251353e341aec904 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -18,23 +18,26 @@
 import os
 
 from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
-     periodic_distance
+    periodic_distance
 from yt.data_objects.profiles import \
-     create_profile
+    create_profile
+from yt.frontends.ytdata.utilities import \
+    _hdf5_yt_array, \
+    _yt_array_hdf5
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray, YTQuantity
 from yt.utilities.exceptions import \
-     YTSphereTooSmall
+    YTSphereTooSmall
 from yt.funcs import \
-     ensure_list, is_root
+    ensure_list, is_root
 from yt.utilities.exceptions import YTUnitConversionError
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.operator_registry import \
-     OperatorRegistry
+    OperatorRegistry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.visualization.profile_plotter import \
-     PhasePlot
+    PhasePlot
 
 callback_registry = OperatorRegistry()
     
@@ -585,21 +588,3 @@
     del sphere
     
 add_callback("iterative_center_of_mass", iterative_center_of_mass)
-
-def _yt_array_hdf5(fh, fieldname, data):
-    dataset = fh.create_dataset(fieldname, data=data)
-    units = ""
-    if isinstance(data, YTArray):
-        units = str(data.units)
-    dataset.attrs["units"] = units
-
-def _hdf5_yt_array(fh, fieldname, ds=None):
-    if ds is None:
-        new_arr = YTArray
-    else:
-        new_arr = ds.arr
-    units = ""
-    if "units" in fh[fieldname].attrs:
-        units = fh[fieldname].attrs["units"]
-    if units == "dimensionless": units = ""
-    return new_arr(fh[fieldname].value, units)

diff -r e2a9d8c3dff315e02c58e13ed1af32b2eb07a1cb -r 1b4012bd4e4ba2a55ec371e5251353e341aec904 yt/frontends/ytdata/__init__.py
--- /dev/null
+++ b/yt/frontends/ytdata/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for ytData frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r e2a9d8c3dff315e02c58e13ed1af32b2eb07a1cb -r 1b4012bd4e4ba2a55ec371e5251353e341aec904 yt/frontends/ytdata/api.py
--- /dev/null
+++ b/yt/frontends/ytdata/api.py
@@ -0,0 +1,27 @@
+"""
+API for ytData frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+# from .data_structures import \
+#      ytDataDataset
+
+# from .io import \
+#      IOHandlerytDataHDF5
+
+# from .fields import \
+#      ytDataFieldInfo
+
+from .utilties import \
+    to_yt_dataset

diff -r e2a9d8c3dff315e02c58e13ed1af32b2eb07a1cb -r 1b4012bd4e4ba2a55ec371e5251353e341aec904 yt/frontends/ytdata/utilties.py
--- /dev/null
+++ b/yt/frontends/ytdata/utilties.py
@@ -0,0 +1,115 @@
+"""
+Utility functions for ytdata frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.funcs import mylog
+
+def to_yt_dataset(ds, filename, data):
+    r"""Export a set of field arrays to a reloadable yt dataset.
+
+    This function can be used to create a yt loadable dataset from a 
+    set of arrays.  The field arrays can either be associated with a 
+    loaded dataset or, if not, a dictionary of dataset attributes can
+    be provided that will be used as metadata for the new dataset.  The 
+    resulting dataset can be reloaded as a yt dataset.
+
+    Parameters
+    ----------
+    ds : dataset
+        The dataset associated with the fields.  
+    filename : str
+        The name of the file to be written.
+    data : dict
+        A dictionary of field arrays to be saved.
+
+    Returns
+    -------
+    filename : str
+        The name of the file that has been created.
+
+    Examples
+    --------
+
+    COMING SOON!
+    
+    """
+
+    fh = h5py.file(filename, "w")
+
+    fh.close()
+
+def _hdf5_yt_array(fh, field, ds=None):
+    r"""Load an hdf5 dataset as a YTArray.
+
+    Reads in a dataset from an open hdf5 file or group and uses the
+    "units" attribute, if it exists, to apply units.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group in which the dataset exists.
+    field : str
+        The name of the field to be loaded.
+    ds : yt Dataset
+        If not None, the unit_registry of the dataset
+        is used to apply units.
+
+    Returns
+    -------
+    A YTArray of the requested field.
+    
+    """
+    
+    if ds is None:
+        new_arr = YTArray
+    else:
+        new_arr = ds.arr
+    units = ""
+    if "units" in fh[field].attrs:
+        units = fh[field].attrs["units"]
+    if units == "dimensionless": units = ""
+    return new_arr(fh[field].value, units)
+
+def _yt_array_hdf5(fh, field, data):
+    r"""Save a YTArray to an open hdf5 file or group.
+
+    Save a YTArray to an open hdf5 file or group, and save the 
+    units to a "units" attribute.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group to which the data will be written.
+    field : str
+        The name of the field to be saved.
+    data : YTArray
+        The data array to be saved.
+
+    Returns
+    -------
+    dataset : hdf5 dataset
+        The created hdf5 dataset.
+    
+    """
+
+    dataset = fh.create_dataset(field, data=data)
+    units = ""
+    if isinstance(data, YTArray):
+        units = str(data.units)
+    dataset.attrs["units"] = units
+    return dataset
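
[Editor's note: the two private helpers above encode a simple convention, storing the unit string in a "units" attribute alongside each HDF5 dataset. A self-contained sketch of that round trip, using h5py and YTArray directly rather than the helpers; note that at this point utilties.py does not yet import YTArray, the import lands in changeset b69ecf0f7c15 below.]

    import h5py
    import numpy as np
    from yt.units.yt_array import YTArray

    a = YTArray(np.random.random(5), "g/cm**3")
    with h5py.File("roundtrip.h5", "w") as fh:
        d = fh.create_dataset("density", data=a)
        d.attrs["units"] = str(a.units)     # what _yt_array_hdf5 does
    with h5py.File("roundtrip.h5", "r") as fh:
        b = YTArray(fh["density"].value,    # what _hdf5_yt_array does
                    fh["density"].attrs["units"])
    assert (a == b).all() and a.units == b.units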


https://bitbucket.org/yt_analysis/yt/commits/b88be30a4665/
Changeset:   b88be30a4665
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 11:15:42+00:00
Summary:     Adding stub to_dataset function.
Affected #:  1 file

diff -r 1b4012bd4e4ba2a55ec371e5251353e341aec904 -r b88be30a4665691eb2500e805a08653a0d98de30 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,7 +13,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import h5py
 import itertools
+import os
 import types
 import uuid
 from yt.extern.six import string_types
@@ -25,9 +27,12 @@
 import shelve
 from contextlib import contextmanager
 
+from yt.funcs import get_output_filename
 from yt.funcs import *
 
 from yt.data_objects.particle_io import particle_handler_registry
+from yt.frontends.ytdata.api import \
+    to_yt_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -463,6 +468,48 @@
         df = pd.DataFrame(data)
         return df
 
+    def to_dataset(self, filename=None, fields=None):
+        r"""Export a data object to a reloadable yt dataset.
+
+        This function will take a data object and output a dataset 
+        containing either the fields presently existing or fields 
+        given in a list.  The resulting dataset can be reloaded as 
+        a yt dataset.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the file to be written.  If None, the name 
+            will be a combination of the original dataset and the type 
+            of data container.
+        fields : list of strings or tuples, default None
+            If this is supplied, it is the list of fields to be exported into
+            the data frame.  If not supplied, whatever fields presently exist
+            will be used.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> dd = ds.all_data()
+        >>> fn1 = dd.to_dataset(["density", "temperature"])
+        >>> ds1 = yt.load(fn1)
+        >>> dd["velocity_magnitude"]
+        >>> fn2 = dd.to_dataset()
+        >>> ds2 = yt.load(fn2)
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self._type_name)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        
+
+        return filename
+        
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to
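
[Editor's note: the stub above only settles on an output filename so far. A sketch of the default naming, with "DD0046" standing in for str(self.ds) and "sphere" for self._type_name, both hypothetical values.]

    from yt.funcs import get_output_filename

    keyword = "%s_%s" % ("DD0046", "sphere")
    print(get_output_filename(None, keyword, ".h5"))    # DD0046_sphere.h5
    print(get_output_filename("out/", keyword, ".h5"))  # out/DD0046_sphere.h5 (creates out/)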


https://bitbucket.org/yt_analysis/yt/commits/de8e0a2a83d1/
Changeset:   de8e0a2a83d1
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 11:46:35+00:00
Summary:     Filling out to_yt_dataset function.
Affected #:  1 file

diff -r b88be30a4665691eb2500e805a08653a0d98de30 -r de8e0a2a83d1685bb21b1627013369aa8007961b yt/frontends/ytdata/utilties.py
--- a/yt/frontends/ytdata/utilties.py
+++ b/yt/frontends/ytdata/utilties.py
@@ -19,7 +19,7 @@
 
 from yt.funcs import mylog
 
-def to_yt_dataset(ds, filename, data):
+def to_yt_dataset(ds, filename, data, extra_attrs=None):
     r"""Export a set of field arrays to a reloadable yt dataset.
 
     This function can be used to create a yt loadable dataset from a 
@@ -36,6 +36,8 @@
         The name of the file to be written.
     data : dict
         A dictionary of field arrays to be saved.
+    extra_attrs : dict
+        A dictionary of additional attributes to be saved.
 
     Returns
     -------
@@ -49,8 +51,36 @@
     
     """
 
+    mylog.info("Saving field data to yt dataset: %s." % filename)
+
+    if extra_attrs is None: extra_attrs = {}
+    base_attrs  = ["domain_left_edge", "domain_right_edge",
+                   "current_redshift", "current_time",
+                   "domain_dimensions", "periodicity",
+                   "cosmological_simulation", "omega_lambda",
+                   "omega_matter", "hubble_constant"]
+
     fh = h5py.file(filename, "w")
-
+    for attr in base_attrs:
+        if isinstance(ds, dict):
+            my_val = ds.get(attr, None)
+        else:
+            my_val = getattr(ds, attr, None)
+        if my_val is None:
+            mylog.warn("Skipping %s attribute, this may be just fine." % attr)
+            continue
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    for attr in extra_attrs:
+        my_val = extra_attrs[my_val]
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    if "data_type" not in extra_attrs:
+        fh.attrs["data_type"] = "unknown"
+    for field in data:
+        dataset = _yt_array_hdf5(fh, field, data[field])
     fh.close()
 
 def _hdf5_yt_array(fh, field, ds=None):
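
[Editor's note: to_yt_dataset now accepts either a loaded dataset or a plain dict of metadata; the attribute loop branches on isinstance(ds, dict). The lookup in isolation, as a sketch with a hypothetical stand-in class.]

    def _lookup_attr(ds, attr):
        # A dict of metadata and an object with attributes are treated alike.
        if isinstance(ds, dict):
            return ds.get(attr, None)
        return getattr(ds, attr, None)

    class FakeDS(object):       # hypothetical stand-in for a dataset
        hubble_constant = 0.7

    assert _lookup_attr({"hubble_constant": 0.7}, "hubble_constant") == 0.7
    assert _lookup_attr(FakeDS(), "hubble_constant") == 0.7
    assert _lookup_attr({}, "omega_matter") is None   # triggers the skip warning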


https://bitbucket.org/yt_analysis/yt/commits/b69ecf0f7c15/
Changeset:   b69ecf0f7c15
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 12:09:45+00:00
Summary:     to_dataset and to_yt_dataset now functioning.
Affected #:  2 files

diff -r de8e0a2a83d1685bb21b1627013369aa8007961b -r b69ecf0f7c151d96fd0d8f5210ad291d063afe68 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -506,7 +506,16 @@
         keyword = "%s_%s" % (str(self.ds), self._type_name)
         filename = get_output_filename(filename, keyword, ".h5")
 
-        
+        data = {}
+        if fields is not None:
+            for f in fields:
+                data[f] = self[f]
+        else:
+            data.update(self.field_data)
+        extra_attrs = dict([(arg, getattr(self, arg, None))
+                            for arg in self._con_args])
+        extra_attrs["data_type"] = self._type_name
+        to_yt_dataset(self.ds, filename, data, extra_attrs=extra_attrs)
 
         return filename
         

diff -r de8e0a2a83d1685bb21b1627013369aa8007961b -r b69ecf0f7c151d96fd0d8f5210ad291d063afe68 yt/frontends/ytdata/utilties.py
--- a/yt/frontends/ytdata/utilties.py
+++ b/yt/frontends/ytdata/utilties.py
@@ -17,7 +17,10 @@
 import h5py
 import numpy as np
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog
+from yt.units.yt_array import \
+    YTArray
 
 def to_yt_dataset(ds, filename, data, extra_attrs=None):
     r"""Export a set of field arrays to a reloadable yt dataset.
@@ -60,7 +63,7 @@
                    "cosmological_simulation", "omega_lambda",
                    "omega_matter", "hubble_constant"]
 
-    fh = h5py.file(filename, "w")
+    fh = h5py.File(filename, "w")
     for attr in base_attrs:
         if isinstance(ds, dict):
             my_val = ds.get(attr, None)
@@ -73,13 +76,16 @@
             my_val = my_val.in_cgs()
         fh.attrs[attr] = my_val
     for attr in extra_attrs:
-        my_val = extra_attrs[my_val]
+        my_val = extra_attrs[attr]
         if hasattr(my_val, "units"):
             my_val = my_val.in_cgs()
         fh.attrs[attr] = my_val
     if "data_type" not in extra_attrs:
         fh.attrs["data_type"] = "unknown"
     for field in data:
+        # for now, let's avoid writing "code" units
+        if hasattr(field, "units"):
+            data[field].convert_to_cgs()
         dataset = _yt_array_hdf5(fh, field, data[field])
     fh.close()
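
[Editor's note: one subtlety in the cgs loop above: field is the dictionary key, a string, so hasattr(field, "units") never fires; the guard presumably means to test the array, data[field]. A sketch of the intended conversion.]

    import numpy as np
    from yt.units.yt_array import YTArray

    data = {"density": YTArray(np.random.random(10), "Msun/kpc**3")}
    for field in data:
        # Check the array, not the key, for units before converting.
        if hasattr(data[field], "units"):
            data[field].convert_to_cgs()
    print(data["density"].units)    # g/cm**3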
 


https://bitbucket.org/yt_analysis/yt/commits/bb62b3a5b744/
Changeset:   bb62b3a5b744
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 12:28:28+00:00
Summary:     Adding docstring for to_yt_dataset.
Affected #:  2 files

diff -r b69ecf0f7c151d96fd0d8f5210ad291d063afe68 -r bb62b3a5b744fb827d7c74a7c243b082b1e73513 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -514,7 +514,8 @@
             data.update(self.field_data)
         extra_attrs = dict([(arg, getattr(self, arg, None))
                             for arg in self._con_args])
-        extra_attrs["data_type"] = self._type_name
+        extra_attrs["data_type"] = "yt_data_container"
+        extra_attrs["container_type"] = self._type_name
         to_yt_dataset(self.ds, filename, data, extra_attrs=extra_attrs)
 
         return filename

diff -r b69ecf0f7c151d96fd0d8f5210ad291d063afe68 -r bb62b3a5b744fb827d7c74a7c243b082b1e73513 yt/frontends/ytdata/utilties.py
--- a/yt/frontends/ytdata/utilties.py
+++ b/yt/frontends/ytdata/utilties.py
@@ -50,7 +50,27 @@
     Examples
     --------
 
-    COMING SOON!
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+    >>> sphere_density = sphere["density"]
+    >>> region = ds.box([0.]*3, [0.25]*3)
+    >>> region_density = region["density"]
+    >>> data = {}
+    >>> data["sphere_density"] = sphere_density
+    >>> data["region_density"] = region_density
+    >>> to_yt_dataset(ds, "density_data.h5", data)
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> from yt.units.yt_array import YTArray, YTQuantity
+    >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
+    ...         "temperature": YTArray(np.random.random(10), "K")}
+    >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
+    ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
+    ...            "current_time": YTQuantity(10, "Myr")}
+    >>> to_yt_dataset(ds_data, "random_data.h5", data)
     
     """
 
@@ -81,7 +101,7 @@
             my_val = my_val.in_cgs()
         fh.attrs[attr] = my_val
     if "data_type" not in extra_attrs:
-        fh.attrs["data_type"] = "unknown"
+        fh.attrs["data_type"] = "yt_array_data"
     for field in data:
         # for now, let's avoid writing "code" units
         if hasattr(field, "units"):
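
[Editor's note: the second docstring example above uses np without importing it; a fully self-contained version, assuming only numpy and yt are installed.]

    import numpy as np
    from yt.frontends.ytdata.api import to_yt_dataset
    from yt.units.yt_array import YTArray, YTQuantity

    data = {"density": YTArray(np.random.random(10), "g/cm**3"),
            "temperature": YTArray(np.random.random(10), "K")}
    ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
               "domain_right_edge": YTArray(np.ones(3), "cm"),
               "current_time": YTQuantity(10, "Myr")}
    # Writes random_data.h5 with one dataset per field and the
    # metadata above stored as file attributes.
    to_yt_dataset(ds_data, "random_data.h5", data)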


https://bitbucket.org/yt_analysis/yt/commits/f6241fd2f51e/
Changeset:   f6241fd2f51e
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 12:35:44+00:00
Summary:     Return an empty list if there are no candidates.
Affected #:  1 file

diff -r bb62b3a5b744fb827d7c74a7c243b082b1e73513 -r f6241fd2f51e6c8cddb2f2c7ac58f7c7c67f5158 yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -27,6 +27,8 @@
     # lowest class
     if len(candidates) == 1:
         return candidates
+    elif len(candidates) == 0:
+        return []
 
     mros = [inspect.getmro(c) for c in candidates]
 


https://bitbucket.org/yt_analysis/yt/commits/1931fe20371c/
Changeset:   1931fe20371c
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 12:39:56+00:00
Summary:     Adding files for ytData frontend.
Affected #:  5 files

diff -r f6241fd2f51e6c8cddb2f2c7ac58f7c7c67f5158 -r 1931fe20371c2878b90cefcc194f6c605fa845de yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -30,6 +30,7 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("tipsy")
+    config.add_subpackage("ytdata")
     config.add_subpackage("art/tests")
     config.add_subpackage("artio/tests")
     config.add_subpackage("athena/tests")

diff -r f6241fd2f51e6c8cddb2f2c7ac58f7c7c67f5158 -r 1931fe20371c2878b90cefcc194f6c605fa845de yt/frontends/ytdata/data_structures.py
--- /dev/null
+++ b/yt/frontends/ytdata/data_structures.py
@@ -0,0 +1,97 @@
+"""
+Data structures for HaloCatalog frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+    HaloCatalogFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+    
+class HaloCatalogHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+
+        super(HaloCatalogHDF5File, self).__init__(ds, io, filename, file_id)
+    
+class HaloCatalogDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = HaloCatalogHDF5File
+    _field_info_class = HaloCatalogFieldInfo
+    _suffix = ".h5"
+
+    def __init__(self, filename, dataset_type="halocatalog_hdf5",
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(HaloCatalogDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
+        self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
+        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, hvals[attr])
+        self.periodicity = (True, True, True)
+        self.particle_types = ("halos")
+        self.particle_types_raw = ("halos")
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.parameters.update(hvals)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.velocity_unit = self.quan(1.0, "cm / s")
+        self.time_unit = self.quan(1.0, "s")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            if "data_type" in f.attrs and \
+              f.attrs["data_type"] == "halo_catalog":
+                return True
+        return False

diff -r f6241fd2f51e6c8cddb2f2c7ac58f7c7c67f5158 -r 1931fe20371c2878b90cefcc194f6c605fa845de yt/frontends/ytdata/fields.py
--- /dev/null
+++ b/yt/frontends/ytdata/fields.py
@@ -0,0 +1,48 @@
+"""
+HaloCatalog-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.units.yt_array import \
+    YTArray
+
+from yt.utilities.physical_constants import \
+    mh, \
+    mass_sun_cgs
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class HaloCatalogFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("particle_identifier", ("", [], None)),
+        ("particle_position_x", (p_units, [], None)),
+        ("particle_position_y", (p_units, [], None)),
+        ("particle_position_z", (p_units, [], None)),
+        ("particle_velocity_x", (v_units, [], None)),
+        ("particle_velocity_y", (v_units, [], None)),
+        ("particle_velocity_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Virial Mass")),
+        ("virial_radius", (r_units, [], "Virial Radius")),
+)

diff -r f6241fd2f51e6c8cddb2f2c7ac58f7c7c67f5158 -r 1931fe20371c2878b90cefcc194f6c605fa845de yt/frontends/ytdata/io.py
--- /dev/null
+++ b/yt/frontends/ytdata/io.py
@@ -0,0 +1,119 @@
+"""
+HaloCatalog data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerHaloCatalogHDF5(BaseIOHandler):
+    _dataset_type = "halocatalog_hdf5"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                x = f['particle_position_x'].value.astype("float64")
+                y = f['particle_position_y'].value.astype("float64")
+                z = f['particle_position_z'].value.astype("float64")
+                yield "halos", (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = f['particle_position_x'].value.astype("float64")
+                    y = f['particle_position_y'].value.astype("float64")
+                    z = f['particle_position_z'].value.astype("float64")
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f[field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        pcount = data_file.header["num_halos"]
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            if not f.keys(): return None
+            pos = np.empty((pcount, 3), dtype="float64")
+            pos = data_file.ds.arr(pos, "code_length")
+            dx = np.finfo(f['particle_position_x'].dtype).eps
+            dx = 2.0*self.ds.quan(dx, "code_length")
+            pos[:,0] = f["particle_position_x"].value
+            pos[:,1] = f["particle_position_y"].value
+            pos[:,2] = f["particle_position_z"].value
+            # These are 32 bit numbers, so we give a little lee-way.
+            # Otherwise, for big sets of particles, we often will bump into the
+            # domain edges.  This helps alleviate that.
+            np.clip(pos, self.ds.domain_left_edge + dx,
+                         self.ds.domain_right_edge - dx, pos)
+            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.ds.domain_left_edge,
+                                       self.ds.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'halos': data_file.header['num_halos']}
+
+    def _identify_fields(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            fields = [("halos", field) for field in f]
+            units = dict([(("halos", field), 
+                           f[field].attrs["units"]) for field in f])
+        return fields, units

diff -r f6241fd2f51e6c8cddb2f2c7ac58f7c7c67f5158 -r 1931fe20371c2878b90cefcc194f6c605fa845de yt/frontends/ytdata/setup.py
--- /dev/null
+++ b/yt/frontends/ytdata/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ytdata', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config
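
[Editor's note: the _is_valid hook is how a frontend claims a file at load time: open it and look for a marker attribute. As copied over from the HaloCatalog frontend, the check above still tests for "halo_catalog"; later changesets update the marker. The mechanism in miniature.]

    import h5py

    # Write a tiny file carrying the marker attribute _is_valid looks for.
    with h5py.File("claimed.h5", "w") as f:
        f.attrs["data_type"] = "halo_catalog"

    with h5py.File("claimed.h5", "r") as f:
        claimed = "data_type" in f.attrs and \
            f.attrs["data_type"] == "halo_catalog"
    print(claimed)    # True: this file would be claimed by the class above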


https://bitbucket.org/yt_analysis/yt/commits/e1258d3d4499/
Changeset:   e1258d3d4499
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 12:47:14+00:00
Summary:     Some initial renaming.
Affected #:  4 files

diff -r 1931fe20371c2878b90cefcc194f6c605fa845de -r e1258d3d44998670cb684be6e66abbc5509773e7 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -14,14 +14,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-# from .data_structures import \
-#      ytDataDataset
+from .data_structures import \
+    YTDataDataset
 
-# from .io import \
-#      IOHandlerytDataHDF5
+from .io import \
+    IOHandlerYTDataHDF5
 
-# from .fields import \
-#      ytDataFieldInfo
+from .fields import \
+    YTDataFieldInfo
 
 from .utilties import \
     to_yt_dataset

diff -r 1931fe20371c2878b90cefcc194f6c605fa845de -r e1258d3d44998670cb684be6e66abbc5509773e7 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -1,5 +1,5 @@
 """
-Data structures for HaloCatalog frontend.
+Data structures for YTData frontend.
 
 
 
@@ -7,7 +7,7 @@
 """
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
+# Copyright (c) 2015, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -24,7 +24,7 @@
 import os
 
 from .fields import \
-    HaloCatalogFieldInfo
+    YTDataFieldInfo
 
 from yt.utilities.cosmology import Cosmology
 from yt.geometry.particle_geometry_handler import \
@@ -37,25 +37,25 @@
     YTArray, \
     YTQuantity
     
-class HaloCatalogHDF5File(ParticleFile):
+class YTDataHDF5File(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         with h5py.File(filename, "r") as f:
             self.header = dict((field, f.attrs[field]) \
                                for field in f.attrs.keys())
 
-        super(HaloCatalogHDF5File, self).__init__(ds, io, filename, file_id)
+        super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
     
-class HaloCatalogDataset(Dataset):
+class YTDataDataset(Dataset):
     _index_class = ParticleIndex
-    _file_class = HaloCatalogHDF5File
-    _field_info_class = HaloCatalogFieldInfo
+    _file_class = YTDataHDF5File
+    _field_info_class = YTDataFieldInfo
     _suffix = ".h5"
 
-    def __init__(self, filename, dataset_type="halocatalog_hdf5",
+    def __init__(self, filename, dataset_type="ytdata_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(HaloCatalogDataset, self).__init__(filename, dataset_type,
+        super(YTDataDataset, self).__init__(filename, dataset_type,
                                                  units_override=units_override)
 
     def _parse_parameter_file(self):

diff -r 1931fe20371c2878b90cefcc194f6c605fa845de -r e1258d3d44998670cb684be6e66abbc5509773e7 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -1,5 +1,5 @@
 """
-HaloCatalog-specific fields
+YTData-specific fields
 
 
 
@@ -7,7 +7,7 @@
 """
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
+# Copyright (c) 2015, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -22,27 +22,17 @@
 from yt.units.yt_array import \
     YTArray
 
-from yt.utilities.physical_constants import \
-    mh, \
-    mass_sun_cgs
-
 m_units = "g"
 p_units = "cm"
 v_units = "cm / s"
 r_units = "cm"
 
-class HaloCatalogFieldInfo(FieldInfoContainer):
+class YTDataFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )
 
     known_particle_fields = (
-        ("particle_identifier", ("", [], None)),
         ("particle_position_x", (p_units, [], None)),
         ("particle_position_y", (p_units, [], None)),
         ("particle_position_z", (p_units, [], None)),
-        ("particle_velocity_x", (v_units, [], None)),
-        ("particle_velocity_y", (v_units, [], None)),
-        ("particle_velocity_z", (v_units, [], None)),
-        ("particle_mass", (m_units, [], "Virial Mass")),
-        ("virial_radius", (r_units, [], "Virial Radius")),
-)
+    )

diff -r 1931fe20371c2878b90cefcc194f6c605fa845de -r e1258d3d44998670cb684be6e66abbc5509773e7 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -1,5 +1,5 @@
 """
-HaloCatalog data-file handling function
+YTData data-file handling function
 
 
 
@@ -7,7 +7,7 @@
 """
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
+# Copyright (c) 2015, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -18,17 +18,20 @@
 import numpy as np
 
 from yt.utilities.exceptions import *
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog
 
 from yt.utilities.io_handler import \
     BaseIOHandler
 
-from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
 
-from yt.geometry.oct_container import _ORDER_MAX
+from yt.geometry.oct_container import \
+    _ORDER_MAX
 
-class IOHandlerHaloCatalogHDF5(BaseIOHandler):
-    _dataset_type = "halocatalog_hdf5"
+class IOHandlerYTDataHDF5(BaseIOHandler):
+    _dataset_type = "ytdata_hdf5"
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError


https://bitbucket.org/yt_analysis/yt/commits/611180793030/
Changeset:   611180793030
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 13:02:32+00:00
Summary:     Renaming file with correct spelling.
Affected #:  2 files

diff -r e1258d3d44998670cb684be6e66abbc5509773e7 -r 611180793030955e568fe69b53ec952c16242911 yt/frontends/ytdata/utilities.py
--- /dev/null
+++ b/yt/frontends/ytdata/utilities.py
@@ -0,0 +1,171 @@
+"""
+Utility functions for ytdata frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.funcs import \
+    mylog
+from yt.units.yt_array import \
+    YTArray
+
+def to_yt_dataset(ds, filename, data, extra_attrs=None):
+    r"""Export a set of field arrays to a reloadable yt dataset.
+
+    This function can be used to create a yt loadable dataset from a 
+    set of arrays.  The field arrays can either be associated with a 
+    loaded dataset or, if not, a dictionary of dataset attributes can
+    be provided that will be used as metadata for the new dataset.  The 
+    resulting dataset can be reloaded as a yt dataset.
+
+    Parameters
+    ----------
+    ds : dataset
+        The dataset associated with the fields.  
+    filename : str
+        The name of the file to be written.
+    data : dict
+        A dictionary of field arrays to be saved.
+    extra_attrs : dict
+        A dictionary of additional attributes to be saved.
+
+    Returns
+    -------
+    filename : str
+        The name of the file that has been created.
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+    >>> sphere_density = sphere["density"]
+    >>> region = ds.box([0.]*3, [0.25]*3)
+    >>> region_density = region["density"]
+    >>> data = {}
+    >>> data["sphere_density"] = sphere_density
+    >>> data["region_density"] = region_density
+    >>> to_yt_dataset(ds, "density_data.h5", data)
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> from yt.units.yt_array import YTArray, YTQuantity
+    >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
+    ...         "temperature": YTArray(np.random.random(10), "K")}
+    >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
+    ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
+    ...            "current_time": YTQuantity(10, "Myr")}
+    >>> to_yt_dataset(ds_data, "random_data.h5", data)
+    
+    """
+
+    mylog.info("Saving field data to yt dataset: %s." % filename)
+
+    if extra_attrs is None: extra_attrs = {}
+    base_attrs  = ["domain_left_edge", "domain_right_edge",
+                   "current_redshift", "current_time",
+                   "domain_dimensions", "periodicity",
+                   "cosmological_simulation", "omega_lambda",
+                   "omega_matter", "hubble_constant"]
+
+    fh = h5py.File(filename, "w")
+    for attr in base_attrs:
+        if isinstance(ds, dict):
+            my_val = ds.get(attr, None)
+        else:
+            my_val = getattr(ds, attr, None)
+        if my_val is None:
+            mylog.warn("Skipping %s attribute, this may be just fine." % attr)
+            continue
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    for attr in extra_attrs:
+        my_val = extra_attrs[attr]
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    if "data_type" not in extra_attrs:
+        fh.attrs["data_type"] = "yt_array_data"
+    for field in data:
+        # for now, let's avoid writing "code" units
+        if hasattr(field, "units"):
+            data[field].convert_to_cgs()
+        dataset = _yt_array_hdf5(fh, field, data[field])
+    fh.close()
+
+def _hdf5_yt_array(fh, field, ds=None):
+    r"""Load an hdf5 dataset as a YTArray.
+
+    Reads in a dataset from an open hdf5 file or group and uses the
+    "units" attribute, if it exists, to apply units.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group in which the dataset exists.
+    field : str
+        The name of the field to be loaded.
+    ds : yt Dataset
+        If not None, the unit_registry of the dataset
+        is used to apply units.
+
+    Returns
+    -------
+    A YTArray of the requested field.
+    
+    """
+    
+    if ds is None:
+        new_arr = YTArray
+    else:
+        new_arr = ds.arr
+    units = ""
+    if "units" in fh[field].attrs:
+        units = fh[field].attrs["units"]
+    if units == "dimensionless": units = ""
+    return new_arr(fh[field].value, units)
+
+def _yt_array_hdf5(fh, field, data):
+    r"""Save a YTArray to an open hdf5 file or group.
+
+    Save a YTArray to an open hdf5 file or group, and save the 
+    units to a "units" attribute.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group to which the data will be written.
+    field : str
+        The name of the field to be saved.
+    data : YTArray
+        The data array to be saved.
+
+    Returns
+    -------
+    dataset : hdf5 dataset
+        The created hdf5 dataset.
+    
+    """
+
+    dataset = fh.create_dataset(field, data=data)
+    units = ""
+    if isinstance(data, YTArray):
+        units = str(data.units)
+    dataset.attrs["units"] = units
+    return dataset

diff -r e1258d3d44998670cb684be6e66abbc5509773e7 -r 611180793030955e568fe69b53ec952c16242911 yt/frontends/ytdata/utilties.py
--- a/yt/frontends/ytdata/utilties.py
+++ /dev/null
@@ -1,171 +0,0 @@
-"""
-Utility functions for ytdata frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-
-from yt.funcs import \
-    mylog
-from yt.units.yt_array import \
-    YTArray
-
-def to_yt_dataset(ds, filename, data, extra_attrs=None):
-    r"""Export a set of field arrays to a reloadable yt dataset.
-
-    This function can be used to create a yt loadable dataset from a 
-    set of arrays.  The field arrays can either be associated with a 
-    loaded dataset or, if not, a dictionary of dataset attributes can
-    be provided that will be used as metadata for the new dataset.  The 
-    resulting dataset can be reloaded as a yt dataset.
-
-    Parameters
-    ----------
-    ds : dataset
-        The dataset associated with the fields.  
-    filename : str
-        The name of the file to be written.
-    data : dict
-        A dictionary of field arrays to be saved.
-    extra_attrs : dict
-        A dictionary of additional attributes to be saved.
-
-    Returns
-    -------
-    filename : str
-        The name of the file that has been created.
-
-    Examples
-    --------
-
-    >>> import yt
-    >>> from yt.frontends.ytdata.api import to_yt_dataset
-    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
-    >>> sphere_density = sphere["density"]
-    >>> region = ds.box([0.]*3, [0.25]*3)
-    >>> region_density = region["density"]
-    >>> data = {}
-    >>> data["sphere_density"] = sphere_density
-    >>> data["region_density"] = region_density
-    >>> to_yt_dataset(ds, "density_data.h5", data)
-
-    >>> import yt
-    >>> from yt.frontends.ytdata.api import to_yt_dataset
-    >>> from yt.units.yt_array import YTArray, YTQuantity
-    >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
-    ...         "temperature": YTArray(np.random.random(10), "K")}
-    >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
-    ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
-    ...            "current_time": YTQuantity(10, "Myr")}
-    >>> to_yt_dataset(ds_data, "random_data.h5", data)
-    
-    """
-
-    mylog.info("Saving field data to yt dataset: %s." % filename)
-
-    if extra_attrs is None: extra_attrs = {}
-    base_attrs  = ["domain_left_edge", "domain_right_edge",
-                   "current_redshift", "current_time",
-                   "domain_dimensions", "periodicity",
-                   "cosmological_simulation", "omega_lambda",
-                   "omega_matter", "hubble_constant"]
-
-    fh = h5py.File(filename, "w")
-    for attr in base_attrs:
-        if isinstance(ds, dict):
-            my_val = ds.get(attr, None)
-        else:
-            my_val = getattr(ds, attr, None)
-        if my_val is None:
-            mylog.warn("Skipping %s attribute, this may be just fine." % attr)
-            continue
-        if hasattr(my_val, "units"):
-            my_val = my_val.in_cgs()
-        fh.attrs[attr] = my_val
-    for attr in extra_attrs:
-        my_val = extra_attrs[attr]
-        if hasattr(my_val, "units"):
-            my_val = my_val.in_cgs()
-        fh.attrs[attr] = my_val
-    if "data_type" not in extra_attrs:
-        fh.attrs["data_type"] = "yt_array_data"
-    for field in data:
-        # for now, let's avoid writing "code" units
-        if hasattr(field, "units"):
-            data[field].convert_to_cgs()
-        dataset = _yt_array_hdf5(fh, field, data[field])
-    fh.close()
-
-def _hdf5_yt_array(fh, field, ds=None):
-    r"""Load an hdf5 dataset as a YTArray.
-
-    Reads in a dataset from an open hdf5 file or group and uses the
-    "units" attribute, if it exists, to apply units.
-    
-    Parameters
-    ----------
-    fh : an open hdf5 file or hdf5 group
-        The hdf5 file or group in which the dataset exists.
-    field : str
-        The name of the field to be loaded.
-    ds : yt Dataset
-        If not None, the unit_registry of the dataset
-        is used to apply units.
-
-    Returns
-    -------
-    A YTArray of the requested field.
-    
-    """
-    
-    if ds is None:
-        new_arr = YTArray
-    else:
-        new_arr = ds.arr
-    units = ""
-    if "units" in fh[field].attrs:
-        units = fh[field].attrs["units"]
-    if units == "dimensionless": units = ""
-    return new_arr(fh[field].value, units)
-
-def _yt_array_hdf5(fh, field, data):
-    r"""Save a YTArray to an open hdf5 file or group.
-
-    Save a YTArray to an open hdf5 file or group, and save the 
-    units to a "units" attribute.
-    
-    Parameters
-    ----------
-    fh : an open hdf5 file or hdf5 group
-        The hdf5 file or group to which the data will be written.
-    field : str
-        The name of the field to be saved.
-    data : YTArray
-        The data array to be saved.
-
-    Returns
-    -------
-    dataset : hdf5 dataset
-        The created hdf5 dataset.
-    
-    """
-
-    dataset = fh.create_dataset(field, data=data)
-    units = ""
-    if isinstance(data, YTArray):
-        units = str(data.units)
-    dataset.attrs["units"] = units
-    return dataset


https://bitbucket.org/yt_analysis/yt/commits/5de304a6a01f/
Changeset:   5de304a6a01f
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 13:02:56+00:00
Summary:     Cleaning up some imports and such.
Affected #:  4 files

diff -r 611180793030955e568fe69b53ec952c16242911 -r 5de304a6a01f61563962d4d702e474343735360f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 
 from yt.data_objects.particle_io import particle_handler_registry
-from yt.frontends.ytdata.api import \
+from yt.frontends.ytdata.utilities import \
     to_yt_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \

diff -r 611180793030955e568fe69b53ec952c16242911 -r 5de304a6a01f61563962d4d702e474343735360f yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -23,5 +23,5 @@
 from .fields import \
     YTDataFieldInfo
 
-from .utilties import \
+from .utilities import \
     to_yt_dataset

diff -r 611180793030955e568fe69b53ec952c16242911 -r 5de304a6a01f61563962d4d702e474343735360f yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -89,9 +89,11 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        import pdb ; pdb.set_trace()
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
             if "data_type" in f.attrs and \
-              f.attrs["data_type"] == "halo_catalog":
+              f.attrs["data_type"] in ["yt_array_data",
+                                       "yt_data_container"]:
                 return True
         return False

diff -r 611180793030955e568fe69b53ec952c16242911 -r 5de304a6a01f61563962d4d702e474343735360f yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -19,8 +19,6 @@
 from yt.funcs import mylog
 from yt.fields.field_info_container import \
     FieldInfoContainer
-from yt.units.yt_array import \
-    YTArray
 
 m_units = "g"
 p_units = "cm"


https://bitbucket.org/yt_analysis/yt/commits/6fa1cef7900f/
Changeset:   6fa1cef7900f
Branch:      yt
User:        brittonsmith
Date:        2015-07-31 13:04:42+00:00
Summary:     Adding to list of frontends.
Affected #:  2 files

diff -r 5de304a6a01f61563962d4d702e474343735360f -r 6fa1cef7900ff2161705435c2088b9f230747063 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,6 +39,7 @@
     'sdf',
     'stream',
     'tipsy',
+    'ytdata',
 ]
 
 class _frontend_container:

diff -r 5de304a6a01f61563962d4d702e474343735360f -r 6fa1cef7900ff2161705435c2088b9f230747063 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -89,7 +89,6 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        import pdb ; pdb.set_trace()
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
             if "data_type" in f.attrs and \


https://bitbucket.org/yt_analysis/yt/commits/e592428b8e4e/
Changeset:   e592428b8e4e
Branch:      yt
User:        brittonsmith
Date:        2015-08-01 12:29:12+00:00
Summary:     Write out all relevant position fields.
Affected #:  2 files

diff -r 6fa1cef7900ff2161705435c2088b9f230747063 -r e592428b8e4e760ac24ea05db1b09456c06b1ce7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -508,15 +508,47 @@
 
         data = {}
         if fields is not None:
-            for f in fields:
+            for f in self._determine_fields(fields):
                 data[f] = self[f]
         else:
             data.update(self.field_data)
+        data_fields = data.keys()
+
+        need_grid_fields = False
+        need_particle_fields = False
+        ptypes = []
+        ftypes = {}
+        for field in data_fields:
+            if self.ds.field_info[field].particle_type:
+                if field[0] not in ptypes:
+                    ptypes.append(field[0])
+                ftypes[field] = field[0]
+                need_particle_fields = True
+            else:
+                ftypes[field] = "grid"
+                need_grid_fields = True
+
+        for ax in "xyz":
+            if need_particle_fields:
+                for ptype in ptypes:
+                    p_field = (ptype, "particle_position_%s" % ax)
+                    if p_field in self.ds.field_info and p_field not in data:
+                        data_fields.append(field)
+                        ftypes[p_field] = p_field[0]
+                        data[p_field] = self[p_field]
+            if need_grid_fields:
+                g_field = ("index", ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+                    
         extra_attrs = dict([(arg, getattr(self, arg, None))
                             for arg in self._con_args])
         extra_attrs["data_type"] = "yt_data_container"
         extra_attrs["container_type"] = self._type_name
-        to_yt_dataset(self.ds, filename, data, extra_attrs=extra_attrs)
+        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
+                      extra_attrs=extra_attrs)
 
         return filename
         

diff -r 6fa1cef7900ff2161705435c2088b9f230747063 -r e592428b8e4e760ac24ea05db1b09456c06b1ce7 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -22,7 +22,8 @@
 from yt.units.yt_array import \
     YTArray
 
-def to_yt_dataset(ds, filename, data, extra_attrs=None):
+def to_yt_dataset(ds, filename, data, field_types=None,
+                  extra_attrs=None):
     r"""Export a set of field arrays to a reloadable yt dataset.
 
     This function can be used to create a yt loadable dataset from a 
@@ -103,10 +104,17 @@
     if "data_type" not in extra_attrs:
         fh.attrs["data_type"] = "yt_array_data"
     for field in data:
+        if field_types is None:
+            field_type = "data"
+        else:
+            field_type = field_types[field]
+        if field_type not in fh:
+            fh.create_group(field_type)
+        
         # for now, let's avoid writing "code" units
         if hasattr(field, "units"):
             data[field].convert_to_cgs()
-        dataset = _yt_array_hdf5(fh, field, data[field])
+        dataset = _yt_array_hdf5(fh[field_type], field, data[field])
     fh.close()
 
 def _hdf5_yt_array(fh, field, ds=None):
@@ -163,7 +171,7 @@
     
     """
 
-    dataset = fh.create_dataset(field, data=data)
+    dataset = fh.create_dataset(str(field), data=data)
     units = ""
     if isinstance(data, YTArray):
         units = str(data.units)


https://bitbucket.org/yt_analysis/yt/commits/4447c7544559/
Changeset:   4447c7544559
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 11:28:07+00:00
Summary:     Adding support for grid fields.
Affected #:  2 files

diff -r e592428b8e4e760ac24ea05db1b09456c06b1ce7 -r 4447c754455921f9c26d9258fd65c80d8a97e7c6 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -74,8 +74,8 @@
                      "domain_left_edge", "domain_right_edge"]:
             setattr(self, attr, hvals[attr])
         self.periodicity = (True, True, True)
-        self.particle_types = ("halos")
-        self.particle_types_raw = ("halos")
+        self.particle_types = ("grid",)
+        self.particle_types_raw = ("grid",)
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
@@ -92,7 +92,8 @@
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
             if "data_type" in f.attrs and \
-              f.attrs["data_type"] in ["yt_array_data",
+              f.attrs["data_type"] in ["light_ray",
+                                       "yt_array_data",
                                        "yt_data_container"]:
                 return True
         return False

diff -r e592428b8e4e760ac24ea05db1b09456c06b1ce7 -r 4447c754455921f9c26d9258fd65c80d8a97e7c6 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -112,11 +112,12 @@
         return morton
 
     def _count_particles(self, data_file):
-        return {'halos': data_file.header['num_halos']}
+        with h5py.File(data_file.filename, "r") as f:
+            return {"grid": f["grid"].attrs["num_elements"]}
 
     def _identify_fields(self, data_file):
         with h5py.File(data_file.filename, "r") as f:
-            fields = [("halos", field) for field in f]
-            units = dict([(("halos", field), 
-                           f[field].attrs["units"]) for field in f])
+            fields = [("grid", field) for field in f["grid"]]
+            units = dict([(("grid", field), 
+                           f["grid"][field].attrs["units"]) for field in f["grid"]])
         return fields, units
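
To make the expected file layout concrete, here is a hedged h5py sketch
(all names and values hypothetical) that writes the structure
_count_particles and _identify_fields now read:

    import h5py
    import numpy as np

    with h5py.File("grid_data.h5", "w") as f:
        f.attrs["data_type"] = "yt_data_container"
        grid = f.create_group("grid")
        grid.attrs["num_elements"] = 8           # read by _count_particles
        for ax in "xyz":
            d = grid.create_dataset(ax, data=np.random.random(8))
            d.attrs["units"] = "code_length"     # read by _identify_fields
        dens = grid.create_dataset("density", data=np.random.random(8))
        dens.attrs["units"] = "g/cm**3"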


https://bitbucket.org/yt_analysis/yt/commits/fc3ac2b9fa85/
Changeset:   fc3ac2b9fa85
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 11:34:01+00:00
Summary:     Almost got grid fields.
Affected #:  1 file

diff -r 4447c754455921f9c26d9258fd65c80d8a97e7c6 -r fc3ac2b9fa85b2b2bf35616f2c65be45f6a4cea3 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -42,17 +42,17 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(list(ptf.keys())[0] == "halos")
+        assert(list(ptf.keys())[0] == "grid")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
-                x = f['particle_position_x'].value.astype("float64")
-                y = f['particle_position_y'].value.astype("float64")
-                z = f['particle_position_z'].value.astype("float64")
-                yield "halos", (x, y, z)
+                x = f["grid"]['x'].value.astype("float64")
+                y = f["grid"]['y'].value.astype("float64")
+                z = f["grid"]['z'].value.astype("float64")
+                yield "grid", (x, y, z)
 
     def _read_particle_fields(self, chunks, ptf, selector):
         # Now we have all the sizes, and we can allocate
@@ -60,17 +60,18 @@
         data_files = set([])
         # Only support halo reading for now.
         assert(len(ptf) == 1)
-        assert(list(ptf.keys())[0] == "halos")
+        assert(list(ptf.keys())[0] == "grid")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
-            pcount = data_file.header['num_halos']
+            all_count = self._count_particles(data_file)
+            pcount = all_count["grid"]
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
-                    x = f['particle_position_x'].value.astype("float64")
-                    y = f['particle_position_y'].value.astype("float64")
-                    z = f['particle_position_z'].value.astype("float64")
+                    x = f["grid"]['x'].value.astype("float64")
+                    y = f["grid"]['y'].value.astype("float64")
+                    z = f["grid"]['z'].value.astype("float64")
                     mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
@@ -79,20 +80,21 @@
                         yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):
-        pcount = data_file.header["num_halos"]
+        all_count = self._count_particles(data_file)
+        pcount = all_count["grid"]
         morton = np.empty(pcount, dtype='uint64')
         mylog.debug("Initializing index % 5i (% 7i particles)",
                     data_file.file_id, pcount)
         ind = 0
         with h5py.File(data_file.filename, "r") as f:
-            if not f.keys(): return None
+            if not f["grid"].keys(): return None
             pos = np.empty((pcount, 3), dtype="float64")
             pos = data_file.ds.arr(pos, "code_length")
-            dx = np.finfo(f['particle_position_x'].dtype).eps
+            dx = np.finfo(f["grid"]['x'].dtype).eps
             dx = 2.0*self.ds.quan(dx, "code_length")
-            pos[:,0] = f["particle_position_x"].value
-            pos[:,1] = f["particle_position_y"].value
-            pos[:,2] = f["particle_position_z"].value
+            pos[:,0] = f["grid"]["x"].value
+            pos[:,1] = f["grid"]["y"].value
+            pos[:,2] = f["grid"]["z"].value
             # These are 32 bit numbers, so we give a little lee-way.
             # Otherwise, for big sets of particles, we often will bump into the
             # domain edges.  This helps alleviate that.
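
Stripped of the chunking machinery, the read pattern above amounts to the
following hedged sketch; the boolean mask is a stand-in for
selector.select_points, an assumption made purely for illustration:

    import h5py

    with h5py.File("grid_data.h5", "r") as f:
        x = f["grid"]["x"][()].astype("float64")
        y = f["grid"]["y"][()].astype("float64")
        z = f["grid"]["z"][()].astype("float64")
        # Stand-in for selector.select_points: keep points with x < 0.5.
        mask = x < 0.5
        if mask.any():
            data = f["grid"]["density"][()][mask].astype("float64")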


https://bitbucket.org/yt_analysis/yt/commits/f3110510ef2d/
Changeset:   f3110510ef2d
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 11:36:31+00:00
Summary:     Missed a pcount.
Affected #:  1 file

diff -r fc3ac2b9fa85b2b2bf35616f2c65be45f6a4cea3 -r f3110510ef2dfabae96f7079f35c0c369155d746 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -47,7 +47,8 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
-            pcount = data_file.header['num_halos']
+            all_count = self._count_particles(data_file)
+            pcount = all_count["grid"]
             with h5py.File(data_file.filename, "r") as f:
                 x = f["grid"]['x'].value.astype("float64")
                 y = f["grid"]['y'].value.astype("float64")


https://bitbucket.org/yt_analysis/yt/commits/2c6d9e6ebffb/
Changeset:   2c6d9e6ebffb
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 11:38:27+00:00
Summary:     Fixing filename template.
Affected #:  1 file

diff -r f3110510ef2dfabae96f7079f35c0c369155d746 -r 2c6d9e6ebffbe09192c6cc4eacd37151ae6a1a4f yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -66,8 +66,8 @@
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
-        self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
-        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
 
         for attr in ["cosmological_simulation", "current_time", "current_redshift",
                      "hubble_constant", "omega_matter", "omega_lambda",


https://bitbucket.org/yt_analysis/yt/commits/39f92435f532/
Changeset:   39f92435f532
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 11:40:16+00:00
Summary:     Use a string.
Affected #:  1 file

diff -r 2c6d9e6ebffbe09192c6cc4eacd37151ae6a1a4f -r 39f92435f532ef0e82ec90c739c97f08b45bfcb1 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -120,7 +120,7 @@
 
     def _identify_fields(self, data_file):
         with h5py.File(data_file.filename, "r") as f:
-            fields = [("grid", field) for field in f["grid"]]
-            units = dict([(("grid", field), 
+            fields = [("grid", str(field)) for field in f["grid"]]
+            units = dict([(("grid", str(field)), 
                            f["grid"][field].attrs["units"]) for field in f["grid"]])
         return fields, units


https://bitbucket.org/yt_analysis/yt/commits/983689a4cff8/
Changeset:   983689a4cff8
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 12:32:53+00:00
Summary:     Make sure to access grid group.
Affected #:  1 file

diff -r 39f92435f532ef0e82ec90c739c97f08b45bfcb1 -r 983689a4cff861c6846f283f90e8a296662d667b yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -77,7 +77,7 @@
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:
-                        data = f[field][mask].astype("float64")
+                        data = f["grid"][field][mask].astype("float64")
                         yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):


https://bitbucket.org/yt_analysis/yt/commits/dd702572dfef/
Changeset:   dd702572dfef
Branch:      yt
User:        devinsilvia
Date:        2015-07-22 19:10:34+00:00
Summary:     Trying to add some attributes to light ray objects so that they can be read back in as YT objects.

This will allow for simpler addition of new fields for light ray objects.
Affected #:  1 file

diff -r c12349973350b9c9b2f4c60af9a7bc6b90ea42e2 -r dd702572dfef60055d5473052c22f4fe252e3379 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -494,6 +494,9 @@
 
         mylog.info("Saving light ray data to %s." % filename)
         output = h5py.File(filename, 'w')
+        for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+            output.attrs[attr] = getattr(self.cosmology, attr)
+        output.attrs["data_type"] = "light_ray"
         for field in data.keys():
             # if the field is a tuple, only use the second part of the tuple
             # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
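
A quick, hedged way to check the new attributes after a ray has been saved
(the filename is hypothetical):

    import h5py

    with h5py.File("lightray.h5", "r") as f:
        assert f.attrs["data_type"] == "light_ray"
        for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
            print(attr, f.attrs[attr])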


https://bitbucket.org/yt_analysis/yt/commits/613c3490f3ff/
Changeset:   613c3490f3ff
Branch:      yt
User:        devinsilvia
Date:        2015-07-24 19:30:18+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  142 files

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -11,6 +11,7 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/analysis_modules/ppv_cube/ppv_utils.c
 yt/frontends/ramses/_ramses_reader.cpp
+yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c
 yt/geometry/grid_container.c
 yt/geometry/grid_visitors.c
@@ -40,6 +41,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
+yt/utilities/lib/GridTree.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
@@ -62,3 +64,4 @@
 doc/source/reference/api/generated/*
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 README
--- a/README
+++ b/README
@@ -21,3 +21,4 @@
 ways to help development, please visit our website.
 
 Enjoy!
+

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 distribute_setup.py
--- a/distribute_setup.py
+++ /dev/null
@@ -1,541 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.32"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>=" + version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s to %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unknown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Moving elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    try:
-        f = open(pkg_info, 'w')
-    except EnvironmentError:
-        log.warn("Don't have permissions to write %s, skipping", pkg_info)
-        return
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
-    _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install') + 1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index + 1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools', replacement=False)
-            )
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools')
-        )
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --prefix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if it's an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patching complete.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
-    _cmd2 = ['-c', 'install', '--record']
-    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448  # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the distribute package
-    """
-    install_args = []
-    if options.user_install:
-        if sys.version_info < (2, 6):
-            log.warn("--user requires Python 2.6 or later")
-            raise SystemExit(1)
-        install_args.append('--user')
-    return install_args
-
-def _parse_args():
-    """
-    Parse the command line for options
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the distribute package')
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-def main(version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    options = _parse_args()
-    tarball = download_setuptools(download_base=options.download_base)
-    return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/README
--- a/doc/README
+++ b/doc/README
@@ -7,4 +7,4 @@
 Because the documentation requires a number of dependencies, we provide
 pre-built versions online, accessible here:
 
-http://yt-project.org/docs/dev-3.0/
+http://yt-project.org/docs/dev/

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -13,7 +13,7 @@
 from yt.config import ytcfg
 
 FPATTERNS = ['*.png', '*.txt', '*.h5', '*.dat']
-DPATTERNS = ['LC*', 'LR', 'DD0046', 'halo_analysis']
+DPATTERNS = ['LC*', 'LR', 'DD0046']
 BADF = ['cloudy_emissivity.h5', 'apec_emissivity.h5',
         'xray_emissivity.h5', 'AMRGridData_Slice_x_density.png']
 CWD = os.getcwd()

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,18 +1,14 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
+# First things first, if you experience problems, please visit the Help 
+# section at http://yt-project.org.
+#
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them.  And
-# that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
-# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up.
-#
-# If you experience problems, please visit the Help section at 
-# http://yt-project.org.
-#
+# There are a few options, but you only need to set *one* of them, which is 
+# the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
@@ -23,16 +19,25 @@
     DEST_DIR=${YT_DEST}
 fi
 
+# What follows are some other options that you may or may not need to change.
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
+# If you've got yt some other place, set this to point to it. The script will
+# already check the current directory and the one above it in the tree.
+YT_DIR=""
+
 # If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
 #NUMPY_ARGS="--fcompiler=fake"
 
+INST_PY3=0      # Install Python 3 along with Python 2. If this is turned
+                # on, all Python packages (including yt) will be installed
+                # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
 INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
@@ -50,9 +55,6 @@
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got yt some other place, set this to point to it.
-YT_DIR=""
-
 # If you need to pass anything to matplotlib, do so here.
 MPL_SUPP_LDFLAGS=""
 MPL_SUPP_CFLAGS=""
@@ -111,6 +113,7 @@
     echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
     echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
     echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE}
     echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
     echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
     echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
@@ -415,6 +418,10 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
+printf "%-15s = %s so I " "INST_PY3" "${INST_PY3}"
+get_willwont ${INST_PY3}
+echo "be installing Python 3"
+
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -487,6 +494,13 @@
     exit 1
 }
 
+if [ $INST_PY3 -eq 1 ]
+then
+	 PYTHON_EXEC='python3.4'
+else 
+	 PYTHON_EXEC='python2.7'
+fi
+
 function do_setup_py
 {
     [ -e $1/done ] && return
@@ -501,21 +515,27 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
+    if [[ $LIB =~ .*mercurial.* ]] 
+    then
+        PYEXE="python2.7"
+    else
+        PYEXE=${PYTHON_EXEC}
+    fi
     case $LIB in
         *h5py*)
             pushd $LIB &> /dev/null
-            ( ${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
             popd &> /dev/null
             ;;
         *numpy*)
-            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            if [ -e ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/numpy/__init__.py ]
             then
-                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                VER=$(${DEST_DIR}/bin/${PYTHON_EXEC} -c 'from distutils.version import StrictVersion as SV; \
                                                  import numpy; print SV(numpy.__version__) < SV("1.8.0")')
                 if [ $VER == "True" ]
                 then
                     echo "Removing previous NumPy instance (see issue #889)"
-                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                    rm -rf ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/{numpy*,*.pth}
                 fi
             fi
             ;;
@@ -523,8 +543,8 @@
             ;;
     esac
     cd $LIB
-    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 }
@@ -592,14 +612,15 @@
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
 export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages
 
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+PYTHON2='Python-2.7.9'
+PYTHON3='Python-3.4.3'
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.9'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12' 
 H5PY='h5py-2.5.0'
@@ -620,11 +641,13 @@
 TORNADO='tornado-4.0.2'
 ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
+SETUPTOOLS='setuptools-18.0.1'
 
 # Now we dump all our SHA512 files out.
 echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267  Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo 'a42f28ed8e49f04cf89e2ea7434c5ecbc264e7188dcb79ab97f745adf664dd9ab57f9a913543731635f90859536244ac37dca9adf0fc2aa1b215ba884839d160  Python-2.7.9.tgz' > Python-2.7.9.tgz.sha512
+echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7  Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -646,6 +669,7 @@
 echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
 echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
+echo '9b318ce2ee2cf787929dcb886d76c492b433e71024fda9452d8b4927652a298d6bd1bdb7a4c73883a98e100024f89b46ea8aa14b250f896e549e6dd7e10a6b41  setuptools-18.0.1.tar.gz' > setuptools-18.0.1.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
@@ -660,10 +684,11 @@
 [ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
-get_ytproject $PYTHON.tgz
+[ $INST_HG -eq 1 ] && get_ytproject $MERCURIAL.tar.gz
+[ $INST_PY3 -eq 1 ] && get_ytproject $PYTHON3.tgz
+get_ytproject $PYTHON2.tgz
 get_ytproject $NUMPY.tar.gz
 get_ytproject $MATPLOTLIB.tar.gz
-get_ytproject $MERCURIAL.tar.gz
 get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
@@ -671,6 +696,7 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
+get_ytproject $SETUPTOOLS.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -787,11 +813,11 @@
     fi
 fi
 
-if [ ! -e $PYTHON/done ]
+if [ ! -e $PYTHON2/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
-    cd $PYTHON
+    echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
+    [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
+    cd $PYTHON2
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -802,7 +828,30 @@
     cd ..
 fi
 
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages/
+if [ $INST_PY3 -eq 1 ]
+then
+    if [ ! -e $PYTHON3/done ]
+    then
+        echo "Installing Python 3. Because two Pythons are better than one."
+        [ ! -e $PYTHON3 ] && tar xfz $PYTHON3.tgz
+        cd $PYTHON3
+        ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+        ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/python 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3-config ${DEST_DIR}/bin/python-config 2>&1 ) 1>> ${LOG_FILE}
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+fi
+
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/
+
+# Install setuptools
+do_setup_py $SETUPTOOLS
 
 if [ $INST_HG -eq 1 ]
 then
@@ -847,12 +896,10 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
-
-echo "Installing distribute"
-( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
+ 
 echo "Installing pip"
-( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 if [ $INST_SCIPY -eq 0 ]
 then
@@ -986,13 +1033,14 @@
 
 echo "Installing yt"
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 
-if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
+	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
-    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then
         echo "Installing pure-python readline"
         ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
+  "signature": "sha256:487383ec23a092310522ec25bd02ad2eb16a3402c5ed3d2b103d33fe17697b3c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -70,6 +70,13 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "<font color='red'>**NOTE**</font>: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -116,7 +116,7 @@
   the width of the smallest grid element in the simulation from the
   last data snapshot (i.e. the one where time has evolved the
   longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
 * ``total_particles``, if supplied, this is a pre-calculated
   total number of dark matter
   particles present in the simulation. For example, this is useful

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -10,6 +10,10 @@
 simulated X-ray photon lists of events from datasets that yt is able
 to read. The simulated events then can be exported to X-ray telescope
 simulators to produce realistic observations or can be analyzed in-line.
+
+For detailed information about the design of the algorithm in yt, check 
+out `the SciPy 2014 Proceedings. <http://conference.scipy.org/proceedings/scipy2014/zuhone.html>`_.
+
 The algorithm is based off of that implemented in
 `PHOX <http://www.mpa-garching.mpg.de/~kdolag/Phox/>`_ for SPH datasets
 by Veronica Biffi and Klaus Dolag. There are two relevant papers:
@@ -139,6 +143,12 @@
 the optional keyword ``thermal_broad`` is set to ``True``, the spectral
 lines will be thermally broadened.
 
+.. note:: 
+
+   ``SpectralModel`` objects based on XSPEC models (both the thermal 
+   emission and Galactic absorption models mentioned below) only work 
+   in Python 2.7, since currently PyXspec only works with Python 2.x. 
+   
 Now that we have our ``SpectralModel`` that gives us a spectrum, we need
 to connect this model to a ``PhotonModel`` that will connect the field
 data in the ``data_source`` to the spectral model to actually generate
@@ -148,7 +158,8 @@
 .. code:: python
 
     thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
-                                       photons_per_chunk=100000000)
+                                       photons_per_chunk=100000000,
+                                       method="invert_cdf")
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -165,6 +176,18 @@
 this parameter needs to be set higher, or if you are looking to decrease memory
 usage, you might set this parameter lower.
 
+The ``method`` keyword argument is also optional, and determines how the individual
+photon energies are generated from the spectrum. It may be set to one of two values:
+
+* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert
+  it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
+  on construction of the CDF and interpolation between the points, so for some spectra it
+  may not be accurate enough). 
+* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection
+  technique (accurate, but likely to be slow). 
+
+``method="invert_cdf"`` (the default) should be sufficient for most cases. 
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -191,12 +214,29 @@
 By default, the angular diameter distance to the object is determined
 from the ``cosmology`` and the cosmological ``redshift``. If a
 ``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. If your source is local to the galaxy,
-you can set its distance directly, using a tuple, e.g.
-``dist=(30, "kpc")``. In this case, the ``redshift`` and ``cosmology``
-will be ignored. Finally, if the photon generating function accepts any
-parameters, they can be passed to ``from_scratch`` via a ``parameters``
-dictionary.
+default cosmological parameters. The ``center`` keyword argument specifies
+the center of the photon distribution, and the photon positions will be 
+rescaled with this value as the origin. This argument accepts the following
+values:
+
+* A NumPy array or list corresponding to the coordinates of the center in
+  units of code length. 
+* A ``YTArray`` corresponding to the coordinates of the center in some
+  length units. 
+* ``"center"`` or ``"c"`` corresponds to the domain center. 
+* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
+* A two-element tuple specifying the max or min of a specific field, e.g.,
+  ``("min","gravitational_potential")``, ``("max","dark_matter_density")``
+
+If ``center`` is not specified, ``from_scratch`` will attempt to use the 
+``"center"`` field parameter of the ``data_source``. 
+
+``from_scratch`` takes a few other optional keyword arguments. If your 
+source is local to the galaxy, you can set its distance directly, using 
+a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and 
+``cosmology`` will be ignored. Finally, if the photon generating 
+function accepts any parameters, they can be passed to ``from_scratch`` 
+via a ``parameters`` dictionary.
 
 At this point, the ``photons`` are distributed in the three-dimensional
 space of the ``data_source``, with energies in the rest frame of the
@@ -265,7 +305,7 @@
     abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
 Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``L``. Second, we'll adjust the exposure time and the redshift.
+vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
 specify a ``sky_center`` in RA,DEC on the sky in degrees.
 
@@ -274,26 +314,40 @@
 course far short of a full simulation of a telescope ray-trace, but it's
 a quick-and-dirty way to get something close to the real thing. We'll
 discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later.
+reading by telescope simulation codes later. If you just want to convolve 
+the photons with an ARF, you may specify that as the only response, but some
+ARFs are unnormalized and still require the RMF for normalization. Check with
+the documentation associated with these files for details. If we are using the
+RMF to convolve energies, we must set ``convolve_energies=True``. 
 
 .. code:: python
 
     ARF = "chandra_ACIS-S3_onaxis_arf.fits"
     RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
-    L = [0.0,0.0,1.0]
-    events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
-                                     sky_center=(187.5,12.333), responses=[ARF,RMF])
+    normal = [0.0,0.0,1.0]
+    events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
+                                     absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
+                                     convolve_energies=True, no_shifting=False, north_vector=None,
+                                     psf_sigma=None)
 
-Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
-deviation to scatter the photon sky positions around with, providing a
-crude representation of a PSF.
+In this case, we chose a three-vector ``normal`` to specify an arbitrary 
+line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to 
+project along one of those axes. 
 
-.. warning::
+``project_photons`` takes several other optional keyword arguments. 
 
-   The binned images that result, even if you convolve with responses,
-   are still of the same resolution as the finest cell size of the
-   simulation dataset. If you want a more accurate simulation of a
-   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+* ``no_shifting`` (default ``False``) controls whether or not Doppler 
+  shifting of photon energies is turned on. 
+* ``dist_new`` is a (value, unit) tuple that is used to set a new
+  angular diameter distance by hand instead of having it determined
+  by the cosmology and the value of the redshift. Should only be used
+  for simulations of nearby objects. 
+* For off-axis ``normal`` vectors,  the ``north_vector`` argument can 
+  be used to control what vector corresponds to the "up" direction in 
+  the resulting event list. 
+* ``psf_sigma`` may be specified to provide a crude representation of 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
+  Gaussian PSF model. 
 
 Let's just take a quick look at the raw events object:
 
@@ -343,19 +397,27 @@
 
 Which is starting to look like a real observation!
 
+.. warning::
+
+   The binned images that result, even if you convolve with responses,
+   are still of the same resolution as the finest cell size of the
+   simulation dataset. If you want a more accurate simulation of a
+   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+
 We can also bin up the spectrum into energy bins, and write it to a FITS
 table file. This is an example where we've binned up the spectrum
 according to the unconvolved photon energy:
 
 .. code:: python
 
-    events.write_spectrum("virgo_spec.fits", energy_bins=True, emin=0.1, emax=10.0, nchan=2000, clobber=True)
+    events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
 
-If we don't set ``energy_bins=True``, and we have convolved our events
+We can also set ``bin_type="channel"``. If we have convolved our events
 with response files, then any other keywords will be ignored and it will
 try to make a spectrum from the channel information that is contained
-within the RMF, suitable for analyzing in XSPEC. For now, we'll stick
-with the energy spectrum, and plot it up:
+within the RMF. Otherwise, the channels will be determined from the ``emin``, 
+``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``. 
+For now, we'll stick with the energy spectrum, and plot it up:
 
 .. code:: python
 

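The invert_cdf method documented in the diff above reduces to roughly the
following NumPy sketch. This is illustrative only, with a hypothetical
power-law spectrum; the actual implementation lives in the photon_simulator
module:

    import numpy as np

    def sample_energies_invert_cdf(ebins, spectrum, n_photons, rng=np.random):
        # Cumulative distribution over the spectral bins, normalized to 1.
        cdf = np.insert(np.cumsum(spectrum), 0, 0.0)
        cdf /= cdf[-1]
        # Invert the CDF by interpolating uniform draws onto the bin edges.
        u = rng.uniform(size=n_photons)
        return np.interp(u, cdf, ebins)

    ebins = np.linspace(0.1, 10.0, 2001)      # bin edges in keV
    spectrum = ebins[:-1] ** -1.5             # hypothetical power-law shape
    energies = sample_energies_invert_cdf(ebins, spectrum, 100000)
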
diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -174,7 +174,7 @@
 
 Field plugins can be loaded dynamically, although at present this is not
 particularly useful.  Plans for extending field plugins to dynamically load, to
-enable simple definition of common types (gradient, divergence, etc), and to
+enable simple definition of common types (divergence, curl, etc), and to
 more verbosely describe available fields, have been put in place for future
 versions.
 
@@ -271,6 +271,29 @@
 
 For a practical application of this, see :ref:`cookbook-radial-velocity`.
 
+Gradient Fields
+---------------
+
+yt provides a way to compute gradients of spatial fields using the
+:meth:`~yt.frontends.flash.data_structures.FLASHDataset.add_gradient_fields` 
+method. If you have a spatially-based field such as density or temperature, 
+and want to calculate the gradient of that field, you can do it like so:
+
+.. code-block:: python
+
+    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+    grad_fields = ds.add_gradient_fields(("gas","temperature"))
+
+where the ``grad_fields`` list will now have a list of new field names that can be used
+in calculations, representing the 3 different components of the field and the magnitude
+of the gradient, e.g., ``"temperature_gradient_x"``, ``"temperature_gradient_y"``,
+``"temperature_gradient_z"``, and ``"temperature_gradient_magnitude"``. To see an example
+of how to create and use these fields, see :ref:`cookbook-complicated-derived-fields`.
+
+.. note::
+
+    ``add_gradient_fields`` currently only supports Cartesian geometries!
+
 General Particle Fields
 -----------------------
 

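As a hedged companion to the gradient-field documentation in the diff above,
using the same sample dataset as the docs:

    import yt

    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
    grad_fields = ds.add_gradient_fields(("gas", "temperature"))
    ad = ds.all_data()
    # grad_fields includes, e.g., "temperature_gradient_magnitude".
    print(ad["gas", "temperature_gradient_magnitude"].max())
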
diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,30 @@
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This image may then be used in a hand-constructed Matplotlib image, for instance using
 :func:`~matplotlib.pyplot.imshow`.
 
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+ 
+.. code-block:: python
+
+   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.export_fits("my_images.fits", fields=["density","temperature"],
+                   clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+   ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+   sp = ds_frb.sphere("c", (100.,"kpc"))
+
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` grids.
+
 .. _generating-profiles-and-histograms:
 
 Profiles and Histograms

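A minimal sketch of reading back the file written by ``export_hdf5`` above, assuming the datasets are stored under the exported field names (the internal file layout is not spelled out in this diff):

    import h5py

    with h5py.File("my_images.h5", "r") as f:
        density_image = f["density"][:]          # assumed dataset name
        temperature_image = f["temperature"][:]  # assumed dataset name
    print(density_image.shape)  # (512, 512) for the buffer shown above
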
diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,9 +67,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.2'
+version = '3.3'
 # The full version, including alpha/beta/rc tags.
-release = '3.2-dev'
+release = '3.3-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -82,6 +82,17 @@
 
 .. yt_cookbook:: derived_field.py
 
+.. _cookbook-complicated-derived-fields:
+
+Complicated Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to use the 
+:meth:`~yt.frontends.flash.data_structures.FLASHDataset.add_gradient_fields` method
+to generate gradient fields and use them in a more complex derived field. 
+
+.. yt_cookbook:: hse_field.py
+
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/embedded_javascript_animation.ipynb
--- a/doc/source/cookbook/embedded_javascript_animation.ipynb
+++ /dev/null
@@ -1,71 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:bed79f0227742715a8753a98f2ad54175767a7c9ded19b14976ee6c8ff255f04"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-      "\n",
-      "Additionally, this uses Jake VanderPlas' [`JSAnimation`](https://github.com/jakevdp/JSAnimation) library to embed the movie as a javascript widget, directly in the notebook.  This does not use `ffmpeg` to stitch the frames together and thus does not require `ffmpeg`.  However, you must have `JSAnimation` installed.\n",
-      "\n",
-      "To do so, clone to git repostiory and run `python setup.py install` in the root of the repository."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "from JSAnimation import IPython_display\n",
-      "from matplotlib import animation"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Here we set up the animation.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-      "\n",
-      "This may take a while to run, be patient."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import matplotlib.pyplot as plt\n",
-      "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-      "\n",
-      "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-      "prj.set_figure_size(5)\n",
-      "prj.set_zlim('density',1e-32,1e-26)\n",
-      "fig = prj.plots['density'].figure\n",
-      "\n",
-      "# animation function.  This is called sequentially\n",
-      "def animate(i):\n",
-      "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-      "    prj._switch_ds(ds)\n",
-      "\n",
-      "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-      "animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/embedded_javascript_animation.rst
--- a/doc/source/cookbook/embedded_javascript_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making a javascript animation widget using JSAnimation
-------------------------------------------------------
-
-.. notebook:: embedded_javascript_animation.ipynb

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -10,10 +10,10 @@
 def _OVI_number_density(field, data):
     return data['H_number_density']*2.0
 
-# Define a function that will accept a ds and add the new field 
+# Define a function that will accept a ds and add the new field
 # defined above.  This will be given to the LightRay below.
 def setup_ds(ds):
-    ds.add_field("O_p5_number_density", 
+    ds.add_field(("gas","O_p5_number_density"),
                  function=_OVI_number_density,
                  units="cm**-3")
 
@@ -62,7 +62,7 @@
 
 # Get all fields that need to be added to the light ray
 fields = ['temperature']
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     fields.append(params['field'])
 
 # Make a light ray, and set njobs to -1 to use one core
@@ -79,7 +79,7 @@
 sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
 
 # Iterate over species
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     # Iterate over transitions for a single species
     for i in range(params['numLines']):
         # Add the lines to the spectrum

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ /dev/null
@@ -1,105 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import numpy as np
-import yt
-# Need to grab the proton mass from the constants database
-from yt.utilities.physical_constants import mp
-
-exit()
-# Define the emission field
-
-keVtoerg = 1.602e-9  # Convert energy in keV to energy in erg
-KtokeV = 8.617e-08  # Convert degrees Kelvin to degrees keV
-sqrt3 = np.sqrt(3.)
-expgamma = 1.78107241799  # Exponential of Euler's constant
-
-
-def _FreeFree_Emission(field, data):
-
-    if data.has_field_parameter("Z"):
-        Z = data.get_field_parameter("Z")
-    else:
-        Z = 1.077  # Primordial H/He plasma
-
-    if data.has_field_parameter("mue"):
-        mue = data.get_field_parameter("mue")
-    else:
-        mue = 1./0.875  # Primordial H/He plasma
-
-    if data.has_field_parameter("mui"):
-        mui = data.get_field_parameter("mui")
-    else:
-        mui = 1./0.8125  # Primordial H/He plasma
-
-    if data.has_field_parameter("Ephoton"):
-        Ephoton = data.get_field_parameter("Ephoton")
-    else:
-        Ephoton = 1.0  # in keV
-
-    if data.has_field_parameter("photon_emission"):
-        photon_emission = data.get_field_parameter("photon_emission")
-    else:
-        photon_emission = False  # Flag for energy or photon emission
-
-    n_e = data["density"]/(mue*mp)
-    n_i = data["density"]/(mui*mp)
-    kT = data["temperature"]*KtokeV
-
-    # Compute the Gaunt factor
-
-    g_ff = np.zeros(kT.shape)
-    g_ff[Ephoton/kT > 1.] = np.sqrt((3./np.pi)*kT[Ephoton/kT > 1.]/Ephoton)
-    g_ff[Ephoton/kT < 1.] = (sqrt3/np.pi)*np.log((4./expgamma) *
-                                                 kT[Ephoton/kT < 1.]/Ephoton)
-
-    eps_E = 1.64e-20*Z*Z*n_e*n_i/np.sqrt(data["temperature"]) * \
-        np.exp(-Ephoton/kT)*g_ff
-
-    if photon_emission:
-        eps_E /= (Ephoton*keVtoerg)
-
-    return eps_E
-
-yt.add_field("FreeFree_Emission", function=_FreeFree_Emission)
-
-# Define the luminosity derived quantity
-def _FreeFreeLuminosity(data):
-    return (data["FreeFree_Emission"]*data["cell_volume"]).sum()
-
-
-def _combFreeFreeLuminosity(data, luminosity):
-    return luminosity.sum()
-
-yt.add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
-                combine_function=_combFreeFreeLuminosity, n_ret=1)
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-sphere = ds.sphere(ds.domain_center, (100., "kpc"))
-
-# Print out the total luminosity at 1 keV for the sphere
-
-print("L_E (1 keV, primordial) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# The defaults for the field assume a H/He primordial plasma.
-# Let's set the appropriate parameters for a pure hydrogen plasma.
-
-sphere.set_field_parameter("mue", 1.0)
-sphere.set_field_parameter("mui", 1.0)
-sphere.set_field_parameter("Z", 1.0)
-
-print("L_E (1 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Now let's print the luminosity at an energy of E = 10 keV
-
-sphere.set_field_parameter("Ephoton", 10.0)
-
-print("L_E (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Finally, let's set the flag for photon emission, to get the total number
-# of photons emitted at this energy:
-
-sphere.set_field_parameter("photon_emission", True)
-
-print("L_ph (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,44 +1,32 @@
 import numpy as np
 import yt
 
-from yt.fields.field_plugin_registry import \
-    register_field_plugin
-from yt.fields.fluid_fields import \
-    setup_gradient_fields
-
-
-# Define the components of the gravitational acceleration vector field by
-# taking the gradient of the gravitational potential
-@register_field_plugin
-def setup_my_fields(registry, ftype="gas", slice_info=None):
-    setup_gradient_fields(registry, (ftype, "gravitational_potential"),
-                          "cm ** 2 / s ** 2", slice_info)
-
-# Define the "degree of hydrostatic equilibrium" field
-
-
-@yt.derived_field(name='HSE', units=None, take_log=False,
-                  display_name='Hydrostatic Equilibrium')
-def HSE(field, data):
-
-    gx = data["density"] * data["gravitational_potential_gradient_x"]
-    gy = data["density"] * data["gravitational_potential_gradient_y"]
-    gz = data["density"] * data["gravitational_potential_gradient_z"]
-
-    hx = data["pressure_gradient_x"] - gx
-    hy = data["pressure_gradient_y"] - gy
-    hz = data["pressure_gradient_z"] - gz
-
-    h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
-
-    return h
-
-
 # Open a dataset from when there's a lot of sloshing going on.
 
 ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
-# gradient operator requires periodic boundaries.  This dataset has
+# Define the components of the gravitational acceleration vector field by
+# taking the gradient of the gravitational potential
+grad_fields = ds.add_gradient_fields(("gas","gravitational_potential"))
+
+# We don't need to do the same for the pressure field because yt already
+# has pressure gradient fields. Now, define the "degree of hydrostatic 
+# equilibrium" field.
+
+def _hse(field, data):
+    # Remember that g is the negative of the potential gradient
+    gx = -data["density"] * data["gravitational_potential_gradient_x"]
+    gy = -data["density"] * data["gravitational_potential_gradient_y"]
+    gz = -data["density"] * data["gravitational_potential_gradient_z"]
+    hx = data["pressure_gradient_x"] - gx
+    hy = data["pressure_gradient_y"] - gy
+    hz = data["pressure_gradient_z"] - gz
+    h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
+    return h
+ds.add_field(('gas','HSE'), function=_hse, units="", take_log=False,
+             display_name='Hydrostatic Equilibrium')
+
+# The gradient operator requires periodic boundaries.  This dataset has
 # open boundary conditions.  We need to hack it for now (this will be fixed
 # in future version of yt)
 ds.periodicity = (True, True, True)

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_javascript_animation
    embedded_webm_animation
    gadget_notebook
    owls_notebook

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/simulation_analysis.py
--- a/doc/source/cookbook/simulation_analysis.py
+++ b/doc/source/cookbook/simulation_analysis.py
@@ -2,11 +2,11 @@
 yt.enable_parallelism()
 import collections
 
-# Enable parallelism in the script (assuming it was called with 
+# Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
 yt.enable_parallelism()
 
-# By using wildcards such as ? and * with the load command, we can load up a 
+# By using wildcards such as ? and * with the load command, we can load up a
 # Time Series containing all of these datasets simultaneously.
 ts = yt.load('enzo_tiny_cosmology/DD????/DD????')
 
@@ -16,7 +16,7 @@
 # Create an empty dictionary
 data = {}
 
-# Iterate through each dataset in the Time Series (using piter allows it 
+# Iterate through each dataset in the Time Series (using piter allows it
 # to happen in parallel automatically across available processors)
 for ds in ts.piter():
     ad = ds.all_data()
@@ -31,6 +31,6 @@
 # Print out all the values we calculated.
 print("Dataset      Redshift        Density Min      Density Max")
 print("---------------------------------------------------------")
-for key, val in od.iteritems(): 
+for key, val in od.items(): 
     print("%s       %05.3f          %5.3g g/cm^3   %5.3g g/cm^3" % \
            (key, val[1], val[0][0], val[0][1]))

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -12,7 +12,7 @@
 
 storage = {}
 
-# By using the piter() function, we can iterate on every dataset in 
+# By using the piter() function, we can iterate on every dataset in
 # the TimeSeries object.  By using the storage keyword, we can populate
 # a dictionary where the dataset is the key, and sto.result is the value
 # for later use when the loop is complete.
@@ -25,13 +25,13 @@
     sphere = ds.sphere("c", (100., "kpc"))
     # Calculate the entropy within that sphere
     entr = sphere["entropy"].sum()
-    # Store the current time and sphere entropy for this dataset in our 
+    # Store the current time and sphere entropy for this dataset in our
     # storage dictionary as a tuple
     store.result = (ds.current_time.in_units('Gyr'), entr)
 
 # Convert the storage dictionary values to a Nx2 array, so the can be easily
 # plotted
-arr = np.array(storage.values())
+arr = np.array(list(storage.values()))
 
 # Plot up the results: time versus entropy
 plt.semilogy(arr[:,0], arr[:,1], 'r-')

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -165,24 +165,22 @@
 
 To build the full documentation, you will need yt, IPython, runipy, and all 
 supplementary yt analysis modules installed. The following dependencies were 
-used to generate the yt documentation during the release of yt 2.6 in late 2013.
+used to generate the yt documentation during the release of yt 3.2 in 2015.
 
-* Sphinx_ 1.1.3
-* IPython_ 1.1
-* runipy_ (git hash f74458c2877)
-* pandoc_ 1.11.1
+* Sphinx_ 1.3.1
+* IPython_ 2.4.1
+* runipy_ 0.1.3
+* pandoc_ 1.13.2
 * Rockstar halo finder 0.99.6
 * SZpack_ 1.1.1
-* ffmpeg_ 1.2.4 (compiled with libvpx support)
-* JSAnimation_ (git hash 1b95cb3a3a)
-* Astropy_ 0.2.5
+* ffmpeg_ 2.7.1 (compiled with libvpx support)
+* Astropy_ 0.4.4
 
 .. _SZpack: http://www.cita.utoronto.ca/~jchluba/Science_Jens/SZpack/SZpack.html
 .. _Astropy: http://astropy.org/
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
-.. _JSAnimation: https://github.com/jakevdp/JSAnimation
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/developing/intro.rst
--- a/doc/source/developing/intro.rst
+++ b/doc/source/developing/intro.rst
@@ -142,3 +142,77 @@
 federated database for simulation outputs, and so on and so forth.
 
 yt is an ambitious project.  Let's be ambitious together.
+
+yt Community Code of Conduct
+----------------------------
+
+The community of participants in open source 
+scientific projects is made up of members from around the
+globe with a diverse set of skills, personalities, and
+experiences. It is through these differences that our
+community experiences success and continued growth. We
+expect everyone in our community to follow these guidelines
+when interacting with others both inside and outside of our
+community. Our goal is to keep ours a positive, inclusive,
+successful, and growing community.
+
+As members of the community,
+
+- We pledge to treat all people with respect and
+  provide a harassment- and bullying-free environment,
+  regardless of sex, sexual orientation and/or gender
+  identity, disability, physical appearance, body size,
+  race, nationality, ethnicity, and religion. In
+  particular, sexual language and imagery, sexist,
+  racist, or otherwise exclusionary jokes are not
+  appropriate.
+
+- We pledge to respect the work of others by
+  recognizing acknowledgment/citation requests of
+  original authors. As authors, we pledge to be explicit
+  about how we want our own work to be cited or
+  acknowledged.
+
+- We pledge to welcome those interested in joining the
+  community, and realize that including people with a
+  variety of opinions and backgrounds will only serve to
+  enrich our community. In particular, discussions
+  relating to pros/cons of various technologies,
+  programming languages, and so on are welcome, but
+  these should be done with respect, taking proactive
+  measures to ensure that all participants are heard and
+  feel confident that they can freely express their
+  opinions.
+
+- We pledge to welcome questions and answer them
+  respectfully, paying particular attention to those new
+  to the community. We pledge to provide respectful
+  criticisms and feedback in forums, especially in
+  discussion threads resulting from code
+  contributions.
+
+- We pledge to be conscientious of the perceptions of
+  the wider community and to respond to criticism
+  respectfully. We will strive to model behaviors that
+  encourage productive debate and disagreement, both
+  within our community and where we are criticized. We
+  will treat those outside our community with the same
+  respect as people within our community.
+
+- We pledge to help the entire community follow the
+  code of conduct, and to not remain silent when we see
+  violations of the code of conduct. We will take action
+  when members of our community violate this code, such as by
+  contacting confidential at yt-project.org (all emails sent to
+  this address will be treated with the strictest
+  confidence) or talking privately with the person.
+
+This code of conduct applies to all
+community situations online and offline, including mailing
+lists, forums, social media, conferences, meetings,
+associated social events, and one-to-one interactions.
+
+The yt Community Code of Conduct was adapted from the 
+`Astropy Community Code of Conduct 
+<http://www.astropy.org/about.html#codeofconduct>`_,
+which was partially inspired by the PSF code of conduct.

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -104,7 +104,11 @@
 -----------
 
 Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+ZuHone. Both uniform grid and SMR datasets are supported. 
+
+.. note::
+   yt also recognizes Fargo3D data written to VTK files as 
+   Athena data, but support for Fargo3D data is preliminary. 
 
 Loading Athena datasets is slightly different depending on whether
 your dataset came from a serial or a parallel run. If the data came
@@ -264,7 +268,7 @@
 Support for Pluto AMR data is provided through the Chombo frontend, which
 is currently maintained by Andrew Myers. Pluto output files that don't use
 the Chombo HDF5 format are currently not supported. To load a Pluto dataset, 
-you can use the ``yt.load`` command on the *.hdf5 file. For example, the 
+you can use the ``yt.load`` command on the ``*.hdf5`` files. For example, the 
 KelvinHelmholtz sample dataset is a directory that contains the following
 files:
 
@@ -469,6 +473,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -570,6 +576,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset of some sort, and would like to extract emission in 
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a dataset with these slabs as different fields. In this example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  ds = create_spectral_slabs("intensity_cube.fits",
+                             slab_centers, slab_width,
+                             nan_mask=0.0)
+
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -1044,6 +1079,76 @@
 
 .. _loading-pyne-data:
 
+Halo Catalog Data
+-----------------
+
+yt has support for reading halo catalogs produced by Rockstar and the inline 
+FOF/SUBFIND halo finders of Gadget and OWLS.  The halo catalogs are treated as 
+particle datasets where each particle represents a single halo.  At this time, 
+yt does not have the ability to load the member particles for a given halo.  
+However, once loaded, further halo analysis can be performed using 
+:ref:`halo_catalog`.
+
+In the case where halo catalogs are written to multiple files, one need only
+give the path to one of them.
+
+Gadget FOF/SUBFIND
+^^^^^^^^^^^^^^^^^^
+
+The two field types for GadgetFOF data are "Group" (FOF) and "Subhalo" (SUBFIND).
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5")
+   ad = ds.all_data()
+   # The halo mass
+   print ad["Group", "particle_mass"]
+   print ad["Subhalo", "particle_mass"]
+   # Halo ID
+   print ad["Group", "particle_identifier"]
+   print ad["Subhalo", "particle_identifier"]
+   # positions
+   print ad["Group", "particle_position_x"]
+   # velocities
+   print ad["Group", "particle_velocity_x"]
+
+Multidimensional fields can be accessed through the field name followed by an 
+underscore and the index.
+
+.. code-block:: python
+
+   # x component of the spin
+   print ad["Subhalo", "SubhaloSpin_0"]
+
+OWLS FOF/SUBFIND
+^^^^^^^^^^^^^^^^
+
+OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.  
+The two field types are "FOF" and "SUBFIND".
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("owls_fof_halos/groups_008/group_008.0.hdf5")
+   ad = ds.all_data()
+   # The halo mass
+   print ad["FOF", "particle_mass"]
+
+Rockstar
+^^^^^^^^
+
+Rockstar halo catalogs are loaded by providing the path to one of the .bin files.
+The single field type available is "halos".
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("rockstar_halos/halos_0.0.bin")
+   ad = ds.all_data()
+   # The halo mass
+   print ad["halos", "particle_mass"]
+
 PyNE Data
 ---------
 

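Tying the ``create_spectral_slabs`` example above together, a hedged sketch of using the returned dataset (the import path and the slice plot are assumptions; the slab fields take their names from the keys of ``slab_centers``):

    import yt
    # The import path is an assumption based on the FITS frontend layout.
    from yt.frontends.fits.misc import create_spectral_slabs

    slab_centers = {'13CN': (218.03117, 'GHz')}
    ds = create_spectral_slabs("intensity_cube.fits", slab_centers,
                               (0.05, "GHz"), nan_mask=0.0)
    # Each key of slab_centers becomes a field in the new dataset.
    yt.SlicePlot(ds, "z", "13CN").save()
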
diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,28 @@
  have the necessary compilers installed (e.g. the ``build-essential``
  package on Debian and Ubuntu).
 
+.. _branches-of-yt:
+
+Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
+++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Before you install yt, you must decide which branch (i.e. version) of the code 
+you prefer to use:
+
+* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
+* ``stable`` -- The latest stable release of yt-3.x
+* ``yt-2.x`` -- The latest stable release of yt-2.x
+
+If this is your first time using the code, we recommend using ``stable``, 
+unless you specifically need some piece of brand-new functionality only 
+available in ``yt`` or need to run an old script developed for ``yt-2.x``.
+There were major API and functionality changes made in yt after version 2.7
+in moving to version 3.0.  For a detailed description of the changes
+between versions 2.x (i.e. branch ``yt-2.x``) and 3.x (i.e. branches ``yt`` and
+``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked 
+into one branch when you install yt, because you can easily change the active
+branch by following the instructions in :ref:`switching-between-yt-versions`.
+
 .. _install-script:
 
 All-in-One Installation Script
@@ -60,16 +82,22 @@
 its dependencies will be removed from your system (no scattered files remaining
 throughout your system).
 
+.. _installing-yt:
+
 Running the Install Script
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To get the installation script, download it from:
+To get the installation script for the ``stable`` branch of the code, 
+download it from:
 
 .. code-block:: bash
 
   wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-.. _installing-yt:
+If you wish to install a different version of yt (see 
+:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate 
+branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct 
+install script.
 
 By default, the bash install script will install an array of items, but there
 are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
@@ -213,10 +241,31 @@
 ++++++++++++++++++++++++++++++++++++++
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system. 
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite`` 
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+  $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+  $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
 
 .. code-block:: bash
 
@@ -308,8 +357,8 @@
 
 .. _switching-between-yt-versions:
 
-Switching between yt-2.x and yt-3.x
------------------------------------
+Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
+---------------------------------------------------------
 
 With the release of version 3.0 of yt, development of the legacy yt 2.x series
 has been relegated to bugfixes.  That said, we will continue supporting the 2.x
@@ -335,12 +384,8 @@
   hg update <desired-version>
   python setup.py develop
 
-Valid versions to jump to are:
+Valid versions to jump to are described in :ref:`branches-of-yt`.
 
-* ``yt`` -- The latest *dev* changes in yt-3.x (can be unstable)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
-    
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.
 
@@ -366,11 +411,7 @@
   hg update <desired-version>
   python setup.py install --user --prefix=
 
-Valid versions to jump to are:
-
-* ``yt`` -- The latest *dev* changes in yt-3.x (can be unstable)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+Valid versions to jump to are described in :ref:`branches-of-yt`.
     
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.

diff -r dd702572dfef60055d5473052c22f4fe252e3379 -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -227,8 +227,6 @@
    ~yt.frontends.chombo.data_structures.Orion2Hierarchy
    ~yt.frontends.chombo.data_structures.Orion2Dataset
    ~yt.frontends.chombo.io.IOHandlerChomboHDF5
-   ~yt.frontends.chombo.io.IOHandlerChombo2DHDF5
-   ~yt.frontends.chombo.io.IOHandlerChombo1DHDF5
    ~yt.frontends.chombo.io.IOHandlerOrion2HDF5
 
 Enzo

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/ea0f2766e94b/
Changeset:   ea0f2766e94b
Branch:      yt
User:        devinsilvia
Date:        2015-07-29 19:50:20+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  3 files

diff -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 -r ea0f2766e94b7af708df90413376fa2ef24b6465 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -93,6 +93,16 @@
 
 .. yt_cookbook:: hse_field.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 -r ea0f2766e94b7af708df90413376fa2ef24b6465 doc/source/cookbook/smoothed_field.py
--- /dev/null
+++ b/doc/source/cookbook/smoothed_field.py
@@ -0,0 +1,48 @@
+import yt
+
+# Load a Gadget dataset following the demonstration notebook.
+fname = 'GadgetDiskGalaxy/snapshot_200.hdf5'
+
+unit_base = {'UnitLength_in_cm'         : 3.08568e+21,
+             'UnitMass_in_g'            :   1.989e+43,
+             'UnitVelocity_in_cm_per_s' :      100000}
+
+bbox_lim = 1e5  # kpc
+
+bbox = [[-bbox_lim, bbox_lim],
+        [-bbox_lim, bbox_lim],
+        [-bbox_lim, bbox_lim]]
+
+ds = yt.load(fname, unit_base=unit_base, bounding_box=bbox)
+
+# Create a derived field, the metal density.
+def _metal_density(field, data):
+    density = data['PartType0', 'Density']
+    Z = data['PartType0', 'metallicity']
+    return density * Z
+    
+# Add it to the dataset.
+ds.add_field(('PartType0', 'metal_density'), function=_metal_density,
+             units="g/cm**3", particle_type=True)
+
+
+# Add the corresponding smoothed field to the dataset.
+from yt.fields.particle_fields import add_volume_weighted_smoothed_field
+
+add_volume_weighted_smoothed_field('PartType0', 'Coordinates', 'Masses',
+                                   'SmoothingLength', 'Density',
+                                   'metal_density', ds.field_info)
+
+# Define the region where the disk galaxy is. (See the Gadget notebook for
+# details. Here I make the box a little larger than needed to eliminate the
+# margin effect.)
+center = ds.arr([31996, 31474, 28970], "code_length")
+box_size = ds.quan(250, "code_length")
+left_edge = center - box_size/2*1.1
+right_edge = center + box_size/2*1.1
+box = ds.box(left_edge=left_edge, right_edge=right_edge)
+
+# And make a projection plot!
+yt.ProjectionPlot(ds, 'z',
+                  ('deposit', 'PartType0_smoothed_metal_density'),
+                  center=center, width=box_size, data_source=box).save()

diff -r 613c3490f3ffcedae9a778bc1b7de9bf421c9886 -r ea0f2766e94b7af708df90413376fa2ef24b6465 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -426,6 +426,43 @@
         else:
             self.index.save_object(self, name)
 
+    def to_dataframe(self, fields = None):
+        r"""Export a data object to a pandas DataFrame.
+
+        This function will take a data object and construct from it and
+        optionally a list of fields a pandas DataFrame object.  If pandas is
+        not importable, this will raise ImportError.
+
+        Parameters
+        ----------
+        fields : list of strings or tuples, default None
+            If this is supplied, it is the list of fields to be exported into
+            the data frame.  If not supplied, whatever fields presently exist
+            will be used.
+
+        Returns
+        -------
+        df : DataFrame
+            The data contained in the object.
+
+        Examples
+        --------
+
+        >>> dd = ds.all_data()
+        >>> df1 = dd.to_dataframe(["density", "temperature"])
+        >>> dd["velocity_magnitude"]
+        >>> df2 = dd.to_dataframe()
+        """
+        import pandas as pd
+        data = {}
+        if fields is not None:
+            for f in fields:
+                data[f] = self[f]
+        else:
+            data.update(self.field_data)
+        df = pd.DataFrame(data)
+        return df
+
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to

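A sketch of the new ``to_dataframe`` export in use, mirroring its docstring (the dataset path is a standard yt sample and is assumed here; pandas must be importable):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sp = ds.sphere("c", (10.0, "kpc"))

    # Export two fields into a pandas DataFrame and summarize them.
    df = sp.to_dataframe(["density", "temperature"])
    print(df.describe())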

https://bitbucket.org/yt_analysis/yt/commits/8f64655a891a/
Changeset:   8f64655a891a
Branch:      yt
User:        devinsilvia
Date:        2015-07-29 20:03:21+00:00
Summary:     Adding a redshift attribute to the light ray that is written out.

This preps the light ray for being read back in as a LightRay object.

Also, my editor trimmed a bunch of white space.
Affected #:  1 file

diff -r ea0f2766e94b7af708df90413376fa2ef24b6465 -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -48,7 +48,7 @@
     synthetic QSO lines of sight.
 
     Light rays can also be made from single datasets.
-    
+
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
     single object by providing different random seeds to make_light_ray.
@@ -58,17 +58,17 @@
     parameter_filename : string
         The path to the simulation parameter file or dataset.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to 
+        The simulation type.  If None, the first argument is assumed to
         refer to a single dataset.
         Default: None
     near_redshift : optional, float
-        The near (lowest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The near (lowest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
-        The far (highest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The far (highest) redshift for a light ray containing multiple
+        datasets.  Do not use is making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -98,11 +98,11 @@
         datasets for time series.
         Default: True.
     find_outputs : optional, bool
-        Whether or not to search for datasets in the current 
+        Whether or not to search for datasets in the current
         directory.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load" 
+        Optional dictionary of kwargs to be passed to the "load"
         function, appropriate for use of certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
@@ -129,7 +129,7 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.        
+        # Make a light ray from a single, given dataset.
         if simulation_type is None:
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
@@ -156,7 +156,7 @@
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, 
+    def _calculate_light_ray_solution(self, seed=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -185,12 +185,12 @@
                                 np.sin(phi) * np.sin(theta),
                                 np.cos(theta)])
             self.light_ray_solution[0]['traversal_box_fraction'] = \
-              vector_length(self.light_ray_solution[0]['start'], 
+              vector_length(self.light_ray_solution[0]['start'],
                             self.light_ray_solution[0]['end'])
 
         # the normal way (random start positions and trajectories for each dataset)
         else:
-            
+
             # For box coherence, keep track of effective depth travelled.
             box_fraction_used = 0.0
 
@@ -285,15 +285,15 @@
             Default: None.
         trajectory : optional, list of floats
             Used only if creating a light ray from a single dataset.
-            The (r, theta, phi) direction of the light ray.  Use either 
+            The (r, theta, phi) direction of the light ray.  Use either
             end_position or trajectory, not both.
             Default: None.
         fields : optional, list
             A list of fields for which to get data.
             Default: None.
         setup_function : optional, callable, accepts a ds
-            This function will be called on each dataset that is loaded 
-            to create the light ray.  For, example, this can be used to 
+            This function will be called on each dataset that is loaded
+            to create the light ray.  For example, this can be used to
             add new derived fields.
             Default: None.
         solution_filename : optional, string
@@ -308,13 +308,13 @@
             each point in the ray.
             Default: True.
         redshift : optional, float
-            Used with light rays made from single datasets to specify a 
-            starting redshift for the ray.  If not used, the starting 
-            redshift will be 0 for a non-cosmological dataset and 
+            Used with light rays made from single datasets to specify a
+            starting redshift for the ray.  If not used, the starting
+            redshift will be 0 for a non-cosmological dataset and
             the dataset redshift for a cosmological dataset.
             Default: None.
         njobs : optional, int
-            The number of parallel jobs over which the segments will 
+            The number of parallel jobs over which the segments will
             be split.  Choose -1 for one processor per segment.
             Default: -1.
 
@@ -322,7 +322,7 @@
         --------
 
         Make a light ray from multiple datasets:
-        
+
         >>> import yt
         >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
         ...     LightRay
@@ -348,12 +348,12 @@
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
         ...                       get_los_velocity=True)
-        
+
         """
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, 
-                                           start_position=start_position, 
+        self._calculate_light_ray_solution(seed=seed,
+                                           start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
@@ -400,7 +400,7 @@
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
                 next_redshift = my_segment["redshift"] - \
-                  self._deltaz_forward(my_segment["redshift"], 
+                  self._deltaz_forward(my_segment["redshift"],
                                        ds.domain_width[0].in_units("Mpccm / h") *
                                        my_segment["traversal_box_fraction"])
             elif my_segment.get("next", None) is None:
@@ -453,7 +453,7 @@
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], 
+                (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
@@ -496,6 +496,7 @@
         output = h5py.File(filename, 'w')
         for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
             output.attrs[attr] = getattr(self.cosmology, attr)
+        output.attrs["redshift"] = self.near_redshift
         output.attrs["data_type"] = "light_ray"
         for field in data.keys():
             # if the field is a tuple, only use the second part of the tuple
@@ -553,7 +554,7 @@
 def vector_length(start, end):
     """
     vector_length(start, end)
-    
+
     Calculate vector length.
     """
 
@@ -580,15 +581,15 @@
     """
     periodic_ray(start, end, left=None, right=None)
 
-    Break up periodic ray into non-periodic segments. 
+    Break up periodic ray into non-periodic segments.
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
-    Returns a list of lists of coordinates, where each element of the 
-    top-most list is a 2-list of start coords and end coords of the 
-    non-periodic ray: 
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
 
-    [[[x0start,y0start,z0start], [x0end, y0end, z0end]], 
-     [[x1start,y1start,z1start], [x1end, y1end, z1end]], 
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
      ...,]
 
     """

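To see the effect of the one substantive change in this diff, a sketch of inspecting a saved ray file (``my_ray.h5`` stands in for whatever ``data_filename`` was used with ``make_light_ray``):

    import h5py

    with h5py.File("my_ray.h5", "r") as f:
        # The new attribute sits alongside the cosmology attributes
        # and the "data_type" marker.
        print(f.attrs["redshift"])
        print(f.attrs["data_type"])  # "light_ray" (may be bytes in h5py)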

https://bitbucket.org/yt_analysis/yt/commits/67d33b762f68/
Changeset:   67d33b762f68
Branch:      yt
User:        devinsilvia
Date:        2015-07-29 20:17:36+00:00
Summary:     First pass at adding a frontend for LightRays.

This should allow one to load HDF5 light rays files
and interact with them as yt objects.
Affected #:  6 files

diff -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 -r 67d33b762f68149bb9373400b64e3dd66ea97069 yt/frontends/light_ray/__init__.py
--- /dev/null
+++ b/yt/frontends/light_ray/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for LightRay frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 -r 67d33b762f68149bb9373400b64e3dd66ea97069 yt/frontends/light_ray/api.py
--- /dev/null
+++ b/yt/frontends/light_ray/api.py
@@ -0,0 +1,24 @@
+"""
+API for LightRay frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+     LightRayDataset
+
+from .io import \
+     IOHandlerLightRayHDF5
+
+from .fields import \
+     LightRayFieldInfo

diff -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 -r 67d33b762f68149bb9373400b64e3dd66ea97069 yt/frontends/light_ray/data_structures.py
--- /dev/null
+++ b/yt/frontends/light_ray/data_structures.py
@@ -0,0 +1,97 @@
+"""
+Data structures for LightRay frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+    LightRayFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+
+class LightRayHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+
+        super(LightRayHDF5File, self).__init__(ds, io, filename, file_id)
+
+class LightRayDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = LightRayHDF5File
+    _field_info_class = LightRayFieldInfo
+    _suffix = ".h5"
+
+    def __init__(self, filename, dataset_type="lightray_hdf5",
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(LightRayDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
+        self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
+        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, hvals[attr])
+        self.periodicity = (True, True, True)
+        self.particle_types = ("gas",)
+        self.particle_types_raw = ("gas",)
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.parameters.update(hvals)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.velocity_unit = self.quan(1.0, "cm / s")
+        self.time_unit = self.quan(1.0, "s")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            if "data_type" in f.attrs and \
+              f.attrs["data_type"] == "light_ray":
+                return True
+        return False

diff -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 -r 67d33b762f68149bb9373400b64e3dd66ea97069 yt/frontends/light_ray/fields.py
--- /dev/null
+++ b/yt/frontends/light_ray/fields.py
@@ -0,0 +1,48 @@
+"""
+LightRay-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.units.yt_array import \
+    YTArray
+
+from yt.utilities.physical_constants import \
+    mh, \
+    mass_sun_cgs
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class LightRayFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("particle_identifier", ("", [], None)),
+        ("particle_position_x", (p_units, [], None)),
+        ("particle_position_y", (p_units, [], None)),
+        ("particle_position_z", (p_units, [], None)),
+        ("particle_velocity_x", (v_units, [], None)),
+        ("particle_velocity_y", (v_units, [], None)),
+        ("particle_velocity_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Virial Mass")),
+        ("virial_radius", (r_units, [], "Virial Radius")),
+)

diff -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 -r 67d33b762f68149bb9373400b64e3dd66ea97069 yt/frontends/light_ray/io.py
--- /dev/null
+++ b/yt/frontends/light_ray/io.py
@@ -0,0 +1,119 @@
+"""
+LightRay data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerLightRayHDF5(BaseIOHandler):
+    _dataset_type = "lightray_hdf5"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                x = f['particle_position_x'].value.astype("float64")
+                y = f['particle_position_y'].value.astype("float64")
+                z = f['particle_position_z'].value.astype("float64")
+                yield "halos", (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = f['particle_position_x'].value.astype("float64")
+                    y = f['particle_position_y'].value.astype("float64")
+                    z = f['particle_position_z'].value.astype("float64")
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f[field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        pcount = data_file.header["num_halos"]
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            if not f.keys(): return None
+            pos = np.empty((pcount, 3), dtype="float64")
+            pos = data_file.ds.arr(pos, "code_length")
+            dx = np.finfo(f['particle_position_x'].dtype).eps
+            dx = 2.0*self.ds.quan(dx, "code_length")
+            pos[:,0] = f["particle_position_x"].value
+            pos[:,1] = f["particle_position_y"].value
+            pos[:,2] = f["particle_position_z"].value
+            # These are 32 bit numbers, so we give a little lee-way.
+            # Otherwise, for big sets of particles, we often will bump into the
+            # domain edges.  This helps alleviate that.
+            np.clip(pos, self.ds.domain_left_edge + dx,
+                         self.ds.domain_right_edge - dx, pos)
+            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.ds.domain_left_edge,
+                                       self.ds.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'halos': data_file.header['num_halos']}
+
+    def _identify_fields(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            fields = [("halos", field) for field in f]
+            units = dict([(("halos", field),
+                           f[field].attrs["units"]) for field in f])
+        return fields, units
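
The handler above appears to expect the same flat HDF5 layout as the
halo_catalog frontend it is patterned on: a "num_halos" attribute at the file
root and one root-level dataset per field, each carrying a "units" attribute
that _identify_fields picks up. A minimal sketch of a file it could index
(the filename and values here are hypothetical):

    import h5py
    import numpy as np

    with h5py.File("light_ray_000.h5", "w") as f:
        f.attrs["num_halos"] = 8  # read back by _count_particles
        for ax in "xyz":
            # 32-bit positions, matching the leeway comment in _initialize_index
            d = f.create_dataset("particle_position_%s" % ax,
                                 data=np.random.random(8).astype("float32"))
            d.attrs["units"] = "code_length"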

diff -r 8f64655a891ac383286f56c886e4cccc9d9fbf18 -r 67d33b762f68149bb9373400b64e3dd66ea97069 yt/frontends/light_ray/setup.py
--- /dev/null
+++ b/yt/frontends/light_ray/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('light_ray', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


https://bitbucket.org/yt_analysis/yt/commits/4423a983da4c/
Changeset:   4423a983da4c
Branch:      yt
User:        devinsilvia
Date:        2015-08-03 21:19:58+00:00
Summary:     Merged yt_analysis/yt into yt
Affected #:  6 files

diff -r 67d33b762f68149bb9373400b64e3dd66ea97069 -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -631,7 +631,7 @@
         # In-place copies do not drop units.
         assert_true(hasattr(out, 'units'))
         assert_true(not hasattr(ret, 'units'))
-    elif ufunc in (np.absolute, np.conjugate, np.floor, np.ceil,
+    elif ufunc in (np.absolute, np.fabs, np.conjugate, np.floor, np.ceil,
                    np.trunc, np.negative):
         ret = ufunc(a, out=out)
 

diff -r 67d33b762f68149bb9373400b64e3dd66ea97069 -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -26,7 +26,7 @@
     greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \
     logical_or, logical_xor, logical_not, maximum, minimum, isreal, iscomplex, \
     isfinite, isinf, isnan, signbit, copysign, nextafter, modf, frexp, \
-    floor, ceil, trunc, fmax, fmin
+    floor, ceil, trunc, fmax, fmin, fabs
 
 from yt.units.unit_object import Unit, UnitParseError
 from yt.units.unit_registry import UnitRegistry
@@ -139,7 +139,7 @@
     log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin,
     arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad,
     rad2deg, logical_not, isreal, iscomplex, isfinite, isinf, isnan,
-    signbit, floor, ceil, trunc, modf, frexp,
+    signbit, floor, ceil, trunc, modf, frexp, fabs
 )
 
 binary_operators = (
@@ -223,6 +223,7 @@
         mod: preserve_units,
         fmod: preserve_units,
         absolute: passthrough_unit,
+        fabs: passthrough_unit,
         rint: return_without_unit,
         sign: return_without_unit,
         conj: passthrough_unit,
@@ -1072,7 +1073,8 @@
                                         unit.base_value, out=out_arr)
                             unit = Unit(registry=unit.registry)
         else:
-            raise RuntimeError("Operation is not defined.")
+            raise RuntimeError("Support for the %s ufunc has not been added "
+                               "to YTArray." % str(context[0]))
         if unit is None:
             out_arr = np.array(out_arr, copy=False)
             return out_arr
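
With fabs registered as a passthrough ufunc, np.fabs now keeps a YTArray's
units instead of falling through to the RuntimeError branch above. A quick
sketch of the expected behavior:

    >>> import numpy as np
    >>> from yt.units.yt_array import YTArray
    >>> a = YTArray([-1.0, 2.0, -3.0], "g/cm**3")
    >>> np.fabs(a).units  # units survive the ufunc
    g/cm**3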

diff -r 67d33b762f68149bb9373400b64e3dd66ea97069 -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 yt/utilities/pyparselibconfig/libconfig.py
--- a/yt/utilities/pyparselibconfig/libconfig.py
+++ b/yt/utilities/pyparselibconfig/libconfig.py
@@ -1,9 +1,9 @@
 from __future__ import print_function
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2013, Samuel Skillman 
+# Copyright (c) 2013, yt Development Team
 #
-# Distributed under the terms of the MIT License.
+# Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------

diff -r 67d33b762f68149bb9373400b64e3dd66ea97069 -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 yt/utilities/quantities.py
--- a/yt/utilities/quantities.py
+++ b/yt/utilities/quantities.py
@@ -1,30 +1,18 @@
 """
-Quantities -- floats with units.
+Some old field names.
 
-Author: Casey W. Stark <caseywstark at gmail.com>
-Affiliation: UC Berkeley
 
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2013 Casey W. Stark.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 import numpy as np
 
 from yt.units.yt_array import YTArray

diff -r 67d33b762f68149bb9373400b64e3dd66ea97069 -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 yt/utilities/tests/test_fits_image.py
--- a/yt/utilities/tests/test_fits_image.py
+++ b/yt/utilities/tests/test_fits_image.py
@@ -17,7 +17,7 @@
 import os
 import numpy as np
 import shutil
-from yt.testing import fake_random_ds
+from yt.testing import fake_random_ds, requires_module
 from yt.convenience import load
 from numpy.testing import \
     assert_equal
@@ -29,6 +29,8 @@
 from yt.visualization.volume_rendering.camera import \
     off_axis_projection
 
+
+ at requires_module("astropy")
 def test_fits_image():
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
@@ -87,7 +89,7 @@
     cut_frb = cut.to_frb((0.5, "unitary"), 128)
 
     fid3 = FITSImageData(cut_frb, fields=["density","temperature"], units="cm")
-    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"], 
+    fits_cut = FITSOffAxisSlice(ds, [0.1, 0.2, -0.9], ["density","temperature"],
                                 image_res=128, center=[0.5, 0.42, 0.6],
                                 width=(0.5,"unitary"))
 
@@ -103,26 +105,26 @@
     assert new_fid3.wcs.wcs.ctype[0] == "RA---TAN"
     assert new_fid3.wcs.wcs.ctype[1] == "DEC--TAN"
 
-    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9], 
+    buf = off_axis_projection(ds, ds.domain_center, [0.1, 0.2, -0.9],
                               0.5, 128, "density").swapaxes(0, 1)
     fid4 = FITSImageData(buf, fields="density", width=100.0)
-    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9], "density", 
-                                     width=(0.5,"unitary"), image_res=128, 
+    fits_oap = FITSOffAxisProjection(ds, [0.1, 0.2, -0.9], "density",
+                                     width=(0.5,"unitary"), image_res=128,
                                      depth_res=128, depth=(0.5,"unitary"))
 
     yield assert_equal, fid4.get_data("density"), fits_oap.get_data("density")
 
-    cvg = ds.covering_grid(ds.index.max_level, [0.25,0.25,0.25], 
+    cvg = ds.covering_grid(ds.index.max_level, [0.25,0.25,0.25],
                            [32, 32, 32], fields=["density","temperature"])
     fid5 = FITSImageData(cvg, fields=["density","temperature"])
     assert fid5.dimensionality == 3
 
     fid5.update_header("density", "time", 0.1)
     fid5.update_header("all", "units", "cgs")
-    
+
     assert fid5["density"].header["time"] == 0.1
     assert fid5["temperature"].header["units"] == "cgs"
     assert fid5["density"].header["units"] == "cgs"
-    
+
     os.chdir(curdir)
     shutil.rmtree(tmpdir)

diff -r 67d33b762f68149bb9373400b64e3dd66ea97069 -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 yt/visualization/tests/test_filters.py
--- a/yt/visualization/tests/test_filters.py
+++ b/yt/visualization/tests/test_filters.py
@@ -10,28 +10,23 @@
 
 """
 
-from yt.testing import fake_amr_ds
+from yt.testing import fake_amr_ds, requires_module
 
 
-class TestFilters():
+ at requires_module("scipy")
+def test_white_noise_filter():
+    ds = fake_amr_ds(fields=("density",))
+    p = ds.proj("density", "z")
+    frb = p.to_frb((1, 'unitary'), 64)
+    frb.apply_white_noise()
+    frb.apply_white_noise(1e-3)
+    frb["density"]
 
-    @classmethod
-    def setup_class(cls):
-        ds = fake_amr_ds(fields=("density",))
-        p = ds.proj("density", "z")
-        cls.frb = p.to_frb((1, 'unitary'), 64)
 
-    def teardown(self):
-        try:
-            del self.frb["density"]
-        except KeyError:
-            pass
-
-    def test_white_noise_filter(self):
-        self.frb.apply_white_noise()
-        self.frb.apply_white_noise(1e-3)
-        self.frb["density"]
-
-    def test_gauss_beam_filter(self):
-        self.frb.apply_gauss_beam(nbeam=15, sigma=1.0)
-        self.frb["density"]
+ at requires_module("scipy")
+def test_gauss_beam_filter():
+    ds = fake_amr_ds(fields=("density",))
+    p = ds.proj("density", "z")
+    frb = p.to_frb((1, 'unitary'), 64)
+    frb.apply_gauss_beam(nbeam=15, sigma=1.0)
+    frb["density"]


https://bitbucket.org/yt_analysis/yt/commits/05e0bc50d400/
Changeset:   05e0bc50d400
Branch:      yt
User:        devinsilvia
Date:        2015-08-03 21:22:10+00:00
Summary:     Adding light_ray to frontends/api.py and fixing a file count issue when one tries to load a LightRay
Affected #:  2 files

diff -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 -r 05e0bc50d400b216de085578405fcdb5ce686492 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -31,6 +31,7 @@
     'gdf',
     'halo_catalog',
     'http_stream',
+    'light_ray',
     'moab',
     'owls',
     'owls_subfind',

diff -r 4423a983da4cda6ecdaa8dff5aea86f98b94f2b4 -r 05e0bc50d400b216de085578405fcdb5ce686492 yt/frontends/light_ray/data_structures.py
--- a/yt/frontends/light_ray/data_structures.py
+++ b/yt/frontends/light_ray/data_structures.py
@@ -66,8 +66,7 @@
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
-        self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
-        self.file_count = len(glob.glob(prefix + "*" + self._suffix))
+        self.file_count = 1
 
         for attr in ["cosmological_simulation", "current_time", "current_redshift",
                      "hubble_constant", "omega_matter", "omega_lambda",


https://bitbucket.org/yt_analysis/yt/commits/49042e27fba5/
Changeset:   49042e27fba5
Branch:      yt
User:        devinsilvia
Date:        2015-08-05 10:56:30+00:00
Summary:     Writing out light rays with x, y, z fields in the "group" format.
Affected #:  1 file

diff -r 05e0bc50d400b216de085578405fcdb5ce686492 -r 49042e27fba50a3070ec9243c52425067c65d3f9 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -131,6 +131,7 @@
 
         # Make a light ray from a single, given dataset.
         if simulation_type is None:
+            self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
@@ -364,6 +365,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
+        all_fields.extend(['x', 'y', 'z'])
+        data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los'])
@@ -491,13 +494,26 @@
 
         Write light ray data to hdf5 file.
         """
-
         mylog.info("Saving light ray data to %s." % filename)
         output = h5py.File(filename, 'w')
         for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
             output.attrs[attr] = getattr(self.cosmology, attr)
-        output.attrs["redshift"] = self.near_redshift
+        output.attrs["current_redshift"] = self.near_redshift
+        if self.simulation_type == None:
+            ds = load(parameter_filename, **self.load_kwargs)
+            # Do these need to be in CGS, like how halo_catalog does it?
+            output.attrs["domain_left_edge"] = ds.domain_left_edge
+            output.attrs["domain_right_edge"] = ds.domain_right_edge
+            output.attrs["cosmological_simulation"] = ds.cosmological_simulation
+        else:
+            # Do these need to be in CGS, like how halo_catalog does it?
+            output.attrs["domain_left_edge"] = self.simulation.domain_left_edge
+            output.attrs["domain_right_edge"] = self.simulation.domain_right_edge
+            output.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
+        output.attrs["current_time"] = self.cosmology.t_from_z(self.near_redshift).in_cgs()
         output.attrs["data_type"] = "light_ray"
+        group = output.create_group("grid")
+        group.attrs["num_elements"] = data['x'].size
         for field in data.keys():
             # if the field is a tuple, only use the second part of the tuple
             # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
@@ -505,8 +521,8 @@
                 fieldname = field[1]
             else:
                 fieldname = field
-            output.create_dataset(fieldname, data=data[field])
-            output[fieldname].attrs["units"] = str(data[field].units)
+            group.create_dataset(fieldname, data=data[field])
+            group[fieldname].attrs["units"] = str(data[field].units)
         output.close()
 
     @parallel_root_only
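
After this change a saved ray carries its metadata as root attributes and its
field arrays under a "grid" group, with each dataset tagged with a "units"
attribute. A minimal sketch of reading one back (the filename is
hypothetical):

    import h5py

    with h5py.File("my_ray.h5", "r") as f:
        print(f.attrs["data_type"])        # "light_ray"
        grid = f["grid"]
        print(grid.attrs["num_elements"])  # elements along the ray
        for name in grid:
            print("%s [%s]" % (name, grid[name].attrs["units"]))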


https://bitbucket.org/yt_analysis/yt/commits/9e92992ffac0/
Changeset:   9e92992ffac0
Branch:      yt
User:        devinsilvia
Date:        2015-08-05 12:33:07+00:00
Summary:     Making domain_left_edge and domain_right_edge unitful for the EnzoSimulation class.
Affected #:  1 file

diff -r 49042e27fba50a3070ec9243c52425067c65d3f9 -r 9e92992ffac0b449cdeab0bca847bec2c0ce0098 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -37,7 +37,7 @@
     mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-    
+
 class EnzoSimulation(SimulationTimeSeries):
     r"""
     Initialize an Enzo Simulation object.
@@ -98,6 +98,8 @@
             self.length_unit = self.quan(self.box_size, "Mpccm / h",
                                          registry=self.unit_registry)
             self.box_size = self.length_unit
+            self.domain_left_edge = self.domain_left_edge * self.length_unit
+            self.domain_right_edge = self.domain_right_edge * self.length_unit
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
@@ -130,21 +132,21 @@
             datasets for time series.
             Default: True.
         initial_time : tuple of type (float, str)
-            The earliest time for outputs to be included.  This should be 
+            The earliest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (5.0, "Gyr").  If None, the initial time of the 
-            simulation is used.  This can be used in combination with 
+            For example, (5.0, "Gyr").  If None, the initial time of the
+            simulation is used.  This can be used in combination with
             either final_time or final_redshift.
             Default: None.
         final_time : tuple of type (float, str)
-            The latest time for outputs to be included.  This should be 
+            The latest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (13.7, "Gyr"). If None, the final time of the 
-            simulation is used.  This can be used in combination with either 
+            For example, (13.7, "Gyr"). If None, the final time of the
+            simulation is used.  This can be used in combination with either
             initial_time or initial_redshift.
             Default: None.
         times : tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
+            A list of times for which outputs will be found and the units
             of those values.  For example, ([0, 1, 2, 3], "s").
             Default: None.
         initial_redshift : float
@@ -192,8 +194,8 @@
 
         >>> import yt
         >>> es = yt.simulation("my_simulation.par", "Enzo")
-        
-        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
+
+        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
                                redshift_data=False)
 
         >>> es.get_time_series(redshifts=[3, 2, 1, 0])
@@ -301,7 +303,7 @@
         for output in my_outputs:
             if os.path.exists(output['filename']):
                 init_outputs.append(output['filename'])
-            
+
         DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
                                 setup_function=setup_function)
         mylog.info("%d outputs loaded into time series.", len(init_outputs))
@@ -583,11 +585,11 @@
         Check a list of files to see if they are valid datasets.
         """
 
-        only_on_root(mylog.info, "Checking %d potential outputs.", 
+        only_on_root(mylog.info, "Checking %d potential outputs.",
                      len(potential_outputs))
 
         my_outputs = {}
-        for my_storage, output in parallel_objects(potential_outputs, 
+        for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -641,6 +643,6 @@
         self.initial_redshift = initial_redshift
         # time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
         # rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
-        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 * 
+        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 *
                            (1 + self.initial_redshift)**3)**-0.5).in_units("s")
         self.time_unit.units.registry = self.unit_registry
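
Multiplying the bare domain-edge arrays by length_unit (a quantity in
"Mpccm / h") promotes them to YTArrays, so downstream code such as the light
ray writer can call .in_cgs() on them. A small illustration of the mechanism
(the box size here is made up):

    >>> import numpy as np
    >>> from yt.units import Mpc
    >>> left_edge = np.zeros(3) * (128.0 * Mpc)  # ndarray * quantity -> YTArray
    >>> left_edge.units
    Mpc
    >>> left_edge.in_cgs().units
    cm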


https://bitbucket.org/yt_analysis/yt/commits/95aa7428b219/
Changeset:   95aa7428b219
Branch:      yt
User:        devinsilvia
Date:        2015-08-05 12:36:56+00:00
Summary:     Outputting the domain edge attributes in CGS units.
Affected #:  1 file

diff -r 9e92992ffac0b449cdeab0bca847bec2c0ce0098 -r 95aa7428b219e17862c04a2992f17fc1f6e026ed yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -502,13 +502,13 @@
         if self.simulation_type == None:
             ds = load(parameter_filename, **self.load_kwargs)
             # Do these need to be in CGS, like how halo_catalog does it?
-            output.attrs["domain_left_edge"] = ds.domain_left_edge
-            output.attrs["domain_right_edge"] = ds.domain_right_edge
+            output.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
+            output.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
             output.attrs["cosmological_simulation"] = ds.cosmological_simulation
         else:
             # Do these need to be in CGS, like how halo_catalog does it?
-            output.attrs["domain_left_edge"] = self.simulation.domain_left_edge
-            output.attrs["domain_right_edge"] = self.simulation.domain_right_edge
+            output.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
+            output.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
             output.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
         output.attrs["current_time"] = self.cosmology.t_from_z(self.near_redshift).in_cgs()
         output.attrs["data_type"] = "light_ray"


https://bitbucket.org/yt_analysis/yt/commits/84bd22dc3ee4/
Changeset:   84bd22dc3ee4
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 12:39:43+00:00
Summary:     Merging with Devin's work.
Affected #:  11 files

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -48,7 +48,7 @@
     synthetic QSO lines of sight.
 
     Light rays can also be made from single datasets.
-    
+
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
     single object by providing different random seeds to make_light_ray.
@@ -58,17 +58,17 @@
     parameter_filename : string
         The path to the simulation parameter file or dataset.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to 
+        The simulation type.  If None, the first argument is assumed to
         refer to a single dataset.
         Default: None
     near_redshift : optional, float
-        The near (lowest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The near (lowest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
-        The far (highest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The far (highest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -98,11 +98,11 @@
         datasets for time series.
         Default: True.
     find_outputs : optional, bool
-        Whether or not to search for datasets in the current 
+        Whether or not to search for datasets in the current
         directory.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load" 
+        Optional dictionary of kwargs to be passed to the "load"
         function, appropriate for use of certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
@@ -129,8 +129,9 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.        
+        # Make a light ray from a single, given dataset.
         if simulation_type is None:
+            self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
@@ -156,7 +157,7 @@
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, 
+    def _calculate_light_ray_solution(self, seed=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -185,12 +186,12 @@
                                 np.sin(phi) * np.sin(theta),
                                 np.cos(theta)])
             self.light_ray_solution[0]['traversal_box_fraction'] = \
-              vector_length(self.light_ray_solution[0]['start'], 
+              vector_length(self.light_ray_solution[0]['start'],
                             self.light_ray_solution[0]['end'])
 
         # the normal way (random start positions and trajectories for each dataset)
         else:
-            
+
             # For box coherence, keep track of effective depth travelled.
             box_fraction_used = 0.0
 
@@ -285,15 +286,15 @@
             Default: None.
         trajectory : optional, list of floats
             Used only if creating a light ray from a single dataset.
-            The (r, theta, phi) direction of the light ray.  Use either 
+            The (r, theta, phi) direction of the light ray.  Use either
             end_position or trajectory, not both.
             Default: None.
         fields : optional, list
             A list of fields for which to get data.
             Default: None.
         setup_function : optional, callable, accepts a ds
-            This function will be called on each dataset that is loaded 
-            to create the light ray.  For, example, this can be used to 
+            This function will be called on each dataset that is loaded
+            to create the light ray.  For example, this can be used to
             add new derived fields.
             Default: None.
         solution_filename : optional, string
@@ -308,13 +309,13 @@
             each point in the ray.
             Default: True.
         redshift : optional, float
-            Used with light rays made from single datasets to specify a 
-            starting redshift for the ray.  If not used, the starting 
-            redshift will be 0 for a non-cosmological dataset and 
+            Used with light rays made from single datasets to specify a
+            starting redshift for the ray.  If not used, the starting
+            redshift will be 0 for a non-cosmological dataset and
             the dataset redshift for a cosmological dataset.
             Default: None.
         njobs : optional, int
-            The number of parallel jobs over which the segments will 
+            The number of parallel jobs over which the segments will
             be split.  Choose -1 for one processor per segment.
             Default: -1.
 
@@ -322,7 +323,7 @@
         --------
 
         Make a light ray from multiple datasets:
-        
+
         >>> import yt
         >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
         ...     LightRay
@@ -348,12 +349,12 @@
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
         ...                       get_los_velocity=True)
-        
+
         """
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, 
-                                           start_position=start_position, 
+        self._calculate_light_ray_solution(seed=seed,
+                                           start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
@@ -364,6 +365,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
+        all_fields.extend(['x', 'y', 'z'])
+        data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los'])
@@ -400,7 +403,7 @@
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
                 next_redshift = my_segment["redshift"] - \
-                  self._deltaz_forward(my_segment["redshift"], 
+                  self._deltaz_forward(my_segment["redshift"],
                                        ds.domain_width[0].in_units("Mpccm / h") *
                                        my_segment["traversal_box_fraction"])
             elif my_segment.get("next", None) is None:
@@ -453,7 +456,7 @@
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], 
+                (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
@@ -491,9 +494,26 @@
 
         Write light ray data to hdf5 file.
         """
-
         mylog.info("Saving light ray data to %s." % filename)
         output = h5py.File(filename, 'w')
+        for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+            output.attrs[attr] = getattr(self.cosmology, attr)
+        output.attrs["current_redshift"] = self.near_redshift
+        if self.simulation_type == None:
+            ds = load(parameter_filename, **self.load_kwargs)
+            # Do these need to be in CGS, like how halo_catalog does it?
+            output.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
+            output.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
+            output.attrs["cosmological_simulation"] = ds.cosmological_simulation
+        else:
+            # Do these need to be in CGS, like how halo_catalog does it?
+            output.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
+            output.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
+            output.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
+        output.attrs["current_time"] = self.cosmology.t_from_z(self.near_redshift).in_cgs()
+        output.attrs["data_type"] = "light_ray"
+        group = output.create_group("grid")
+        group.attrs["num_elements"] = data['x'].size
         for field in data.keys():
             # if the field is a tuple, only use the second part of the tuple
             # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
@@ -501,8 +521,8 @@
                 fieldname = field[1]
             else:
                 fieldname = field
-            output.create_dataset(fieldname, data=data[field])
-            output[fieldname].attrs["units"] = str(data[field].units)
+            group.create_dataset(fieldname, data=data[field])
+            group[fieldname].attrs["units"] = str(data[field].units)
         output.close()
 
     @parallel_root_only
@@ -550,7 +570,7 @@
 def vector_length(start, end):
     """
     vector_length(start, end)
-    
+
     Calculate vector length.
     """
 
@@ -577,15 +597,15 @@
     """
     periodic_ray(start, end, left=None, right=None)
 
-    Break up periodic ray into non-periodic segments. 
+    Break up periodic ray into non-periodic segments.
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
-    Returns a list of lists of coordinates, where each element of the 
-    top-most list is a 2-list of start coords and end coords of the 
-    non-periodic ray: 
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
 
-    [[[x0start,y0start,z0start], [x0end, y0end, z0end]], 
-     [[x1start,y1start,z1start], [x1end, y1end, z1end]], 
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
      ...,]
 
     """

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -31,6 +31,7 @@
     'gdf',
     'halo_catalog',
     'http_stream',
+    'light_ray',
     'moab',
     'owls',
     'owls_subfind',

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -37,7 +37,7 @@
     mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-    
+
 class EnzoSimulation(SimulationTimeSeries):
     r"""
     Initialize an Enzo Simulation object.
@@ -98,6 +98,8 @@
             self.length_unit = self.quan(self.box_size, "Mpccm / h",
                                          registry=self.unit_registry)
             self.box_size = self.length_unit
+            self.domain_left_edge = self.domain_left_edge * self.length_unit
+            self.domain_right_edge = self.domain_right_edge * self.length_unit
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
@@ -130,21 +132,21 @@
             datasets for time series.
             Default: True.
         initial_time : tuple of type (float, str)
-            The earliest time for outputs to be included.  This should be 
+            The earliest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (5.0, "Gyr").  If None, the initial time of the 
-            simulation is used.  This can be used in combination with 
+            For example, (5.0, "Gyr").  If None, the initial time of the
+            simulation is used.  This can be used in combination with
             either final_time or final_redshift.
             Default: None.
         final_time : tuple of type (float, str)
-            The latest time for outputs to be included.  This should be 
+            The latest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (13.7, "Gyr"). If None, the final time of the 
-            simulation is used.  This can be used in combination with either 
+            For example, (13.7, "Gyr"). If None, the final time of the
+            simulation is used.  This can be used in combination with either
             initial_time or initial_redshift.
             Default: None.
         times : tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
+            A list of times for which outputs will be found and the units
             of those values.  For example, ([0, 1, 2, 3], "s").
             Default: None.
         initial_redshift : float
@@ -192,8 +194,8 @@
 
         >>> import yt
         >>> es = yt.simulation("my_simulation.par", "Enzo")
-        
-        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
+
+        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
                                redshift_data=False)
 
         >>> es.get_time_series(redshifts=[3, 2, 1, 0])
@@ -301,7 +303,7 @@
         for output in my_outputs:
             if os.path.exists(output['filename']):
                 init_outputs.append(output['filename'])
-            
+
         DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
                                 setup_function=setup_function)
         mylog.info("%d outputs loaded into time series.", len(init_outputs))
@@ -583,11 +585,11 @@
         Check a list of files to see if they are valid datasets.
         """
 
-        only_on_root(mylog.info, "Checking %d potential outputs.", 
+        only_on_root(mylog.info, "Checking %d potential outputs.",
                      len(potential_outputs))
 
         my_outputs = {}
-        for my_storage, output in parallel_objects(potential_outputs, 
+        for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -641,6 +643,6 @@
         self.initial_redshift = initial_redshift
         # time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
         # rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
-        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 * 
+        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 *
                            (1 + self.initial_redshift)**3)**-0.5).in_units("s")
         self.time_unit.units.registry = self.unit_registry

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/light_ray/__init__.py
--- /dev/null
+++ b/yt/frontends/light_ray/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for LightRay frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/light_ray/api.py
--- /dev/null
+++ b/yt/frontends/light_ray/api.py
@@ -0,0 +1,24 @@
+"""
+API for LightRay frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+     LightRayDataset
+
+from .io import \
+     IOHandlerLightRayHDF5
+
+from .fields import \
+     LightRayFieldInfo

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/light_ray/data_structures.py
--- /dev/null
+++ b/yt/frontends/light_ray/data_structures.py
@@ -0,0 +1,96 @@
+"""
+Data structures for LightRay frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+    LightRayFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+
+class LightRayHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+
+        super(LightRayHDF5File, self).__init__(ds, io, filename, file_id)
+
+class LightRayDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = LightRayHDF5File
+    _field_info_class = LightRayFieldInfo
+    _suffix = ".h5"
+
+    def __init__(self, filename, dataset_type="lightray_hdf5",
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(LightRayDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
+        self.file_count = 1
+
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, hvals[attr])
+        self.periodicity = (True, True, True)
+        self.particle_types = ("gas")
+        self.particle_types_raw = ("gas")
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.parameters.update(hvals)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.velocity_unit = self.quan(1.0, "cm / s")
+        self.time_unit = self.quan(1.0, "s")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            if "data_type" in f.attrs and \
+              f.attrs["data_type"] == "light_ray":
+                return True
+        return False
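
Since _is_valid keys on the ".h5" suffix and a root "data_type" attribute of
"light_ray", a ray written by LightRay should load through the generic
yt.load, which tries each registered Dataset subclass in turn. A usage sketch
(the filename is hypothetical):

    >>> import yt
    >>> ds = yt.load("my_ray.h5")  # dispatched to LightRayDataset by _is_valid
    >>> ds.current_redshift
    >>> ds.domain_left_edge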

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/light_ray/fields.py
--- /dev/null
+++ b/yt/frontends/light_ray/fields.py
@@ -0,0 +1,48 @@
+"""
+LightRay-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+from yt.units.yt_array import \
+    YTArray
+
+from yt.utilities.physical_constants import \
+    mh, \
+    mass_sun_cgs
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class LightRayFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("particle_identifier", ("", [], None)),
+        ("particle_position_x", (p_units, [], None)),
+        ("particle_position_y", (p_units, [], None)),
+        ("particle_position_z", (p_units, [], None)),
+        ("particle_velocity_x", (v_units, [], None)),
+        ("particle_velocity_y", (v_units, [], None)),
+        ("particle_velocity_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Virial Mass")),
+        ("virial_radius", (r_units, [], "Virial Radius")),
+)
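
Each entry in known_particle_fields follows the FieldInfoContainer convention
of (name, (units, aliases, display_name)): an empty units string means
dimensionless, the list holds alias field names, and None leaves the display
name to be auto-generated. For instance, aliasing the mass field would look
like this (the alias name is illustrative only):

    ("particle_mass", (m_units, ["halo_mass"], "Virial Mass")),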

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/light_ray/io.py
--- /dev/null
+++ b/yt/frontends/light_ray/io.py
@@ -0,0 +1,119 @@
+"""
+LightRay data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import mylog
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerLightRayHDF5(BaseIOHandler):
+    _dataset_type = "lightray_hdf5"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                x = f['particle_position_x'].value.astype("float64")
+                y = f['particle_position_y'].value.astype("float64")
+                z = f['particle_position_z'].value.astype("float64")
+                yield "halos", (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "halos")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            pcount = data_file.header['num_halos']
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = f['particle_position_x'].value.astype("float64")
+                    y = f['particle_position_y'].value.astype("float64")
+                    z = f['particle_position_z'].value.astype("float64")
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f[field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        pcount = data_file.header["num_halos"]
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            if not f.keys(): return None
+            pos = np.empty((pcount, 3), dtype="float64")
+            pos = data_file.ds.arr(pos, "code_length")
+            dx = np.finfo(f['particle_position_x'].dtype).eps
+            dx = 2.0*self.ds.quan(dx, "code_length")
+            pos[:,0] = f["particle_position_x"].value
+            pos[:,1] = f["particle_position_y"].value
+            pos[:,2] = f["particle_position_z"].value
+            # These are 32-bit numbers, so we give a little leeway.
+            # Otherwise, for big sets of particles, we will often bump into
+            # the domain edges.  This helps alleviate that.
+            np.clip(pos, self.ds.domain_left_edge + dx,
+                         self.ds.domain_right_edge - dx, pos)
+            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.ds.domain_left_edge,
+                                       self.ds.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'halos': data_file.header['num_halos']}
+
+    def _identify_fields(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            fields = [("halos", field) for field in f]
+            units = dict([(("halos", field),
+                           f[field].attrs["units"]) for field in f])
+        return fields, units

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/frontends/light_ray/setup.py
--- /dev/null
+++ b/yt/frontends/light_ray/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('light_ray', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/utilities/pyparselibconfig/libconfig.py
--- a/yt/utilities/pyparselibconfig/libconfig.py
+++ b/yt/utilities/pyparselibconfig/libconfig.py
@@ -1,9 +1,9 @@
 from __future__ import print_function
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2013, Samuel Skillman 
+# Copyright (c) 2013, yt Development Team
 #
-# Distributed under the terms of the MIT License.
+# Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------

diff -r 983689a4cff861c6846f283f90e8a296662d667b -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 yt/utilities/quantities.py
--- a/yt/utilities/quantities.py
+++ b/yt/utilities/quantities.py
@@ -1,30 +1,18 @@
 """
-Quantities -- floats with units.
+Some old field names.
 
-Author: Casey W. Stark <caseywstark at gmail.com>
-Affiliation: UC Berkeley
 
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2013 Casey W. Stark.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 import numpy as np
 
 from yt.units.yt_array import YTArray


https://bitbucket.org/yt_analysis/yt/commits/5cbb0c2739e3/
Changeset:   5cbb0c2739e3
Branch:      yt
User:        devinsilvia
Date:        2015-08-05 12:43:18+00:00
Summary:     Removing the light_ray frontend
Affected #:  6 files

diff -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 yt/frontends/light_ray/__init__.py
--- a/yt/frontends/light_ray/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-API for LightRay frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 yt/frontends/light_ray/api.py
--- a/yt/frontends/light_ray/api.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-API for LightRay frontend
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-     LightRayDataset
-
-from .io import \
-     IOHandlerLightRayHDF5
-
-from .fields import \
-     LightRayFieldInfo

diff -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 yt/frontends/light_ray/data_structures.py
--- a/yt/frontends/light_ray/data_structures.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
-Data structures for LightRay frontend.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-import stat
-import weakref
-import struct
-import glob
-import time
-import os
-
-from .fields import \
-    LightRayFieldInfo
-
-from yt.utilities.cosmology import Cosmology
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
-from yt.data_objects.static_output import \
-    Dataset, \
-    ParticleFile
-import yt.utilities.fortran_utils as fpu
-from yt.units.yt_array import \
-    YTArray, \
-    YTQuantity
-
-class LightRayHDF5File(ParticleFile):
-    def __init__(self, ds, io, filename, file_id):
-        with h5py.File(filename, "r") as f:
-            self.header = dict((field, f.attrs[field]) \
-                               for field in f.attrs.keys())
-
-        super(LightRayHDF5File, self).__init__(ds, io, filename, file_id)
-
-class LightRayDataset(Dataset):
-    _index_class = ParticleIndex
-    _file_class = LightRayHDF5File
-    _field_info_class = LightRayFieldInfo
-    _suffix = ".h5"
-
-    def __init__(self, filename, dataset_type="lightray_hdf5",
-                 n_ref = 16, over_refine_factor = 1, units_override=None):
-        self.n_ref = n_ref
-        self.over_refine_factor = over_refine_factor
-        super(LightRayDataset, self).__init__(filename, dataset_type,
-                                                 units_override=units_override)
-
-    def _parse_parameter_file(self):
-        with h5py.File(self.parameter_filename, "r") as f:
-            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
-        self.file_count = 1
-
-        for attr in ["cosmological_simulation", "current_time", "current_redshift",
-                     "hubble_constant", "omega_matter", "omega_lambda",
-                     "domain_left_edge", "domain_right_edge"]:
-            setattr(self, attr, hvals[attr])
-        self.periodicity = (True, True, True)
-        self.particle_types = ("gas")
-        self.particle_types_raw = ("gas")
-
-        nz = 1 << self.over_refine_factor
-        self.domain_dimensions = np.ones(3, "int32") * nz
-        self.parameters.update(hvals)
-
-    def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.velocity_unit = self.quan(1.0, "cm / s")
-        self.time_unit = self.quan(1.0, "s")
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if not args[0].endswith(".h5"): return False
-        with h5py.File(args[0], "r") as f:
-            if "data_type" in f.attrs and \
-              f.attrs["data_type"] == "light_ray":
-                return True
-        return False

diff -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 yt/frontends/light_ray/fields.py
--- a/yt/frontends/light_ray/fields.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-LightRay-specific fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import mylog
-from yt.fields.field_info_container import \
-    FieldInfoContainer
-from yt.units.yt_array import \
-    YTArray
-
-from yt.utilities.physical_constants import \
-    mh, \
-    mass_sun_cgs
-
-m_units = "g"
-p_units = "cm"
-v_units = "cm / s"
-r_units = "cm"
-
-class LightRayFieldInfo(FieldInfoContainer):
-    known_other_fields = (
-    )
-
-    known_particle_fields = (
-        ("particle_identifier", ("", [], None)),
-        ("particle_position_x", (p_units, [], None)),
-        ("particle_position_y", (p_units, [], None)),
-        ("particle_position_z", (p_units, [], None)),
-        ("particle_velocity_x", (v_units, [], None)),
-        ("particle_velocity_y", (v_units, [], None)),
-        ("particle_velocity_z", (v_units, [], None)),
-        ("particle_mass", (m_units, [], "Virial Mass")),
-        ("virial_radius", (r_units, [], "Virial Radius")),
-)

diff -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 yt/frontends/light_ray/io.py
--- a/yt/frontends/light_ray/io.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-LightRay data-file handling function
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-
-from yt.utilities.exceptions import *
-from yt.funcs import mylog
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-
-from yt.utilities.lib.geometry_utils import compute_morton
-
-from yt.geometry.oct_container import _ORDER_MAX
-
-class IOHandlerLightRayHDF5(BaseIOHandler):
-    _dataset_type = "lightray_hdf5"
-
-    def _read_fluid_selection(self, chunks, selector, fields, size):
-        raise NotImplementedError
-
-    def _read_particle_coords(self, chunks, ptf):
-        # This will read chunks and yield the results.
-        chunks = list(chunks)
-        data_files = set([])
-        # Only support halo reading for now.
-        assert(len(ptf) == 1)
-        assert(list(ptf.keys())[0] == "halos")
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
-            pcount = data_file.header['num_halos']
-            with h5py.File(data_file.filename, "r") as f:
-                x = f['particle_position_x'].value.astype("float64")
-                y = f['particle_position_y'].value.astype("float64")
-                z = f['particle_position_z'].value.astype("float64")
-                yield "halos", (x, y, z)
-
-    def _read_particle_fields(self, chunks, ptf, selector):
-        # Now we have all the sizes, and we can allocate
-        chunks = list(chunks)
-        data_files = set([])
-        # Only support halo reading for now.
-        assert(len(ptf) == 1)
-        assert(list(ptf.keys())[0] == "halos")
-        for chunk in chunks:
-            for obj in chunk.objs:
-                data_files.update(obj.data_files)
-        for data_file in sorted(data_files):
-            pcount = data_file.header['num_halos']
-            with h5py.File(data_file.filename, "r") as f:
-                for ptype, field_list in sorted(ptf.items()):
-                    x = f['particle_position_x'].value.astype("float64")
-                    y = f['particle_position_y'].value.astype("float64")
-                    z = f['particle_position_z'].value.astype("float64")
-                    mask = selector.select_points(x, y, z, 0.0)
-                    del x, y, z
-                    if mask is None: continue
-                    for field in field_list:
-                        data = f[field][mask].astype("float64")
-                        yield (ptype, field), data
-
-    def _initialize_index(self, data_file, regions):
-        pcount = data_file.header["num_halos"]
-        morton = np.empty(pcount, dtype='uint64')
-        mylog.debug("Initializing index % 5i (% 7i particles)",
-                    data_file.file_id, pcount)
-        ind = 0
-        with h5py.File(data_file.filename, "r") as f:
-            if not f.keys(): return None
-            pos = np.empty((pcount, 3), dtype="float64")
-            pos = data_file.ds.arr(pos, "code_length")
-            dx = np.finfo(f['particle_position_x'].dtype).eps
-            dx = 2.0*self.ds.quan(dx, "code_length")
-            pos[:,0] = f["particle_position_x"].value
-            pos[:,1] = f["particle_position_y"].value
-            pos[:,2] = f["particle_position_z"].value
-            # These are 32 bit numbers, so we give a little lee-way.
-            # Otherwise, for big sets of particles, we often will bump into the
-            # domain edges.  This helps alleviate that.
-            np.clip(pos, self.ds.domain_left_edge + dx,
-                         self.ds.domain_right_edge - dx, pos)
-            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
-               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
-                raise YTDomainOverflow(pos.min(axis=0),
-                                       pos.max(axis=0),
-                                       self.ds.domain_left_edge,
-                                       self.ds.domain_right_edge)
-            regions.add_data_file(pos, data_file.file_id)
-            morton[ind:ind+pos.shape[0]] = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.ds.domain_left_edge,
-                data_file.ds.domain_right_edge)
-        return morton
-
-    def _count_particles(self, data_file):
-        return {'halos': data_file.header['num_halos']}
-
-    def _identify_fields(self, data_file):
-        with h5py.File(data_file.filename, "r") as f:
-            fields = [("halos", field) for field in f]
-            units = dict([(("halos", field),
-                           f[field].attrs["units"]) for field in f])
-        return fields, units

diff -r 84bd22dc3ee4fca0ec189c5e111c1a64f97f71a5 -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 yt/frontends/light_ray/setup.py
--- a/yt/frontends/light_ray/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_ray', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config


https://bitbucket.org/yt_analysis/yt/commits/463ab3029030/
Changeset:   463ab3029030
Branch:      yt
User:        devinsilvia
Date:        2015-08-05 12:44:11+00:00
Summary:     Removing reference to light_ray in frontends/api.py
Affected #:  1 file

diff -r 5cbb0c2739e3330e10eb78d8d7b180893fe05432 -r 463ab302903043fde0181e73d7a850dbead9a0bb yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -31,7 +31,6 @@
     'gdf',
     'halo_catalog',
     'http_stream',
-    'light_ray',
     'moab',
     'owls',
     'owls_subfind',


https://bitbucket.org/yt_analysis/yt/commits/3d5d3f1344a8/
Changeset:   3d5d3f1344a8
Branch:      yt
User:        brittonsmith
Date:        2015-08-05 19:44:21+00:00
Summary:     Alias for position fields.
Affected #:  1 file

diff -r 463ab302903043fde0181e73d7a850dbead9a0bb -r 3d5d3f1344a8824e89be12fe437bfd4790076d8c yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -30,7 +30,7 @@
     )
 
     known_particle_fields = (
-        ("particle_position_x", (p_units, [], None)),
-        ("particle_position_y", (p_units, [], None)),
-        ("particle_position_z", (p_units, [], None)),
+        ("x", (p_units, ["particle_position_x"], None)),
+        ("y", (p_units, ["particle_position_y"], None)),
+        ("z", (p_units, ["particle_position_z"], None)),
     )
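Each entry in known_particle_fields follows yt's (on-disk name, (units, aliases, display name)) convention, so after this change the on-disk "x" column also answers to the canonical "particle_position_x" name. A minimal sketch of the effect, assuming a previously saved ytdata file named "my_data.h5" (the path is illustrative):

    import yt

    ds = yt.load("my_data.h5")
    ad = ds.all_data()
    # The on-disk field and its alias return the same array:
    assert (ad["grid", "x"] == ad["grid", "particle_position_x"]).all()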


https://bitbucket.org/yt_analysis/yt/commits/fe6775b1c4f3/
Changeset:   fe6775b1c4f3
Branch:      yt
User:        brittonsmith
Date:        2015-08-06 18:01:33+00:00
Summary:     Have lightray return an all_data container.
Affected #:  1 file

diff -r 3d5d3f1344a8824e89be12fe437bfd4790076d8c -r fe6775b1c4f36c8397f1f046764edd7cae8b0130 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -480,12 +480,14 @@
         # Flatten the list into a single dictionary containing fields
         # for the whole ray.
         all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
+        self._data = all_data
 
         if data_filename is not None:
             self._write_light_ray(data_filename, all_data)
-
-        self._data = all_data
-        return all_data
+            ray_ds = load(data_filename)
+            return ray_ds.all_data()
+        else:
+            return None
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):


https://bitbucket.org/yt_analysis/yt/commits/3849e7e85271/
Changeset:   3849e7e85271
Branch:      yt
User:        brittonsmith
Date:        2015-08-06 18:09:47+00:00
Summary:     Return a loaded ds instead.
Affected #:  1 file

diff -r fe6775b1c4f36c8397f1f046764edd7cae8b0130 -r 3849e7e8527178da483422cdad9cb65c9598481a yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -485,7 +485,7 @@
         if data_filename is not None:
             self._write_light_ray(data_filename, all_data)
             ray_ds = load(data_filename)
-            return ray_ds.all_data()
+            return ray_ds
         else:
             return None
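Combined with the previous changeset, make_light_ray now hands back a reloadable yt dataset whenever data_filename is given, and None otherwise. A usage sketch, with the simulation and output paths as placeholders:

    import yt
    from yt.analysis_modules.cosmological_observation.light_ray.api import \
        LightRay

    lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo",
                  near_redshift=0.0, far_redshift=0.1)
    ray_ds = lr.make_light_ray(seed=12345,
                               fields=["temperature", "density"],
                               data_filename="my_ray.h5")
    if ray_ds is not None:
        # ray_ds is an ordinary yt dataset backed by my_ray.h5
        print(ray_ds.all_data()["temperature"])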
 


https://bitbucket.org/yt_analysis/yt/commits/141abc98ecba/
Changeset:   141abc98ecba
Branch:      yt
User:        brittonsmith
Date:        2015-08-06 18:14:09+00:00
Summary:     Allow querying of light ray fields from light ray.
Affected #:  1 file

diff -r 3849e7e8527178da483422cdad9cb65c9598481a -r 141abc98ecbaf4afa6de764e7221868ca7047ac4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -489,6 +489,9 @@
         else:
             return None
 
+    def __getitem__(self, field):
+        return self._data[field]
+
     @parallel_root_only
     def _write_light_ray(self, filename, data):
         """


https://bitbucket.org/yt_analysis/yt/commits/63cbede632a9/
Changeset:   63cbede632a9
Branch:      yt
User:        brittonsmith
Date:        2015-08-06 19:48:06+00:00
Summary:     Field data is now a data container from a ray dataset.
Affected #:  1 file

diff -r 141abc98ecbaf4afa6de764e7221868ca7047ac4 -r 63cbede632a9c54c3d8c4aa491c42009b1346be2 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -20,6 +20,7 @@
 
 from .absorption_line import tau_profile
 
+from yt.convenience import load
 from yt.funcs import get_pbar, mylog
 from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.physical_constants import \
@@ -108,7 +109,7 @@
                                     'normalization': normalization,
                                     'index': index})
 
-    def make_spectrum(self, input_file, output_file='spectrum.h5',
+    def make_spectrum(self, input_ds, output_file='spectrum.h5',
                       line_list_file='lines.txt',
                       use_peculiar_velocity=True):
         """
@@ -117,8 +118,8 @@
         Parameters
         ----------
 
-        input_file : string
-           path to input ray data.
+        input_ds : string or dataset
+           path to input ray data or a loaded ray dataset
         output_file : string
            path for output file.  File formats are chosen based on the filename extension.
            ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
@@ -128,7 +129,6 @@
 
         input_fields = ['dl', 'redshift', 'temperature']
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
-        field_data = {}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
             field_units["velocity_los"] = "cm/s"
@@ -137,10 +137,9 @@
                 input_fields.append(feature['field_name'])
                 field_units[feature["field_name"]] = "cm**-3"
 
-        input = h5py.File(input_file, 'r')
-        for field in input_fields:
-            field_data[field] = YTArray(input[field].value, field_units[field])
-        input.close()
+        if isinstance(input_ds, str):
+            input_ds = load(input_ds)
+        field_data = input_ds.all_data()
 
         self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
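make_spectrum now accepts either a path to a saved ray or an already loaded ray dataset, so both call styles below should work (file names are placeholders, ray_ds is reused from the LightRay sketch above, and the line parameters are illustrative values for H I Lyman-alpha):

    from yt.analysis_modules.absorption_spectrum.api import \
        AbsorptionSpectrum

    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    sp.add_line("HI Lya", "H_number_density", 1215.6700,
                0.4164, 6.265e8, 1.00794)
    sp.make_spectrum("my_ray.h5", output_file="spectrum.h5")  # from a file
    sp.make_spectrum(ray_ds, output_file="spectrum.h5")       # from a dataset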


https://bitbucket.org/yt_analysis/yt/commits/ea47e41ef79a/
Changeset:   ea47e41ef79a
Branch:      yt
User:        chummels
Date:        2015-08-18 16:16:46+00:00
Summary:     Preventing a crash when you set line_list_file=None in make_spectrum()
Affected #:  1 file

diff -r 63cbede632a9c54c3d8c4aa491c42009b1346be2 -r ea47e41ef79ab452f2df71e981a480b73e19ce0f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -283,6 +283,8 @@
         """
         Write out list of spectral lines.
         """
+        if filename is None:
+            return
         mylog.info("Writing spectral line list: %s." % filename)
         self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
         f = open(filename, 'w')
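With this guard, passing line_list_file=None simply skips writing the line list instead of crashing in open(). Reusing sp and ray_ds from the sketches above:

    sp.make_spectrum(ray_ds, output_file="spectrum.h5", line_list_file=None)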


https://bitbucket.org/yt_analysis/yt/commits/461dea962415/
Changeset:   461dea962415
Branch:      yt
User:        brittonsmith
Date:        2015-08-21 15:52:48+00:00
Summary:     Merging.
Affected #:  16 files

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -20,6 +20,7 @@
 
 from .absorption_line import tau_profile
 
+from yt.convenience import load
 from yt.funcs import get_pbar, mylog
 from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.physical_constants import \
@@ -112,7 +113,7 @@
                                     'normalization': normalization,
                                     'index': index})
 
-    def make_spectrum(self, input_file, output_file="spectrum.h5",
+    def make_spectrum(self, input_ds, output_file="spectrum.h5",
                       line_list_file="lines.txt",
                       use_peculiar_velocity=True, njobs="auto"):
         """
@@ -121,8 +122,8 @@
         Parameters
         ----------
 
-        input_file : string
-           path to input ray data.
+        input_ds : string or dataset
+           path to input ray data or a loaded ray dataset
         output_file : optional, string
            path for output file.  File formats are chosen based on the 
            filename extension.  ``.h5`` for hdf5, ``.fits`` for fits, 
@@ -156,7 +157,6 @@
 
         input_fields = ['dl', 'redshift', 'temperature']
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
-        field_data = {}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
             field_units["velocity_los"] = "cm/s"
@@ -165,10 +165,9 @@
                 input_fields.append(feature['field_name'])
                 field_units[feature["field_name"]] = "cm**-3"
 
-        input = h5py.File(input_file, 'r')
-        for field in input_fields:
-            field_data[field] = YTArray(input[field].value, field_units[field])
-        input.close()
+        if isinstance(input_ds, str):
+            input_ds = load(input_ds)
+        field_data = input_ds.all_data()
 
         self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
@@ -326,6 +325,8 @@
         """
         Write out list of spectral lines.
         """
+        if filename is None:
+            return
         mylog.info("Writing spectral line list: %s." % filename)
         self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
         f = open(filename, 'w')

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -47,7 +47,7 @@
     synthetic QSO lines of sight.
 
     Light rays can also be made from single datasets.
-    
+
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
     single object by providing different random seeds to make_light_ray.
@@ -57,17 +57,17 @@
     parameter_filename : string
         The path to the simulation parameter file or dataset.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to 
+        The simulation type.  If None, the first argument is assumed to
         refer to a single dataset.
         Default: None
     near_redshift : optional, float
-        The near (lowest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The near (lowest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
-        The far (highest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The far (highest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -97,11 +97,11 @@
         datasets for time series.
         Default: True.
     find_outputs : optional, bool
-        Whether or not to search for datasets in the current 
+        Whether or not to search for datasets in the current
         directory.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load" 
+        Optional dictionary of kwargs to be passed to the "load"
         function, appropriate for use of certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
@@ -128,8 +128,9 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.        
+        # Make a light ray from a single, given dataset.
         if simulation_type is None:
+            self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
@@ -155,7 +156,7 @@
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, 
+    def _calculate_light_ray_solution(self, seed=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -184,12 +185,12 @@
                                 np.sin(phi) * np.sin(theta),
                                 np.cos(theta)])
             self.light_ray_solution[0]['traversal_box_fraction'] = \
-              vector_length(self.light_ray_solution[0]['start'], 
+              vector_length(self.light_ray_solution[0]['start'],
                             self.light_ray_solution[0]['end'])
 
         # the normal way (random start positions and trajectories for each dataset)
         else:
-            
+
             # For box coherence, keep track of effective depth travelled.
             box_fraction_used = 0.0
 
@@ -284,15 +285,15 @@
             Default: None.
         trajectory : optional, list of floats
             Used only if creating a light ray from a single dataset.
-            The (r, theta, phi) direction of the light ray.  Use either 
+            The (r, theta, phi) direction of the light ray.  Use either
             end_position or trajectory, not both.
             Default: None.
         fields : optional, list
             A list of fields for which to get data.
             Default: None.
         setup_function : optional, callable, accepts a ds
-            This function will be called on each dataset that is loaded 
-            to create the light ray.  For, example, this can be used to 
+            This function will be called on each dataset that is loaded
+            to create the light ray.  For example, this can be used to
             add new derived fields.
             Default: None.
         solution_filename : optional, string
@@ -307,13 +308,13 @@
             each point in the ray.
             Default: True.
         redshift : optional, float
-            Used with light rays made from single datasets to specify a 
-            starting redshift for the ray.  If not used, the starting 
-            redshift will be 0 for a non-cosmological dataset and 
+            Used with light rays made from single datasets to specify a
+            starting redshift for the ray.  If not used, the starting
+            redshift will be 0 for a non-cosmological dataset and
             the dataset redshift for a cosmological dataset.
             Default: None.
         njobs : optional, int
-            The number of parallel jobs over which the segments will 
+            The number of parallel jobs over which the segments will
             be split.  Choose -1 for one processor per segment.
             Default: -1.
 
@@ -321,7 +322,7 @@
         --------
 
         Make a light ray from multiple datasets:
-        
+
         >>> import yt
         >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
         ...     LightRay
@@ -347,12 +348,12 @@
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
         ...                       get_los_velocity=True)
-        
+
         """
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, 
-                                           start_position=start_position, 
+        self._calculate_light_ray_solution(seed=seed,
+                                           start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
@@ -363,6 +364,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
+        all_fields.extend(['x', 'y', 'z'])
+        data_fields.extend(['x', 'y', 'z'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los'])
@@ -399,7 +402,7 @@
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
                 next_redshift = my_segment["redshift"] - \
-                  self._deltaz_forward(my_segment["redshift"], 
+                  self._deltaz_forward(my_segment["redshift"],
                                        ds.domain_width[0].in_units("Mpccm / h") *
                                        my_segment["traversal_box_fraction"])
             elif my_segment.get("next", None) is None:
@@ -452,7 +455,7 @@
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], 
+                (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
@@ -476,12 +479,17 @@
         # Flatten the list into a single dictionary containing fields
         # for the whole ray.
         all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
+        self._data = all_data
 
         if data_filename is not None:
             self._write_light_ray(data_filename, all_data)
+            ray_ds = load(data_filename)
+            return ray_ds
+        else:
+            return None
 
-        self._data = all_data
-        return all_data
+    def __getitem__(self, field):
+        return self._data[field]
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):
@@ -490,9 +498,26 @@
 
         Write light ray data to hdf5 file.
         """
-
         mylog.info("Saving light ray data to %s." % filename)
         output = h5py.File(filename, 'w')
+        for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+            output.attrs[attr] = getattr(self.cosmology, attr)
+        output.attrs["current_redshift"] = self.near_redshift
+        if self.simulation_type is None:
+            ds = load(self.parameter_filename, **self.load_kwargs)
+            # Do these need to be in CGS, like how halo_catalog does it?
+            output.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
+            output.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
+            output.attrs["cosmological_simulation"] = ds.cosmological_simulation
+        else:
+            # Do these need to be in CGS, like how halo_catalog does it?
+            output.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
+            output.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
+            output.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
+        output.attrs["current_time"] = self.cosmology.t_from_z(self.near_redshift).in_cgs()
+        output.attrs["data_type"] = "light_ray"
+        group = output.create_group("grid")
+        group.attrs["num_elements"] = data['x'].size
         for field in data.keys():
             # if the field is a tuple, only use the second part of the tuple
             # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
@@ -500,8 +525,8 @@
                 fieldname = field[1]
             else:
                 fieldname = field
-            output.create_dataset(fieldname, data=data[field])
-            output[fieldname].attrs["units"] = str(data[field].units)
+            group.create_dataset(fieldname, data=data[field])
+            group[fieldname].attrs["units"] = str(data[field].units)
         output.close()
 
     @parallel_root_only
@@ -549,7 +574,7 @@
 def vector_length(start, end):
     """
     vector_length(start, end)
-    
+
     Calculate vector length.
     """
 
@@ -576,15 +601,15 @@
     """
     periodic_ray(start, end, left=None, right=None)
 
-    Break up periodic ray into non-periodic segments. 
+    Break up periodic ray into non-periodic segments.
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
-    Returns a list of lists of coordinates, where each element of the 
-    top-most list is a 2-list of start coords and end coords of the 
-    non-periodic ray: 
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
 
-    [[[x0start,y0start,z0start], [x0end, y0end, z0end]], 
-     [[x1start,y1start,z1start], [x1end, y1end, z1end]], 
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
      ...,]
 
     """

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -21,6 +21,9 @@
     periodic_distance
 from yt.data_objects.profiles import \
     create_profile
+from yt.frontends.ytdata.utilities import \
+    _hdf5_yt_array, \
+    _yt_array_hdf5
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \
@@ -584,21 +587,3 @@
     del sphere
     
 add_callback("iterative_center_of_mass", iterative_center_of_mass)
-
-def _yt_array_hdf5(fh, fieldname, data):
-    dataset = fh.create_dataset(fieldname, data=data)
-    units = ""
-    if isinstance(data, YTArray):
-        units = str(data.units)
-    dataset.attrs["units"] = units
-
-def _hdf5_yt_array(fh, fieldname, ds=None):
-    if ds is None:
-        new_arr = YTArray
-    else:
-        new_arr = ds.arr
-    units = ""
-    if "units" in fh[fieldname].attrs:
-        units = fh[fieldname].attrs["units"]
-    if units == "dimensionless": units = ""
-    return new_arr(fh[fieldname].value, units)

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,7 +13,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import h5py
 import itertools
+import os
 import types
 import uuid
 from yt.extern.six import string_types
@@ -25,9 +27,12 @@
 import shelve
 from contextlib import contextmanager
 
+from yt.funcs import get_output_filename
 from yt.funcs import *
 
 from yt.data_objects.particle_io import particle_handler_registry
+from yt.frontends.ytdata.utilities import \
+    to_yt_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -463,6 +468,90 @@
         df = pd.DataFrame(data)
         return df
 
+    def to_dataset(self, filename=None, fields=None):
+        r"""Export a data object to a reloadable yt dataset.
+
+        This function will take a data object and output a dataset 
+        containing either the fields presently existing or fields 
+        given in a list.  The resulting dataset can be reloaded as 
+        a yt dataset.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the file to be written.  If None, the name 
+            will be a combination of the original dataset and the type 
+            of data container.
+        fields : list of strings or tuples, default None
+            If this is supplied, it is the list of fields to be exported into
+            the data frame.  If not supplied, whatever fields presently exist
+            will be used.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> dd = ds.all_data()
+        >>> fn1 = dd.to_dataset(fields=["density", "temperature"])
+        >>> ds1 = yt.load(fn1)
+        >>> dd["velocity_magnitude"]
+        >>> fn2 = dd.to_dataset()
+        >>> ds2 = yt.load(fn2)
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self._type_name)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        data = {}
+        if fields is not None:
+            for f in self._determine_fields(fields):
+                data[f] = self[f]
+        else:
+            data.update(self.field_data)
+        data_fields = list(data.keys())
+
+        need_grid_fields = False
+        need_particle_fields = False
+        ptypes = []
+        ftypes = {}
+        for field in data_fields:
+            if self.ds.field_info[field].particle_type:
+                if field[0] not in ptypes:
+                    ptypes.append(field[0])
+                ftypes[field] = field[0]
+                need_particle_fields = True
+            else:
+                ftypes[field] = "grid"
+                need_grid_fields = True
+
+        for ax in "xyz":
+            if need_particle_fields:
+                for ptype in ptypes:
+                    p_field = (ptype, "particle_position_%s" % ax)
+                    if p_field in self.ds.field_info and p_field not in data:
+                        data_fields.append(p_field)
+                        ftypes[p_field] = p_field[0]
+                        data[p_field] = self[p_field]
+            if need_grid_fields:
+                g_field = ("index", ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+                    
+        extra_attrs = dict([(arg, getattr(self, arg, None))
+                            for arg in self._con_args])
+        extra_attrs["data_type"] = "yt_data_container"
+        extra_attrs["container_type"] = self._type_name
+        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
+                      extra_attrs=extra_attrs)
+
+        return filename
+        
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to
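to_dataset is the user-facing entry point built on to_yt_dataset; a short round trip, assuming ds is an already loaded dataset (the dataset path below is illustrative):

    import yt

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    sph = ds.sphere([0.5, 0.5, 0.5], (10, "Mpc"))
    fn = sph.to_dataset(fields=["density", "temperature"])
    ds2 = yt.load(fn)                 # reloads via the ytdata frontend
    print(ds2.all_data()["grid", "density"])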

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,6 +39,7 @@
     'sdf',
     'stream',
     'tipsy',
+    'ytdata',
 ]
 
 class _frontend_container:

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -37,7 +37,7 @@
     mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-    
+
 class EnzoSimulation(SimulationTimeSeries):
     r"""
     Initialize an Enzo Simulation object.
@@ -98,6 +98,8 @@
             self.length_unit = self.quan(self.box_size, "Mpccm / h",
                                          registry=self.unit_registry)
             self.box_size = self.length_unit
+            self.domain_left_edge = self.domain_left_edge * self.length_unit
+            self.domain_right_edge = self.domain_right_edge * self.length_unit
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
@@ -130,21 +132,21 @@
             datasets for time series.
             Default: True.
         initial_time : tuple of type (float, str)
-            The earliest time for outputs to be included.  This should be 
+            The earliest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (5.0, "Gyr").  If None, the initial time of the 
-            simulation is used.  This can be used in combination with 
+            For example, (5.0, "Gyr").  If None, the initial time of the
+            simulation is used.  This can be used in combination with
             either final_time or final_redshift.
             Default: None.
         final_time : tuple of type (float, str)
-            The latest time for outputs to be included.  This should be 
+            The latest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (13.7, "Gyr"). If None, the final time of the 
-            simulation is used.  This can be used in combination with either 
+            For example, (13.7, "Gyr"). If None, the final time of the
+            simulation is used.  This can be used in combination with either
             initial_time or initial_redshift.
             Default: None.
         times : tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
+            A list of times for which outputs will be found and the units
             of those values.  For example, ([0, 1, 2, 3], "s").
             Default: None.
         initial_redshift : float
@@ -192,8 +194,8 @@
 
         >>> import yt
         >>> es = yt.simulation("my_simulation.par", "Enzo")
-        
-        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
+
+        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
                                redshift_data=False)
 
         >>> es.get_time_series(redshifts=[3, 2, 1, 0])
@@ -301,7 +303,7 @@
         for output in my_outputs:
             if os.path.exists(output['filename']):
                 init_outputs.append(output['filename'])
-            
+
         DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
                                 setup_function=setup_function)
         mylog.info("%d outputs loaded into time series.", len(init_outputs))
@@ -583,11 +585,11 @@
         Check a list of files to see if they are valid datasets.
         """
 
-        only_on_root(mylog.info, "Checking %d potential outputs.", 
+        only_on_root(mylog.info, "Checking %d potential outputs.",
                      len(potential_outputs))
 
         my_outputs = {}
-        for my_storage, output in parallel_objects(potential_outputs, 
+        for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -641,6 +643,6 @@
         self.initial_redshift = initial_redshift
         # time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
         # rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
-        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 * 
+        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 *
                            (1 + self.initial_redshift)**3)**-0.5).in_units("s")
         self.time_unit.units.registry = self.unit_registry

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -30,6 +30,7 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("tipsy")
+    config.add_subpackage("ytdata")
     config.add_subpackage("art/tests")
     config.add_subpackage("artio/tests")
     config.add_subpackage("athena/tests")

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/__init__.py
--- /dev/null
+++ b/yt/frontends/ytdata/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for ytData frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/api.py
--- /dev/null
+++ b/yt/frontends/ytdata/api.py
@@ -0,0 +1,27 @@
+"""
+API for ytData frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    YTDataDataset
+
+from .io import \
+    IOHandlerYTDataHDF5
+
+from .fields import \
+    YTDataFieldInfo
+
+from .utilities import \
+    to_yt_dataset

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/data_structures.py
--- /dev/null
+++ b/yt/frontends/ytdata/data_structures.py
@@ -0,0 +1,99 @@
+"""
+Data structures for YTData frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+
+from .fields import \
+    YTDataFieldInfo
+
+from yt.utilities.cosmology import Cosmology
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+    
+class YTDataHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+
+        super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
+    
+class YTDataDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = YTDataHDF5File
+    _field_info_class = YTDataFieldInfo
+    _suffix = ".h5"
+
+    def __init__(self, filename, dataset_type="ytdata_hdf5",
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(YTDataDataset, self).__init__(filename, dataset_type,
+                                                 units_override=units_override)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, hvals[attr])
+        self.periodicity = (True, True, True)
+        self.particle_types = ("grid")
+        self.particle_types_raw = ("grid")
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.parameters.update(hvals)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.velocity_unit = self.quan(1.0, "cm / s")
+        self.time_unit = self.quan(1.0, "s")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            if "data_type" in f.attrs and \
+              f.attrs["data_type"] in ["light_ray",
+                                       "yt_array_data",
+                                       "yt_data_container"]:
+                return True
+        return False

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/fields.py
--- /dev/null
+++ b/yt/frontends/ytdata/fields.py
@@ -0,0 +1,36 @@
+"""
+YTData-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class YTDataFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("x", (p_units, ["particle_position_x"], None)),
+        ("y", (p_units, ["particle_position_y"], None)),
+        ("z", (p_units, ["particle_position_z"], None)),
+    )

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/io.py
--- /dev/null
+++ b/yt/frontends/ytdata/io.py
@@ -0,0 +1,126 @@
+"""
+YTData data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.utilities.exceptions import *
+from yt.funcs import \
+    mylog
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
+
+from yt.geometry.oct_container import \
+    _ORDER_MAX
+
+class IOHandlerYTDataHDF5(BaseIOHandler):
+    _dataset_type = "ytdata_hdf5"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support grid reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "grid")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            all_count = self._count_particles(data_file)
+            pcount = all_count["grid"]
+            with h5py.File(data_file.filename, "r") as f:
+                x = f["grid"]['x'].value.astype("float64")
+                y = f["grid"]['y'].value.astype("float64")
+                z = f["grid"]['z'].value.astype("float64")
+                yield "grid", (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        # Only support halo reading for now.
+        assert(len(ptf) == 1)
+        assert(list(ptf.keys())[0] == "grid")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            all_count = self._count_particles(data_file)
+            pcount = all_count["grid"]
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = f["grid"]['x'].value.astype("float64")
+                    y = f["grid"]['y'].value.astype("float64")
+                    z = f["grid"]['z'].value.astype("float64")
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f["grid"][field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        all_count = self._count_particles(data_file)
+        pcount = all_count["grid"]
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            if not f["grid"].keys(): return None
+            pos = np.empty((pcount, 3), dtype="float64")
+            pos = data_file.ds.arr(pos, "code_length")
+            dx = np.finfo(f["grid"]['x'].dtype).eps
+            dx = 2.0*self.ds.quan(dx, "code_length")
+            pos[:,0] = f["grid"]["x"].value
+            pos[:,1] = f["grid"]["y"].value
+            pos[:,2] = f["grid"]["z"].value
+            # These are 32 bit numbers, so we give a little lee-way.
+            # Otherwise, for big sets of particles, we often will bump into the
+            # domain edges.  This helps alleviate that.
+            np.clip(pos, self.ds.domain_left_edge + dx,
+                         self.ds.domain_right_edge - dx, pos)
+            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.ds.domain_left_edge,
+                                       self.ds.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+pos.shape[0]] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.ds.domain_left_edge,
+                data_file.ds.domain_right_edge)
+        return morton
+
+    def _count_particles(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            return {"grid": f["grid"].attrs["num_elements"]}
+
+    def _identify_fields(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            fields = [("grid", str(field)) for field in f["grid"]]
+            units = dict([(("grid", str(field)), 
+                           f["grid"][field].attrs["units"]) for field in f["grid"]])
+        return fields, units

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/setup.py
--- /dev/null
+++ b/yt/frontends/ytdata/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ytdata', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/frontends/ytdata/utilities.py
--- /dev/null
+++ b/yt/frontends/ytdata/utilities.py
@@ -0,0 +1,179 @@
+"""
+Utility functions for ytdata frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.funcs import \
+    mylog
+from yt.units.yt_array import \
+    YTArray
+
+def to_yt_dataset(ds, filename, data, field_types=None,
+                  extra_attrs=None):
+    r"""Export a set of field arrays to a reloadable yt dataset.
+
+    This function can be used to create a yt loadable dataset from a 
+    set of arrays.  The field arrays can either be associated with a 
+    loaded dataset or, if not, a dictionary of dataset attributes can
+    be provided that will be used as metadata for the new dataset.  The 
+    resulting dataset can be reloaded as a yt dataset.
+
+    Parameters
+    ----------
+    ds : dataset
+        The dataset associated with the fields.  
+    filename : str
+        The name of the file to be written.
+    data : dict
+        A dictionary of field arrays to be saved.
+    field_types : dict, optional
+        A dictionary mapping each field name to the group under which
+        it is written ("grid" or a particle type).  If None, all
+        fields are written to a group named "data".
+    extra_attrs : dict, optional
+        A dictionary of additional attributes to be saved.
+
+    Returns
+    -------
+    filename : str
+        The name of the file that has been created.
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+    >>> sphere_density = sphere["density"]
+    >>> region = ds.box([0.]*3, [0.25]*3)
+    >>> region_density = region["density"]
+    >>> data = {}
+    >>> data["sphere_density"] = sphere_density
+    >>> data["region_density"] = region_density
+    >>> to_yt_dataset(ds, "density_data.h5", data)
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> from yt.units.yt_array import YTArray, YTQuantity
+    >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
+    ...         "temperature": YTArray(np.random.random(10), "K")}
+    >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
+    ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
+    ...            "current_time": YTQuantity(10, "Myr")}
+    >>> to_yt_dataset(ds_data, "random_data.h5", data)
+    
+    """
+
+    mylog.info("Saving field data to yt dataset: %s." % filename)
+
+    if extra_attrs is None: extra_attrs = {}
+    base_attrs  = ["domain_left_edge", "domain_right_edge",
+                   "current_redshift", "current_time",
+                   "domain_dimensions", "periodicity",
+                   "cosmological_simulation", "omega_lambda",
+                   "omega_matter", "hubble_constant"]
+
+    fh = h5py.File(filename, "w")
+    for attr in base_attrs:
+        if isinstance(ds, dict):
+            my_val = ds.get(attr, None)
+        else:
+            my_val = getattr(ds, attr, None)
+        if my_val is None:
+            mylog.warn("Skipping %s attribute, this may be just fine." % attr)
+            continue
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    for attr in extra_attrs:
+        my_val = extra_attrs[attr]
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    if "data_type" not in extra_attrs:
+        fh.attrs["data_type"] = "yt_array_data"
+    for field in data:
+        if field_types is None:
+            field_type = "data"
+        else:
+            field_type = field_types[field]
+        if field_type not in fh:
+            fh.create_group(field_type)
+        
+        # for now, let's avoid writing "code" units
+        if hasattr(field, "units"):
+            data[field].convert_to_cgs()
+        dataset = _yt_array_hdf5(fh[field_type], field, data[field])
+    fh.close()
+
+def _hdf5_yt_array(fh, field, ds=None):
+    r"""Load an hdf5 dataset as a YTArray.
+
+    Reads in a dataset from an open hdf5 file or group and uses the
+    "units" attribute, if it exists, to apply units.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group in which the dataset exists.
+    field : str
+        The name of the field to be loaded.
+    ds : yt Dataset
+        If not None, the unit_registry of the dataset
+        is used to apply units.
+
+    Returns
+    -------
+    A YTArray of the requested field.
+    
+    """
+    
+    if ds is None:
+        new_arr = YTArray
+    else:
+        new_arr = ds.arr
+    units = ""
+    if "units" in fh[field].attrs:
+        units = fh[field].attrs["units"]
+    if units == "dimensionless": units = ""
+    return new_arr(fh[field].value, units)
+
+def _yt_array_hdf5(fh, field, data):
+    r"""Save a YTArray to an open hdf5 file or group.
+
+    Save a YTArray to an open hdf5 file or group, and save the 
+    units to a "units" attribute.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group to which the data will be written.
+    field : str
+        The name of the field to be saved.
+    data : YTArray
+        The data array to be saved.
+
+    Returns
+    -------
+    dataset : hdf5 dataset
+        The created hdf5 dataset.
+    
+    """
+
+    dataset = fh.create_dataset(str(field), data=data)
+    units = ""
+    if isinstance(data, YTArray):
+        units = str(data.units)
+    dataset.attrs["units"] = units
+    return dataset

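The two private helpers above, _yt_array_hdf5 and _hdf5_yt_array,
round-trip an array and its units through HDF5.  A minimal sketch of
that round trip, assuming only the module layout introduced in this
changeset:

    import h5py
    import numpy as np
    from yt.units.yt_array import YTArray
    from yt.frontends.ytdata.utilities import \
        _hdf5_yt_array, _yt_array_hdf5

    data = YTArray(np.arange(10.0), "g/cm**3")
    with h5py.File("roundtrip.h5", "w") as fh:
        _yt_array_hdf5(fh, "density", data)       # writes a "units" attr
    with h5py.File("roundtrip.h5", "r") as fh:
        restored = _hdf5_yt_array(fh, "density")  # reapplies the units
    assert str(restored.units) == "g/cm**3"
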
diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import errno
 from yt.extern.six import string_types
 import time, types, signal, inspect, traceback, sys, pdb, os, re
 import contextlib
@@ -672,6 +673,57 @@
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
 
+def get_output_filename(name, keyword, suffix):
+    r"""Return an appropriate filename for output.
+
+    With a name provided by the user, this will decide how to 
+    appropriately name the output file by the following rules:
+    1. if name is None, the filename will be the keyword plus 
+       the suffix.
+    2. if name ends with "/", assume name is a directory and 
+       the file will be named name/(keyword+suffix).  If the
+       directory does not exist, first try to create it and
+       raise an exception if an error occurs.
+    3. if name does not end in the suffix, add the suffix.
+    
+    Parameters
+    ----------
+    name : str
+        A filename given by the user.
+    keyword : str
+        A default filename prefix if name is None.
+    suffix : str
+        Suffix that must appear at end of the filename.
+        This will be added if not present.
+
+    Examples
+    --------
+
+    >>> print get_output_filename(None, "Projection_x", ".png")
+    Projection_x.png
+    >>> print get_output_filename("my_file", "Projection_x", ".png")
+    my_file.png
+    >>> print get_output_filename("my_file/", "Projection_x", ".png")
+    my_file/Projection_x.png
+    
+    """
+    if name is None:
+        name = keyword
+    name = os.path.expanduser(name)
+    if name[-1] == os.sep and not os.path.isdir(name):
+        try:
+            os.mkdir(name)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+    if os.path.isdir(name):
+        name = os.path.join(name, keyword)
+    if not name.endswith(suffix):
+        name += suffix
+    return name
+
 def ensure_dir_exists(path):
     r"""Create all directories in path recursively in a parallel safe manner"""
     my_dir = os.path.dirname(path)

diff -r a13764691cbffd5903c7d3972a18bfd04c89bc7e -r 461dea962415178dff99500c9abf99b2f0c71035 yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -27,6 +27,8 @@
     # lowest class
     if len(candidates) == 1:
         return candidates
+    elif len(candidates) == 0:
+        return []
 
     mros = [inspect.getmro(c) for c in candidates]
 


https://bitbucket.org/yt_analysis/yt/commits/7796cc7bf496/
Changeset:   7796cc7bf496
Branch:      yt
User:        brittonsmith
Date:        2015-08-22 17:25:30+00:00
Summary:     Stripping field types from dataset names.
Affected #:  1 file

diff -r 461dea962415178dff99500c9abf99b2f0c71035 -r 7796cc7bf496a999311d33b738d4caf8c5b1a93e yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -110,11 +110,14 @@
             field_type = field_types[field]
         if field_type not in fh:
             fh.create_group(field_type)
-        
         # for now, let's avoid writing "code" units
-        if hasattr(field, "units"):
+        if hasattr(data[field], "units"):
             data[field].convert_to_cgs()
-        dataset = _yt_array_hdf5(fh[field_type], field, data[field])
+        if isinstance(field, tuple):
+            field_name = field[1]
+        else:
+            field_name = field
+        dataset = _yt_array_hdf5(fh[field_type], field_name, data[field])
     fh.close()
 
 def _hdf5_yt_array(fh, field, ds=None):

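The point of the change: field keys in yt are often (field type, field
name) tuples, and only the name part is a valid HDF5 dataset name.
Illustratively:

    # the name-stripping logic added above, in isolation
    field = ("gas", "density")
    field_name = field[1] if isinstance(field, tuple) else field
    # field_name == "density"; a plain string key passes through unchanged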

https://bitbucket.org/yt_analysis/yt/commits/2803b19a9179/
Changeset:   2803b19a9179
Branch:      yt
User:        brittonsmith
Date:        2015-08-22 17:35:18+00:00
Summary:     Writing out num_elements attribute.
Affected #:  1 file

diff -r 7796cc7bf496a999311d33b738d4caf8c5b1a93e -r 2803b19a917966a55d0ce2630034d0e4b0c016c1 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -96,6 +96,7 @@
         if hasattr(my_val, "units"):
             my_val = my_val.in_cgs()
         fh.attrs[attr] = my_val
+
     for attr in extra_attrs:
         my_val = extra_attrs[attr]
         if hasattr(my_val, "units"):
@@ -103,6 +104,7 @@
         fh.attrs[attr] = my_val
     if "data_type" not in extra_attrs:
         fh.attrs["data_type"] = "yt_array_data"
+
     for field in data:
         if field_types is None:
             field_type = "data"
@@ -118,6 +120,13 @@
         else:
             field_name = field
         dataset = _yt_array_hdf5(fh[field_type], field_name, data[field])
+        if "num_elements" in fh[field_type].attrs:
+            if fh[field_type].attrs["num_elements"] != data[field].size:
+                mylog.warn(
+                    "Datasets in %s group have different sizes." % fh[field_type] +
+                    "  This will probably not work right.")
+        else:
+            fh[field_type].attrs["num_elements"] = data[field].size
     fh.close()
 
 def _hdf5_yt_array(fh, field, ds=None):


https://bitbucket.org/yt_analysis/yt/commits/8d68343b1ac9/
Changeset:   8d68343b1ac9
Branch:      yt
User:        brittonsmith
Date:        2015-08-22 18:41:30+00:00
Summary:     Add dx fields.
Affected #:  1 file

diff -r 2803b19a917966a55d0ce2630034d0e4b0c016c1 -r 8d68343b1ac93f378635c3ff1b40c2935c309d6a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -542,6 +542,11 @@
                     data_fields.append(g_field)
                     ftypes[g_field] = "grid"
                     data[g_field] = self[g_field]
+                g_field = ("index", "d" + ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
                     
         extra_attrs = dict([(arg, getattr(self, arg, None))
                             for arg in self._con_args])


https://bitbucket.org/yt_analysis/yt/commits/b92687d7a840/
Changeset:   b92687d7a840
Branch:      yt
User:        brittonsmith
Date:        2015-08-22 22:19:47+00:00
Summary:     Append _particles to all particle field types so we can still have an "all" particle union.
Affected #:  1 file

diff -r 8d68343b1ac93f378635c3ff1b40c2935c309d6a -r b92687d7a840eb4b7dd7c83a5a4fcfb996e1d2ab yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -522,7 +522,7 @@
             if self.ds.field_info[field].particle_type:
                 if field[0] not in ptypes:
                     ptypes.append(field[0])
-                ftypes[field] = field[0]
+                ftypes[field] = "%s_particles" % field[0]
                 need_particle_fields = True
             else:
                 ftypes[field] = "grid"
@@ -534,7 +534,7 @@
                     p_field = (ptype, "particle_position_%s" % ax)
                     if p_field in self.ds.field_info and p_field not in data:
                         data_fields.append(field)
-                        ftypes[p_field] = p_field[0]
+                        ftypes[p_field] = "%s_particles" % p_field[0]
                         data[p_field] = self[p_field]
             if need_grid_fields:
                 g_field = ("index", ax)
@@ -547,7 +547,7 @@
                     data_fields.append(g_field)
                     ftypes[g_field] = "grid"
                     data[g_field] = self[g_field]
-                    
+
         extra_attrs = dict([(arg, getattr(self, arg, None))
                             for arg in self._con_args])
         extra_attrs["data_type"] = "yt_data_container"

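To make the motivation concrete: writing particle fields under a
"<ptype>_particles" group keeps the bare particle type names free, so
yt can still construct its usual "all" particle union on reload.  A
small illustration of the renaming:

    # a field keyed ("io", "particle_mass") is now written under the
    # HDF5 group "io_particles" rather than "io"
    field = ("io", "particle_mass")
    ftype = "%s_particles" % field[0]   # -> "io_particles"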

https://bitbucket.org/yt_analysis/yt/commits/3df7eacb2905/
Changeset:   3df7eacb2905
Branch:      yt
User:        brittonsmith
Date:        2015-08-23 12:05:24+00:00
Summary:     Generalizing index initialization and particle count to multiple field types.
Affected #:  1 file

diff -r b92687d7a840eb4b7dd7c83a5a4fcfb996e1d2ab -r 3df7eacb290577aa81e41732939a29631ca0321b yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -82,41 +82,49 @@
 
     def _initialize_index(self, data_file, regions):
         all_count = self._count_particles(data_file)
-        pcount = all_count["grid"]
+        pcount = sum(all_count.values())
         morton = np.empty(pcount, dtype='uint64')
         mylog.debug("Initializing index % 5i (% 7i particles)",
                     data_file.file_id, pcount)
         ind = 0
         with h5py.File(data_file.filename, "r") as f:
-            if not f["grid"].keys(): return None
-            pos = np.empty((pcount, 3), dtype="float64")
-            pos = data_file.ds.arr(pos, "code_length")
-            dx = np.finfo(f["grid"]['x'].dtype).eps
-            dx = 2.0*self.ds.quan(dx, "code_length")
-            pos[:,0] = f["grid"]["x"].value
-            pos[:,1] = f["grid"]["y"].value
-            pos[:,2] = f["grid"]["z"].value
-            # These are 32 bit numbers, so we give a little lee-way.
-            # Otherwise, for big sets of particles, we often will bump into the
-            # domain edges.  This helps alleviate that.
-            np.clip(pos, self.ds.domain_left_edge + dx,
-                         self.ds.domain_right_edge - dx, pos)
-            if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
-               np.any(pos.max(axis=0) > self.ds.domain_right_edge):
-                raise YTDomainOverflow(pos.min(axis=0),
-                                       pos.max(axis=0),
-                                       self.ds.domain_left_edge,
-                                       self.ds.domain_right_edge)
-            regions.add_data_file(pos, data_file.file_id)
-            morton[ind:ind+pos.shape[0]] = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.ds.domain_left_edge,
-                data_file.ds.domain_right_edge)
+            for ptype in all_count:
+                if not ptype in f or all_count[ptype] == 0: continue
+                pos = np.empty((all_count[ptype], 3), dtype="float64")
+                pos = data_file.ds.arr(pos, "code_length")
+                if ptype == "grid":
+                    pos_name = ""
+                    dx = f["grid"]["dx"].value.min()
+                else:
+                    pos_name = "particle_position_"
+                    dx = 2. * np.finfo(f[ptype][pos_name + "x"].dtype).eps
+                dx = self.ds.quan(dx, "code_length")
+                pos[:,0] = f[ptype][pos_name + "x"].value
+                pos[:,1] = f[ptype][pos_name + "y"].value
+                pos[:,2] = f[ptype][pos_name + "z"].value
+                # These are 32 bit numbers, so we give a little lee-way.
+                # Otherwise, for big sets of particles, we often will bump into the
+                # domain edges.  This helps alleviate that.
+                np.clip(pos, self.ds.domain_left_edge + dx,
+                             self.ds.domain_right_edge - dx, pos)
+                if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0),
+                                           pos.max(axis=0),
+                                           self.ds.domain_left_edge,
+                                           self.ds.domain_right_edge)
+                regions.add_data_file(pos, data_file.file_id)
+                morton[ind:ind+pos.shape[0]] = compute_morton(
+                    pos[:,0], pos[:,1], pos[:,2],
+                    data_file.ds.domain_left_edge,
+                    data_file.ds.domain_right_edge)
+                ind += pos.shape[0]
         return morton
 
     def _count_particles(self, data_file):
         with h5py.File(data_file.filename, "r") as f:
-            return {"grid": f["grid"].attrs["num_elements"]}
+            return dict([(group, f[group].attrs["num_elements"])
+                         for group in f])
 
     def _identify_fields(self, data_file):
         with h5py.File(data_file.filename, "r") as f:

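compute_morton is yt's optimized Cython routine for the Morton
(Z-order) keys that back the particle index built above.  For intuition
only, a toy NumPy version that quantizes positions and interleaves the
bits of the three coordinates might look like this (not the actual yt
implementation):

    import numpy as np

    def toy_morton(pos, left_edge, right_edge, bits=21):
        # quantize each coordinate into 2**bits cells per axis
        scaled = (pos - left_edge) / (right_edge - left_edge)
        ijk = (scaled * (1 << bits)).astype(np.uint64)
        key = np.zeros(pos.shape[0], dtype=np.uint64)
        # interleave bits: x gets bit 0, y bit 1, z bit 2, and so on
        for b in range(bits):
            for ax in range(3):
                bit = (ijk[:, ax] >> np.uint64(b)) & np.uint64(1)
                key |= bit << np.uint64(3 * b + ax)
        return key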

https://bitbucket.org/yt_analysis/yt/commits/3498ae645b00/
Changeset:   3498ae645b00
Branch:      yt
User:        brittonsmith
Date:        2015-08-23 12:11:35+00:00
Summary:     Fixing up field list.
Affected #:  1 file

diff -r 3df7eacb290577aa81e41732939a29631ca0321b -r 3498ae645b00ea1e0610ef2ef352b3126cdebea8 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -127,8 +127,12 @@
                          for group in f])
 
     def _identify_fields(self, data_file):
+        fields = []
+        units = {}
         with h5py.File(data_file.filename, "r") as f:
-            fields = [("grid", str(field)) for field in f["grid"]]
-            units = dict([(("grid", str(field)), 
-                           f["grid"][field].attrs["units"]) for field in f["grid"]])
+            for ptype in f:
+                fields.extend([(ptype, str(field)) for field in f[ptype]])
+                units.update(dict([((ptype, str(field)), 
+                                    f[ptype][field].attrs["units"])
+                                   for field in f[ptype]]))
         return fields, units


https://bitbucket.org/yt_analysis/yt/commits/d568566b8e3f/
Changeset:   d568566b8e3f
Branch:      yt
User:        brittonsmith
Date:        2015-08-23 12:19:14+00:00
Summary:     Fixing up _read_particle_coords.
Affected #:  1 file

diff -r 3498ae645b00ea1e0610ef2ef352b3126cdebea8 -r d568566b8e3f645d0c747c3dab79bd1f729a2bfd yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -40,20 +40,22 @@
         # This will read chunks and yield the results.
         chunks = list(chunks)
         data_files = set([])
-        # Only support halo reading for now.
-        assert(len(ptf) == 1)
-        assert(list(ptf.keys())[0] == "grid")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
-            all_count = self._count_particles(data_file)
-            pcount = all_count["grid"]
             with h5py.File(data_file.filename, "r") as f:
-                x = f["grid"]['x'].value.astype("float64")
-                y = f["grid"]['y'].value.astype("float64")
-                z = f["grid"]['z'].value.astype("float64")
-                yield "grid", (x, y, z)
+                for ptype, field_list in sorted(ptf.items()):
+                    pcount = data_file.total_particles[ptype]
+                    if pcount == 0: continue
+                    if ptype == "grid":
+                        pos_name = ""
+                    else:
+                        pos_name = "particle_position_"
+                    x = f[ptype][pos_name + "x"].value.astype("float64")
+                    y = f[ptype][pos_name + "y"].value.astype("float64")
+                    z = f[ptype][pos_name + "z"].value.astype("float64")
+                    yield ptype, (x, y, z)
 
     def _read_particle_fields(self, chunks, ptf, selector):
         # Now we have all the sizes, and we can allocate


https://bitbucket.org/yt_analysis/yt/commits/dfef4088233f/
Changeset:   dfef4088233f
Branch:      yt
User:        brittonsmith
Date:        2015-08-23 12:29:42+00:00
Summary:     Fixing up field reading.
Affected #:  1 file

diff -r d568566b8e3f645d0c747c3dab79bd1f729a2bfd -r dfef4088233f25b25b5e790c63177070ea6e9155 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -48,22 +48,15 @@
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
                     if pcount == 0: continue
-                    if ptype == "grid":
-                        pos_name = ""
-                    else:
-                        pos_name = "particle_position_"
-                    x = f[ptype][pos_name + "x"].value.astype("float64")
-                    y = f[ptype][pos_name + "y"].value.astype("float64")
-                    z = f[ptype][pos_name + "z"].value.astype("float64")
+                    x = _get_position_array(ptype, f, "x")
+                    y = _get_position_array(ptype, f, "y")
+                    z = _get_position_array(ptype, f, "z")
                     yield ptype, (x, y, z)
 
     def _read_particle_fields(self, chunks, ptf, selector):
         # Now we have all the sizes, and we can allocate
         chunks = list(chunks)
         data_files = set([])
-        # Only support halo reading for now.
-        assert(len(ptf) == 1)
-        assert(list(ptf.keys())[0] == "grid")
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
@@ -72,14 +65,14 @@
             pcount = all_count["grid"]
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
-                    x = f["grid"]['x'].value.astype("float64")
-                    y = f["grid"]['y'].value.astype("float64")
-                    z = f["grid"]['z'].value.astype("float64")
+                    x = _get_position_array(ptype, f, "x")
+                    y = _get_position_array(ptype, f, "y")
+                    z = _get_position_array(ptype, f, "z")
                     mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:
-                        data = f["grid"][field][mask].astype("float64")
+                        data = f[ptype][field][mask].astype("float64")
                         yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):
@@ -95,15 +88,13 @@
                 pos = np.empty((all_count[ptype], 3), dtype="float64")
                 pos = data_file.ds.arr(pos, "code_length")
                 if ptype == "grid":
-                    pos_name = ""
                     dx = f["grid"]["dx"].value.min()
                 else:
-                    pos_name = "particle_position_"
-                    dx = 2. * np.finfo(f[ptype][pos_name + "x"].dtype).eps
+                    dx = 2. * np.finfo(f[ptype]["particle_position_x"].dtype).eps
                 dx = self.ds.quan(dx, "code_length")
-                pos[:,0] = f[ptype][pos_name + "x"].value
-                pos[:,1] = f[ptype][pos_name + "y"].value
-                pos[:,2] = f[ptype][pos_name + "z"].value
+                pos[:,0] = _get_position_array(ptype, f, "x")
+                pos[:,1] = _get_position_array(ptype, f, "y")
+                pos[:,2] = _get_position_array(ptype, f, "z")
                 # These are 32 bit numbers, so we give a little lee-way.
                 # Otherwise, for big sets of particles, we often will bump into the
                 # domain edges.  This helps alleviate that.
@@ -138,3 +129,10 @@
                                     f[ptype][field].attrs["units"])
                                    for field in f[ptype]]))
         return fields, units
+
+def _get_position_array(ptype, f, ax):
+    if ptype == "grid":
+        pos_name = ""
+    else:
+        pos_name = "particle_position_"
+    return f[ptype][pos_name + ax].value.astype("float64")

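The new helper folds the grid/particle naming difference into one
place.  A hedged usage sketch (the filename and group names here are
hypothetical):

    import h5py
    from yt.frontends.ytdata.io import _get_position_array

    # assumes a file written by to_yt_dataset with a "grid" group and
    # an "io_particles" group
    with h5py.File("container.h5", "r") as f:
        x_grid = _get_position_array("grid", f, "x")
        # reads f["grid"]["x"]
        x_part = _get_position_array("io_particles", f, "x")
        # reads f["io_particles"]["particle_position_x"]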

https://bitbucket.org/yt_analysis/yt/commits/199921b582af/
Changeset:   199921b582af
Branch:      yt
User:        brittonsmith
Date:        2015-08-23 12:48:20+00:00
Summary:     Fixing up field aliases.
Affected #:  1 file

diff -r dfef4088233f25b25b5e790c63177070ea6e9155 -r 199921b582af86b58bae4146a5fe286dc9e8c62d yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -33,4 +33,17 @@
         ("x", (p_units, ["particle_position_x"], None)),
         ("y", (p_units, ["particle_position_y"], None)),
         ("z", (p_units, ["particle_position_z"], None)),
+        ("velocity_x", (v_units, ["particle_velocity_x"], None)),
+        ("velocity_y", (v_units, ["particle_velocity_y"], None)),
+        ("velocity_z", (v_units, ["particle_velocity_z"], None)),
     )
+
+    # these are extra fields to be created for the "all" particle type
+    extra_union_fields = (
+        (p_units, "particle_position_x"),
+        (p_units, "particle_position_y"),
+        (p_units, "particle_position_z"),
+        (v_units, "particle_velocity_x"),
+        (v_units, "particle_velocity_y"),
+        (v_units, "particle_velocity_z"),
+    )

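For readers unfamiliar with FieldInfoContainer, each entry above
follows yt's (on-disk name, (units, aliases, display name)) convention;
one entry annotated:

    entry = ("velocity_x",              # dataset name in the file
             ("cm / s",                 # units (v_units)
              ["particle_velocity_x"],  # aliases yt will create
              None))                    # display name (None = default)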

https://bitbucket.org/yt_analysis/yt/commits/e486be6768ea/
Changeset:   e486be6768ea
Branch:      yt
User:        brittonsmith
Date:        2015-08-23 12:57:16+00:00
Summary:     Get particle types from file.
Affected #:  1 file

diff -r 199921b582af86b58bae4146a5fe286dc9e8c62d -r e486be6768ead01ebdfaee8aba27718ea8fce4c4 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -56,11 +56,13 @@
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(YTDataDataset, self).__init__(filename, dataset_type,
-                                                 units_override=units_override)
+                                            units_override=units_override)
 
     def _parse_parameter_file(self):
         with h5py.File(self.parameter_filename, "r") as f:
             hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+            self.particle_types_raw = tuple(f.keys())
+        self.particle_types = self.particle_types_raw
         self.dimensionality = 3
         self.refine_by = 2
         self.unique_identifier = \
@@ -68,14 +70,11 @@
         prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
         self.filename_template = self.parameter_filename
         self.file_count = 1
-
         for attr in ["cosmological_simulation", "current_time", "current_redshift",
                      "hubble_constant", "omega_matter", "omega_lambda",
                      "domain_left_edge", "domain_right_edge"]:
             setattr(self, attr, hvals[attr])
         self.periodicity = (True, True, True)
-        self.particle_types = ("grid")
-        self.particle_types_raw = ("grid")
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz


https://bitbucket.org/yt_analysis/yt/commits/8466e7e51cd9/
Changeset:   8466e7e51cd9
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 11:49:18+00:00
Summary:     Renaming ytdata classes to ytdatacontainer.
Affected #:  4 files

diff -r e486be6768ead01ebdfaee8aba27718ea8fce4c4 -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -15,13 +15,13 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-    YTDataDataset
+    YTDataContainerDataset
 
 from .io import \
-    IOHandlerYTDataHDF5
+    IOHandlerYTDataContainerHDF5
 
 from .fields import \
-    YTDataFieldInfo
+    YTDataContainerFieldInfo
 
 from .utilities import \
     to_yt_dataset

diff -r e486be6768ead01ebdfaee8aba27718ea8fce4c4 -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -24,7 +24,7 @@
 import os
 
 from .fields import \
-    YTDataFieldInfo
+    YTDataContainerFieldInfo
 
 from yt.utilities.cosmology import Cosmology
 from yt.geometry.particle_geometry_handler import \
@@ -45,17 +45,17 @@
 
         super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
     
-class YTDataDataset(Dataset):
+class YTDataContainerDataset(Dataset):
     _index_class = ParticleIndex
     _file_class = YTDataHDF5File
-    _field_info_class = YTDataFieldInfo
+    _field_info_class = YTDataContainerFieldInfo
     _suffix = ".h5"
 
-    def __init__(self, filename, dataset_type="ytdata_hdf5",
+    def __init__(self, filename, dataset_type="ytdatacontainer_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        super(YTDataDataset, self).__init__(filename, dataset_type,
+        super(YTDataContainerDataset, self).__init__(filename, dataset_type,
                                             units_override=units_override)
 
     def _parse_parameter_file(self):

diff -r e486be6768ead01ebdfaee8aba27718ea8fce4c4 -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -25,7 +25,7 @@
 v_units = "cm / s"
 r_units = "cm"
 
-class YTDataFieldInfo(FieldInfoContainer):
+class YTDataContainerFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )
 

diff -r e486be6768ead01ebdfaee8aba27718ea8fce4c4 -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -30,8 +30,8 @@
 from yt.geometry.oct_container import \
     _ORDER_MAX
 
-class IOHandlerYTDataHDF5(BaseIOHandler):
-    _dataset_type = "ytdata_hdf5"
+class IOHandlerYTDataContainerHDF5(BaseIOHandler):
+    _dataset_type = "ytdatacontainer_hdf5"
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError


https://bitbucket.org/yt_analysis/yt/commits/9f01448ab4f5/
Changeset:   9f01448ab4f5
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 13:18:56+00:00
Summary:     Starting ytgrid dataset.
Affected #:  3 files

diff -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 -r 9f01448ab4f59404ad2bc201d94833ec2ba38f83 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -15,13 +15,17 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-    YTDataContainerDataset
+    YTDataContainerDataset, \
+    YTGridDataset, \
+    YTGridHierarchy, \
+    YTGrid
 
 from .io import \
     IOHandlerYTDataContainerHDF5
 
 from .fields import \
-    YTDataContainerFieldInfo
+    YTDataContainerFieldInfo, \
+    YTGridFieldInfo
 
 from .utilities import \
     to_yt_dataset

diff -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 -r 9f01448ab4f59404ad2bc201d94833ec2ba38f83 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -24,19 +24,24 @@
 import os
 
 from .fields import \
-    YTDataContainerFieldInfo
+    YTDataContainerFieldInfo, \
+    YTGridFieldInfo
 
-from yt.utilities.cosmology import Cosmology
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.utilities.cosmology import Cosmology
 import yt.utilities.fortran_utils as fpu
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
-    
+
 class YTDataHDF5File(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         with h5py.File(filename, "r") as f:
@@ -44,7 +49,7 @@
                                for field in f.attrs.keys())
 
         super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
-    
+
 class YTDataContainerDataset(Dataset):
     _index_class = ParticleIndex
     _file_class = YTDataHDF5File
@@ -96,3 +101,35 @@
                                        "yt_data_container"]:
                 return True
         return False
+
+class YTGrid(AMRGridPatch):
+    pass
+
+class YTGridHierarchy(GridIndex):
+    grid = YTGrid
+
+class YTGridDataset(Dataset):
+    _index_class = YTGridHierarchy
+    _field_info_class = YTGridFieldInfo
+    _dataset_type = 'ytgridhdf5'
+    geometry = "cartesian"
+
+    def __init__(self, filename):
+        Dataset.__init__(self, filename, self._dataset_type)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            for attr, value in f.attrs.items():
+                setattr(self, attr, value)
+
+    def __repr__(self):
+        return "ytGrid: %s" % self.parameter_filename
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            if "data_type" in f.attrs and \
+              f.attrs["data_type"] in ["yt_grid_data"]:
+                return True
+        return False

diff -r 8466e7e51cd91398becd8dc0653dac0ea04b7ad7 -r 9f01448ab4f59404ad2bc201d94833ec2ba38f83 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -47,3 +47,10 @@
         (v_units, "particle_velocity_y"),
         (v_units, "particle_velocity_z"),
     )
+
+class YTGridFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+    )

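The _is_valid classmethod is what lets yt.load() route a file to the
new class.  A simplified sketch of the dispatch idea (yt's real logic
around output_type_registry is more involved):

    from yt.data_objects.static_output import output_type_registry

    def find_claimants(fn):
        # every Dataset subclass registers itself; load() asks each
        # candidate whether it recognizes the file
        return [name for name, cls in output_type_registry.items()
                if cls._is_valid(fn)]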

https://bitbucket.org/yt_analysis/yt/commits/8bed3073faf2/
Changeset:   8bed3073faf2
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 15:37:34+00:00
Summary:     ytGrid dataset now loads, does nothing.
Affected #:  1 file

diff -r 9f01448ab4f59404ad2bc201d94833ec2ba38f83 -r 8bed3073faf219d06d7452b6b96ea831f57b73d0 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -15,13 +15,13 @@
 #-----------------------------------------------------------------------------
 
 import h5py
+from numbers import \
+    Number as numeric_type
 import numpy as np
+import os
 import stat
+import time
 import weakref
-import struct
-import glob
-import time
-import os
 
 from .fields import \
     YTDataContainerFieldInfo, \
@@ -32,6 +32,9 @@
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
+from yt.extern.six import \
+    iteritems, \
+    string_types
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.geometry.particle_geometry_handler import \
@@ -108,6 +111,9 @@
 class YTGridHierarchy(GridIndex):
     grid = YTGrid
 
+    def _count_grids(self):
+        self.num_grids = 1
+
 class YTGridDataset(Dataset):
     _index_class = YTGridHierarchy
     _field_info_class = YTGridFieldInfo
@@ -118,13 +124,34 @@
         Dataset.__init__(self, filename, self._dataset_type)
 
     def _parse_parameter_file(self):
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = time.time()
         with h5py.File(self.parameter_filename, "r") as f:
             for attr, value in f.attrs.items():
                 setattr(self, attr, value)
-
+            
     def __repr__(self):
         return "ytGrid: %s" % self.parameter_filename
 
+    def _set_code_unit_attributes(self):
+        attrs = ('length_unit', 'mass_unit', 'time_unit',
+                 'velocity_unit', 'magnetic_unit')
+        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
+        base_units = np.ones(len(attrs))
+        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
+            if isinstance(unit, string_types):
+                uq = self.quan(1.0, unit)
+            elif isinstance(unit, numeric_type):
+                uq = self.quan(unit, cgs_unit)
+            elif isinstance(unit, YTQuantity):
+                uq = unit
+            elif isinstance(unit, tuple):
+                uq = self.quan(unit[0], unit[1])
+            else:
+                raise RuntimeError("%s (%s) is invalid." % (attr, unit))
+            setattr(self, attr, uq)
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False

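Since base_units is np.ones(...), every attribute above takes the
numeric branch and defaults to one CGS unit (e.g. length_unit becomes
1 cm).  The other branches anticipate overrides in the forms yt
conventionally accepts; illustrative inputs for each:

    examples = [
        "kpc",             # string        -> self.quan(1.0, "kpc")
        1.0,               # plain number  -> self.quan(1.0, cgs_unit)
        (3.086e21, "cm"),  # (value, unit) -> self.quan(3.086e21, "cm")
    ]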

https://bitbucket.org/yt_analysis/yt/commits/a03ab089b8d3/
Changeset:   a03ab089b8d3
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 16:08:56+00:00
Summary:     Updating domain dimensions for covering grid dimensions.
Affected #:  1 file

diff -r 8bed3073faf219d06d7452b6b96ea831f57b73d0 -r a03ab089b8d390058cbf50cfe8362641f8c14bbf yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -130,7 +130,18 @@
         with h5py.File(self.parameter_filename, "r") as f:
             for attr, value in f.attrs.items():
                 setattr(self, attr, value)
-            
+
+        # correct domain dimensions for the covering grid dimension
+        self.base_domain_left_edge = self.domain_left_edge
+        self.base_domain_right_edge = self.domain_right_edge
+        self.base_domain_dimensions = self.domain_dimensions
+        dx = (self.domain_right_edge - self.domain_left_edge) / \
+          (self.domain_dimensions * self.refine_by**self.level)
+        self.domain_left_edge = self.left_edge
+        self.domain_right_edge = self.domain_left_edge + \
+          self.ActiveDimensions * dx
+        self.domain_dimensions = self.ActiveDimensions
+
     def __repr__(self):
         return "ytGrid: %s" % self.parameter_filename
 

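To make the bookkeeping concrete, the arithmetic with hypothetical
numbers: a 32^3 base grid on [0, 1]^3 and a level-2 covering grid of
64^3 cells whose left edge sits at 0.25:

    import numpy as np
    base_dims = np.array([32, 32, 32])
    refine_by, level = 2, 2
    dx = (1.0 - 0.0) / (base_dims * refine_by**level)  # 1/128 per cell
    left_edge = np.array([0.25, 0.25, 0.25])
    active_dims = np.array([64, 64, 64])               # ActiveDimensions
    right_edge = left_edge + active_dims * dx          # 0.75 on each axis
    # the reloaded dataset spans [0.25, 0.75]^3 at 64^3 cells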

https://bitbucket.org/yt_analysis/yt/commits/f33694ee04a3/
Changeset:   f33694ee04a3
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 19:38:44+00:00
Summary:     Now we can create hierarchy and detect fields.
Affected #:  2 files

diff -r a03ab089b8d390058cbf50cfe8362641f8c14bbf -r f33694ee04a3738eb957efc4d29a10d7a6269ed5 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -39,6 +39,8 @@
     GridIndex
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
+from yt.utilities.logger import \
+    ytLogger as mylog
 from yt.utilities.cosmology import Cosmology
 import yt.utilities.fortran_utils as fpu
 from yt.units.yt_array import \
@@ -106,14 +108,69 @@
         return False
 
 class YTGrid(AMRGridPatch):
-    pass
+    _id_offset = 0
+    def __init__(self, id, index):
+        AMRGridPatch.__init__(self, id, filename=None, index=index)
+        self._children_ids = []
+        self._parent_id = -1
+        self.Level = 0
+        self.LeftEdge = self.index.ds.domain_left_edge
+        self.RightEdge = self.index.ds.domain_right_edge
+
+    @property
+    def Parent(self):
+        return None
+
+    @property
+    def Children(self):
+        return []
 
 class YTGridHierarchy(GridIndex):
     grid = YTGrid
 
+    def __init__(self, ds, dataset_type = None):
+        self.dataset_type = dataset_type
+        self.float_type = 'float64'
+        self.dataset = weakref.proxy(ds)  # weak ref avoids a reference cycle
+        self.directory = os.getcwd()
+        GridIndex.__init__(self, ds, dataset_type)
+
     def _count_grids(self):
         self.num_grids = 1
 
+    def _parse_index(self):
+        self.grid_dimensions[:] = self.ds.domain_dimensions
+        self.grid_left_edge[:] = self.ds.domain_left_edge
+        self.grid_right_edge[:] = self.ds.domain_right_edge
+        self.grid_levels[:] = np.zeros(self.num_grids)
+        self.grid_procs = np.zeros(self.num_grids)
+        self.grid_particle_count[:] = sum(self.ds.num_particles.values())
+        self.grids = []
+        # We enumerate, so it's 0-indexed id and 1-indexed pid
+        for id in range(self.num_grids):
+            self.grids.append(self.grid(id, self))
+            self.grids[id].Level = self.grid_levels[id, 0]
+        self.max_level = self.grid_levels.max()
+        temp_grids = np.empty(self.num_grids, dtype='object')
+        for i, grid in enumerate(self.grids):
+            grid.filename = None
+            grid._prepare_grid()
+            grid.proc_num = self.grid_procs[i]
+            temp_grids[i] = grid
+        self.grids = temp_grids
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._setup_dx()
+        self.max_level = self.grid_levels.max()
+
+    def _detect_output_fields(self):
+        self.field_list = []
+        with h5py.File(self.ds.parameter_filename, "r") as f:
+            for group in f:
+                self.field_list.extend([(str(group), str(field))
+                                        for field in f[group]])
+
 class YTGridDataset(Dataset):
     _index_class = YTGridHierarchy
     _field_info_class = YTGridFieldInfo
@@ -130,6 +187,9 @@
         with h5py.File(self.parameter_filename, "r") as f:
             for attr, value in f.attrs.items():
                 setattr(self, attr, value)
+            self.num_particles = \
+              dict([(group, f[group].attrs["num_elements"])
+                    for group in f if group != "grid"])
 
         # correct domain dimensions for the covering grid dimension
         self.base_domain_left_edge = self.domain_left_edge

diff -r a03ab089b8d390058cbf50cfe8362641f8c14bbf -r f33694ee04a3738eb957efc4d29a10d7a6269ed5 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -30,6 +30,11 @@
 from yt.geometry.oct_container import \
     _ORDER_MAX
 
+class IOHandlerYTGridHDF5(BaseIOHandler):
+    _dataset_type = "ytgridhdf5"
+    _base = slice(None)
+    _field_type = "float64"
+
 class IOHandlerYTDataContainerHDF5(BaseIOHandler):
     _dataset_type = "ytdatacontainer_hdf5"
 


https://bitbucket.org/yt_analysis/yt/commits/529fd7fb5f8d/
Changeset:   529fd7fb5f8d
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 23:15:51+00:00
Summary:     Fluid fields now work.
Affected #:  2 files

diff -r f33694ee04a3738eb957efc4d29a10d7a6269ed5 -r 529fd7fb5f8d3d5791f03a652a56c6a93b5c7391 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -146,14 +146,13 @@
         self.grid_procs = np.zeros(self.num_grids)
         self.grid_particle_count[:] = sum(self.ds.num_particles.values())
         self.grids = []
-        # We enumerate, so it's 0-indexed id and 1-indexed pid
         for id in range(self.num_grids):
             self.grids.append(self.grid(id, self))
             self.grids[id].Level = self.grid_levels[id, 0]
         self.max_level = self.grid_levels.max()
         temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
-            grid.filename = None
+            grid.filename = self.ds.parameter_filename
             grid._prepare_grid()
             grid.proc_num = self.grid_procs[i]
             temp_grids[i] = grid
@@ -166,16 +165,22 @@
 
     def _detect_output_fields(self):
         self.field_list = []
+        self.ds.field_units = self.ds.field_units or {}
         with h5py.File(self.ds.parameter_filename, "r") as f:
             for group in f:
-                self.field_list.extend([(str(group), str(field))
-                                        for field in f[group]])
+                for field in f[group]:
+                    field_name = (str(group), str(field))
+                    self.field_list.append(field_name)
+                    self.ds.field_units[field_name] = \
+                      f[group][field].attrs["units"]
 
 class YTGridDataset(Dataset):
     _index_class = YTGridHierarchy
     _field_info_class = YTGridFieldInfo
     _dataset_type = 'ytgridhdf5'
     geometry = "cartesian"
+    default_fluid_type = "grid"
+    fluid_types = ("grid", "gas", "deposit", "index")
 
     def __init__(self, filename):
         Dataset.__init__(self, filename, self._dataset_type)
@@ -190,6 +195,8 @@
             self.num_particles = \
               dict([(group, f[group].attrs["num_elements"])
                     for group in f if group != "grid"])
+        self.particle_types_raw = tuple(self.num_particles.keys())
+        self.particle_types = self.particle_types_raw
 
         # correct domain dimensions for the covering grid dimension
         self.base_domain_left_edge = self.domain_left_edge

diff -r f33694ee04a3738eb957efc4d29a10d7a6269ed5 -r 529fd7fb5f8d3d5791f03a652a56c6a93b5c7391 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -17,23 +17,86 @@
 import h5py
 import numpy as np
 
-from yt.utilities.exceptions import *
+from yt.extern.six import \
+    u, b, iteritems
 from yt.funcs import \
     mylog
-
+from yt.geometry.oct_container import \
+    _ORDER_MAX
+from yt.utilities.exceptions import \
+    YTDomainOverflow
 from yt.utilities.io_handler import \
     BaseIOHandler
-
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 
-from yt.geometry.oct_container import \
-    _ORDER_MAX
-
 class IOHandlerYTGridHDF5(BaseIOHandler):
     _dataset_type = "ytgridhdf5"
     _base = slice(None)
-    _field_type = "float64"
+    _field_dtype = "float64"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        # Now we have to do something unpleasant
+        chunks = list(chunks)
+        if selector.__class__.__name__ == "GridSelector":
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            g = chunks[0].objs[0]
+            if g.id in self._cached_fields:
+                gf = self._cached_fields[g.id]
+                rv.update(gf)
+            if len(rv) == len(fields): return rv
+            f = h5py.File(u(g.filename), "r")
+            gds = f["grid"]
+            for field in fields:
+                if field in rv:
+                    self._hits += 1
+                    continue
+                self._misses += 1
+                ftype, fname = field
+                rv[(ftype, fname)] = gds[fname].value
+            if self._cache_on:
+                # rv maps fields to arrays here, so cache them under the grid id
+                self._cached_fields.setdefault(g.id, {})
+                self._cached_fields[g.id].update(rv)
+            f.close()
+            return rv
+        if size is None:
+            size = sum((g.count(selector) for chunk in chunks
+                        for g in chunk.objs))
+        for field in fields:
+            ftype, fname = field
+            fsize = size
+            rv[field] = np.empty(fsize, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                   size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        h5_type = self._field_dtype
+        for chunk in chunks:
+            f = None
+            for g in chunk.objs:
+                if g.filename is None: continue
+                if f is None:
+                    f = h5py.File(g.filename, "r")
+                gf = self._cached_fields.get(g.id, {})
+                nd = 0
+                for field in fields:
+                    if field in gf:
+                        nd = g.select(selector, gf[field], rv[field], ind)
+                        self._hits += 1
+                        continue
+                    self._misses += 1
+                    ftype, fname = field
+                    data = f[ftype][fname].value.astype(self._field_dtype)
+                    if self._cache_on:
+                        self._cached_fields.setdefault(g.id, {})
+                        self._cached_fields[g.id][field] = data
+                    nd = g.select(selector, data, rv[field], ind) # caches
+                ind += nd
+            if f: f.close()
+        return rv
 
 class IOHandlerYTDataContainerHDF5(BaseIOHandler):
     _dataset_type = "ytdatacontainer_hdf5"

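The _hits/_misses counters and _cached_fields dictionary come from
BaseIOHandler.  The idea, in a self-contained sketch rather than yt's
actual code, is to key cached arrays by grid and field so that repeated
selections skip the HDF5 read:

    cache = {}   # stand-in for BaseIOHandler._cached_fields

    def read_field(f, grid_id, ftype, fname):
        key = (grid_id, (ftype, fname))
        if key in cache:              # a "hit": reuse the stored array
            return cache[key]
        data = f[ftype][fname][()]    # a "miss": read from the HDF5 file
        cache[key] = data
        return data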

https://bitbucket.org/yt_analysis/yt/commits/990ed76eb517/
Changeset:   990ed76eb517
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 23:33:35+00:00
Summary:     Adding _read_particle_coords.
Affected #:  1 file

diff -r 529fd7fb5f8d3d5791f03a652a56c6a93b5c7391 -r 990ed76eb5177f664dc2034107798ff8f707d864 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -98,6 +98,26 @@
             if f: f.close()
         return rv
 
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        for chunk in chunks:
+            f = None
+            for g in chunk.objs:
+                if g.filename is None: continue
+                if f is None:
+                    f = h5py.File(g.filename, "r")
+                if g.NumberOfParticles == 0:
+                    continue
+                for ptype, field_list in sorted(ptf.items()):
+                    pn = "particle_position_%s"
+                    x, y, z = (np.asarray(f[ptype][pn % ax].value, dtype="=f8")
+                               for ax in 'xyz')
+                    for field in field_list:
+                        if np.asarray(f[ptype][field]).ndim > 1:
+                            self._array_fields[field] = f[ptype][field].shape
+                    yield ptype, (x, y, z)
+            if f: f.close()
+
 class IOHandlerYTDataContainerHDF5(BaseIOHandler):
     _dataset_type = "ytdatacontainer_hdf5"
 


https://bitbucket.org/yt_analysis/yt/commits/04991cc2ff2f/
Changeset:   04991cc2ff2f
Branch:      yt
User:        brittonsmith
Date:        2015-08-24 23:37:35+00:00
Summary:     Particle fields now work.
Affected #:  1 file

diff -r 990ed76eb5177f664dc2034107798ff8f707d864 -r 04991cc2ff2f4c86d146de46055ba4df88f96280 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -118,6 +118,27 @@
                     yield ptype, (x, y, z)
             if f: f.close()
 
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        for chunk in chunks: # These should be organized by grid filename
+            f = None
+            for g in chunk.objs:
+                if g.filename is None: continue
+                if f is None:
+                    f = h5py.File(g.filename, "r")
+                if g.NumberOfParticles == 0:
+                    continue
+                for ptype, field_list in sorted(ptf.items()):
+                    pn = "particle_position_%s"
+                    x, y, z = (np.asarray(f[ptype][pn % ax].value, dtype="=f8")
+                               for ax in 'xyz')
+                    mask = selector.select_points(x, y, z, 0.0)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = np.asarray(f[ptype][field].value, "=f8")
+                        yield (ptype, field), data[mask]
+            if f: f.close()
+
 class IOHandlerYTDataContainerHDF5(BaseIOHandler):
     _dataset_type = "ytdatacontainer_hdf5"
 


https://bitbucket.org/yt_analysis/yt/commits/c2add0a382ba/
Changeset:   c2add0a382ba
Branch:      yt
User:        brittonsmith
Date:        2015-08-25 00:15:15+00:00
Summary:     Making sure gas fields alias grid fields.
Affected #:  2 files

diff -r 04991cc2ff2f4c86d146de46055ba4df88f96280 -r c2add0a382ba238c97a64a6656f571d182a6e6c1 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -366,7 +366,8 @@
         self.field_dependencies = {}
         self.derived_field_list = []
         self.filtered_particle_types = []
-        self.field_info = self._field_info_class(self, self.field_list)
+        self.field_info = self.field_info or \
+          self._field_info_class(self, self.field_list)
         self.coordinates.setup_fields(self.field_info)
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:

diff -r 04991cc2ff2f4c86d146de46055ba4df88f96280 -r c2add0a382ba238c97a64a6656f571d182a6e6c1 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -194,7 +194,7 @@
                 setattr(self, attr, value)
             self.num_particles = \
               dict([(group, f[group].attrs["num_elements"])
-                    for group in f if group != "grid"])
+                    for group in f if group != self.default_fluid_type])
         self.particle_types_raw = tuple(self.num_particles.keys())
         self.particle_types = self.particle_types_raw
 
@@ -212,6 +212,15 @@
     def __repr__(self):
         return "ytGrid: %s" % self.parameter_filename
 
+    def create_field_info(self):
+        self.field_info = self._field_info_class(self, self.field_list)
+        for ftype, field in self.field_list:
+            if ftype == self.default_fluid_type:
+                self.field_info.alias(
+                    ("gas", field),
+                    (self.default_fluid_type, field))
+        super(YTGridDataset, self).create_field_info()
+
     def _set_code_unit_attributes(self):
         attrs = ('length_unit', 'mass_unit', 'time_unit',
                  'velocity_unit', 'magnetic_unit')

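With the aliases in place, the on-disk "grid" fields are also reachable
under the conventional "gas" namespace once the file is reloaded.  A
usage sketch (the filename is hypothetical):

    import yt
    ds = yt.load("covering_grid.h5")   # a file saved by to_yt_dataset
    ad = ds.all_data()
    # both names resolve to the same on-disk data after the aliasing
    assert (ad["gas", "density"] == ad["grid", "density"]).all()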

https://bitbucket.org/yt_analysis/yt/commits/73dee1a14bdd/
Changeset:   73dee1a14bdd
Branch:      yt
User:        brittonsmith
Date:        2015-08-25 01:44:32+00:00
Summary:     Make sure field_info exists.
Affected #:  1 file

diff -r c2add0a382ba238c97a64a6656f571d182a6e6c1 -r 73dee1a14bdd8893775f4475462ea019f21038b6 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -366,8 +366,9 @@
         self.field_dependencies = {}
         self.derived_field_list = []
         self.filtered_particle_types = []
-        self.field_info = self.field_info or \
-          self._field_info_class(self, self.field_list)
+        if not hasattr(self, "field_info"):
+            self.field_info = \
+              self._field_info_class(self, self.field_list)
         self.coordinates.setup_fields(self.field_info)
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:


https://bitbucket.org/yt_analysis/yt/commits/7f73da4a25da/
Changeset:   7f73da4a25da
Branch:      yt
User:        brittonsmith
Date:        2015-08-25 01:49:37+00:00
Summary:     Making is_valids smart enough to distinguish the grid and particle datasets
Affected #:  2 files

diff -r 73dee1a14bdd8893775f4475462ea019f21038b6 -r 7f73da4a25da74accaf11d978a61e20da6374a28 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -515,7 +515,7 @@
             output.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
             output.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
         output.attrs["current_time"] = self.cosmology.t_from_z(self.near_redshift).in_cgs()
-        output.attrs["data_type"] = "light_ray"
+        output.attrs["data_type"] = "yt_light_ray"
         group = output.create_group("grid")
         group.attrs["num_elements"] = data['x'].size
         for field in data.keys():

diff -r 73dee1a14bdd8893775f4475462ea019f21038b6 -r 7f73da4a25da74accaf11d978a61e20da6374a28 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -47,6 +47,10 @@
     YTArray, \
     YTQuantity
 
+_grid_data_containers = ["abritrary_grid",
+                         "covering_grid",
+                         "smoothed_covering_grid"]
+
 class YTDataHDF5File(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         with h5py.File(filename, "r") as f:
@@ -100,10 +104,14 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            if "data_type" in f.attrs and \
-              f.attrs["data_type"] in ["light_ray",
-                                       "yt_array_data",
-                                       "yt_data_container"]:
+            data_type = f.attrs.get("data_type", None)
+            if data_type is None:
+                return False
+            if data_type in ["yt_light_ray", "yt_array_data"]:
+                return True
+            if data_type == "yt_data_container" and \
+              f.attrs.get("container_type", None) not in \
+              _grid_data_containers:
                 return True
         return False
 
@@ -243,7 +251,9 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            if "data_type" in f.attrs and \
-              f.attrs["data_type"] in ["yt_grid_data"]:
+            data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_data_container" and \
+              f.attrs.get("container_type", None) in \
+              _grid_data_containers:
                 return True
         return False

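The net effect of the two _is_valid methods is a split on the
container_type attribute.  A compact restatement of the rule
(simplified; attrs stands for the file's HDF5 attributes):

    _grid_data_containers = ["arbitrary_grid", "covering_grid",
                             "smoothed_covering_grid"]

    def claimed_by_grid_dataset(attrs):
        # mirrors YTGridDataset._is_valid above
        return (attrs.get("data_type") == "yt_data_container" and
                attrs.get("container_type") in _grid_data_containers)

    claimed_by_grid_dataset({"data_type": "yt_data_container",
                             "container_type": "covering_grid"})  # True
    claimed_by_grid_dataset({"data_type": "yt_data_container",
                             "container_type": "sphere"})         # False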

https://bitbucket.org/yt_analysis/yt/commits/9889c3390e51/
Changeset:   9889c3390e51
Branch:      yt
User:        brittonsmith
Date:        2015-08-25 12:59:18+00:00
Summary:     Adding some necessary fields and making sure attributes are available at write-out.
Affected #:  1 file

diff -r 7f73da4a25da74accaf11d978a61e20da6374a28 -r 9889c3390e514e6cdb9c58ff18e5636eea69b7e8 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -364,8 +364,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
-        all_fields.extend(['x', 'y', 'z'])
-        data_fields.extend(['x', 'y', 'z'])
+        all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
+        data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los'])
@@ -499,35 +499,32 @@
         Write light ray data to hdf5 file.
         """
         mylog.info("Saving light ray data to %s." % filename)
-        output = h5py.File(filename, 'w')
+        fh = h5py.File(filename, "w")
         for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
-            output.attrs[attr] = getattr(self.cosmology, attr)
-        output.attrs["current_redshift"] = self.near_redshift
+            fh.attrs[attr] = getattr(self.cosmology, attr)
         if self.simulation_type == None:
-            ds = load(parameter_filename, **self.load_kwargs)
-            # Do these need to be in CGS, like how halo_catalog does it?
-            output.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
-            output.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
-            output.attrs["cosmological_simulation"] = ds.cosmological_simulation
+            ds = load(self.parameter_filename, **self.load_kwargs)
+            fh.attrs["current_redshift"] = ds.current_redshift
+            fh.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
+            fh.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
+            fh.attrs["cosmological_simulation"] = ds.cosmological_simulation
         else:
-            # Do these need to be in CGS, like how halo_catalog does it?
-            output.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
-            output.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
-            output.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
-        output.attrs["current_time"] = self.cosmology.t_from_z(self.near_redshift).in_cgs()
-        output.attrs["data_type"] = "yt_light_ray"
-        group = output.create_group("grid")
+            fh.attrs["current_redshift"] = self.near_redshift
+            fh.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
+            fh.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
+            fh.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
+        fh.attrs["current_time"] = self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
+        fh.attrs["data_type"] = "yt_light_ray"
+        group = fh.create_group("grid")
         group.attrs["num_elements"] = data['x'].size
         for field in data.keys():
-            # if the field is a tuple, only use the second part of the tuple
-            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
             if isinstance(field, tuple):
                 fieldname = field[1]
             else:
                 fieldname = field
             group.create_dataset(fieldname, data=data[field])
             group[fieldname].attrs["units"] = str(data[field].units)
-        output.close()
+        fh.close()
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):
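
To check what _write_light_ray now records, a hedged inspection sketch
("ray.h5" stands for a hypothetical output of make_light_ray):

    >>> import h5py
    >>> fh = h5py.File("ray.h5", "r")
    >>> print fh.attrs["data_type"]
    yt_light_ray
    >>> print fh.attrs["current_redshift"], fh.attrs["current_time"]
    >>> print fh["grid"].attrs["num_elements"]
    >>> print fh["grid"]["dl"].attrs["units"]   # units stored per field
    >>> fh.close()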


https://bitbucket.org/yt_analysis/yt/commits/6495add85191/
Changeset:   6495add85191
Branch:      yt
User:        brittonsmith
Date:        2015-08-25 20:22:52+00:00
Summary:     Adding periodicity check.
Affected #:  1 file

diff -r 9889c3390e514e6cdb9c58ff18e5636eea69b7e8 -r 6495add85191c983736eebfe3046416ad4553c5b yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -216,6 +216,12 @@
         self.domain_right_edge = self.domain_left_edge + \
           self.ActiveDimensions * dx
         self.domain_dimensions = self.ActiveDimensions
+        self.periodicity = \
+          np.abs(self.domain_left_edge -
+                 self.base_domain_left_edge) < 0.5 * dx
+        self.periodicity &= \
+          np.abs(self.domain_right_edge -
+                 self.base_domain_right_edge) < 0.5 * dx
 
     def __repr__(self):
         return "ytGrid: %s" % self.parameter_filename


https://bitbucket.org/yt_analysis/yt/commits/cb45648d3948/
Changeset:   cb45648d3948
Branch:      yt
User:        brittonsmith
Date:        2015-08-26 13:10:47+00:00
Summary:     Be smarter about calculating ray length.
Affected #:  1 file

diff -r 6495add85191c983736eebfe3046416ad4553c5b -r cb45648d3948a464cbf0a912ebf8c4776e125b9f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -172,9 +172,15 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if isinstance(start_position, np.ndarray):
+                self.light_ray_solution[0]['start'] = start_position
+            else:
+                self.light_ray_solution[0]['start'] = np.array(start_position)
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.array(end_position)
+                if isinstance(end_position, np.ndarray):
+                    self.light_ray_solution[0]['end'] = end_position
+                else:
+                    self.light_ray_solution[0]['end'] = np.array(end_position)
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -401,10 +407,15 @@
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
+                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                    segment_length = \
+                      my_segment["traversal_box_fraction"].in_units("Mpccm / h")
+                else:
+                    segment_length = my_segment["traversal_box_fraction"] * \
+                      ds.domain_width[0].in_units("Mpccm / h")
                 next_redshift = my_segment["redshift"] - \
                   self._deltaz_forward(my_segment["redshift"],
-                                       ds.domain_width[0].in_units("Mpccm / h") *
-                                       my_segment["traversal_box_fraction"])
+                                       segment_length)
             elif my_segment.get("next", None) is None:
                 next_redshift = self.near_redshift
             else:
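
With this change, traversal_box_fraction may be either a dimensionless
fraction of the box width or a YTArray that already carries length units.
A hedged sketch of the two cases (ds stands for any loaded cosmological
dataset):

    from yt.units.yt_array import YTArray

    # dimensionless fraction: scale by the box width
    segment_length = 0.5 * ds.domain_width[0].in_units("Mpccm / h")

    # YTArray with length units: convert directly
    segment_length = YTArray(25.0, "Mpccm / h").in_units("Mpccm / h")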


https://bitbucket.org/yt_analysis/yt/commits/07edc87b9f85/
Changeset:   07edc87b9f85
Branch:      yt
User:        brittonsmith
Date:        2015-08-28 19:28:44+00:00
Summary:     Merging.
Affected #:  17 files

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -20,6 +20,7 @@
 
 from .absorption_line import tau_profile
 
+from yt.convenience import load
 from yt.funcs import get_pbar, mylog
 from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.physical_constants import \
@@ -112,7 +113,7 @@
                                     'normalization': normalization,
                                     'index': index})
 
-    def make_spectrum(self, input_file, output_file="spectrum.h5",
+    def make_spectrum(self, input_ds, output_file="spectrum.h5",
                       line_list_file="lines.txt",
                       use_peculiar_velocity=True, njobs="auto"):
         """
@@ -121,8 +122,8 @@
         Parameters
         ----------
 
-        input_file : string
-           path to input ray data.
+        input_ds : string or dataset
+           path to input ray data or a loaded ray dataset
         output_file : optional, string
            path for output file.  File formats are chosen based on the
            filename extension.  ``.h5`` for hdf5, ``.fits`` for fits,
@@ -156,7 +157,6 @@
 
         input_fields = ['dl', 'redshift', 'temperature']
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
-        field_data = {}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
             field_units["velocity_los"] = "cm/s"
@@ -165,10 +165,9 @@
                 input_fields.append(feature['field_name'])
                 field_units[feature["field_name"]] = "cm**-3"
 
-        input = h5py.File(input_file, 'r')
-        for field in input_fields:
-            field_data[field] = YTArray(input[field].value, field_units[field])
-        input.close()
+        if isinstance(input_ds, str):
+            input_ds = load(input_ds)
+        field_data = input_ds.all_data()
 
         self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
@@ -335,6 +334,8 @@
         """
         Write out list of spectral lines.
         """
+        if filename is None:
+            return
         mylog.info("Writing spectral line list: %s." % filename)
         self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
         f = open(filename, 'w')
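
With this change, make_spectrum accepts either a path to a ray file or an
already loaded ray dataset.  A hedged usage sketch (filenames hypothetical,
add_line/add_continuum calls omitted):

    >>> import yt
    >>> from yt.analysis_modules.absorption_spectrum.api import \
    ...     AbsorptionSpectrum
    >>> sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    >>> # pass a filename, as before ...
    >>> sp.make_spectrum("ray.h5", output_file="spectrum.h5")
    >>> # ... or a ray dataset loaded with the new ytdata frontend
    >>> ray_ds = yt.load("ray.h5")
    >>> sp.make_spectrum(ray_ds, output_file="spectrum.h5")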

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -47,7 +47,7 @@
     synthetic QSO lines of sight.
 
     Light rays can also be made from single datasets.
-    
+
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
     single object by providing different random seeds to make_light_ray.
@@ -57,17 +57,17 @@
     parameter_filename : string
         The path to the simulation parameter file or dataset.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to 
+        The simulation type.  If None, the first argument is assumed to
         refer to a single dataset.
         Default: None
     near_redshift : optional, float
-        The near (lowest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The near (lowest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
-        The far (highest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The far (highest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -97,11 +97,11 @@
         datasets for time series.
         Default: True.
     find_outputs : optional, bool
-        Whether or not to search for datasets in the current 
+        Whether or not to search for datasets in the current
         directory.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load" 
+        Optional dictionary of kwargs to be passed to the "load"
         function, appropriate for use of certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
@@ -128,8 +128,9 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.        
+        # Make a light ray from a single, given dataset.
         if simulation_type is None:
+            self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
@@ -155,7 +156,7 @@
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, 
+    def _calculate_light_ray_solution(self, seed=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -171,9 +172,15 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if isinstance(start_position, np.ndarray):
+                self.light_ray_solution[0]['start'] = start_position
+            else:
+                self.light_ray_solution[0]['start'] = np.array(start_position)
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.array(end_position)
+                if isinstance(end_position, np.ndarray):
+                    self.light_ray_solution[0]['end'] = end_position
+                else:
+                    self.light_ray_solution[0]['end'] = np.array(end_position)
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -184,12 +191,12 @@
                                 np.sin(phi) * np.sin(theta),
                                 np.cos(theta)])
             self.light_ray_solution[0]['traversal_box_fraction'] = \
-              vector_length(self.light_ray_solution[0]['start'], 
+              vector_length(self.light_ray_solution[0]['start'],
                             self.light_ray_solution[0]['end'])
 
         # the normal way (random start positions and trajectories for each dataset)
         else:
-            
+
             # For box coherence, keep track of effective depth travelled.
             box_fraction_used = 0.0
 
@@ -284,15 +291,15 @@
             Default: None.
         trajectory : optional, list of floats
             Used only if creating a light ray from a single dataset.
-            The (r, theta, phi) direction of the light ray.  Use either 
+            The (r, theta, phi) direction of the light ray.  Use either
             end_position or trajectory, not both.
             Default: None.
         fields : optional, list
             A list of fields for which to get data.
             Default: None.
         setup_function : optional, callable, accepts a ds
-            This function will be called on each dataset that is loaded 
-            to create the light ray.  For, example, this can be used to 
+            This function will be called on each dataset that is loaded
+            to create the light ray.  For example, this can be used to
             add new derived fields.
             Default: None.
         solution_filename : optional, string
@@ -307,13 +314,13 @@
             each point in the ray.
             Default: True.
         redshift : optional, float
-            Used with light rays made from single datasets to specify a 
-            starting redshift for the ray.  If not used, the starting 
-            redshift will be 0 for a non-cosmological dataset and 
+            Used with light rays made from single datasets to specify a
+            starting redshift for the ray.  If not used, the starting
+            redshift will be 0 for a non-cosmological dataset and
             the dataset redshift for a cosmological dataset.
             Default: None.
         njobs : optional, int
-            The number of parallel jobs over which the segments will 
+            The number of parallel jobs over which the segments will
             be split.  Choose -1 for one processor per segment.
             Default: -1.
 
@@ -321,7 +328,7 @@
         --------
 
         Make a light ray from multiple datasets:
-        
+
         >>> import yt
         >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
         ...     LightRay
@@ -347,12 +354,12 @@
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
         ...                       get_los_velocity=True)
-        
+
         """
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, 
-                                           start_position=start_position, 
+        self._calculate_light_ray_solution(seed=seed,
+                                           start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
@@ -363,6 +370,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
+        all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
+        data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los'])
@@ -398,10 +407,15 @@
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
+                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                    segment_length = \
+                      my_segment["traversal_box_fraction"].in_units("Mpccm / h")
+                else:
+                    segment_length = my_segment["traversal_box_fraction"] * \
+                      ds.domain_width[0].in_units("Mpccm / h")
                 next_redshift = my_segment["redshift"] - \
-                  self._deltaz_forward(my_segment["redshift"], 
-                                       ds.domain_width[0].in_units("Mpccm / h") *
-                                       my_segment["traversal_box_fraction"])
+                  self._deltaz_forward(my_segment["redshift"],
+                                       segment_length)
             elif my_segment.get("next", None) is None:
                 next_redshift = self.near_redshift
             else:
@@ -452,7 +466,7 @@
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], 
+                (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
@@ -476,12 +490,17 @@
         # Flatten the list into a single dictionary containing fields
         # for the whole ray.
         all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
+        self._data = all_data
 
         if data_filename is not None:
             self._write_light_ray(data_filename, all_data)
+            ray_ds = load(data_filename)
+            return ray_ds
+        else:
+            return None
 
-        self._data = all_data
-        return all_data
+    def __getitem__(self, field):
+        return self._data[field]
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):
@@ -490,19 +509,33 @@
 
         Write light ray data to hdf5 file.
         """
-
         mylog.info("Saving light ray data to %s." % filename)
-        output = h5py.File(filename, 'w')
+        fh = h5py.File(filename, "w")
+        for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+            fh.attrs[attr] = getattr(self.cosmology, attr)
+        if self.simulation_type == None:
+            ds = load(self.parameter_filename, **self.load_kwargs)
+            fh.attrs["current_redshift"] = ds.current_redshift
+            fh.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
+            fh.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
+            fh.attrs["cosmological_simulation"] = ds.cosmological_simulation
+        else:
+            fh.attrs["current_redshift"] = self.near_redshift
+            fh.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
+            fh.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
+            fh.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
+        fh.attrs["current_time"] = self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
+        fh.attrs["data_type"] = "yt_light_ray"
+        group = fh.create_group("grid")
+        group.attrs["num_elements"] = data['x'].size
         for field in data.keys():
-            # if the field is a tuple, only use the second part of the tuple
-            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
             if isinstance(field, tuple):
                 fieldname = field[1]
             else:
                 fieldname = field
-            output.create_dataset(fieldname, data=data[field])
-            output[fieldname].attrs["units"] = str(data[field].units)
-        output.close()
+            group.create_dataset(fieldname, data=data[field])
+            group[fieldname].attrs["units"] = str(data[field].units)
+        fh.close()
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):
@@ -549,7 +582,7 @@
 def vector_length(start, end):
     """
     vector_length(start, end)
-    
+
     Calculate vector length.
     """
 
@@ -576,15 +609,15 @@
     """
     periodic_ray(start, end, left=None, right=None)
 
-    Break up periodic ray into non-periodic segments. 
+    Break up periodic ray into non-periodic segments.
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
-    Returns a list of lists of coordinates, where each element of the 
-    top-most list is a 2-list of start coords and end coords of the 
-    non-periodic ray: 
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
 
-    [[[x0start,y0start,z0start], [x0end, y0end, z0end]], 
-     [[x1start,y1start,z1start], [x1end, y1end, z1end]], 
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
      ...,]
 
     """

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -21,6 +21,9 @@
     periodic_distance
 from yt.data_objects.profiles import \
     create_profile
+from yt.frontends.ytdata.utilities import \
+    _hdf5_yt_array, \
+    _yt_array_hdf5
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \
@@ -584,21 +587,3 @@
     del sphere
     
 add_callback("iterative_center_of_mass", iterative_center_of_mass)
-
-def _yt_array_hdf5(fh, fieldname, data):
-    dataset = fh.create_dataset(fieldname, data=data)
-    units = ""
-    if isinstance(data, YTArray):
-        units = str(data.units)
-    dataset.attrs["units"] = units
-
-def _hdf5_yt_array(fh, fieldname, ds=None):
-    if ds is None:
-        new_arr = YTArray
-    else:
-        new_arr = ds.arr
-    units = ""
-    if "units" in fh[fieldname].attrs:
-        units = fh[fieldname].attrs["units"]
-    if units == "dimensionless": units = ""
-    return new_arr(fh[fieldname].value, units)
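
The two helpers removed here now live in yt/frontends/ytdata/utilities.py
(see the new imports at the top of this diff).  For reference, a hedged
round-trip sketch, writing a YTArray with units and reading it back:

    >>> import h5py
    >>> from yt.frontends.ytdata.utilities import \
    ...     _hdf5_yt_array, _yt_array_hdf5
    >>> from yt.units.yt_array import YTArray
    >>> fh = h5py.File("halo_info.h5", "w")     # hypothetical file
    >>> _yt_array_hdf5(fh, "radius", YTArray([1.0, 2.0], "kpc"))
    >>> radius = _hdf5_yt_array(fh, "radius")   # YTArray with "kpc" units
    >>> fh.close()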

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,7 +13,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import h5py
 import itertools
+import os
 import types
 import uuid
 from yt.extern.six import string_types
@@ -25,9 +27,12 @@
 import shelve
 from contextlib import contextmanager
 
+from yt.funcs import get_output_filename
 from yt.funcs import *
 
 from yt.data_objects.particle_io import particle_handler_registry
+from yt.frontends.ytdata.utilities import \
+    to_yt_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -463,6 +468,95 @@
         df = pd.DataFrame(data)
         return df
 
+    def to_dataset(self, filename=None, fields=None):
+        r"""Export a data object to a reloadable yt dataset.
+
+        This function takes a data object and writes out a file
+        containing either the fields already present on the object or
+        the fields given in a list.  The resulting file can be
+        reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the file to be written.  If None, the name 
+            will be a combination of the original dataset and the type 
+            of data container.
+        fields : list of strings or tuples, default None
+            If this is supplied, it is the list of fields to be exported into
+            the dataset.  If not supplied, whatever fields presently exist
+            will be used.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> dd = ds.all_data()
+        >>> fn1 = dd.to_dataset(fields=["density", "temperature"])
+        >>> ds1 = yt.load(fn1)
+        >>> dd["velocity_magnitude"]
+        >>> fn2 = dd.to_dataset()
+        >>> ds2 = yt.load(fn2)
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self._type_name)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        data = {}
+        if fields is not None:
+            for f in self._determine_fields(fields):
+                data[f] = self[f]
+        else:
+            data.update(self.field_data)
+        data_fields = list(data.keys())
+
+        need_grid_fields = False
+        need_particle_fields = False
+        ptypes = []
+        ftypes = {}
+        for field in data_fields:
+            if self.ds.field_info[field].particle_type:
+                if field[0] not in ptypes:
+                    ptypes.append(field[0])
+                ftypes[field] = "%s_particles" % field[0]
+                need_particle_fields = True
+            else:
+                ftypes[field] = "grid"
+                need_grid_fields = True
+
+        for ax in "xyz":
+            if need_particle_fields:
+                for ptype in ptypes:
+                    p_field = (ptype, "particle_position_%s" % ax)
+                    if p_field in self.ds.field_info and p_field not in data:
+                        data_fields.append(p_field)
+                        ftypes[p_field] = "%s_particles" % p_field[0]
+                        data[p_field] = self[p_field]
+            if need_grid_fields:
+                g_field = ("index", ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+                g_field = ("index", "d" + ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+
+        extra_attrs = dict([(arg, getattr(self, arg, None))
+                            for arg in self._con_args])
+        extra_attrs["data_type"] = "yt_data_container"
+        extra_attrs["container_type"] = self._type_name
+        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
+                      extra_attrs=extra_attrs)
+
+        return filename
+        
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to
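
Since the first positional argument of to_dataset is filename, field lists
are passed with the fields keyword.  A hedged round-trip sketch (the
attribute values shown assume an all_data container):

    >>> import h5py
    >>> import yt
    >>> dd = ds.all_data()
    >>> fn = dd.to_dataset(fields=["density", "temperature"])
    >>> with h5py.File(fn, "r") as f:
    ...     print f.attrs["data_type"], f.attrs["container_type"]
    yt_data_container region
    >>> ds_new = yt.load(fn)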

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -366,7 +366,9 @@
         self.field_dependencies = {}
         self.derived_field_list = []
         self.filtered_particle_types = []
-        self.field_info = self._field_info_class(self, self.field_list)
+        if not hasattr(self, "field_info"):
+            self.field_info = \
+              self._field_info_class(self, self.field_list)
         self.coordinates.setup_fields(self.field_info)
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:
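
This guard lets a subclass construct field_info itself before delegating
to the base class; YTGridDataset.create_field_info in this same commit
does exactly that.  A minimal sketch of the pattern (the subclass is
hypothetical):

    class MyDataset(Dataset):
        def create_field_info(self):
            # set field_info first; the new guard keeps the base class
            # from overwriting it
            self.field_info = self._field_info_class(self, self.field_list)
            self.field_info.alias(("gas", "density"), ("grid", "density"))
            super(MyDataset, self).create_field_info()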

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,6 +39,7 @@
     'sdf',
     'stream',
     'tipsy',
+    'ytdata',
 ]
 
 class _frontend_container:

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -37,7 +37,7 @@
     mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-    
+
 class EnzoSimulation(SimulationTimeSeries):
     r"""
     Initialize an Enzo Simulation object.
@@ -98,6 +98,8 @@
             self.length_unit = self.quan(self.box_size, "Mpccm / h",
                                          registry=self.unit_registry)
             self.box_size = self.length_unit
+            self.domain_left_edge = self.domain_left_edge * self.length_unit
+            self.domain_right_edge = self.domain_right_edge * self.length_unit
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
@@ -130,21 +132,21 @@
             datasets for time series.
             Default: True.
         initial_time : tuple of type (float, str)
-            The earliest time for outputs to be included.  This should be 
+            The earliest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (5.0, "Gyr").  If None, the initial time of the 
-            simulation is used.  This can be used in combination with 
+            For example, (5.0, "Gyr").  If None, the initial time of the
+            simulation is used.  This can be used in combination with
             either final_time or final_redshift.
             Default: None.
         final_time : tuple of type (float, str)
-            The latest time for outputs to be included.  This should be 
+            The latest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (13.7, "Gyr"). If None, the final time of the 
-            simulation is used.  This can be used in combination with either 
+            For example, (13.7, "Gyr"). If None, the final time of the
+            simulation is used.  This can be used in combination with either
             initial_time or initial_redshift.
             Default: None.
         times : tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
+            A list of times for which outputs will be found and the units
             of those values.  For example, ([0, 1, 2, 3], "s").
             Default: None.
         initial_redshift : float
@@ -192,8 +194,8 @@
 
         >>> import yt
         >>> es = yt.simulation("my_simulation.par", "Enzo")
-        
-        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
+
+        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
                                redshift_data=False)
 
         >>> es.get_time_series(redshifts=[3, 2, 1, 0])
@@ -301,7 +303,7 @@
         for output in my_outputs:
             if os.path.exists(output['filename']):
                 init_outputs.append(output['filename'])
-            
+
         DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
                                 setup_function=setup_function)
         mylog.info("%d outputs loaded into time series.", len(init_outputs))
@@ -583,11 +585,11 @@
         Check a list of files to see if they are valid datasets.
         """
 
-        only_on_root(mylog.info, "Checking %d potential outputs.", 
+        only_on_root(mylog.info, "Checking %d potential outputs.",
                      len(potential_outputs))
 
         my_outputs = {}
-        for my_storage, output in parallel_objects(potential_outputs, 
+        for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -641,6 +643,6 @@
         self.initial_redshift = initial_redshift
         # time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
         # rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
-        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 * 
+        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 *
                            (1 + self.initial_redshift)**3)**-0.5).in_units("s")
         self.time_unit.units.registry = self.unit_registry
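
Beyond the whitespace cleanup, the substantive change here attaches length
units to the simulation's domain edges, so callers such as
LightRay._write_light_ray can take in_cgs() on them directly.  A hedged
sketch (parameter file hypothetical):

    >>> import yt
    >>> es = yt.simulation("my_simulation.par", "Enzo")
    >>> print es.domain_left_edge.in_cgs()   # now a YTArray, convertible to cm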

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -30,6 +30,7 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("tipsy")
+    config.add_subpackage("ytdata")
     config.add_subpackage("art/tests")
     config.add_subpackage("artio/tests")
     config.add_subpackage("athena/tests")

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/__init__.py
--- /dev/null
+++ b/yt/frontends/ytdata/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for ytData frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/api.py
--- /dev/null
+++ b/yt/frontends/ytdata/api.py
@@ -0,0 +1,31 @@
+"""
+API for ytData frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    YTDataContainerDataset, \
+    YTGridDataset, \
+    YTGridHierarchy, \
+    YTGrid
+
+from .io import \
+    IOHandlerYTDataContainerHDF5
+
+from .fields import \
+    YTDataContainerFieldInfo, \
+    YTGridFieldInfo
+
+from .utilities import \
+    to_yt_dataset

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/data_structures.py
--- /dev/null
+++ b/yt/frontends/ytdata/data_structures.py
@@ -0,0 +1,265 @@
+"""
+Data structures for YTData frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+from numbers import \
+    Number as numeric_type
+import numpy as np
+import os
+import stat
+import time
+import weakref
+
+from .fields import \
+    YTDataContainerFieldInfo, \
+    YTGridFieldInfo
+
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.data_objects.static_output import \
+    Dataset, \
+    ParticleFile
+from yt.extern.six import \
+    iteritems, \
+    string_types
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.utilities.logger import \
+    ytLogger as mylog
+from yt.utilities.cosmology import Cosmology
+import yt.utilities.fortran_utils as fpu
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+
+_grid_data_containers = ["abritrary_grid",
+                         "covering_grid",
+                         "smoothed_covering_grid"]
+
+class YTDataHDF5File(ParticleFile):
+    def __init__(self, ds, io, filename, file_id):
+        with h5py.File(filename, "r") as f:
+            self.header = dict((field, f.attrs[field]) \
+                               for field in f.attrs.keys())
+
+        super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
+
+class YTDataContainerDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = YTDataHDF5File
+    _field_info_class = YTDataContainerFieldInfo
+    _suffix = ".h5"
+
+    def __init__(self, filename, dataset_type="ytdatacontainer_hdf5",
+                 n_ref = 16, over_refine_factor = 1, units_override=None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        super(YTDataContainerDataset, self).__init__(filename, dataset_type,
+                                            units_override=units_override)
+
+    def _parse_parameter_file(self):
+        with h5py.File(self.parameter_filename, "r") as f:
+            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
+            self.particle_types_raw = tuple(f.keys())
+        self.particle_types = self.particle_types_raw
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, hvals[attr])
+        self.periodicity = (True, True, True)
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.parameters.update(hvals)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "cm")
+        self.mass_unit = self.quan(1.0, "g")
+        self.velocity_unit = self.quan(1.0, "cm / s")
+        self.time_unit = self.quan(1.0, "s")
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = f.attrs.get("data_type", None)
+            if data_type is None:
+                return False
+            if data_type in ["yt_light_ray", "yt_array_data"]:
+                return True
+            if data_type == "yt_data_container" and \
+              f.attrs.get("container_type", None) not in \
+              _grid_data_containers:
+                return True
+        return False
+
+class YTGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, index):
+        AMRGridPatch.__init__(self, id, filename=None, index=index)
+        self._children_ids = []
+        self._parent_id = -1
+        self.Level = 0
+        self.LeftEdge = self.index.ds.domain_left_edge
+        self.RightEdge = self.index.ds.domain_right_edge
+
+    @property
+    def Parent(self):
+        return None
+
+    @property
+    def Children(self):
+        return []
+
+class YTGridHierarchy(GridIndex):
+    grid = YTGrid
+
+    def __init__(self, ds, dataset_type = None):
+        self.dataset_type = dataset_type
+        self.float_type = 'float64'
+        self.dataset = weakref.proxy(ds) # for _obtain_enzo
+        self.directory = os.getcwd()
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _count_grids(self):
+        self.num_grids = 1
+
+    def _parse_index(self):
+        self.grid_dimensions[:] = self.ds.domain_dimensions
+        self.grid_left_edge[:] = self.ds.domain_left_edge
+        self.grid_right_edge[:] = self.ds.domain_right_edge
+        self.grid_levels[:] = np.zeros(self.num_grids)
+        self.grid_procs = np.zeros(self.num_grids)
+        self.grid_particle_count[:] = sum(self.ds.num_particles.values())
+        self.grids = []
+        for id in range(self.num_grids):
+            self.grids.append(self.grid(id, self))
+            self.grids[id].Level = self.grid_levels[id, 0]
+        self.max_level = self.grid_levels.max()
+        temp_grids = np.empty(self.num_grids, dtype='object')
+        for i, grid in enumerate(self.grids):
+            grid.filename = self.ds.parameter_filename
+            grid._prepare_grid()
+            grid.proc_num = self.grid_procs[i]
+            temp_grids[i] = grid
+        self.grids = temp_grids
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._setup_dx()
+        self.max_level = self.grid_levels.max()
+
+    def _detect_output_fields(self):
+        self.field_list = []
+        self.ds.field_units = self.ds.field_units or {}
+        with h5py.File(self.ds.parameter_filename, "r") as f:
+            for group in f:
+                for field in f[group]:
+                    field_name = (str(group), str(field))
+                    self.field_list.append(field_name)
+                    self.ds.field_units[field_name] = \
+                      f[group][field].attrs["units"]
+
+class YTGridDataset(Dataset):
+    _index_class = YTGridHierarchy
+    _field_info_class = YTGridFieldInfo
+    _dataset_type = 'ytgridhdf5'
+    geometry = "cartesian"
+    default_fluid_type = "grid"
+    fluid_types = ("grid", "gas", "deposit", "index")
+
+    def __init__(self, filename):
+        Dataset.__init__(self, filename, self._dataset_type)
+
+    def _parse_parameter_file(self):
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = time.time()
+        with h5py.File(self.parameter_filename, "r") as f:
+            for attr, value in f.attrs.items():
+                setattr(self, attr, value)
+            self.num_particles = \
+              dict([(group, f[group].attrs["num_elements"])
+                    for group in f if group != self.default_fluid_type])
+        self.particle_types_raw = tuple(self.num_particles.keys())
+        self.particle_types = self.particle_types_raw
+
+        # correct domain dimensions for the covering grid dimension
+        self.base_domain_left_edge = self.domain_left_edge
+        self.base_domain_right_edge = self.domain_right_edge
+        self.base_domain_dimensions = self.domain_dimensions
+        dx = (self.domain_right_edge - self.domain_left_edge) / \
+          (self.domain_dimensions * self.refine_by**self.level)
+        self.domain_left_edge = self.left_edge
+        self.domain_right_edge = self.domain_left_edge + \
+          self.ActiveDimensions * dx
+        self.domain_dimensions = self.ActiveDimensions
+        self.periodicity = \
+          np.abs(self.domain_left_edge -
+                 self.base_domain_left_edge) < 0.5 * dx
+        self.periodicity &= \
+          np.abs(self.domain_right_edge -
+                 self.base_domain_right_edge) < 0.5 * dx
+
+    def __repr__(self):
+        return "ytGrid: %s" % self.parameter_filename
+
+    def create_field_info(self):
+        self.field_info = self._field_info_class(self, self.field_list)
+        for ftype, field in self.field_list:
+            if ftype == self.default_fluid_type:
+                self.field_info.alias(
+                    ("gas", field),
+                    (self.default_fluid_type, field))
+        super(YTGridDataset, self).create_field_info()
+
+    def _set_code_unit_attributes(self):
+        attrs = ('length_unit', 'mass_unit', 'time_unit',
+                 'velocity_unit', 'magnetic_unit')
+        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
+        base_units = np.ones(len(attrs))
+        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
+            if isinstance(unit, string_types):
+                uq = self.quan(1.0, unit)
+            elif isinstance(unit, numeric_type):
+                uq = self.quan(unit, cgs_unit)
+            elif isinstance(unit, YTQuantity):
+                uq = unit
+            elif isinstance(unit, tuple):
+                uq = self.quan(unit[0], unit[1])
+            else:
+                raise RuntimeError("%s (%s) is invalid." % (attr, unit))
+            setattr(self, attr, uq)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_data_container" and \
+              f.attrs.get("container_type", None) in \
+              _grid_data_containers:
+                return True
+        return False
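
Taken together, the two _is_valid methods mean a single yt.load call
dispatches on the file's attributes: light rays and non-grid containers
load as YTDataContainerDataset, while covering-grid exports load as
YTGridDataset.  A hedged sketch (filenames hypothetical):

    >>> import yt
    >>> from yt.frontends.ytdata.api import \
    ...     YTDataContainerDataset, YTGridDataset
    >>> ds1 = yt.load("sphere.h5")
    >>> isinstance(ds1, YTDataContainerDataset)
    True
    >>> ds2 = yt.load("covering_grid.h5")
    >>> isinstance(ds2, YTGridDataset)
    True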

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/fields.py
--- /dev/null
+++ b/yt/frontends/ytdata/fields.py
@@ -0,0 +1,56 @@
+"""
+YTData-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+m_units = "g"
+p_units = "cm"
+v_units = "cm / s"
+r_units = "cm"
+
+class YTDataContainerFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("x", (p_units, ["particle_position_x"], None)),
+        ("y", (p_units, ["particle_position_y"], None)),
+        ("z", (p_units, ["particle_position_z"], None)),
+        ("velocity_x", (v_units, ["particle_velocity_x"], None)),
+        ("velocity_y", (v_units, ["particle_velocity_y"], None)),
+        ("velocity_z", (v_units, ["particle_velocity_z"], None)),
+    )
+
+    # these are extra fields to be created for the "all" particle type
+    extra_union_fields = (
+        (p_units, "particle_position_x"),
+        (p_units, "particle_position_y"),
+        (p_units, "particle_position_z"),
+        (v_units, "particle_velocity_x"),
+        (v_units, "particle_velocity_y"),
+        (v_units, "particle_velocity_z"),
+    )
+
+class YTGridFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+    )
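
For readers unfamiliar with FieldInfoContainer, each entry above is
(on-disk name, (units, aliases, display name)); the "x" field, for
example, is exposed in cm and aliased to particle_position_x.  A hedged
sketch of what one more entry would look like (the "mass" field is
hypothetical):

    known_particle_fields = (
        ("x", (p_units, ["particle_position_x"], None)),
        # hypothetical: particle masses stored on disk as "mass"
        ("mass", (m_units, ["particle_mass"], None)),
    )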

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/io.py
--- /dev/null
+++ b/yt/frontends/ytdata/io.py
@@ -0,0 +1,247 @@
+"""
+YTData data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.extern.six import \
+    u, b, iteritems
+from yt.funcs import \
+    mylog
+from yt.geometry.oct_container import \
+    _ORDER_MAX
+from yt.utilities.exceptions import \
+    YTDomainOverflow
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.lib.geometry_utils import \
+    compute_morton
+
+class IOHandlerYTGridHDF5(BaseIOHandler):
+    _dataset_type = "ytgridhdf5"
+    _base = slice(None)
+    _field_dtype = "float64"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        # Now we have to do something unpleasant
+        chunks = list(chunks)
+        if selector.__class__.__name__ == "GridSelector":
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            g = chunks[0].objs[0]
+            if g.id in self._cached_fields:
+                gf = self._cached_fields[g.id]
+                rv.update(gf)
+            if len(rv) == len(fields): return rv
+            f = h5py.File(u(g.filename), "r")
+            gds = f["grid"]
+            for field in fields:
+                if field in rv:
+                    self._hits += 1
+                    continue
+                self._misses += 1
+                ftype, fname = field
+                rv[(ftype, fname)] = gds[fname].value
+            if self._cache_on:
+                # rv is keyed by field tuples; cache under the grid's id
+                self._cached_fields.setdefault(g.id, {})
+                self._cached_fields[g.id].update(rv)
+            f.close()
+            return rv
+        if size is None:
+            size = sum((g.count(selector) for chunk in chunks
+                        for g in chunk.objs))
+        for field in fields:
+            ftype, fname = field
+            fsize = size
+            rv[field] = np.empty(fsize, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                   size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        h5_type = self._field_dtype
+        for chunk in chunks:
+            f = None
+            for g in chunk.objs:
+                if g.filename is None: continue
+                if f is None:
+                    f = h5py.File(g.filename, "r")
+                gf = self._cached_fields.get(g.id, {})
+                nd = 0
+                for field in fields:
+                    if field in gf:
+                        nd = g.select(selector, gf[field], rv[field], ind)
+                        self._hits += 1
+                        continue
+                    self._misses += 1
+                    ftype, fname = field
+                    data = f[ftype][fname].value.astype(self._field_dtype)
+                    if self._cache_on:
+                        self._cached_fields.setdefault(g.id, {})
+                        self._cached_fields[g.id][field] = data
+                    nd = g.select(selector, data, rv[field], ind) # caches
+                ind += nd
+            if f: f.close()
+        return rv
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        for chunk in chunks:
+            f = None
+            for g in chunk.objs:
+                if g.filename is None: continue
+                if f is None:
+                    f = h5py.File(g.filename, "r")
+                if g.NumberOfParticles == 0:
+                    continue
+                for ptype, field_list in sorted(ptf.items()):
+                    pn = "particle_position_%s"
+                    x, y, z = (np.asarray(f[ptype][pn % ax].value, dtype="=f8")
+                               for ax in 'xyz')
+                    for field in field_list:
+                        if np.asarray(f[ptype][field]).ndim > 1:
+                            self._array_fields[field] = f[ptype][field].shape
+                    yield ptype, (x, y, z)
+            if f: f.close()
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        for chunk in chunks: # These should be organized by grid filename
+            f = None
+            for g in chunk.objs:
+                if g.filename is None: continue
+                if f is None:
+                    f = h5py.File(g.filename, "r")
+                if g.NumberOfParticles == 0:
+                    continue
+                for ptype, field_list in sorted(ptf.items()):
+                    pn = "particle_position_%s"
+                    x, y, z = (np.asarray(f[ptype][pn % ax].value, dtype="=f8")
+                               for ax in 'xyz')
+                    mask = selector.select_points(x, y, z, 0.0)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = np.asarray(f[ptype][field].value, "=f8")
+                        yield (ptype, field), data[mask]
+            if f: f.close()
+
+class IOHandlerYTDataContainerHDF5(BaseIOHandler):
+    _dataset_type = "ytdatacontainer_hdf5"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    pcount = data_file.total_particles[ptype]
+                    if pcount == 0: continue
+                    x = _get_position_array(ptype, f, "x")
+                    y = _get_position_array(ptype, f, "y")
+                    z = _get_position_array(ptype, f, "z")
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Now we have all the sizes, and we can allocate
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            all_count = self._count_particles(data_file)
+            pcount = all_count["grid"]
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = _get_position_array(ptype, f, "x")
+                    y = _get_position_array(ptype, f, "y")
+                    z = _get_position_array(ptype, f, "z")
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f[ptype][field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        all_count = self._count_particles(data_file)
+        pcount = sum(all_count.values())
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            for ptype in all_count:
+                if not ptype in f or all_count[ptype] == 0: continue
+                pos = np.empty((all_count[ptype], 3), dtype="float64")
+                pos = data_file.ds.arr(pos, "code_length")
+                if ptype == "grid":
+                    dx = f["grid"]["dx"].value.min()
+                else:
+                    dx = 2. * np.finfo(f[ptype]["particle_position_x"].dtype).eps
+                dx = self.ds.quan(dx, "code_length")
+                pos[:,0] = _get_position_array(ptype, f, "x")
+                pos[:,1] = _get_position_array(ptype, f, "y")
+                pos[:,2] = _get_position_array(ptype, f, "z")
+                # These are 32 bit numbers, so we give a little leeway.
+                # Otherwise, for big sets of particles, we often bump into
+                # the domain edges.  This helps alleviate that.
+                np.clip(pos, self.ds.domain_left_edge + dx,
+                             self.ds.domain_right_edge - dx, pos)
+                if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0),
+                                           pos.max(axis=0),
+                                           self.ds.domain_left_edge,
+                                           self.ds.domain_right_edge)
+                regions.add_data_file(pos, data_file.file_id)
+                morton[ind:ind+pos.shape[0]] = compute_morton(
+                    pos[:,0], pos[:,1], pos[:,2],
+                    data_file.ds.domain_left_edge,
+                    data_file.ds.domain_right_edge)
+                ind += pos.shape[0]
+        return morton
+
+    def _count_particles(self, data_file):
+        with h5py.File(data_file.filename, "r") as f:
+            return dict([(group, f[group].attrs["num_elements"])
+                         for group in f])
+
+    def _identify_fields(self, data_file):
+        fields = []
+        units = {}
+        with h5py.File(data_file.filename, "r") as f:
+            for ptype in f:
+                fields.extend([(ptype, str(field)) for field in f[ptype]])
+                units.update(dict([((ptype, str(field)), 
+                                    f[ptype][field].attrs["units"])
+                                   for field in f[ptype]]))
+        return fields, units
+
+def _get_position_array(ptype, f, ax):
+    if ptype == "grid":
+        pos_name = ""
+    else:
+        pos_name = "particle_position_"
+    return f[ptype][pos_name + ax].value.astype("float64")
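
The helper above encodes the file's naming convention: grid-type groups
store positions under bare axis names, while particle groups use the full
particle_position_ prefix.  A standalone sketch of the same lookup (the
read_positions helper and filename are illustrative, not part of the
frontend):

    import h5py
    import numpy as np

    def read_positions(fn, ptype):
        # grid groups: "x", "y", "z"; particle groups: "particle_position_x", ...
        pos_name = "" if ptype == "grid" else "particle_position_"
        with h5py.File(fn, "r") as f:
            return np.stack([f[ptype][pos_name + ax].value.astype("float64")
                             for ax in "xyz"], axis=-1)
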

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/setup.py
--- /dev/null
+++ b/yt/frontends/ytdata/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ytdata', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/frontends/ytdata/utilities.py
--- /dev/null
+++ b/yt/frontends/ytdata/utilities.py
@@ -0,0 +1,191 @@
+"""
+Utility functions for ytdata frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+
+from yt.funcs import \
+    mylog
+from yt.units.yt_array import \
+    YTArray
+
+def to_yt_dataset(ds, filename, data, field_types=None,
+                  extra_attrs=None):
+    r"""Export a set of field arrays to a reloadable yt dataset.
+
+    This function can be used to create a yt loadable dataset from a
+    set of arrays.  The field arrays can be associated with a loaded
+    dataset; if they are not, a dictionary of dataset attributes can
+    be provided instead to serve as metadata for the new dataset.  The
+    resulting file can be reloaded as a yt dataset.
+
+    Parameters
+    ----------
+    ds : dataset
+        The dataset associated with the fields.  
+    filename : str
+        The name of the file to be written.
+    data : dict
+        A dictionary of field arrays to be saved.
+    field_types : dict, optional
+        A dictionary mapping fields to the group (field type) in which
+        they will be saved.  If None, all fields are saved in a group
+        called "data".
+    extra_attrs : dict, optional
+        A dictionary of additional attributes to be saved.
+
+    Returns
+    -------
+    filename : str
+        The name of the file that has been created.
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+    >>> sphere_density = sphere["density"]
+    >>> region = ds.box([0.]*3, [0.25]*3)
+    >>> region_density = region["density"]
+    >>> data = {}
+    >>> data["sphere_density"] = sphere_density
+    >>> data["region_density"] = region_density
+    >>> to_yt_dataset(ds, "density_data.h5", data)
+
+    >>> import yt
+    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> from yt.units.yt_array import YTArray, YTQuantity
+    >>> import numpy as np
+    >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
+    ...         "temperature": YTArray(np.random.random(10), "K")}
+    >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
+    ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
+    ...            "current_time": YTQuantity(10, "Myr")}
+    >>> to_yt_dataset(ds_data, "random_data.h5", data)
+    
+    """
+
+    mylog.info("Saving field data to yt dataset: %s." % filename)
+
+    if extra_attrs is None: extra_attrs = {}
+    base_attrs  = ["domain_left_edge", "domain_right_edge",
+                   "current_redshift", "current_time",
+                   "domain_dimensions", "periodicity",
+                   "cosmological_simulation", "omega_lambda",
+                   "omega_matter", "hubble_constant"]
+
+    fh = h5py.File(filename, "w")
+    for attr in base_attrs:
+        if isinstance(ds, dict):
+            my_val = ds.get(attr, None)
+        else:
+            my_val = getattr(ds, attr, None)
+        if my_val is None:
+            mylog.warn("Skipping %s attribute, this may be just fine." % attr)
+            continue
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+
+    for attr in extra_attrs:
+        my_val = extra_attrs[attr]
+        if hasattr(my_val, "units"):
+            my_val = my_val.in_cgs()
+        fh.attrs[attr] = my_val
+    if "data_type" not in extra_attrs:
+        fh.attrs["data_type"] = "yt_array_data"
+
+    for field in data:
+        if field_types is None:
+            field_type = "data"
+        else:
+            field_type = field_types[field]
+        if field_type not in fh:
+            fh.create_group(field_type)
+        # for now, let's avoid writing "code" units
+        if hasattr(data[field], "units"):
+            data[field].convert_to_cgs()
+        if isinstance(field, tuple):
+            field_name = field[1]
+        else:
+            field_name = field
+        dataset = _yt_array_hdf5(fh[field_type], field_name, data[field])
+        if "num_elements" in fh[field_type].attrs:
+            if fh[field_type].attrs["num_elements"] != data[field].size:
+                mylog.warn(
+                    "Datasets in %s group have different sizes." % fh[field_type] +
+                    "  This will probably not work right.")
+        else:
+            fh[field_type].attrs["num_elements"] = data[field].size
+    fh.close()
+
+def _hdf5_yt_array(fh, field, ds=None):
+    r"""Load an hdf5 dataset as a YTArray.
+
+    Reads in a dataset from an open hdf5 file or group and uses the
+    "units" attribute, if it exists, to apply units.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group in which the dataset exists.
+    field : str
+        The name of the field to be loaded.
+    ds : yt Dataset
+        If not None, the unit_registry of the dataset
+        is used to apply units.
+
+    Returns
+    -------
+    A YTArray of the requested field.
+    
+    """
+    
+    if ds is None:
+        new_arr = YTArray
+    else:
+        new_arr = ds.arr
+    units = ""
+    if "units" in fh[field].attrs:
+        units = fh[field].attrs["units"]
+    if units == "dimensionless": units = ""
+    return new_arr(fh[field].value, units)
+
+def _yt_array_hdf5(fh, field, data):
+    r"""Save a YTArray to an open hdf5 file or group.
+
+    Save a YTArray to an open hdf5 file or group, and save the 
+    units to a "units" attribute.
+    
+    Parameters
+    ----------
+    fh : an open hdf5 file or hdf5 group
+        The hdf5 file or group to which the data will be written.
+    field : str
+        The name of the field to be saved.
+    data : YTArray
+        The data array to be saved.
+
+    Returns
+    -------
+    dataset : hdf5 dataset
+        The created hdf5 dataset.
+    
+    """
+
+    dataset = fh.create_dataset(str(field), data=data)
+    units = ""
+    if isinstance(data, YTArray):
+        units = str(data.units)
+    dataset.attrs["units"] = units
+    return dataset
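
For reference, the file layout to_yt_dataset produces can be inspected
directly with h5py.  A minimal sketch using the random_data.h5 example
from the docstring above (the printed values follow from the code: a
default data_type attribute, one group per field type, and a units
attribute on each dataset):

    import h5py

    with h5py.File("random_data.h5", "r") as f:
        print(f.attrs["data_type"])                 # yt_array_data (default)
        print(list(f))                              # ['data'] -- default group
        print(f["data"]["density"].attrs["units"])  # g/cm**3 (saved in cgs)
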

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import errno
 from yt.extern.six import string_types
 import time, types, signal, inspect, traceback, sys, pdb, os, re
 import contextlib
@@ -669,6 +670,57 @@
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
 
+def get_output_filename(name, keyword, suffix):
+    r"""Return an appropriate filename for output.
+
+    With a name provided by the user, this will decide how to 
+    appropriately name the output file by the following rules:
+    1. if name is None, the filename will be the keyword plus 
+       the suffix.
+    2. if name ends with "/", assume name is a directory and 
+       the file will be named name/(keyword+suffix).  If the
+       directory does not exist, first try to create it and
+       raise an exception if an error occurs.
+    3. if name does not end in the suffix, add the suffix.
+    
+    Parameters
+    ----------
+    name : str
+        A filename given by the user.
+    keyword : str
+        A default filename prefix if name is None.
+    suffix : str
+        Suffix that must appear at end of the filename.
+        This will be added if not present.
+
+    Examples
+    --------
+
+    >>> print get_output_filename(None, "Projection_x", ".png")
+    Projection_x.png
+    >>> print get_output_filename("my_file", "Projection_x", ".png")
+    my_file.png
+    >>> print get_output_filename("my_file/", "Projection_x", ".png")
+    my_file/Projection_x.png
+    
+    """
+    if name is None:
+        name = keyword
+    name = os.path.expanduser(name)
+    if name[-1] == os.sep and not os.path.isdir(name):
+        try:
+            os.mkdir(name)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+    if os.path.isdir(name):
+        name = os.path.join(name, keyword)
+    if not name.endswith(suffix):
+        name += suffix
+    return name
+
 def ensure_dir_exists(path):
     r"""Create all directories in path recursively in a parallel safe manner"""
     my_dir = os.path.dirname(path)

diff -r 798706bc587f36f15ab279807b69a420daed42df -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -27,6 +27,8 @@
     # lowest class
     if len(candidates) == 1:
         return candidates
+    elif len(candidates) == 0:
+        return []
 
     mros = [inspect.getmro(c) for c in candidates]
 


https://bitbucket.org/yt_analysis/yt/commits/dabe4ce49cc9/
Changeset:   dabe4ce49cc9
Branch:      yt
User:        brittonsmith
Date:        2015-08-30 20:02:42+00:00
Summary:     Don't choke when attributes are None.
Affected #:  1 file

diff -r 07edc87b9f85fb1dfb39232c56f5895d4d936cc0 -r dabe4ce49cc9d6fc11561ea7926d8cda2cb7ded7 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -101,6 +101,8 @@
         my_val = extra_attrs[attr]
         if hasattr(my_val, "units"):
             my_val = my_val.in_cgs()
+        if my_val is None:
+            my_val = "None"
         fh.attrs[attr] = my_val
     if "data_type" not in extra_attrs:
         fh.attrs["data_type"] = "yt_array_data"


https://bitbucket.org/yt_analysis/yt/commits/14bff31f214c/
Changeset:   14bff31f214c
Branch:      yt
User:        brittonsmith
Date:        2015-08-31 15:42:18+00:00
Summary:     Removing hard-coded dimensionality and no longer saving position fields for projection objects.
Affected #:  2 files

diff -r dabe4ce49cc9d6fc11561ea7926d8cda2cb7ded7 -r 14bff31f214ceda6e082e991c522f12854fb2a0c yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -514,29 +514,35 @@
             data.update(self.field_data)
         data_fields = data.keys()
 
-        need_grid_fields = False
-        need_particle_fields = False
+        need_grid_positions = False
+        need_particle_positions = False
         ptypes = []
         ftypes = {}
         for field in data_fields:
-            if self.ds.field_info[field].particle_type:
+            if field in self._container_fields:
+                ftypes[field] = "grid"
+                need_grid_positions = True
+            elif self.ds.field_info[field].particle_type:
                 if field[0] not in ptypes:
                     ptypes.append(field[0])
                 ftypes[field] = "%s_particles" % field[0]
-                need_particle_fields = True
+                need_particle_positions = True
             else:
                 ftypes[field] = "grid"
-                need_grid_fields = True
+                need_grid_positions = True
+        if self._type_name == "proj":
+            need_grid_positions = False
 
-        for ax in "xyz":
-            if need_particle_fields:
+        if need_particle_positions:
+            for ax in "xyz":
                 for ptype in ptypes:
                     p_field = (ptype, "particle_position_%s" % ax)
                     if p_field in self.ds.field_info and p_field not in data:
                         data_fields.append(p_field)
                         ftypes[p_field] = "%s_particles" % p_field[0]
                         data[p_field] = self[p_field]
-            if need_grid_fields:
+        if need_grid_positions:
+            for ax in "xyz":
                 g_field = ("index", ax)
                 if g_field in self.ds.field_info and g_field not in data:
                     data_fields.append(g_field)
@@ -552,6 +558,7 @@
                             for arg in self._con_args])
         extra_attrs["data_type"] = "yt_data_container"
         extra_attrs["container_type"] = self._type_name
+        extra_attrs["dimensionality"] = self._dimensionality
         to_yt_dataset(self.ds, filename, data, field_types=ftypes,
                       extra_attrs=extra_attrs)
 

diff -r dabe4ce49cc9d6fc11561ea7926d8cda2cb7ded7 -r 14bff31f214ceda6e082e991c522f12854fb2a0c yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -139,7 +139,7 @@
     def __init__(self, ds, dataset_type = None):
         self.dataset_type = dataset_type
         self.float_type = 'float64'
-        self.dataset = weakref.proxy(ds) # for _obtain_enzo
+        self.dataset = weakref.proxy(ds)
         self.directory = os.getcwd()
         GridIndex.__init__(self, ds, dataset_type)
 
@@ -194,7 +194,6 @@
         Dataset.__init__(self, filename, self._dataset_type)
 
     def _parse_parameter_file(self):
-        self.dimensionality = 3
         self.refine_by = 2
         self.unique_identifier = time.time()
         with h5py.File(self.parameter_filename, "r") as f:
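
With the ftypes mapping above, a saved container file groups grid fields
under "grid" and each particle type under "<ptype>_particles".  A quick
inspection sketch (the filename and listed fields are illustrative):

    import h5py

    with h5py.File("DD0046_sphere.h5", "r") as f:
        for group in f:
            print(group, list(f[group]))
    # e.g.  grid ['density', 'x', 'y', 'z']
    #       io_particles ['particle_mass', 'particle_position_x', ...]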


https://bitbucket.org/yt_analysis/yt/commits/6037264f144b/
Changeset:   6037264f144b
Branch:      yt
User:        brittonsmith
Date:        2015-08-31 19:37:08+00:00
Summary:     Adding projection dataset type.
Affected #:  3 files

diff -r 14bff31f214ceda6e082e991c522f12854fb2a0c -r 6037264f144bfe442d6149ba9a306655e42ceea7 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -25,6 +25,7 @@
 
 from .fields import \
     YTDataContainerFieldInfo, \
+    YTProjectionFieldInfo, \
     YTGridFieldInfo
 
 from yt.data_objects.grid_patch import \
@@ -77,7 +78,6 @@
             hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
             self.particle_types_raw = tuple(f.keys())
         self.particle_types = self.particle_types_raw
-        self.dimensionality = 3
         self.refine_by = 2
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
@@ -86,7 +86,7 @@
         self.file_count = 1
         for attr in ["cosmological_simulation", "current_time", "current_redshift",
                      "hubble_constant", "omega_matter", "omega_lambda",
-                     "domain_left_edge", "domain_right_edge"]:
+                     "dimensionality", "domain_left_edge", "domain_right_edge"]:
             setattr(self, attr, hvals[attr])
         self.periodicity = (True, True, True)
 
@@ -115,6 +115,23 @@
                 return True
         return False
 
+class YTProjectionDataset(YTDataContainerDataset):
+    _field_info_class = YTProjectionFieldInfo
+
+    def __init__(self, *args, **kwargs):
+        super(YTProjectionDataset, self).__init__(
+            *args, dataset_type="ytprojection_hdf5", **kwargs)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_data_container" and \
+              f.attrs.get("container_type", None) == "proj":
+                return True
+        return False
+
 class YTGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, index):

diff -r 14bff31f214ceda6e082e991c522f12854fb2a0c -r 6037264f144bfe442d6149ba9a306655e42ceea7 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -48,6 +48,15 @@
         (v_units, "particle_velocity_z"),
     )
 
+class YTProjectionFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+    )
+
+    known_particle_fields = (
+        ("px", (p_units, ["particle_position_x"], None)),
+        ("py", (p_units, ["particle_position_y"], None)),
+    )
+
 class YTGridFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )

diff -r 14bff31f214ceda6e082e991c522f12854fb2a0c -r 6037264f144bfe442d6149ba9a306655e42ceea7 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -239,6 +239,90 @@
                                    for field in f[ptype]]))
         return fields, units
 
+class IOHandlerYTProjectionHDF5(IOHandlerYTDataContainerHDF5):
+    _dataset_type = "ytprojection_hdf5"
+
+    def _read_particle_coords(self, chunks, ptf):
+        # This will read chunks and yield the results.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    pcount = data_file.total_particles[ptype]
+                    if pcount == 0: continue
+                    x = _get_position_array(ptype, f, "px")
+                    y = _get_position_array(ptype, f, "py")
+                    z = np.zeros(x.size, dtype="float64") + \
+                      self.ds.domain_left_edge[2].in_cgs().d
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # Read the selected particle fields from each data file.
+        chunks = list(chunks)
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        for data_file in sorted(data_files):
+            all_count = self._count_particles(data_file)
+            pcount = all_count["grid"]
+            with h5py.File(data_file.filename, "r") as f:
+                for ptype, field_list in sorted(ptf.items()):
+                    x = _get_position_array(ptype, f, "px")
+                    y = _get_position_array(ptype, f, "py")
+                    z = np.zeros(all_count[ptype], dtype="float64") + \
+                      self.ds.domain_left_edge[2].in_cgs().d
+                    mask = selector.select_points(x, y, z, 0.0)
+                    del x, y, z
+                    if mask is None: continue
+                    for field in field_list:
+                        data = f[ptype][field][mask].astype("float64")
+                        yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        all_count = self._count_particles(data_file)
+        pcount = sum(all_count.values())
+        morton = np.empty(pcount, dtype='uint64')
+        mylog.debug("Initializing index % 5i (% 7i particles)",
+                    data_file.file_id, pcount)
+        ind = 0
+        with h5py.File(data_file.filename, "r") as f:
+            for ptype in all_count:
+                if ptype not in f or all_count[ptype] == 0: continue
+                pos = np.empty((all_count[ptype], 3), dtype="float64")
+                pos = data_file.ds.arr(pos, "code_length")
+                if ptype == "grid":
+                    dx = f["grid"]["pdx"].value.min()
+                else:
+                    dx = 2. * np.finfo(f[ptype]["particle_position_x"].dtype).eps
+                dx = self.ds.quan(dx, "code_length")
+                pos[:,0] = _get_position_array(ptype, f, "px")
+                pos[:,1] = _get_position_array(ptype, f, "py")
+                pos[:,2] = np.zeros(all_count[ptype], dtype="float64") + \
+                  self.ds.domain_left_edge[2].in_cgs().d
+                # These are 32 bit numbers, so we give a little leeway.
+                # Otherwise, for big sets of particles, we often bump into
+                # the domain edges.  This helps alleviate that.
+                np.clip(pos, self.ds.domain_left_edge + dx,
+                             self.ds.domain_right_edge - dx, pos)
+                if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > self.ds.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0),
+                                           pos.max(axis=0),
+                                           self.ds.domain_left_edge,
+                                           self.ds.domain_right_edge)
+                regions.add_data_file(pos, data_file.file_id)
+                morton[ind:ind+pos.shape[0]] = compute_morton(
+                    pos[:,0], pos[:,1], pos[:,2],
+                    data_file.ds.domain_left_edge,
+                    data_file.ds.domain_right_edge)
+                ind += pos.shape[0]
+        return morton
+
 def _get_position_array(ptype, f, ax):
     if ptype == "grid":
         pos_name = ""


https://bitbucket.org/yt_analysis/yt/commits/494404a422f7/
Changeset:   494404a422f7
Branch:      yt
User:        brittonsmith
Date:        2015-08-31 20:24:38+00:00
Summary:     Allowing ProjectionPlot to take a projection dataset.
Affected #:  2 files

diff -r 6037264f144bfe442d6149ba9a306655e42ceea7 -r 494404a422f7bcdc330d1808c1bd99b5c36e8323 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -122,6 +122,16 @@
         super(YTProjectionDataset, self).__init__(
             *args, dataset_type="ytprojection_hdf5", **kwargs)
 
+    def _parse_parameter_file(self):
+        super(YTProjectionDataset, self)._parse_parameter_file()
+        self.axis = self.parameters["axis"]
+        self.weight_field = self.parameters["weight_field"]
+        if isinstance(self.weight_field, str) and \
+          self.weight_field == "None":
+            self.weight_field = None
+        elif isinstance(self.weight_field, np.ndarray):
+            self.weight_field = tuple(self.weight_field)
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False

diff -r 6037264f144bfe442d6149ba9a306655e42ceea7 -r 494404a422f7bcdc330d1808c1bd99b5c36e8323 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -46,6 +46,8 @@
 from yt.extern.six.moves import \
     StringIO
 from yt.extern.six import string_types
+from yt.frontends.ytdata.data_structures import \
+    YTProjectionDataset
 from yt.funcs import \
     mylog, iterable, ensure_list, \
     fix_axis, fix_unitary
@@ -1426,9 +1428,18 @@
         (bounds, center, display_center) = \
                 get_window_parameters(axis, center, width, ds)
         if field_parameters is None: field_parameters = {}
-        proj = ds.proj(fields, axis, weight_field=weight_field,
-                       center=center, data_source=data_source,
-                       field_parameters = field_parameters, method = method)
+
+        if isinstance(ds, YTProjectionDataset):
+            proj = ds.all_data()
+            proj.axis = axis
+            if proj.axis != ds.axis:
+                raise RuntimeError("Original projection axis is %s." %
+                                   "xyz"[ds.parameters["axis"]])
+            proj.weight_field = proj._determine_fields(weight_field)[0]
+        else:
+            proj = ds.proj(fields, axis, weight_field=weight_field,
+                           center=center, data_source=data_source,
+                           field_parameters = field_parameters, method = method)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize, window_size=window_size, 
                              aspect=aspect)
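
In practice this enables a save, reload, and re-plot cycle.  A hedged
usage sketch, assuming the container to_dataset method added earlier in
this series (the dataset path comes from the docstring examples above;
the saved filename is whatever to_dataset returns):

    >>> import yt
    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    >>> proj = ds.proj("density", "x", weight_field="density")
    >>> fn = proj.to_dataset()
    >>> ds2 = yt.load(fn)   # loads as YTProjectionDataset
    >>> p = yt.ProjectionPlot(ds2, "x", "density", weight_field="density")
    >>> p.save()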


https://bitbucket.org/yt_analysis/yt/commits/5db3a5247fda/
Changeset:   5db3a5247fda
Branch:      yt
User:        brittonsmith
Date:        2015-09-02 14:21:29+00:00
Summary:     Making sure container_fields get saved.
Affected #:  1 file

diff -r 494404a422f7bcdc330d1808c1bd99b5c36e8323 -r 5db3a5247fda6e5e5a68d4b9be11cc3082244593 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -512,6 +512,10 @@
                 data[f] = self[f]
         else:
             data.update(self.field_data)
+        # get the extra fields needed to reconstruct the container
+        for f in [f for f in self._container_fields \
+                  if f not in data]:
+            data[f] = self[f]
         data_fields = data.keys()
 
         need_grid_positions = False
@@ -530,7 +534,8 @@
             else:
                 ftypes[field] = "grid"
                 need_grid_positions = True
-        if self._type_name == "proj":
+        # projections and slices use px and py, so don't need positions
+        if self._type_name in ["proj", "slice"]:
             need_grid_positions = False
 
         if need_particle_positions:


https://bitbucket.org/yt_analysis/yt/commits/337e67278550/
Changeset:   337e67278550
Branch:      yt
User:        brittonsmith
Date:        2015-09-02 14:22:22+00:00
Summary:     Generalizing the projection dataset type to spatialplot and adding a slice dataset type.
Affected #:  3 files

diff -r 5db3a5247fda6e5e5a68d4b9be11cc3082244593 -r 337e6727855002130591136ea3efbec990592ad7 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -25,7 +25,6 @@
 
 from .fields import \
     YTDataContainerFieldInfo, \
-    YTProjectionFieldInfo, \
     YTGridFieldInfo
 
 from yt.data_objects.grid_patch import \
@@ -116,11 +115,11 @@
         return False
 
 class YTProjectionDataset(YTDataContainerDataset):
-    _field_info_class = YTProjectionFieldInfo
+    _field_info_class = YTGridFieldInfo
 
     def __init__(self, *args, **kwargs):
         super(YTProjectionDataset, self).__init__(
-            *args, dataset_type="ytprojection_hdf5", **kwargs)
+            *args, dataset_type="ytspatialplot_hdf5", **kwargs)
 
     def _parse_parameter_file(self):
         super(YTProjectionDataset, self)._parse_parameter_file()
@@ -142,6 +141,27 @@
                 return True
         return False
 
+class YTSliceDataset(YTDataContainerDataset):
+    _field_info_class = YTGridFieldInfo
+
+    def __init__(self, *args, **kwargs):
+        super(YTSliceDataset, self).__init__(
+            *args, dataset_type="ytspatialplot_hdf5", **kwargs)
+
+    def _parse_parameter_file(self):
+        super(YTSliceDataset, self)._parse_parameter_file()
+        self.axis = self.parameters["axis"]
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_data_container" and \
+              f.attrs.get("container_type", None) == "slice":
+                return True
+        return False
+
 class YTGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, index):

diff -r 5db3a5247fda6e5e5a68d4b9be11cc3082244593 -r 337e6727855002130591136ea3efbec990592ad7 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -48,15 +48,6 @@
         (v_units, "particle_velocity_z"),
     )
 
-class YTProjectionFieldInfo(FieldInfoContainer):
-    known_other_fields = (
-    )
-
-    known_particle_fields = (
-        ("px", (p_units, ["particle_position_x"], None)),
-        ("py", (p_units, ["particle_position_y"], None)),
-    )
-
 class YTGridFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )

diff -r 5db3a5247fda6e5e5a68d4b9be11cc3082244593 -r 337e6727855002130591136ea3efbec990592ad7 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -239,8 +239,8 @@
                                    for field in f[ptype]]))
         return fields, units
 
-class IOHandlerYTProjectionHDF5(IOHandlerYTDataContainerHDF5):
-    _dataset_type = "ytprojection_hdf5"
+class IOHandlerSpatialPlotHDF5(IOHandlerYTDataContainerHDF5):
+    _dataset_type = "ytspatialplot_hdf5"
 
     def _read_particle_coords(self, chunks, ptf):
         # This will read chunks and yield the results.
@@ -298,7 +298,7 @@
                 if ptype == "grid":
                     dx = f["grid"]["pdx"].value.min()
                 else:
-                    dx = 2. * np.finfo(f[ptype]["particle_position_x"].dtype).eps
+                    raise NotImplementedError
                 dx = self.ds.quan(dx, "code_length")
                 pos[:,0] = _get_position_array(ptype, f, "px")
                 pos[:,1] = _get_position_array(ptype, f, "py")


https://bitbucket.org/yt_analysis/yt/commits/dd66b37bd4c8/
Changeset:   dd66b37bd4c8
Branch:      yt
User:        brittonsmith
Date:        2015-09-03 15:18:24+00:00
Summary:     Generalizing to a single spatial plot dataset type instead of separate projection, slice, and cutting datasets.
Affected #:  3 files

diff -r 337e6727855002130591136ea3efbec990592ad7 -r dd66b37bd4c8b042a2425a04b9369c236d240dff yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -535,7 +535,7 @@
                 ftypes[field] = "grid"
                 need_grid_positions = True
         # projections and slices use px and py, so don't need positions
-        if self._type_name in ["proj", "slice"]:
+        if self._type_name in ["cutting", "proj", "slice"]:
             need_grid_positions = False
 
         if need_particle_positions:

diff -r 337e6727855002130591136ea3efbec990592ad7 -r dd66b37bd4c8b042a2425a04b9369c236d240dff yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -114,22 +114,22 @@
                 return True
         return False
 
-class YTProjectionDataset(YTDataContainerDataset):
+class YTSpatialPlotDataset(YTDataContainerDataset):
     _field_info_class = YTGridFieldInfo
 
     def __init__(self, *args, **kwargs):
-        super(YTProjectionDataset, self).__init__(
+        super(YTSpatialPlotDataset, self).__init__(
             *args, dataset_type="ytspatialplot_hdf5", **kwargs)
 
     def _parse_parameter_file(self):
-        super(YTProjectionDataset, self)._parse_parameter_file()
-        self.axis = self.parameters["axis"]
-        self.weight_field = self.parameters["weight_field"]
-        if isinstance(self.weight_field, str) and \
-          self.weight_field == "None":
-            self.weight_field = None
-        elif isinstance(self.weight_field, np.ndarray):
-            self.weight_field = tuple(self.weight_field)
+        super(YTSpatialPlotDataset, self)._parse_parameter_file()
+        if self.parameters["container_type"] == "proj":
+            if isinstance(self.parameters["weight_field"], str) and \
+              self.parameters["weight_field"] == "None":
+                self.parameters["weight_field"] = None
+            elif isinstance(self.parameters["weight_field"], np.ndarray):
+                self.parameters["weight_field"] = \
+                  tuple(self.parameters["weight_field"])
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -137,28 +137,8 @@
         with h5py.File(args[0], "r") as f:
             data_type = f.attrs.get("data_type", None)
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) == "proj":
-                return True
-        return False
-
-class YTSliceDataset(YTDataContainerDataset):
-    _field_info_class = YTGridFieldInfo
-
-    def __init__(self, *args, **kwargs):
-        super(YTSliceDataset, self).__init__(
-            *args, dataset_type="ytspatialplot_hdf5", **kwargs)
-
-    def _parse_parameter_file(self):
-        super(YTSliceDataset, self)._parse_parameter_file()
-        self.axis = self.parameters["axis"]
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        if not args[0].endswith(".h5"): return False
-        with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
-            if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) == "slice":
+              f.attrs.get("container_type", None) in \
+              ["cutting", "proj", "slice"]:
                 return True
         return False
 

diff -r 337e6727855002130591136ea3efbec990592ad7 -r dd66b37bd4c8b042a2425a04b9369c236d240dff yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -47,7 +47,7 @@
     StringIO
 from yt.extern.six import string_types
 from yt.frontends.ytdata.data_structures import \
-    YTProjectionDataset
+    YTSpatialPlotDataset
 from yt.funcs import \
     mylog, iterable, ensure_list, \
     fix_axis, fix_unitary
@@ -1266,9 +1266,16 @@
             get_window_parameters(axis, center, width, ds)
         if field_parameters is None:
             field_parameters = {}
-        slc = ds.slice(axis, center[axis], field_parameters=field_parameters,
-                       center=center, data_source=data_source)
-        slc.get_data(fields)
+
+        if isinstance(ds, YTSpatialPlotDataset):
+            slc = ds.all_data()
+            slc.axis = slc.ds.parameters["axis"]
+            if slc.axis != axis:
+                raise RuntimeError("Original slice axis is %s." % slc.axis)
+        else:
+            slc = ds.slice(axis, center[axis], field_parameters=field_parameters,
+                           center=center, data_source=data_source)
+            slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin,
                              fontsize=fontsize, fields=fields,
                              window_size=window_size, aspect=aspect)
@@ -1429,12 +1436,12 @@
                 get_window_parameters(axis, center, width, ds)
         if field_parameters is None: field_parameters = {}
 
-        if isinstance(ds, YTProjectionDataset):
+        if isinstance(ds, YTSpatialPlotDataset):
             proj = ds.all_data()
             proj.axis = axis
             if proj.axis != ds.axis:
                 raise RuntimeError("Original projection axis is %s." %
-                                   "xyz"[ds.parameters["axis"]])
+                                   ds.parameters["axis"])
             proj.weight_field = proj._determine_fields(weight_field)[0]
         else:
             proj = ds.proj(fields, axis, weight_field=weight_field,
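
The weight_field handling above reverses two HDF5 round-trip artifacts:
None was saved as the string "None", and a (ftype, fname) tuple comes
back as a numpy array.  The same conversion in isolation (the
_restore_weight_field helper is illustrative, not part of the frontend):

    import numpy as np

    def _restore_weight_field(val):
        if isinstance(val, str) and val == "None":
            return None              # sentinel string -> None
        if isinstance(val, np.ndarray):
            return tuple(val)        # array(['gas', 'density']) -> tuple
        return val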


https://bitbucket.org/yt_analysis/yt/commits/52212831ae75/
Changeset:   52212831ae75
Branch:      yt
User:        brittonsmith
Date:        2015-09-03 21:45:23+00:00
Summary:     Fixing axis checking and starting work on cutting planes.
Affected #:  1 file

diff -r dd66b37bd4c8b042a2425a04b9369c236d240dff -r 52212831ae75d73eddbaed3bd890fadfea9cbb9c yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1269,9 +1269,10 @@
 
         if isinstance(ds, YTSpatialPlotDataset):
             slc = ds.all_data()
-            slc.axis = slc.ds.parameters["axis"]
-            if slc.axis != axis:
-                raise RuntimeError("Original slice axis is %s." % slc.axis)
+            slc.axis = axis
+            if slc.axis != ds.parameters["axis"]:
+                raise RuntimeError("Original slice axis is %s." %
+                                   ds.parameters["axis"])
         else:
             slc = ds.slice(axis, center[axis], field_parameters=field_parameters,
                            center=center, data_source=data_source)
@@ -1439,7 +1440,7 @@
         if isinstance(ds, YTSpatialPlotDataset):
             proj = ds.all_data()
             proj.axis = axis
-            if proj.axis != ds.axis:
+            if proj.axis != ds.parameters["axis"]:
                 raise RuntimeError("Original projection axis is %s." %
                                    ds.parameters["axis"])
             proj.weight_field = proj._determine_fields(weight_field)[0]
@@ -1531,10 +1532,15 @@
         (bounds, center_rot) = get_oblique_window_parameters(normal,center,width,ds)
         if field_parameters is None:
             field_parameters = {}
-        cutting = ds.cutting(normal, center, north_vector=north_vector,
-                             field_parameters=field_parameters,
-                             data_source=data_source)
-        cutting.get_data(fields)
+
+        if isinstance(ds, YTSpatialPlotDataset):
+            cutting = ds.all_data()
+            cutting.axis = 4
+        else:
+            cutting = ds.cutting(normal, center, north_vector=north_vector,
+                                 field_parameters=field_parameters,
+                                 data_source=data_source)
+            cutting.get_data(fields)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self, cutting, bounds, fields=fields,


https://bitbucket.org/yt_analysis/yt/commits/2c0d47529a94/
Changeset:   2c0d47529a94
Branch:      yt
User:        brittonsmith
Date:        2015-09-09 19:15:14+00:00
Summary:     Adding some attributes to make sure all necessary info is saved for recreating cutting plane images.
Affected #:  3 files

diff -r 52212831ae75d73eddbaed3bd890fadfea9cbb9c -r 2c0d47529a94c433911356f3076cca9ab014b55b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -103,6 +103,7 @@
     _con_args = ()
     _skip_add = False
     _container_fields = ()
+    _tds_fields = ()
     _field_cache = None
     _index = None
 
@@ -513,7 +514,8 @@
         else:
             data.update(self.field_data)
         # get the extra fields needed to reconstruct the container
-        for f in [f for f in self._container_fields \
+        tds_fields = tuple(self._determine_fields(list(self._tds_fields)))
+        for f in [f for f in self._container_fields + tds_fields \
                   if f not in data]:
             data[f] = self[f]
         data_fields = data.keys()
@@ -560,7 +562,7 @@
                     data[g_field] = self[g_field]
 
         extra_attrs = dict([(arg, getattr(self, arg, None))
-                            for arg in self._con_args])
+                            for arg in self._con_args + self._tds_attrs])
         extra_attrs["data_type"] = "yt_data_container"
         extra_attrs["container_type"] = self._type_name
         extra_attrs["dimensionality"] = self._dimensionality

diff -r 52212831ae75d73eddbaed3bd890fadfea9cbb9c -r 2c0d47529a94c433911356f3076cca9ab014b55b yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -347,6 +347,8 @@
     _key_fields = YTSelectionContainer2D._key_fields + ['pz','pdz']
     _type_name = "cutting"
     _con_args = ('normal', 'center')
+    _tds_attrs = ("_inv_mat",)
+    _tds_fields = ("x", "y", "z", "dx")
     _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
     def __init__(self, normal, center, north_vector=None,
                  ds=None, field_parameters=None, data_source=None):

diff -r 52212831ae75d73eddbaed3bd890fadfea9cbb9c -r 2c0d47529a94c433911356f3076cca9ab014b55b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1536,6 +1536,7 @@
         if isinstance(ds, YTSpatialPlotDataset):
             cutting = ds.all_data()
             cutting.axis = 4
+            cutting._inv_mat = ds.parameters["_inv_mat"]
         else:
             cutting = ds.cutting(normal, center, north_vector=north_vector,
                                  field_parameters=field_parameters,


https://bitbucket.org/yt_analysis/yt/commits/03aee494bce6/
Changeset:   03aee494bce6
Branch:      yt
User:        brittonsmith
Date:        2015-09-09 20:08:13+00:00
Summary:     Make sure the _tds_attrs attribute always exists.
Affected #:  1 file

diff -r 2c0d47529a94c433911356f3076cca9ab014b55b -r 03aee494bce633287159eeef8b3b5ef8d62599a1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -103,6 +103,7 @@
     _con_args = ()
     _skip_add = False
     _container_fields = ()
+    _tds_attrs = ()
     _tds_fields = ()
     _field_cache = None
     _index = None


https://bitbucket.org/yt_analysis/yt/commits/6bc9540cf2bb/
Changeset:   6bc9540cf2bb
Branch:      yt
User:        brittonsmith
Date:        2015-09-09 20:28:39+00:00
Summary:     Make sure to save dimensionality.
Affected #:  1 file

diff -r 03aee494bce633287159eeef8b3b5ef8d62599a1 -r 6bc9540cf2bb730cb32ddcaa531ee004b9f6be71 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -519,11 +519,13 @@
             fh.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
             fh.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
             fh.attrs["cosmological_simulation"] = ds.cosmological_simulation
+            fh.attrs["dimensionality"] = ds.dimensionality
         else:
             fh.attrs["current_redshift"] = self.near_redshift
             fh.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
             fh.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
             fh.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
+            fh.attrs["dimensionality"] = self.simulation.dimensionality
         fh.attrs["current_time"] = self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
         fh.attrs["data_type"] = "yt_light_ray"
         group = fh.create_group("grid")


https://bitbucket.org/yt_analysis/yt/commits/5037a2dbf3f3/
Changeset:   5037a2dbf3f3
Branch:      yt
User:        brittonsmith
Date:        2015-09-11 18:38:02+00:00
Summary:     Save dataset dimensionality.
Affected #:  1 file

diff -r 6bc9540cf2bb730cb32ddcaa531ee004b9f6be71 -r 5037a2dbf3f35a58c778c6f416210b50cbe6266a yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -78,7 +78,8 @@
     mylog.info("Saving field data to yt dataset: %s." % filename)
 
     if extra_attrs is None: extra_attrs = {}
-    base_attrs  = ["domain_left_edge", "domain_right_edge",
+    base_attrs  = ["dimensionality",
+                   "domain_left_edge", "domain_right_edge",
                    "current_redshift", "current_time",
                    "domain_dimensions", "periodicity",
                    "cosmological_simulation", "omega_lambda",


https://bitbucket.org/yt_analysis/yt/commits/727861c6b20c/
Changeset:   727861c6b20c
Branch:      yt
User:        brittonsmith
Date:        2015-09-14 12:29:38+00:00
Summary:     Adding to_dataset function for frbs.
Affected #:  1 file

diff -r 5037a2dbf3f35a58c778c6f416210b50cbe6266a -r 727861c6b20cba4a218f71defe6a170ec86dff27 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -13,7 +13,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import mylog
+from yt.frontends.ytdata.utilities import \
+    to_yt_dataset
+from yt.funcs import \
+    get_output_filename, \
+    mylog
 from yt.units.unit_object import Unit
 from .volume_rendering.api import off_axis_projection
 from .fixed_resolution_filters import apply_filter, filter_registry
@@ -379,6 +383,71 @@
                                  geometry=self.ds.geometry,
                                  nprocs=nprocs)
 
+    def to_dataset(self, filename=None, fields=None):
+        r"""Export a fixed resolution buffer to a reloadable yt dataset.
+
+        This function will take a fixed resolution buffer and output a 
+        dataset containing either the fields presently existing or fields 
+        given in a list.  The resulting dataset can be reloaded as 
+        a yt dataset.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the file to be written.  If None, the name 
+            will be a combination of the original dataset and the type 
+            of data container.
+        fields : list of strings or tuples, default None
+            If this is supplied, it is the list of fields to be exported into
+            the dataset.  If not supplied, whatever fields presently exist
+            will be used.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> proj = ds.proj("density", "x", weight_field="density")
+        >>> frb = proj.to_frb(1.0, (800, 800))
+        >>> fn = frb.to_dataset(fields=["density"])
+        >>> ds2 = yt.load(fn)
+        """
+
+        keyword = "%s_%s_frb" % (str(self.ds), self.data_source._type_name)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        data = {}
+        if fields is not None:
+            for f in self.data_source._determine_fields(fields):
+                data[f] = self[f]
+        else:
+            data.update(self.data)
+
+        data["dx"] = np.ones(self.buff_size) * \
+          (self.bounds[1] - self.bounds[0]) / self.buff_size[0]
+        data["dy"] = np.ones(self.buff_size) * \
+          (self.bounds[3] - self.bounds[2]) / self.buff_size[1]
+        x, y = np.mgrid[0.5:self.buff_size[0]:1.,
+                        0.5:self.buff_size[1]:1.]
+        data["x"] = x * data["dx"][0][0]
+        data["y"] = y * data["dy"][0][0]
+
+        ftypes = dict([(field, "grid") for field in data])
+        extra_attrs = dict([(arg, getattr(self, arg, None))
+                            for arg in self.data_source._con_args +
+                            self.data_source._tds_attrs])
+        extra_attrs["data_type"] = "yt_frb"
+        extra_attrs["container_type"] = self.data_source._type_name
+        extra_attrs["dimensionality"] = self.data_source._dimensionality
+        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
+                      extra_attrs=extra_attrs)
+
+        return filename
+
     @property
     def limits(self):
         rv = dict(x = None, y = None, z = None)
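
The x and y arrays written above are pixel-centered coordinates: mgrid
with a 0.5 offset gives cell centers in pixel units, which are then
scaled by the pixel width.  A standalone illustration for a hypothetical
4x4 buffer on [0, 1]:

    import numpy as np

    buff_size = (4, 4)
    bounds = (0.0, 1.0, 0.0, 1.0)                 # xmin, xmax, ymin, ymax
    dx = (bounds[1] - bounds[0]) / buff_size[0]   # pixel width: 0.25
    x, y = np.mgrid[0.5:buff_size[0]:1., 0.5:buff_size[1]:1.]
    print(x[:, 0] * dx)   # [ 0.125  0.375  0.625  0.875] -- pixel centers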


https://bitbucket.org/yt_analysis/yt/commits/4278e72fd389/
Changeset:   4278e72fd389
Branch:      yt
User:        brittonsmith
Date:        2015-09-14 14:05:12+00:00
Summary:     Adding attributes and dimension padding so frb datasets work correctly with data selection.
Affected #:  3 files

diff -r 727861c6b20cba4a218f71defe6a170ec86dff27 -r 4278e72fd389c1c1c3e37279f4978525cd40de82 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -236,18 +236,26 @@
         self.base_domain_left_edge = self.domain_left_edge
         self.base_domain_right_edge = self.domain_right_edge
         self.base_domain_dimensions = self.domain_dimensions
-        dx = (self.domain_right_edge - self.domain_left_edge) / \
-          (self.domain_dimensions * self.refine_by**self.level)
-        self.domain_left_edge = self.left_edge
-        self.domain_right_edge = self.domain_left_edge + \
-          self.ActiveDimensions * dx
-        self.domain_dimensions = self.ActiveDimensions
-        self.periodicity = \
-          np.abs(self.domain_left_edge -
-                 self.base_domain_left_edge) < 0.5 * dx
-        self.periodicity &= \
-        np.abs(self.domain_right_edge -
-               self.base_domain_right_edge) < 0.5 * dx
+        if self.container_type in _grid_data_containers:
+            dx = (self.domain_right_edge - self.domain_left_edge) / \
+              (self.domain_dimensions * self.refine_by**self.level)
+            self.domain_left_edge = self.left_edge
+            self.domain_right_edge = self.domain_left_edge + \
+              self.ActiveDimensions * dx
+            self.domain_dimensions = self.ActiveDimensions
+            self.periodicity = \
+              np.abs(self.domain_left_edge -
+                     self.base_domain_left_edge) < 0.5 * dx
+            self.periodicity &= \
+            np.abs(self.domain_right_edge -
+                   self.base_domain_right_edge) < 0.5 * dx
+        elif self.data_type == "yt_frb":
+            self.domain_left_edge = \
+              np.concatenate([self.left_edge, [0.]])
+            self.domain_right_edge = \
+              np.concatenate([self.right_edge, [1.]])
+            self.domain_dimensions = \
+              np.concatenate([self.ActiveDimensions, [1]])
 
     def __repr__(self):
         return "ytGrid: %s" % self.parameter_filename
@@ -284,6 +292,8 @@
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
             data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_frb":
+                return True
             if data_type == "yt_data_container" and \
               f.attrs.get("container_type", None) in \
               _grid_data_containers:

diff -r 727861c6b20cba4a218f71defe6a170ec86dff27 -r 4278e72fd389c1c1c3e37279f4978525cd40de82 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -89,7 +89,10 @@
                         continue
                     self._misses += 1
                     ftype, fname = field
+                    # make input data 3D if less than 3D
                     data = f[ftype][fname].value.astype(self._field_dtype)
+                    for dim in range(len(data.shape), 3):
+                        data = np.expand_dims(data, dim)
                     if self._cache_on:
                         self._cached_fields.setdefault(g.id, {})
                         self._cached_fields[g.id][field] = data

diff -r 727861c6b20cba4a218f71defe6a170ec86dff27 -r 4278e72fd389c1c1c3e37279f4978525cd40de82 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -427,19 +427,16 @@
         else:
             data.update(self.data)
 
-        data["dx"] = np.ones(self.buff_size) * \
-          (self.bounds[1] - self.bounds[0]) / self.buff_size[0]
-        data["dy"] = np.ones(self.buff_size) * \
-          (self.bounds[3] - self.bounds[2]) / self.buff_size[1]
-        x, y = np.mgrid[0.5:self.buff_size[0]:1.,
-                        0.5:self.buff_size[1]:1.]
-        data["x"] = x * data["dx"][0][0]
-        data["y"] = y * data["dy"][0][0]
-
         ftypes = dict([(field, "grid") for field in data])
         extra_attrs = dict([(arg, getattr(self, arg, None))
                             for arg in self.data_source._con_args +
                             self.data_source._tds_attrs])
+        extra_attrs["left_edge"] = self.ds.arr([self.bounds[0],
+                                                self.bounds[2]])
+        extra_attrs["right_edge"] = self.ds.arr([self.bounds[1],
+                                                 self.bounds[3]])
+        extra_attrs["ActiveDimensions"] = self.buff_size
+        extra_attrs["level"] = 0
         extra_attrs["data_type"] = "yt_frb"
         extra_attrs["container_type"] = self.data_source._type_name
         extra_attrs["dimensionality"] = self.data_source._dimensionality


https://bitbucket.org/yt_analysis/yt/commits/ba3f3cb429d3/
Changeset:   ba3f3cb429d3
Branch:      yt
User:        brittonsmith
Date:        2015-09-15 07:22:02+00:00
Summary:     Fixing a couple of issues in light ray.
Affected #:  1 file

diff -r 4278e72fd389c1c1c3e37279f4978525cd40de82 -r ba3f3cb429d3d8edccca6b83753eebccace671ce yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -172,15 +172,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            if isinstance(start_position, np.ndarray):
-                self.light_ray_solution[0]['start'] = start_position
-            else:
-                self.light_ray_solution[0]['start'] = np.array(start_position)
+            self.light_ray_solution[0]['start'] = np.asarray(start_position)
             if end_position is not None:
-                if isinstance(end_position, np.ndarray):
-                    self.light_ray_solution[0]['end'] = end_position
-                else:
-                    self.light_ray_solution[0]['end'] = np.array(end_position)
+                self.light_ray_solution[0]['end'] = np.asarray(end_position)
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -513,7 +507,7 @@
         fh = h5py.File(filename, "w")
         for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
             fh.attrs[attr] = getattr(self.cosmology, attr)
-        if self.simulation_type == None:
+        if self.simulation_type is None:
             ds = load(self.parameter_filename, **self.load_kwargs)
             fh.attrs["current_redshift"] = ds.current_redshift
             fh.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()


https://bitbucket.org/yt_analysis/yt/commits/675e79c694a2/
Changeset:   675e79c694a2
Branch:      yt
User:        brittonsmith
Date:        2015-09-15 08:19:04+00:00
Summary:     Fix comment.
Affected #:  1 file

diff -r ba3f3cb429d3d8edccca6b83753eebccace671ce -r 675e79c694a2a95d75c53f8dde567a04a816ac5f yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -89,7 +89,7 @@
                         continue
                     self._misses += 1
                     ftype, fname = field
-                    # make input data 3D if less than 3D
+                    # add extra dimensions to make data 3D
                     data = f[ftype][fname].value.astype(self._field_dtype)
                     for dim in range(len(data.shape), 3):
                         data = np.expand_dims(data, dim)
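
The reworded comment describes a padding loop: trailing axes of length
one are appended until the array is three-dimensional.  A standalone
demonstration of the same idiom:

    import numpy as np

    data = np.arange(12.0).reshape(3, 4)     # 2D input from disk
    for dim in range(len(data.shape), 3):    # pad trailing axes up to 3D
        data = np.expand_dims(data, dim)
    print(data.shape)                        # (3, 4, 1)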


https://bitbucket.org/yt_analysis/yt/commits/9b6f7f8c3175/
Changeset:   9b6f7f8c3175
Branch:      yt
User:        brittonsmith
Date:        2015-09-15 09:16:04+00:00
Summary:     Adding frb shortcut for frb datasets.
Affected #:  1 file

diff -r 675e79c694a2a95d75c53f8dde567a04a816ac5f -r 9b6f7f8c31759a15054b38cf8188ba86c07208a4 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -144,14 +144,25 @@
 
 class YTGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, id, index):
-        AMRGridPatch.__init__(self, id, filename=None, index=index)
+    def __init__(self, id, index, filename=None):
+        AMRGridPatch.__init__(self, id, filename=filename, index=index)
         self._children_ids = []
         self._parent_id = -1
         self.Level = 0
         self.LeftEdge = self.index.ds.domain_left_edge
         self.RightEdge = self.index.ds.domain_right_edge
 
+    def __getitem__(self, key):
+        tr = super(YTGrid, self).__getitem__(key)
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.ds._get_field_info(*fields[0])
+        if not finfo.particle_type:
+            return tr.reshape(self.ActiveDimensions[:self.ds.dimensionality])
+        return tr
+
     @property
     def Parent(self):
         return None
@@ -220,6 +231,9 @@
     def __init__(self, filename):
         Dataset.__init__(self, filename, self._dataset_type)
 
+        if self.data_type == "yt_frb":
+            self.frb = self.index.grids[0]
+
     def _parse_parameter_file(self):
         self.refine_by = 2
         self.unique_identifier = time.time()
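
The new __getitem__ returns fluid fields reshaped to the grid's active
dimensions, truncated to the dataset's dimensionality.  A sketch of
that reshape for a 2D fixed-resolution buffer, with hypothetical
stand-ins for the grid attributes:

    import numpy as np

    ActiveDimensions = np.array([800, 800, 1])   # hypothetical
    dimensionality = 2                           # hypothetical

    flat = np.zeros(800 * 800)                   # field as read from disk
    field = flat.reshape(ActiveDimensions[:dimensionality])
    print(field.shape)                           # (800, 800)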


https://bitbucket.org/yt_analysis/yt/commits/b8e2a81c8908/
Changeset:   b8e2a81c8908
Branch:      yt
User:        brittonsmith
Date:        2015-09-19 09:26:27+00:00
Summary:     Adding to_dataset method for profiles.
Affected #:  1 file

diff -r 9b6f7f8c31759a15054b38cf8188ba86c07208a4 -r b8e2a81c890852bee329e452e932fc21d75f9237 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -16,8 +16,10 @@
 import h5py
 import numpy as np
 
+from yt.frontends.ytdata.utilities import \
+    to_yt_dataset
+from yt.funcs import get_output_filename
 from yt.funcs import *
-
 from yt.units.yt_array import uconcatenate, array_like_field
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
@@ -949,6 +951,64 @@
         else:
             return np.linspace(mi, ma, n+1)
 
+    def to_dataset(self, filename=None):
+        r"""Export a data object to a reloadable yt dataset.
+
+        This function will take a profile and output a dataset
+        containing the profile's bin and field data.  The resulting
+        dataset can be reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str
+            The name of the file to be written.  If None, the name
+            will be a combination of the original dataset plus
+            "profile".
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> dd = ds.all_data()
+        >>> fn1 = dd.to_dataset(["density", "temperature"])
+        >>> ds1 = yt.load(fn1)
+        >>> dd["velocity_magnitude"]
+        >>> fn2 = dd.to_dataset()
+        >>> ds2 = yt.load(fn2)
+        """
+
+        keyword = "%s_profile" % str(self.ds)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        args = ("field", "log")
+        extra_attrs = {"data_type": "yt_profile"}
+        data = {}
+        data.update(self.field_data)
+        data["weight"] = self.weight
+        data["used"] = self.used
+
+        dimensionality = 0
+        for ax in "xyz":
+            if hasattr(self, ax):
+                dimensionality += 1
+                data[ax] = getattr(self, ax)
+                data["%s_bins" % ax] = getattr(self, "%s_bins" % ax)
+                for arg in args:
+                    key = "%s_%s" % (ax, arg)
+                    extra_attrs[key] = getattr(self, key)
+
+        extra_attrs["dimensionality"] = dimensionality
+        ftypes = dict([(field, "data") for field in data])
+        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
+                      extra_attrs=extra_attrs)
+
+        return filename
+
 class Profile1D(ProfileND):
     """An object that represents a 1D profile.
 


https://bitbucket.org/yt_analysis/yt/commits/c6eff1294306/
Changeset:   c6eff1294306
Branch:      yt
User:        brittonsmith
Date:        2015-09-23 15:57:15+00:00
Summary:     Adding non-spatial dataset, index, grid object, and io.
Affected #:  2 files

diff -r b8e2a81c890852bee329e452e932fc21d75f9237 -r c6eff1294306b99772984a874f6f3309909840bb yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -14,6 +14,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+from collections import \
+    defaultdict
 import h5py
 from numbers import \
     Number as numeric_type
@@ -41,7 +43,10 @@
     ParticleIndex
 from yt.utilities.logger import \
     ytLogger as mylog
-from yt.utilities.cosmology import Cosmology
+from yt.utilities.cosmology import \
+    Cosmology
+from yt.utilities.exceptions import \
+    YTFieldTypeNotFound
 import yt.utilities.fortran_utils as fpu
 from yt.units.yt_array import \
     YTArray, \
@@ -144,8 +149,8 @@
 
 class YTGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, id, index, filename=None):
-        AMRGridPatch.__init__(self, id, filename=filename, index=index)
+    def __init__(self, gid, index, filename=None):
+        AMRGridPatch.__init__(self, gid, filename=filename, index=index)
         self._children_ids = []
         self._parent_id = -1
         self.Level = 0
@@ -153,7 +158,7 @@
         self.RightEdge = self.index.ds.domain_right_edge
 
     def __getitem__(self, key):
-        tr = super(YTGrid, self).__getitem__(key)
+        tr = super(AMRGridPatch, self).__getitem__(key)
         try:
             fields = self._determine_fields(key)
         except YTFieldTypeNotFound:
@@ -192,9 +197,9 @@
         self.grid_procs = np.zeros(self.num_grids)
         self.grid_particle_count[:] = sum(self.ds.num_particles.values())
         self.grids = []
-        for id in range(self.num_grids):
-            self.grids.append(self.grid(id, self))
-            self.grids[id].Level = self.grid_levels[id, 0]
+        for gid in range(self.num_grids):
+            self.grids.append(self.grid(gid, self))
+            self.grids[gid].Level = self.grid_levels[gid, 0]
         self.max_level = self.grid_levels.max()
         temp_grids = np.empty(self.num_grids, dtype='object')
         for i, grid in enumerate(self.grids):
@@ -313,3 +318,225 @@
               _grid_data_containers:
                 return True
         return False
+
+class YTNonSpatialGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, gid, index, filename=None):
+        AMRGridPatch.__init__(self, gid, filename=filename, index=index)
+        self._children_ids = []
+        self._parent_id = -1
+        self.Level = 0
+        self.LeftEdge = self.index.ds.domain_left_edge
+        self.RightEdge = self.index.ds.domain_right_edge
+
+    def __getitem__(self, key):
+        tr = super(AMRGridPatch, self).__getitem__(key)
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.ds._get_field_info(*fields[0])
+        return tr
+
+    def get_data(self, fields=None):
+        if fields is None: return
+        nfields = []
+        apply_fields = defaultdict(list)
+        for field in self._determine_fields(fields):
+            if field[0] in self.ds.filtered_particle_types:
+                f = self.ds.known_filters[field[0]]
+                apply_fields[field[0]].append(
+                    (f.filtered_type, field[1]))
+            else:
+                nfields.append(field)
+        for filter_type in apply_fields:
+            f = self.ds.known_filters[filter_type]
+            with f.apply(self):
+                self.get_data(apply_fields[filter_type])
+        fields = nfields
+        if len(fields) == 0: return
+        # Now we collect all our fields
+        # Here is where we need to perform a validation step, so that if we
+        # have a field requested that we actually *can't* yet get, we put it
+        # off until the end.  This prevents double-reading fields that will
+        # need to be used in spatial fields later on.
+        fields_to_get = []
+        # This will be pre-populated with spatial fields
+        fields_to_generate = []
+        for field in self._determine_fields(fields):
+            if field in self.field_data: continue
+            finfo = self.ds._get_field_info(*field)
+            try:
+                finfo.check_available(self)
+            except NeedsGridType:
+                fields_to_generate.append(field)
+                continue
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
+            return
+        elif self._locked == True:
+            raise GenerationInProgress(fields)
+        # Track which ones we want in the end
+        ofields = set(list(self.field_data.keys())
+                    + fields_to_get
+                    + fields_to_generate)
+        # At this point, we want to figure out *all* our dependencies.
+        fields_to_get = self._identify_dependencies(fields_to_get,
+            self._spatial)
+        # We now split up into readers for the types of fields
+        fluids, particles = [], []
+        finfos = {}
+        for ftype, fname in fields_to_get:
+            finfo = self.ds._get_field_info(ftype, fname)
+            finfos[ftype, fname] = finfo
+            if finfo.particle_type:
+                particles.append((ftype, fname))
+            elif (ftype, fname) not in fluids:
+                fluids.append((ftype, fname))
+        # The _read method will figure out which fields it needs to get from
+        # disk, and return a dict of those fields along with the fields that
+        # need to be generated.
+        read_fluids, gen_fluids = self.index._read_fluid_fields(
+                                        fluids, self, self._current_chunk)
+        for f, v in read_fluids.items():
+            self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
+            self.field_data[f].convert_to_units(finfos[f].output_units)
+
+        read_particles, gen_particles = self.index._read_particle_fields(
+                                        particles, self, self._current_chunk)
+        for f, v in read_particles.items():
+            self.field_data[f] = self.ds.arr(v, input_units = finfos[f].units)
+            self.field_data[f].convert_to_units(finfos[f].output_units)
+
+        fields_to_generate += gen_fluids + gen_particles
+        self._generate_fields(fields_to_generate)
+        for field in list(self.field_data.keys()):
+            if field not in ofields:
+                self.field_data.pop(field)
+
+class YTNonSpatialHierarchy(GridIndex):
+    grid = YTNonSpatialGrid
+
+    def __init__(self, ds, dataset_type = None):
+        self.dataset_type = dataset_type
+        self.float_type = 'float64'
+        self.dataset = weakref.proxy(ds)
+        self.directory = os.getcwd()
+        GridIndex.__init__(self, ds, dataset_type)
+
+    def _count_grids(self):
+        self.num_grids = 1
+
+    def _parse_index(self):
+        self.grid_dimensions[:] = self.ds.domain_dimensions
+        self.grid_left_edge[:] = self.ds.domain_left_edge
+        self.grid_right_edge[:] = self.ds.domain_right_edge
+        self.grid_levels[:] = np.zeros(self.num_grids)
+        self.grid_procs = np.zeros(self.num_grids)
+        self.grid_particle_count[:] = sum(self.ds.num_particles.values())
+        self.grids = []
+        for gid in range(self.num_grids):
+            self.grids.append(self.grid(gid, self))
+            self.grids[gid].Level = self.grid_levels[gid, 0]
+        self.max_level = self.grid_levels.max()
+        temp_grids = np.empty(self.num_grids, dtype='object')
+        for i, grid in enumerate(self.grids):
+            grid.filename = self.ds.parameter_filename
+            grid._prepare_grid()
+            grid.proc_num = self.grid_procs[i]
+            temp_grids[i] = grid
+        self.grids = temp_grids
+
+    def _populate_grid_objects(self):
+        self.max_level = self.grid_levels.max()
+
+    def _detect_output_fields(self):
+        self.field_list = []
+        self.ds.field_units = self.ds.field_units or {}
+        with h5py.File(self.ds.parameter_filename, "r") as f:
+            for group in f:
+                for field in f[group]:
+                    field_name = (str(group), str(field))
+                    self.field_list.append(field_name)
+                    self.ds.field_units[field_name] = \
+                      f[group][field].attrs["units"]
+
+    def _read_fluid_fields(self, fields, dobj, chunk = None):
+        if len(fields) == 0: return {}, []
+        fields_to_read, fields_to_generate = self._split_fields(fields)
+        if len(fields_to_read) == 0:
+            return {}, fields_to_generate
+        selector = dobj.selector
+        fields_to_return = self.io._read_fluid_selection(
+            dobj,
+            selector,
+            fields_to_read)
+        return fields_to_return, fields_to_generate
+
+class YTProfileDataset(YTGridDataset):
+    _index_class = YTNonSpatialHierarchy
+    _field_info_class = YTGridFieldInfo
+    _dataset_type = 'ytprofilehdf5'
+    geometry = "cartesian"
+    default_fluid_type = "data"
+    fluid_types = ("data")
+
+    def __init__(self, filename):
+        super(YTProfileDataset, self).__init__(filename)
+
+        self.data = YTNonSpatialGrid(0, self.index, self.parameter_filename)
+
+    def _parse_parameter_file(self):
+        self.refine_by = 2
+        self.unique_identifier = time.time()
+        with h5py.File(self.parameter_filename, "r") as f:
+            for attr, value in f.attrs.items():
+                setattr(self, attr, value)
+            self.num_particles = \
+              dict([(group, f[group].attrs["num_elements"])
+                    for group in f if group != self.default_fluid_type])
+        self.particle_types_raw = tuple(self.num_particles.keys())
+        self.particle_types = self.particle_types_raw
+
+        self.base_domain_left_edge = self.domain_left_edge
+        self.base_domain_right_edge = self.domain_right_edge
+        self.base_domain_dimensions = self.domain_dimensions
+
+    def __repr__(self):
+        return "ytProfile: %s" % self.parameter_filename
+
+    def create_field_info(self):
+        self.field_info = self._field_info_class(self, self.field_list)
+        for ftype, field in self.field_list:
+            if ftype == self.default_fluid_type:
+                self.field_info.alias(
+                    ("gas", field),
+                    (self.default_fluid_type, field))
+        super(YTGridDataset, self).create_field_info()
+
+    def _set_code_unit_attributes(self):
+        attrs = ('length_unit', 'mass_unit', 'time_unit',
+                 'velocity_unit', 'magnetic_unit')
+        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
+        base_units = np.ones(len(attrs))
+        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
+            if isinstance(unit, string_types):
+                uq = self.quan(1.0, unit)
+            elif isinstance(unit, numeric_type):
+                uq = self.quan(unit, cgs_unit)
+            elif isinstance(unit, YTQuantity):
+                uq = unit
+            elif isinstance(unit, tuple):
+                uq = self.quan(unit[0], unit[1])
+            else:
+                raise RuntimeError("%s (%s) is invalid." % (attr, unit))
+            setattr(self, attr, uq)
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_profile":
+                return True
+        return False

diff -r b8e2a81c890852bee329e452e932fc21d75f9237 -r c6eff1294306b99772984a874f6f3309909840bb yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -30,6 +30,37 @@
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 
+class IOHandlerYTProfileHDF5(BaseIOHandler):
+    _dataset_type = "ytprofilehdf5"
+    _base = slice(None)
+    _field_dtype = "float64"
+
+    def _read_fluid_selection(self, g, selector, fields):
+        rv = {}
+        if selector.__class__.__name__ == "GridSelector":
+            if g.id in self._cached_fields:
+                gf = self._cached_fields[g.id]
+                rv.update(gf)
+            if len(rv) == len(fields): return rv
+            f = h5py.File(u(g.filename), "r")
+            gds = f["data"]
+            for field in fields:
+                if field in rv:
+                    self._hits += 1
+                    continue
+                self._misses += 1
+                ftype, fname = field
+                rv[(ftype, fname)] = gds[fname].value
+            if self._cache_on:
+                for gid in rv:
+                    self._cached_fields.setdefault(gid, {})
+                    self._cached_fields[gid].update(rv[gid])
+            f.close()
+            return rv
+        else:
+            raise RuntimeError(
+                "Geometric selection not supported for non-spatial datasets.")
+
 class IOHandlerYTGridHDF5(BaseIOHandler):
     _dataset_type = "ytgridhdf5"
     _base = slice(None)
@@ -48,7 +79,7 @@
                 rv.update(gf)
             if len(rv) == len(fields): return rv
             f = h5py.File(u(g.filename), "r")
-            gds = f["grid"]
+            gds = f[self.ds.default_fluid_type]
             for field in fields:
                 if field in rv:
                     self._hits += 1
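
The _is_valid classmethod at the end of the new dataset class sniffs
an HDF5 attribute rather than parsing the whole file.  A minimal
standalone sketch of that detection pattern:

    import h5py

    def is_yt_profile(filename):
        # Match the "data_type" attribute written by to_yt_dataset.
        if not filename.endswith(".h5"):
            return False
        with h5py.File(filename, "r") as f:
            return f.attrs.get("data_type", None) == "yt_profile"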


https://bitbucket.org/yt_analysis/yt/commits/f8b0d94954c8/
Changeset:   f8b0d94954c8
Branch:      yt
User:        brittonsmith
Date:        2015-09-24 13:37:44+00:00
Summary:     Make profiles write out bin fields with shape of profile data.
Affected #:  1 file

diff -r c6eff1294306b99772984a874f6f3309909840bb -r f8b0d94954c8d6cee863416b91b86a75dd8398db yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -986,22 +986,29 @@
         filename = get_output_filename(filename, keyword, ".h5")
 
         args = ("field", "log")
-        extra_attrs = {"data_type": "yt_profile"}
+        extra_attrs = {"data_type": "yt_profile",
+                       "profile_dimensions": self.size}
         data = {}
         data.update(self.field_data)
         data["weight"] = self.weight
-        data["used"] = self.used
+        data["used"] = self.used.astype("float64")
 
         dimensionality = 0
+        bin_data = []
         for ax in "xyz":
             if hasattr(self, ax):
                 dimensionality += 1
                 data[ax] = getattr(self, ax)
+                bin_data.append(data[ax])
                 data["%s_bins" % ax] = getattr(self, "%s_bins" % ax)
                 for arg in args:
                     key = "%s_%s" % (ax, arg)
                     extra_attrs[key] = getattr(self, key)
 
+        bin_fields = np.meshgrid(*bin_data)
+        for i, ax in enumerate("xyz"[:dimensionality]):
+            data[getattr(self, "%s_field" % ax)] = bin_fields[i]
+
         extra_attrs["dimensionality"] = dimensionality
         ftypes = dict([(field, "data") for field in data])
         to_yt_dataset(self.ds, filename, data, field_types=ftypes,
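
np.meshgrid broadcasts the 1D bin-center arrays into arrays shaped
like the profile data, which is what lets the bin fields be written
with the profile's shape.  A small illustration (note that meshgrid
defaults to "xy" indexing, which swaps the first two axes;
indexing="ij" preserves (nx, ny) ordering):

    import numpy as np

    x = np.linspace(0.5, 2.5, 3)       # 3 x-bin centers
    y = np.linspace(10.0, 40.0, 4)     # 4 y-bin centers

    xx, yy = np.meshgrid(x, y)                   # shapes (4, 3)
    xi, yi = np.meshgrid(x, y, indexing="ij")    # shapes (3, 4)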


https://bitbucket.org/yt_analysis/yt/commits/780555d7af87/
Changeset:   780555d7af87
Branch:      yt
User:        brittonsmith
Date:        2015-09-24 16:17:27+00:00
Summary:     Save range attributes for profiles.
Affected #:  1 file

diff -r f8b0d94954c8d6cee863416b91b86a75dd8398db -r 780555d7af876bf9ac87f4ad5d5a5b453f8c64d9 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1000,7 +1000,10 @@
                 dimensionality += 1
                 data[ax] = getattr(self, ax)
                 bin_data.append(data[ax])
-                data["%s_bins" % ax] = getattr(self, "%s_bins" % ax)
+                bin_field_name = "%s_bins" % ax
+                data[bin_field_name] = getattr(self, bin_field_name)
+                extra_attrs["%s_range" % ax] = self.ds.arr([data[bin_field_name][0],
+                                                            data[bin_field_name][-1]])
                 for arg in args:
                     key = "%s_%s" % (ax, arg)
                     extra_attrs[key] = getattr(self, key)


https://bitbucket.org/yt_analysis/yt/commits/ed6248920dd0/
Changeset:   ed6248920dd0
Branch:      yt
User:        brittonsmith
Date:        2015-09-24 16:19:05+00:00
Summary:     Write out units for unitful attributes.
Affected #:  1 file

diff -r 780555d7af876bf9ac87f4ad5d5a5b453f8c64d9 -r ed6248920dd052b3a68178cef139648292654c47 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -96,12 +96,14 @@
             continue
         if hasattr(my_val, "units"):
             my_val = my_val.in_cgs()
+            fh.attrs["%s_units" % attr] = str(my_val.units)
         fh.attrs[attr] = my_val
 
     for attr in extra_attrs:
         my_val = extra_attrs[attr]
         if hasattr(my_val, "units"):
             my_val = my_val.in_cgs()
+            fh.attrs["%s_units" % attr] = str(my_val.units)
         if my_val is None:
             my_val = "None"
         fh.attrs[attr] = my_val
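
Since HDF5 attributes are plain scalars, the unit information is
preserved by converting to CGS and writing the unit string under a
parallel "<attr>_units" key.  A self-contained sketch of that
convention:

    import h5py
    from yt.units.yt_array import YTQuantity

    my_val = YTQuantity(1.0, "Mpc").in_cgs()     # ~3.0857e24 cm

    with h5py.File("attrs_demo.h5", "w") as fh:
        fh.attrs["length"] = float(my_val)            # bare value
        fh.attrs["length_units"] = str(my_val.units)  # "cm"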


https://bitbucket.org/yt_analysis/yt/commits/53802d0ec9db/
Changeset:   53802d0ec9db
Branch:      yt
User:        brittonsmith
Date:        2015-09-24 16:21:02+00:00
Summary:     Refactoring and polishing up the profile frontend a bit.
Affected #:  1 file

diff -r ed6248920dd052b3a68178cef139648292654c47 -r 53802d0ec9db96449ed04198190e3b92b87277b9 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -47,9 +47,9 @@
     Cosmology
 from yt.utilities.exceptions import \
     YTFieldTypeNotFound
-import yt.utilities.fortran_utils as fpu
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
 from yt.units.yt_array import \
-    YTArray, \
     YTQuantity
 
 _grid_data_containers = ["abritrary_grid",
@@ -319,16 +319,19 @@
                 return True
         return False
 
-class YTNonSpatialGrid(AMRGridPatch):
+class YTNonspatialGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, gid, index, filename=None):
-        AMRGridPatch.__init__(self, gid, filename=filename, index=index)
+        super(YTNonspatialGrid, self).__init__(gid, filename=filename, index=index)
         self._children_ids = []
         self._parent_id = -1
         self.Level = 0
         self.LeftEdge = self.index.ds.domain_left_edge
         self.RightEdge = self.index.ds.domain_right_edge
 
+    def __repr__(self):
+        return "YTNonspatialGrid"
+
     def __getitem__(self, key):
         tr = super(AMRGridPatch, self).__getitem__(key)
         try:
@@ -414,8 +417,16 @@
             if field not in ofields:
                 self.field_data.pop(field)
 
-class YTNonSpatialHierarchy(GridIndex):
-    grid = YTNonSpatialGrid
+    @property
+    def Parent(self):
+        return None
+
+    @property
+    def Children(self):
+        return []
+
+class YTNonspatialHierarchy(GridIndex):
+    grid = YTNonspatialGrid
 
     def __init__(self, ds, dataset_type = None):
         self.dataset_type = dataset_type
@@ -448,6 +459,11 @@
         self.grids = temp_grids
 
     def _populate_grid_objects(self):
+        for g in self.grids:
+            g._setup_dx()
+            # this is non-spatial, so remove the code_length units
+            g.dds = self.ds.arr(g.dds.d, "")
+            g.ActiveDimensions = self.ds.domain_dimensions
         self.max_level = self.grid_levels.max()
 
     def _detect_output_fields(self):
@@ -474,17 +490,16 @@
         return fields_to_return, fields_to_generate
 
 class YTProfileDataset(YTGridDataset):
-    _index_class = YTNonSpatialHierarchy
+    _index_class = YTNonspatialHierarchy
     _field_info_class = YTGridFieldInfo
     _dataset_type = 'ytprofilehdf5'
     geometry = "cartesian"
     default_fluid_type = "data"
-    fluid_types = ("data")
+    fluid_types = ("data", "gas")
 
     def __init__(self, filename):
         super(YTProfileDataset, self).__init__(filename)
-
-        self.data = YTNonSpatialGrid(0, self.index, self.parameter_filename)
+        self.data = self.index.grids[0]
 
     def _parse_parameter_file(self):
         self.refine_by = 2
@@ -498,39 +513,50 @@
         self.particle_types_raw = tuple(self.num_particles.keys())
         self.particle_types = self.particle_types_raw
 
+    def _set_derived_attrs(self):
         self.base_domain_left_edge = self.domain_left_edge
         self.base_domain_right_edge = self.domain_right_edge
         self.base_domain_dimensions = self.domain_dimensions
 
+        self.domain_dimensions = np.ones(3, dtype="int")
+        self.domain_dimensions[:self.dimensionality] = self.profile_dimensions
+        self.domain_left_edge = np.zeros(3)
+        self.domain_right_edge = np.ones(3)
+        for i, ax in enumerate("xyz"[:self.dimensionality]):
+            range_name = "%s_range" % ax
+            my_edge = getattr(self, range_name)
+            if getattr(self, "%s_log" % ax, False):
+                my_edge = np.log10(my_edge)
+            self.domain_left_edge[i] = my_edge[0]
+            self.domain_right_edge[i] = my_edge[1]
+            setattr(self, range_name,
+                    self.arr(getattr(self, range_name),
+                             getattr(self, range_name+"_units")))
+        self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
+        self.domain_width = self.domain_right_edge - self.domain_left_edge
+
+    def _setup_classes(self):
+        # We don't allow geometric selection for non-spatial datasets
+        pass
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        mylog.info("YTProfileDataset")
+        for a in ["dimensionality", "profile_dimensions"] + \
+          ["%s_%s" % (ax, attr)
+           for ax in "xyz"[:self.dimensionality]
+           for attr in ["range", "log"]]:
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+        super(YTProfileDataset, self).print_key_parameters()
+        mylog.warn("Geometric data selection not available for this dataset type.")
+
     def __repr__(self):
         return "ytProfile: %s" % self.parameter_filename
 
     def create_field_info(self):
         self.field_info = self._field_info_class(self, self.field_list)
-        for ftype, field in self.field_list:
-            if ftype == self.default_fluid_type:
-                self.field_info.alias(
-                    ("gas", field),
-                    (self.default_fluid_type, field))
-        super(YTGridDataset, self).create_field_info()
-
-    def _set_code_unit_attributes(self):
-        attrs = ('length_unit', 'mass_unit', 'time_unit',
-                 'velocity_unit', 'magnetic_unit')
-        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
-        base_units = np.ones(len(attrs))
-        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
-            if isinstance(unit, string_types):
-                uq = self.quan(1.0, unit)
-            elif isinstance(unit, numeric_type):
-                uq = self.quan(unit, cgs_unit)
-            elif isinstance(unit, YTQuantity):
-                uq = unit
-            elif isinstance(unit, tuple):
-                uq = self.quan(unit[0], unit[1])
-            else:
-                raise RuntimeError("%s (%s) is invalid." % (attr, unit))
-            setattr(self, attr, uq)
+        super(YTProfileDataset, self).create_field_info()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
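
For log-binned axes the refactored _set_derived_attrs takes the domain
edges as log10 of the stored range, so selection happens in bin space.
A small numeric sketch of that edge computation:

    import numpy as np

    x_range = np.array([1.0e-28, 1.0e-24])   # hypothetical density range
    x_log = True

    my_edge = np.log10(x_range) if x_log else x_range
    print(my_edge)                            # [-28. -24.]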


https://bitbucket.org/yt_analysis/yt/commits/40163fdcfd13/
Changeset:   40163fdcfd13
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 14:21:52+00:00
Summary:     Merging.
Affected #:  62 files

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,10 +1,10 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
-recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
 include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
 include doc/extensions/README doc/Makefile
@@ -12,5 +12,3 @@
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
-prune tests
-exclude clean.sh .hgchurn

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -111,7 +111,7 @@
 
 .. code-block:: python
 
-    @yt.particle_filter(requires=["particle_type], filtered_type='all')
+    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
     def stars(pfilter, data):
         filter = data[(pfilter.filtered_type, "particle_type")] == 2
         return filter
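
Once the decorator line is corrected, the filter registers under the
function's name and can be enabled per dataset; a hedged usage sketch
(assuming the IsolatedGalaxy sample data):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ds.add_particle_filter("stars")    # enable the registered filter
    ad = ds.all_data()
    print(ad["stars", "particle_mass"])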

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d19ee42177c60fb4b39550b5acd7a0f7e97f59f5c2da3565ff42cdd580454b0"
+  "signature": "sha256:6a06d5720eb6316ac0d322ef0898ec20f33d65ea3eeeacef35ae1d869af12607"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -352,7 +352,7 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "Round-Trip Conversions to and from AstroPy's Units System"
+      "Round-Trip Conversions to and from Other Unit Systems"
      ]
     },
     {
@@ -503,6 +503,58 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also do the same thing with unitful quantities from the [Pint package](http://pint.readthedocs.org), using essentially the same procedure:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from pint import UnitRegistry\n",
+      "ureg = UnitRegistry()\n",
+      "v = 1000.*ureg.km/ureg.s\n",
+      "w = yt.YTQuantity.from_pint(v)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print v, type(v)\n",
+      "print w, type(w)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ptemp = temp.to_pint()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print ptemp, type(ptemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -84,6 +84,9 @@
 * :func:`~yt.testing.assert_equal` can operate on arrays.
 * :func:`~yt.testing.assert_almost_equal` can operate on arrays and accepts a
   relative allowable difference.
+* :func:`~yt.testing.assert_allclose_units` raises an error if two arrays are
+  not equal up to a desired absolute or relative tolerance. This wraps numpy's
+  assert_allclose to correctly verify unit consistency as well.
 * :func:`~yt.testing.amrspace` provides the ability to create AMR grid
   structures.
 * :func:`~yt.testing.expand_keywords` provides the ability to iterate over
@@ -99,9 +102,10 @@
 #. Inside that directory, create a new python file prefixed with ``test_`` and
    including the name of the functionality.
 #. Inside that file, create one or more routines prefixed with ``test_`` that
-   accept no arguments.  These should ``yield`` a set of values of the form
-   ``function``, ``arguments``.  For example ``yield assert_equal, 1.0, 1.0``
-   would evaluate that 1.0 equaled 1.0.
+   accept no arguments.  These should ``yield`` a tuple of the form
+   ``function``, ``argument_one``, ``argument_two``, etc.  For example
+   ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
+   asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
@@ -113,6 +117,53 @@
 ``yt/data_objects/tests/test_covering_grid.py``, which covers a great deal of
 functionality.
 
+Debugging failing tests
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When writing new tests, one often exposes bugs or writes a test incorrectly,
+causing an exception to be raised or a test to fail. To help debug issues like
+this, ``nose`` can drop into a debugger whenever a test fails or raises an
+exception. This can be accomplished by passing ``--pdb`` and ``--pdb-failures``
+to the ``nosetests`` executable. These options will drop into the pdb debugger
+whenever an error is raised or a failure happens, respectively. Inside the
+debugger you can interactively print out variables and go up and down the call
+stack to determine the context for your failure or error.
+
+.. code-block:: bash
+
+    nosetests --pdb --pdb-failures
+
+In addition, one can debug more crudely using print statements. To do this,
+you can add print statements to the code as normal. However, the test runner
+will capture all print output by default. To ensure that output gets printed
+to your terminal while the tests are running, pass ``-s`` to the ``nosetests``
+executable.
+
+Lastly, to quickly debug a specific failing test, it is best to only run that
+one test during your testing session. This can be accomplished by explicitly
+passing the name of the test function or class to ``nosetests``, as in the
+following example:
+
+.. code-block:: bash
+
+    $ nosetests yt.visualization.tests.test_plotwindow:TestSetWidth
+
+This nosetests invocation will only run the tests defined by the
+``TestSetWidth`` class.
+
+Also, to determine which test is failing while the tests are running, it helps
+to run the tests in "verbose" mode. This can be done by passing the ``-v`` option
+to the ``nosetests`` executable.
+
+All of the above ``nosetests`` options can be combined. So, for example to run
+the ``TestSetWidth`` tests with verbose output, letting the output of print
+statements come out on the terminal prompt, and enabling pdb debugging on errors
+or test failures, one would do:
+
+.. code-block:: bash
+
+    $ nosetests --pdb --pdb-failures -v -s yt.visualization.tests.test_plotwindow:TestSetWidth
+
 .. _answer_testing:
 
 Answer Testing
@@ -122,8 +173,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^
 
 Answer tests test **actual data**, and many operations on that data, to make
-sure that answers don't drift over time.  This is how we will be testing
-frontends, as opposed to operations, in yt.
+sure that answers don't drift over time.  This is how we test frontends, as
+opposed to operations, in yt.
 
 .. _run_answer_testing:
 
@@ -133,20 +184,104 @@
 The very first step is to make a directory and copy over the data against which
 you want to test.  Currently, we test:
 
+NMSU ART
+~~~~~~~~
+
+* ``D9p_500/10MpcBox_HartGal_csf_a0.500.d``
+
+ARTIO
+~~~~~
+
+* ``sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art``
+
+Athena
+~~~~~~
+
+* ``ShockCloud/id0/Cloud.0050.vtk``
+* ``MHDBlast/id0/Blast.0100.vtk``
+* ``RamPressureStripping/id0/rps.0062.vtk``
+* ``MHDSloshing/virgo_low_res.0054.vtk``
+
+Boxlib
+~~~~~~
+
+* ``RadAdvect/plt00000``
+* ``RadTube/plt00500``
+* ``StarParticles/plrd01000``
+
+Chombo
+~~~~~~
+
+* ``TurbBoxLowRes/data.0005.3d.hdf5``
+* ``GaussianCloud/data.0077.3d.hdf5``
+* ``IsothermalSphere/data.0000.3d.hdf5``
+* ``ZeldovichPancake/plt32.2d.hdf5``
+* ``KelvinHelmholtz/data.0004.hdf5``
+
+Enzo
+~~~~
+
 * ``DD0010/moving7_0010`` (available in ``tests/`` in the yt distribution)
 * ``IsolatedGalaxy/galaxy0030/galaxy0030``
+* ``enzo_tiny_cosmology/DD0046/DD0046``
+* ``enzo_cosmology_plus/DD0046/DD0046``
+
+FITS
+~~~~
+
+* ``radio_fits/grs-50-cube.fits``
+* ``UnigridData/velocity_field_20.fits``
+
+FLASH
+~~~~~
+
 * ``WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030``
 * ``GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300``
-* ``TurbBoxLowRes/data.0005.3d.hdf5``
-* ``GaussianCloud/data.0077.3d.hdf5``
-* ``RadAdvect/plt00000``
-* ``RadTube/plt00500``
+
+Gadget
+~~~~~~
+
+* ``IsothermalCollapse/snap_505``
+* ``IsothermalCollapse/snap_505.hdf5``
+* ``GadgetDiskGalaxy/snapshot_200.hdf5``
+
+Halo Catalog
+~~~~~~~~~~~~
+
+* ``owls_fof_halos/groups_001/group_001.0.hdf5``
+* ``owls_fof_halos/groups_008/group_008.0.hdf5``
+* ``gadget_fof_halos/groups_005/fof_subhalo_tab_005.0.hdf5``
+* ``gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5``
+* ``rockstar_halos/halos_0.0.bin``
+
+MOAB
+~~~~
+
+* ``c5/c5.h5m``
+
+
+RAMSES
+~~~~~~
+
+* ``output_00080/info_00080.txt``
+
+Tipsy
+~~~~~
+
+* ``halo1e11_run1.00400/halo1e11_run1.00400``
+* ``agora_1e11.00400/agora_1e11.00400``
+* ``TipsyGalaxy/galaxy.00300``
+
+OWLS
+~~~~
+
+* ``snapshot_033/snap_033.0.hdf5``
 
 These datasets are available at http://yt-project.org/data/.
 
 Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
 with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to compare.  Here is an example
+directory with the test data you want to test with.  Here is an example
 config file:
 
 .. code-block:: none
@@ -154,47 +289,45 @@
    [yt]
    test_data_dir = /Users/tomservo/src/yt-data
 
-More data will be added over time.  To run the tests, you can import the yt
-module and invoke ``yt.run_nose()`` with a new keyword argument:
+More data will be added over time.  To run the answer tests, you must first
+generate a set of test answers locally on a "known good" revision, then update
+to the revision you want to test, and run the tests again using the locally
+stored answers.
 
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True)
-
-If you have installed yt using ``python setup.py develop`` you can also
-optionally invoke nose using the ``nosetests`` command line interface:
+Let's focus on running the answer tests for a single frontend. It's possible to
+run the answer tests for **all** the frontends, but due to the large number of
+test datasets we currently use, this is not normally done except on the yt
+project's continuous integration server.
 
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
 
-In either case, the current gold standard results will be downloaded from the
-rackspace cloud and compared to what is generated locally.  The results from a
-nose testing session are pretty straightforward to understand, the results for
-each test are printed directly to STDOUT. If a test passes, nose prints a
-period, F if a test fails, and E if the test encounters an exception or errors
-out for some reason.  If you want to also run tests for the 'big' datasets,
-then you can use the ``answer_big_data`` keyword argument:
-
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True, answer_big_data=True)
-
-or, in the base directory of the yt mercurial repository:
+This command will create a set of local answers from the tipsy frontend tests
+and store them in ``$HOME/Documents/test`` (this can but does not have to be the
+same directory as the ``test_data_dir`` configuration variable defined in your
+``.yt/config`` file). To run the tipsy frontend's answer tests using a different
+yt changeset, update to that changeset, recompile if necessary, and run the
+tests using the following command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --answer-big-data
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
 
-It's also possible to only run the answer tests for one frontend.  For example,
-to run only the enzo answers tests, one can do,
+The results from a nose testing session are pretty straightforward to
+understand: the results for each test are printed directly to STDOUT.  If a test
+passes, nose prints a period, an F if a test fails, and an E if the test encounters an
+exception or errors out for some reason.  Explicit descriptions for each test
+are also printed if you pass ``-v`` to the ``nosetests`` executable.  If you
+want to also run tests for the 'big' datasets, then you will need to pass
+``--answer-big-data`` to ``nosetests``.  For example, to run the tests for the
+OWLS frontend, do the following:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing yt.frontends.enzo
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data frontends.owls
+
 
 How to Write Answer Tests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -260,38 +393,21 @@
   directory.
 
 * Create a new routine that operates similarly to the routines you can see
-  in Enzo's outputs.
+  in Enzo's output tests.
 
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(file_name)``  This decorate can accept the argument
-    ``big_data`` for if this data is too big to run all the time.
+    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive.
 
-  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that
-    you can yield from to execute a bunch of standard tests.  This is where
-    you should start, and then yield additional tests that stress the
-    outputs in whatever ways are necessary to ensure functionality.
+  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
+    yield from to execute a bunch of standard tests. In addition we have created
+    ``sph_answer`` which is more suited for particle SPH datasets. This is where
+    you should start, and then yield additional tests that stress the outputs in
+    whatever ways are necessary to ensure functionality.
 
   * **All tests should be yielded!**
 
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
-
-How to Upload Answers
-^^^^^^^^^^^^^^^^^^^^^
-
-To upload answers you can execute this command:
-
-.. code-block:: bash
-
-   $ nosetests --with-answer-testing frontends/enzo/ --answer-store --answer-name=whatever
-
-The current version of the gold standard can be found in the variable
-``_latest`` inside ``yt/utilities/answer_testing/framework.py``  As of
-the time of this writing, it is ``gold007``  Note that the name of the
-suite of results is now disconnected from the dataset's name, so you
-can upload multiple outputs with the same name and not collide.
-
-To upload answers, you **must** have the package boto installed, and you
-**must** have an Amazon key provided by Matt.  Contact Matt for these keys.
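
The rewritten testing docs describe nose's yielded-tuple convention; a
minimal test module in that style might look like the following (the
function name is illustrative):

    from yt.testing import assert_equal

    def test_unity():
        # Each yielded tuple is (function, argument_one, argument_two, ...)
        # and is captured by nose as an individual test.
        yield assert_equal, 1.0, 1.0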

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1257,8 +1257,8 @@
 
 .. _specifying-cosmology-tipsy:
 
-Specifying Tipsy Cosmological Parameters
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Specifying Tipsy Cosmological Parameters and Setting Default Units
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cosmological parameters can be specified to Tipsy to enable computation of
 default units.  The parameters recognized are of this form:
@@ -1270,5 +1270,27 @@
                            'omega_matter': 0.272,
                            'hubble_constant': 0.702}
 
-These will be used set the units, if they are specified.
+If you wish to set the default units directly, you can do so by using the
+``unit_base`` keyword in the load statement.
 
+ .. code-block:: python
+
+    import yt
+    ds = yt.load(filename, unit_base={'length': (1.0, 'Mpc')})
+
+
+Loading Cosmological Simulations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are not using a parameter file (i.e. non-Gasoline users), then you must
+use the keyword ``cosmology_parameters`` when loading your data set to indicate to
+yt that it is a cosmological data set. If you do not wish to set any
+non-default cosmological parameters, you may pass an empty dictionary.
+
+ .. code-block:: python
+
+    import yt
+    ds = yt.load(filename, cosmology_parameters={})
+
+
+

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -159,7 +159,9 @@
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
+            input_fields.append('redshift_eff')
             field_units["velocity_los"] = "cm/s"
+            field_units["redshift_eff"] = ""
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -203,11 +205,11 @@
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
-            delta_lambda = continuum['wavelength'] * field_data['redshift']
+            # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                # include factor of (1 + z) because our velocity is in proper frame.
-                delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['velocity_los'] / speed_of_light_cgs
+                delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+            else:
+                delta_lambda = continuum['wavelength'] * field_data['redshift']
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -241,11 +243,11 @@
 
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
-            delta_lambda = line['wavelength'] * field_data['redshift']
+            # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                # include factor of (1 + z) because our velocity is in proper frame.
-                delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['velocity_los'] / speed_of_light_cgs
+                delta_lambda = line['wavelength'] * field_data['redshift_eff']
+            else:
+                delta_lambda = line['wavelength'] * field_data['redshift']
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                   field_data['temperature']) /
                                   line['atomic_mass'])
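
With the new redshift_eff field, the wavelength shift reduces to a
single multiplication in both branches.  A standalone sketch with
hypothetical values:

    import numpy as np

    wavelength = 1215.67                       # Lya rest frame, Angstrom
    redshift = np.array([2.0, 2.5])            # cosmological only
    redshift_eff = np.array([2.001, 2.502])    # includes peculiar velocity

    use_peculiar_velocity = True
    if use_peculiar_velocity:
        delta_lambda = wavelength * redshift_eff
    else:
        delta_lambda = wavelength * redshift
    this_wavelength = delta_lambda + wavelength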

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -29,6 +29,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import speed_of_light_cgs
 
 class LightRay(CosmologySplice):
     """
@@ -368,7 +369,7 @@
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
-                               'velocity_z', 'velocity_los'])
+                               'velocity_z', 'velocity_los', 'redshift_eff'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -465,6 +466,28 @@
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
+            # When velocity_los is present, add effective redshift 
+            # (redshift_eff) field by combining cosmological redshift and 
+            # doppler redshift.
+            
+            # first convert los velocities to comoving frame (ie mult. by (1+z)), 
+            # then calculate doppler redshift:
+            # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+
+            # then to add cosmological redshift and doppler redshift, follow
+            # eqn 3.75 in Peacock's Cosmological Physics:
+            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
+            # Alternatively, see eqn 5.49 in Peebles for a similar result.
+            if get_los_velocity:
+
+                velocity_los_cm = (1 + sub_data['redshift']) * \
+                                  sub_data['velocity_los']
+                redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
+                                (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
+                sub_data['redshift_eff'] = (1 + redshift_dopp) * \
+                                           (1 + sub_data['redshift'])
+                del velocity_los_cm, redshift_dopp
+
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()
             for field in all_fields:
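
The comment block above cites Peacock eqn 3.75; in code form, the
composition of the relativistic Doppler shift with the cosmological
redshift looks like the following (hypothetical scalar inputs):

    import numpy as np

    c = 2.99792458e10          # speed of light, cm/s
    z_cosmo = 2.0              # cosmological redshift
    v_los = 1.0e7              # proper line-of-sight velocity, cm/s

    # comoving-frame velocity, then 1 + z_dopp = sqrt((1+v/c)/(1-v/c))
    v_cm = (1.0 + z_cosmo) * v_los
    z_dopp = np.sqrt((1.0 + v_cm / c) / (1.0 - v_cm / c)) - 1.0

    # 1 + z_obs = (1 + z_cosmo) * (1 + z_dopp)
    z_obs = (1.0 + z_cosmo) * (1.0 + z_dopp) - 1.0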

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -32,18 +32,6 @@
     match, at which point it returns an instance of the appropriate
     :class:`yt.data_objects.api.Dataset` subclass.
     """
-    if len(args) == 0:
-        try:
-            from yt.extern.six.moves import tkinter
-            import tkinter, tkFileDialog
-        except ImportError:
-            raise YTOutputNotIdentified(args, kwargs)
-        root = tkinter.Tk()
-        filename = tkFileDialog.askopenfilename(parent=root,title='Choose a file')
-        if filename != None:
-            return load(filename)
-        else:
-            raise YTOutputNotIdentified(args, kwargs)
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, str)
             else arg for arg in args]
@@ -100,32 +88,6 @@
         mylog.error("    Possible: %s", c)
     raise YTOutputNotIdentified(args, kwargs)
 
-def projload(ds, axis, weight_field = None):
-    # This is something of a hack, so that we can just get back a projection
-    # and not utilize any of the intermediate index objects.
-    class ProjMock(dict):
-        pass
-    import h5py
-    f = h5py.File(os.path.join(ds.fullpath, ds.parameter_filename + ".yt"))
-    b = f["/Projections/%s/" % (axis)]
-    wf = "weight_field_%s" % weight_field
-    if wf not in b: raise KeyError(wf)
-    fields = []
-    for k in b:
-        if k.startswith("weight_field"): continue
-        if k.endswith("_%s" % weight_field):
-            fields.append(k)
-    proj = ProjMock()
-    for f in ["px","py","pdx","pdy"]:
-        proj[f] = b[f][:]
-    for f in fields:
-        new_name = f[:-(len(weight_field) + 1)]
-        proj[new_name] = b[f][:]
-    proj.axis = axis
-    proj.ds = ds
-    f.close()
-    return proj
-
 def simulation(parameter_filename, simulation_type, find_outputs=False):
     """
     Loads a simulation time series object of the specified

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -136,6 +136,8 @@
         # that dx=dy=dz, at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if self.Parent is not None:
+            if not hasattr(self.Parent, 'dds'):
+                self.Parent._setup_dx()
             self.dds = self.Parent.dds.ndarray_view() / self.ds.refine_by
         else:
             LE, RE = self.index.grid_left_edge[id,:], \
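
The guard added above is a lazy-initialization pattern: a child cannot
derive its spacing until the parent's has been computed, so the parent
is set up on demand.  A generic sketch of the idea (the names are
illustrative, not yt's):

    class Node:
        def __init__(self, parent=None):
            self.parent = parent

        def _setup_dx(self):
            # Recurse to the parent first if its spacing is missing.
            if self.parent is not None:
                if not hasattr(self.parent, "dds"):
                    self.parent._setup_dx()
                self.dds = self.parent.dds / 2.0   # refine_by = 2
            else:
                self.dds = 1.0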

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -159,7 +159,7 @@
             indexed octree will be constructed on these particles.
         fields : list of arrays
             All the necessary fields for computing the particle operation.  For
-            instance, this might include mass, velocity, etc.  
+            instance, this might include mass, velocity, etc.
         method : string
             This is the "method name" which will be looked up in the
             `particle_deposit` namespace as `methodname_deposit`.  Current
@@ -212,7 +212,7 @@
             indexed octree will be constructed on these particles.
         fields : list of arrays
             All the necessary fields for computing the particle operation.  For
-            instance, this might include mass, velocity, etc.  
+            instance, this might include mass, velocity, etc.
         index_fields : list of arrays
             All of the fields defined on the mesh that may be used as input to
             the operation.
@@ -265,11 +265,14 @@
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
-        op.process_octree(self.oct_handler, mdom_ind, positions, 
+        # Pointer operations within 'process_octree' require arrays to be
+        # contiguous cf. https://bitbucket.org/yt_analysis/yt/issues/1079
+        fields = [np.ascontiguousarray(f, dtype="float64") for f in fields]
+        op.process_octree(self.oct_handler, mdom_ind, positions,
             self.fcoords, fields,
             self.domain_id, self._domain_offset, self.ds.periodicity,
             index_fields, particle_octree, pdom_ind, self.ds.geometry)
-        # If there are 0s in the smoothing field this will not throw an error, 
+        # If there are 0s in the smoothing field this will not throw an error,
         # but silently return nans for vals where dividing by 0
         # Same as what is currently occurring, but suppressing the div by zero
         # error.
@@ -342,7 +345,7 @@
         op.initialize()
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
-        op.process_particles(particle_octree, pdom_ind, positions, 
+        op.process_particles(particle_octree, pdom_ind, positions,
             fields, self.domain_id, self._domain_offset, self.ds.periodicity,
             self.ds.geometry)
         vals = op.finalize()
@@ -494,7 +497,7 @@
         LE -= np.abs(LE) * eps
         RE = self.max(axis=0)
         RE += np.abs(RE) * eps
-        octree = ParticleOctreeContainer(dims, LE, RE, 
+        octree = ParticleOctreeContainer(dims, LE, RE,
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
         octree.add(mi)
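
The contiguity fix above matters because the pointer arithmetic inside process_octree assumes a C-contiguous float64 buffer, while NumPy slicing routinely hands back strided views that are neither. A standalone illustration in plain NumPy:

    import numpy as np

    a = np.arange(12, dtype="float32").reshape(3, 4)
    col = a[:, 1]                        # strided view of one column
    print(col.flags["C_CONTIGUOUS"])     # False

    safe = np.ascontiguousarray(col, dtype="float64")
    print(safe.flags["C_CONTIGUOUS"])    # True; copied and upcast, as in the diff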

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -693,8 +693,7 @@
 
         """
         from yt.units.dimensions import length
-        if hasattr(self, "cosmological_simulation") \
-           and getattr(self, "cosmological_simulation"):
+        if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, so add cosmological units.
             self.unit_registry.modify("h", self.hubble_constant)
             # Comoving lengths
@@ -707,16 +706,15 @@
 
         self.set_code_units()
 
-        if hasattr(self, "cosmological_simulation") \
-           and getattr(self, "cosmological_simulation"):
+        if getattr(self, "cosmological_simulation", False):
             # this dataset is cosmological, add a cosmology object
-            setattr(self, "cosmology",
+            self.cosmology = \
                     Cosmology(hubble_constant=self.hubble_constant,
                               omega_matter=self.omega_matter,
                               omega_lambda=self.omega_lambda,
-                              unit_registry=self.unit_registry))
-            setattr(self, "critical_density",
-                    self.cosmology.critical_density(self.current_redshift))
+                              unit_registry=self.unit_registry)
+            self.critical_density = \
+                    self.cosmology.critical_density(self.current_redshift)
             self.scale_factor = 1.0 / (1.0 + self.current_redshift)
 
     def get_unit_from_registry(self, unit_str):
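
The simplification above uses the three-argument form of getattr: a missing attribute falls back to the default instead of raising, so the hasattr-plus-getattr pair collapses into a single lookup. A one-line comparison:

    class DS(object):
        pass

    ds = DS()
    old = hasattr(ds, "cosmological_simulation") and \
        getattr(ds, "cosmological_simulation")
    new = getattr(ds, "cosmological_simulation", False)
    print(old)   # False
    print(new)   # False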

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -156,7 +156,7 @@
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
-            if isinstance(o, str):
+            if isinstance(o, string_types):
                 ds = load(o, **self.kwargs)
                 self._setup_function(ds)
                 yield ds
@@ -170,7 +170,7 @@
             # This will return a sliced up object!
             return DatasetSeries(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
-        if isinstance(o, str):
+        if isinstance(o, string_types):
             o = load(o, **self.kwargs)
             self._setup_function(o)
         return o
@@ -248,13 +248,31 @@
 
         """
         dynamic = False
-        if self.parallel == False:
+        if self.parallel is False:
             njobs = 1
         else:
-            if self.parallel == True: njobs = -1
-            else: njobs = self.parallel
-        return parallel_objects(self, njobs=njobs, storage=storage,
-                                dynamic=dynamic)
+            if self.parallel is True:
+                njobs = -1
+            else:
+                njobs = self.parallel
+
+        for output in parallel_objects(self._pre_outputs, njobs=njobs,
+                                       storage=storage, dynamic=dynamic):
+            if storage is not None:
+                sto, output = output
+
+            if isinstance(output, string_types):
+                ds = load(output, **self.kwargs)
+                self._setup_function(ds)
+            else:
+                ds = output
+
+            if storage is not None:
+                next_ret = (sto, ds)
+            else:
+                next_ret = ds
+
+            yield next_ret
 
     def eval(self, tasks, obj=None):
         tasks = ensure_list(tasks)
@@ -323,13 +341,13 @@
 
         """
         
-        if isinstance(filenames, str):
+        if isinstance(filenames, string_types):
             filenames = get_filenames_from_glob_pattern(filenames)
 
         # This will crash with a less informative error if filenames is not
         # iterable, but the plural keyword should give users a clue...
         for fn in filenames:
-            if not isinstance(fn, str):
+            if not isinstance(fn, string_types):
                 raise YTOutputNotIdentified("DataSeries accepts a list of "
                                             "strings, but "
                                             "received {0}".format(fn))

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -15,12 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import types
 import numpy as np
-import inspect
-import copy
-
-from yt.units.yt_array import YTArray
 
 from .derived_field import \
     ValidateParameter
@@ -29,8 +24,8 @@
     register_field_plugin
 
 from .vector_operations import \
-     create_magnitude_field
-    
+    create_magnitude_field
+
 from yt.utilities.lib.geometry_utils import \
     obtain_rvec, obtain_rv_vec
 
@@ -78,7 +73,7 @@
 
     create_magnitude_field(registry, "specific_angular_momentum",
                            "cm**2 / s", ftype=ftype)
-    
+
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
@@ -105,4 +100,3 @@
 
     create_magnitude_field(registry, "angular_momentum",
                            "g * cm**2 / s", ftype=ftype)
-                           

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -16,8 +16,7 @@
 import numpy as np
 
 from .derived_field import \
-    ValidateParameter, \
-    ValidateSpatial
+    ValidateParameter
 from .field_exceptions import \
     NeedsParameter
 from .field_plugin_registry import \
@@ -30,7 +29,7 @@
     clight, \
     kboltz, \
     G
-    
+
 @register_field_plugin
 def setup_astro_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -45,7 +44,7 @@
         div_fac = 2.0
     else:
         sl_left, sl_right, div_fac = slice_info
-    
+
     def _dynamical_time(field, data):
         """
         sqrt(3 pi / (16 G rho))
@@ -71,7 +70,7 @@
 
     def _chandra_emissivity(field, data):
         logT0 = np.log10(data[ftype, "temperature"].to_ndarray().astype(np.float64)) - 7
-        # we get rid of the units here since this is a fit and not an 
+        # we get rid of the units here since this is a fit and not an
         # analytical expression
         return data.ds.arr(data[ftype, "number_density"].to_ndarray().astype(np.float64)**2
                            * (10**(- 0.0103 * logT0**8 + 0.0417 * logT0**7
@@ -91,7 +90,7 @@
     registry.add_field((ftype, "chandra_emissivity"),
                        function=_chandra_emissivity,
                        units="") # add correct units here
-    
+
     def _xray_emissivity(field, data):
         # old scaling coefficient was 2.168e60
         return data.ds.arr(data[ftype, "density"].to_ndarray().astype(np.float64)**2
@@ -110,7 +109,7 @@
     registry.add_field((ftype,"mazzotta_weighting"),
                        function=_mazzotta_weighting,
                        units="keV**-0.25*cm**-6")
-    
+
     def _sz_kinetic(field, data):
         scale = 0.88 * sigma_thompson / mh / clight
         vel_axis = data.get_field_parameter("axis")

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/astro_simulations.py
--- a/yt/fields/astro_simulations.py
+++ b/yt/fields/astro_simulations.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 from .domain_context import DomainContext
 
 # Here's how this all works:

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -14,21 +14,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
+from .derived_field import \
+    ValidateParameter
+from .field_exceptions import \
+    NeedsConfiguration, \
+    NeedsParameter
+from .field_plugin_registry import \
+    register_field_plugin
 
-from .derived_field import \
-     ValidateParameter
-from .field_exceptions import \
-     NeedsConfiguration, \
-     NeedsParameter
-from .field_plugin_registry import \
-     register_field_plugin
+from yt.utilities.physical_constants import \
+    speed_of_light_cgs
 
-from yt.utilities.cosmology import \
-     Cosmology
-from yt.utilities.physical_constants import \
-     speed_of_light_cgs
-    
 @register_field_plugin
 def setup_cosmology_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -49,7 +45,7 @@
           data[ftype, "dark_matter_density"]
 
     registry.add_field((ftype, "matter_density"),
-                       function=_matter_density, 
+                       function=_matter_density,
                        units="g/cm**3")
 
     def _matter_mass(field, data):
@@ -67,7 +63,7 @@
         co = data.ds.cosmology
         return data[ftype, "matter_density"] / \
           co.critical_density(data.ds.current_redshift)
-    
+
     registry.add_field((ftype, "overdensity"),
                        function=_overdensity,
                        units="")
@@ -116,7 +112,7 @@
                        function=_virial_radius_fraction,
                        validators=[ValidateParameter("virial_radius")],
                        units="")
-    
+
     # Weak lensing convergence.
     # Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
     # This needs to be checked for accuracy.
@@ -127,7 +123,7 @@
         co = data.ds.cosmology
         observer_redshift = data.get_field_parameter('observer_redshift')
         source_redshift = data.get_field_parameter('source_redshift')
-        
+
         # observer to lens
         dl = co.angular_diameter_distance(observer_redshift, data.ds.current_redshift)
         # observer to source
@@ -135,11 +131,11 @@
         # lens to source
         dls = co.angular_diameter_distance(data.ds.current_redshift, source_redshift)
 
-        # removed the factor of 1 / a to account for the fact that we are projecting 
+        # removed the factor of 1 / a to account for the fact that we are projecting
         # with a proper distance.
         return (1.5 * (co.hubble_constant / speed_of_light_cgs)**2 * (dl * dls / ds) * \
           data[ftype, "matter_overdensity"]).in_units("1/cm")
-       
+
     registry.add_field((ftype, "weak_lensing_convergence"),
                        function=_weak_lensing_convergence,
                        units="1/cm",

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -16,10 +16,7 @@
 
 from yt.funcs import \
     ensure_list
-from yt.units.yt_array import \
-    YTArray
 from .field_exceptions import \
-    ValidationException, \
     NeedsGridType, \
     NeedsOriginalGrid, \
     NeedsDataField, \
@@ -30,15 +27,9 @@
     FieldDetector
 from yt.units.unit_object import \
     Unit
+from yt.utilities.exceptions import \
+    YTFieldNotFound
 
-def derived_field(**kwargs):
-    def inner_decorator(function):
-        if 'name' not in kwargs:
-            kwargs['name'] = function.__name__
-        kwargs['function'] = function
-        add_field(**kwargs)
-        return function
-    return inner_decorator
 
 def TranslationFunc(field_name):
     def _TranslationFunc(field, data):
@@ -48,7 +39,7 @@
 
 def NullFunc(field, data):
     raise YTFieldNotFound(field.name)
- 
+
 class DerivedField(object):
     """
     This is the base class used to describe a cell-by-cell derived field.
@@ -178,7 +169,7 @@
 
     def __call__(self, data):
         """ Return the value of the field in a given *data* object. """
-        ii = self.check_available(data)
+        self.check_available(data)
         original_fields = data.keys() # Copy
         if self._function is NullFunc:
             raise RuntimeError(

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/domain_context.py
--- a/yt/fields/domain_context.py
+++ b/yt/fields/domain_context.py
@@ -14,8 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 domain_context_registry = {}
 
 class DomainContext(object):

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -15,16 +15,9 @@
 
 import numpy as np
 from collections import defaultdict
-from yt.units.unit_object import Unit
 from yt.units.yt_array import YTArray
 from .field_exceptions import \
-    ValidationException, \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter, \
-    FieldUnitsError
+    NeedsGridType
 
 class FieldDetector(defaultdict):
     Level = 1
@@ -87,27 +80,18 @@
         return arr.reshape(self.ActiveDimensions, order="C")
 
     def __missing__(self, item):
-        if hasattr(self.ds, "field_info"):
-            if not isinstance(item, tuple):
-                field = ("unknown", item)
-                finfo = self.ds._get_field_info(*field)
-                #mylog.debug("Guessing field %s is %s", item, finfo.name)
-            else:
-                field = item
-            finfo = self.ds._get_field_info(*field)
-            # For those cases where we are guessing the field type, we will
-            # need to re-update -- otherwise, our item will always not have the
-            # field type.  This can lead to, for instance, "unknown" particle
-            # types not getting correctly identified.
-            # Note that the *only* way this works is if we also fix our field
-            # dependencies during checking.  Bug #627 talks about this.
-            item = self.ds._last_freq
+        if not isinstance(item, tuple):
+            field = ("unknown", item)
         else:
-            FI = getattr(self.ds, "field_info", FieldInfo)
-            if item in FI:
-                finfo = FI[item]
-            else:
-                finfo = None
+            field = item
+        finfo = self.ds._get_field_info(*field)
+        # For those cases where we are guessing the field type, we will
+        # need to re-update -- otherwise, our item will always not have the
+        # field type.  This can lead to, for instance, "unknown" particle
+        # types not getting correctly identified.
+        # Note that the *only* way this works is if we also fix our field
+        # dependencies during checking.  Bug #627 talks about this.
+        item = self.ds._last_freq
         if finfo is not None and finfo._function.__name__ != 'NullFunc':
             try:
                 vv = finfo(self)
@@ -171,10 +155,7 @@
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
-        if hasattr(self.ds, "field_info"):
-            finfo = self.ds._get_field_info(*field_name)
-        else:
-            finfo = FieldInfo[field_name]
+        finfo = self.ds._get_field_info(*field_name)
         if finfo.particle_type:
             self.requested.append(field_name)
             return np.ones(self.NumberOfParticles)

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/field_exceptions.py
--- a/yt/fields/field_exceptions.py
+++ b/yt/fields/field_exceptions.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 
 class ValidationException(Exception):
     pass

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -32,7 +32,7 @@
         # it from a cm**2 array.
         np.subtract(data["%s%s" % (field_prefix, ax)].in_units("cm"),
                     center[i], r)
-        if data.ds.periodicity[i] == True:
+        if data.ds.periodicity[i] is True:
             np.abs(r, r)
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
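
One caveat worth noting on the "== True" to "is True" change: the two are only equivalent when periodicity holds native Python bools. A NumPy boolean compares equal to True but is not the True singleton:

    import numpy as np

    flag = np.bool_(True)
    print(flag == True)   # True  -- value comparison
    print(flag is True)   # False -- np.bool_ is a distinct object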

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/field_plugin_registry.py
--- a/yt/fields/field_plugin_registry.py
+++ b/yt/fields/field_plugin_registry.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 field_plugins = {}
 
 def register_field_plugin(func):

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -15,20 +15,16 @@
 
 import numpy as np
 
-from yt.funcs import \
-    just_one
-
 from .derived_field import \
-    ValidateParameter, \
     ValidateSpatial
 
 from .field_plugin_registry import \
     register_field_plugin
 
 from .vector_operations import \
-     create_averaged_field, \
-     create_magnitude_field, \
-     create_vector_fields
+    create_averaged_field, \
+    create_magnitude_field, \
+    create_vector_fields
 
 from yt.utilities.physical_constants import \
     mh, \
@@ -37,20 +33,6 @@
 from yt.utilities.physical_ratios import \
     metallicity_sun
 
-from yt.units.yt_array import \
-    YTArray
-
-from yt.utilities.math_utils import \
-    get_sph_r_component, \
-    get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
 
 @register_field_plugin
 def setup_fluid_fields(registry, ftype = "gas", slice_info = None):

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -16,10 +16,7 @@
 import numpy as np
 
 from yt.fields.derived_field import \
-    ValidateGridType, \
-    ValidateParameter, \
-    ValidateSpatial, \
-    NeedsParameter
+    ValidateSpatial
 
 from .field_plugin_registry import \
     register_field_plugin
@@ -28,8 +25,8 @@
     just_one
 
 from .vector_operations import \
-     create_magnitude_field, \
-     create_squared_field
+    create_magnitude_field, \
+    create_squared_field
 
 @register_field_plugin
 def setup_fluid_vector_fields(registry, ftype = "gas", slice_info = None):

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/interpolated_fields.py
--- a/yt/fields/interpolated_fields.py
+++ b/yt/fields/interpolated_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.fields.local_fields import add_field
 
 from yt.utilities.linear_interpolators import \

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/local_fields.py
--- a/yt/fields/local_fields.py
+++ b/yt/fields/local_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.utilities.logger import \
     ytLogger as mylog
 

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/magnetic_field.py
--- a/yt/fields/magnetic_field.py
+++ b/yt/fields/magnetic_field.py
@@ -15,11 +15,6 @@
 
 import numpy as np
 
-from yt.units.yt_array import YTArray
-from yt.utilities.lib.misc_utilities import \
-    obtain_rvec, obtain_rv_vec
-from yt.utilities.math_utils import resize_vector
-from yt.utilities.cosmology import Cosmology
 from yt.fields.derived_field import \
     ValidateParameter
 
@@ -27,16 +22,8 @@
     register_field_plugin
 
 from yt.utilities.math_utils import \
-    get_sph_r_component, \
     get_sph_theta_component, \
-    get_sph_phi_component, \
-    get_cyl_r_component, \
-    get_cyl_z_component, \
-    get_cyl_theta_component, \
-    get_cyl_r, get_cyl_theta, \
-    get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi, \
-    periodic_dist, euclidean_dist
+    get_sph_phi_component
 
 @register_field_plugin
 def setup_magnetic_field_fields(registry, ftype = "gas", slice_info = None):

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/my_plugin_fields.py
--- a/yt/fields/my_plugin_fields.py
+++ b/yt/fields/my_plugin_fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from .field_plugin_registry import \
     register_field_plugin
 

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -16,8 +16,6 @@
 
 import numpy as np
 
-from yt.funcs import *
-from yt.units.yt_array import YTArray
 from yt.fields.derived_field import \
     ValidateParameter, \
     ValidateSpatial
@@ -125,7 +123,7 @@
     def particle_density(field, data):
         pos = data[ptype, coord_name].convert_to_units("code_length")
         mass = data[ptype, mass_name].convert_to_units("code_mass")
-        d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
+        d = data.deposit(pos, [mass], method = "sum")
         d = data.ds.arr(d, "code_mass")
         d /= data["index", "cell_volume"]
         return d
@@ -790,13 +788,19 @@
         kwargs = {}
         if nneighbors:
             kwargs['nneighbors'] = nneighbors
+        # This is for applying cutoffs, similar to in the SPLASH paper.
+        smooth_cutoff = data["index","cell_volume"]**(1./3)
+        smooth_cutoff.convert_to_units("code_length")
         # volume_weighted smooth operations return lists of length 1.
         rv = data.smooth(pos, [mass, hsml, dens, quan],
                          method="volume_weighted",
                          create_octree=True,
+                         index_fields=[smooth_cutoff],
                          kernel_name=kernel_name)[0]
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.
+        # This should be used when seeking a non-normalized value:
+        rv /= hsml.uq**3 / hsml.uq.in_cgs().uq**3
         rv = data.apply_units(rv, field_units)
         return rv
     registry.add_field(field_name, function = _vol_weight,
@@ -827,7 +831,7 @@
         field_name = (ptype, "smoothed_density")
     else:
         field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-    field_units = registry[ptype, mass_name].units
+
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name]
         pos.convert_to_units("code_length")

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/setup.py
--- a/yt/fields/setup.py
+++ b/yt/fields/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -17,12 +17,10 @@
 import re
 
 from yt.utilities.physical_constants import \
-    mh, \
-    mass_sun_cgs, \
     amu_cgs
 from yt.utilities.physical_ratios import \
     primordial_H_mass_fraction
-from yt.funcs import *
+
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
 from .field_plugin_registry import \

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -1,13 +1,20 @@
-from yt.testing import *
 import numpy as np
+
+from yt.testing import \
+    fake_random_ds, \
+    assert_equal, \
+    assert_array_almost_equal_nulp, \
+    assert_array_equal, \
+    assert_raises
 from yt.utilities.cosmology import \
-     Cosmology
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    Cosmology
 from yt.frontends.stream.fields import \
     StreamFieldInfo
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray, YTQuantity
+from yt.utilities.exceptions import \
+    YTFieldUnitError, \
+    YTFieldUnitParseError
 
 def setup():
     global base_ds
@@ -88,19 +95,6 @@
         return field
     return field[1]
 
-def _expand_field(field):
-    if isinstance(field, tuple):
-        return field
-    if field in KnownStreamFields:
-        fi = KnownStreamFields[field]
-        if fi.particle_type:
-            return ("all", field)
-        else:
-            return ("gas", field)
-    # Otherwise, we just guess.
-    if "particle" in field:
-        return ("all", field)
-    return ("gas", field)
 
 class TestFieldAccess(object):
     description = None

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -178,7 +178,8 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return  1.0/(2**self.max_level)
+        return (self.dataset.domain_width /
+                (self.dataset.domain_dimensions * 2**(self.max_level))).min()
 
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
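
The corrected get_smallest_dx above folds in both the root-grid resolution and the (possibly anisotropic) domain width, which the old 1.0/(2**max_level) expression ignored. A worked example with illustrative numbers (unit cube, 128**3 root grid, max_level of 5):

    import numpy as np

    domain_width = np.array([1.0, 1.0, 1.0])
    domain_dimensions = np.array([128, 128, 128])
    max_level = 5

    dx = (domain_width / (domain_dimensions * 2**max_level)).min()
    print(dx)   # 0.000244140625, i.e. 1/4096; the old code returned 1/32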

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -185,27 +185,10 @@
             for alias in aliases:
                 self.alias((ptype, alias), (ptype, f), units = output_units)
 
-        # We'll either have particle_position or particle_position_[xyz]
-        if (ptype, "particle_position") in self.field_list or \
-           (ptype, "particle_position") in self.field_aliases:
-            particle_scalar_functions(ptype,
-                   "particle_position", "particle_velocity",
-                   self)
-        else:
-            # We need to check to make sure that there's a "known field" that
-            # overlaps with one of the vector fields.  For instance, if we are
-            # in the Stream frontend, and we have a set of scalar position
-            # fields, they will overlap with -- and be overridden by -- the
-            # "known" vector field that the frontend creates.  So the easiest
-            # thing to do is to simply remove the on-disk field (which doesn't
-            # exist) and replace it with a derived field.
-            if (ptype, "particle_position") in self and \
-                 self[ptype, "particle_position"]._function == NullFunc:
-                self.pop((ptype, "particle_position"))
-            particle_vector_functions(ptype,
-                    ["particle_position_%s" % ax for ax in 'xyz'],
-                    ["particle_velocity_%s" % ax for ax in 'xyz'],
-                    self)
+        ppos_fields = ["particle_position_%s" % ax for ax in 'xyz']
+        pvel_fields = ["particle_velocity_%s" % ax for ax in 'xyz']
+        particle_vector_functions(ptype, ppos_fields, pvel_fields, self)
+
         particle_deposition_functions(ptype, "particle_position",
             "particle_mass", self)
         standard_particle_fields(self, ptype)
@@ -219,7 +202,7 @@
             self.add_output_field(field, 
                                   units = self.ds.field_units.get(field, ""),
                                   particle_type = True)
-        self.setup_smoothed_fields(ptype, 
+        self.setup_smoothed_fields(ptype,
                                    num_neighbors=num_neighbors,
                                    ftype=ftype)
 

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -77,7 +77,7 @@
 
     def _set_units(self):
         self.unit_registry = UnitRegistry()
-        self.unit_registry.lut["code_time"] = (1.0, dimensions.time)
+        self.unit_registry.add("code_time", 1.0, dimensions.time)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
             self.cosmology = \

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -420,7 +420,7 @@
         Generates the conversion to various physical _units based on the parameter file
         """
         default_length_units = [u for u,v in default_unit_symbol_lut.items()
-                                if str(v[-1]) == "(length)"]
+                                if str(v[1]) == "(length)"]
         more_length_units = []
         for unit in default_length_units:
             if unit in prefixable_units:

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-Gadget frontend tests using the IsothermalCollapse dataset
+Gadget frontend tests
 
 
 
@@ -14,15 +14,56 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from collections import OrderedDict
+
+from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import \
+    data_dir_load, \
     requires_ds, \
-    data_dir_load
-from yt.frontends.gadget.api import GadgetHDF5Dataset
+    sph_answer
+from yt.frontends.gadget.api import GadgetHDF5Dataset, GadgetDataset
 
-isothermal = "IsothermalCollapse/snap_505.hdf5"
-@requires_file(isothermal)
+isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
+isothermal_bin = "IsothermalCollapse/snap_505"
+gdg = "GadgetDiskGalaxy/snapshot_200.hdf5"
+
+# This maps from field names to weight field names to use for projections
+iso_fields = OrderedDict(
+    [
+        (("gas", "density"), None),
+        (("gas", "temperature"), None),
+        (("gas", "temperature"), ('gas', 'density')),
+        (('gas', 'velocity_magnitude'), None),
+        (("deposit", "all_density"), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+        (("deposit", "PartType0_density"), None),
+    ]
+)
+iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
+
+gdg_fields = iso_fields.copy()
+gdg_fields["deposit", "PartType4_density"] = None
+gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
+
+
+@requires_file(isothermal_h5)
+@requires_file(isothermal_bin)
 def test_GadgetDataset():
-    kwargs = dict(bounding_box=[[-3,3], [-3,3], [-3,3]])
-    assert isinstance(data_dir_load(isothermal, kwargs=kwargs),
+    assert isinstance(data_dir_load(isothermal_h5, kwargs=iso_kwargs),
                       GadgetHDF5Dataset)
+    assert isinstance(data_dir_load(isothermal_bin, kwargs=iso_kwargs),
+                      GadgetDataset)
+
+
+@requires_ds(isothermal_h5)
+def test_iso_collapse():
+    for test in sph_answer(isothermal_h5, 'snap_505', 2**17,
+                           iso_fields, ds_kwargs=iso_kwargs):
+        yield test
+
+@requires_ds(gdg, big_data=True)
+def test_gadget_disk_galaxy():
+    for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
+                           ds_kwargs=gdg_kwargs):
+        yield test
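
A hedged note on iso_fields above (the same applies to _fields in the OWLS tests below): dictionary keys are unique, so listing ("gas", "temperature") twice keeps only the last pair, and the unweighted temperature projection is silently dropped. A quick demonstration:

    from collections import OrderedDict

    d = OrderedDict([
        (("gas", "temperature"), None),
        (("gas", "temperature"), ("gas", "density")),
    ])
    print(len(d))                      # 1
    print(d[("gas", "temperature")])   # ('gas', 'density') -- the last value wins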

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -14,45 +14,39 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from collections import OrderedDict
+
+from yt.testing import \
+    requires_file
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
-    big_patch_amr, \
     data_dir_load, \
-    PixelizedProjectionValuesTest, \
-    FieldValuesTest, \
-    create_obj
+    sph_answer
 from yt.frontends.owls.api import OWLSDataset
 
-_fields = (("deposit", "all_density"), ("deposit", "all_count"),
-           ("deposit", "PartType0_density"),
-           ("deposit", "PartType4_density"))
+os33 = "snapshot_033/snap_033.0.hdf5"
 
-os33 = "snapshot_033/snap_033.0.hdf5"
+# This maps from field names to weight field names to use for projections
+_fields = OrderedDict(
+    [
+        (("gas", "density"), None),
+        (("gas", "temperature"), None),
+        (("gas", "temperature"), ("gas", "density")),
+        (('gas', 'He_p0_number_density'), None),
+        (('gas', 'velocity_magnitude'), None),
+        (("deposit", "all_density"), None),
+        (("deposit", "all_count"), None),
+        (("deposit", "all_cic"), None),
+        (("deposit", "PartType0_density"), None),
+        (("deposit", "PartType4_density"), None),
+    ]
+)
+
+
 @requires_ds(os33, big_data=True)
 def test_snapshot_033():
-    ds = data_dir_load(os33)
-    yield assert_equal, str(ds), "snap_033"
-    dso = [ None, ("sphere", ("c", (0.1, 'unitary')))]
-    dd = ds.all_data()
-    yield assert_equal, dd["particle_position"].shape[0], 2*(128*128*128)
-    yield assert_equal, dd["particle_position"].shape[1], 3
-    tot = sum(dd[ptype,"particle_position"].shape[0]
-              for ptype in ds.particle_types if ptype != "all")
-    yield assert_equal, tot, (2*128*128*128)
-    for dobj_name in dso:
-        for field in _fields:
-            for axis in [0, 1, 2]:
-                for weight_field in [None, "density"]:
-                    yield PixelizedProjectionValuesTest(
-                        os33, axis, field, weight_field,
-                        dobj_name)
-            yield FieldValuesTest(os33, field, dobj_name)
-        dobj = create_obj(ds, dobj_name)
-        s1 = dobj["ones"].sum()
-        s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+    for test in sph_answer(os33, 'snap_033', 2*128**3, _fields):
+        yield test
 
 
 @requires_file(os33)

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -39,7 +39,7 @@
         ("Metals", ("code_metallicity", ["metallicity"], None)),
         ("Metallicity", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
-        ("StarFormationRate", ("code_mass / code_time", [], None)),
+        ("StarFormationRate", ("Msun / yr", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
         # These are metallicity fields that get discovered for FIRE simulations
         ("Metallicity_00", ("", ["metallicity"], None)),

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,7 +19,7 @@
 import unittest
 
 from yt.testing import assert_raises
-from yt.utilities.answer_testing.framework import data_dir_load
+from yt.convenience import load
 from yt.utilities.exceptions import YTOutputNotIdentified
 
 class TestEmptyLoad(unittest.TestCase):
@@ -40,6 +40,6 @@
         shutil.rmtree(self.tmpdir)
 
     def test_load_empty_file(self):
-        assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
-        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
-        assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")
+        assert_raises(YTOutputNotIdentified, load, "not_a_file")
+        assert_raises(YTOutputNotIdentified, load, "empty_file")
+        assert_raises(YTOutputNotIdentified, load, "empty_directory")

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -32,6 +32,7 @@
 from yt.utilities.physical_constants import \
     G, \
     cm_per_kpc
+from yt import YTQuantity
 
 from .fields import \
     TipsyFieldInfo
@@ -71,7 +72,7 @@
                  bounding_box=None,
                  units_override=None):
         # Because Tipsy outputs don't have a fixed domain boundary, one can
-        # specify a bounding box which effectively gives a domain_left_edge 
+        # specify a bounding box which effectively gives a domain_left_edge
         # and domain_right_edge
         self.bounding_box = bounding_box
         self.filter_bbox = (bounding_box is not None)
@@ -167,9 +168,9 @@
         self.domain_dimensions = np.ones(3, "int32") * nz
         periodic = self.parameters.get('bPeriodic', True)
         period = self.parameters.get('dPeriod', None)
-        comoving = self.parameters.get('bComove', False)
         self.periodicity = (periodic, periodic, periodic)
-        if comoving and period is None:
+        self.comoving = self.parameters.get('bComove', False)
+        if self.comoving and period is None:
             period = 1.0
         if self.bounding_box is None:
             if periodic and period is not None:
@@ -179,14 +180,16 @@
             else:
                 self.domain_left_edge = None
                 self.domain_right_edge = None
-        else: 
+        else:
             bbox = np.array(self.bounding_box, dtype="float64")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
             self.domain_left_edge = bbox[:,0]
             self.domain_right_edge = bbox[:,1]
 
-        if comoving:
+        # If the cosmology parameters dictionary got set when data is
+        # loaded, we can assume it's a cosmological data set
+        if self.comoving or self._cosmology_parameters is not None:
             cosm = self._cosmology_parameters or {}
             self.scale_factor = hvals["time"]#In comoving simulations, time stores the scale factor a
             self.cosmological_simulation = 1
@@ -224,8 +227,15 @@
             self.length_unit = self.quan(lu, 'kpc')*self.scale_factor
             self.mass_unit = self.quan(mu, 'Msun')
             density_unit = self.mass_unit/ (self.length_unit/self.scale_factor)**3
-            # Gasoline's hubble constant, dHubble0, is stored units of proper code time.
-            self.hubble_constant *= np.sqrt(G.in_units('kpc**3*Msun**-1*s**-2')*density_unit).value/(3.2407793e-18)  
+
+            # If self.comoving is set, we know this is a gasoline data set,
+            # and we do the conversion on the hubble constant.
+            if self.comoving:
+                # Gasoline's hubble constant, dHubble0, is stored units of
+                # proper code time.
+                self.hubble_constant *= np.sqrt(G.in_units(
+                    'kpc**3*Msun**-1*s**-2') * density_unit).value / (
+                    3.2407793e-18)
             cosmo = Cosmology(self.hubble_constant,
                               self.omega_matter, self.omega_lambda)
             self.current_time = cosmo.hubble_time(self.current_redshift)
@@ -237,12 +247,30 @@
             density_unit = self.mass_unit / self.length_unit**3
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
+        # If unit base is defined by the user, override all relevant units
+        if self._unit_base is not None:
+            length = self._unit_base.get('length', self.length_unit)
+            length = self.quan(*length) if isinstance(length, tuple) else self.quan(length)
+            self.length_unit = length
+
+            mass = self._unit_base.get('mass', self.mass_unit)
+            mass = self.quan(*mass) if isinstance(mass, tuple) else self.quan(mass)
+            self.mass_unit = mass
+
+            density_unit = self.mass_unit / self.length_unit**3
+            self.time_unit = 1.0 / np.sqrt(G * density_unit)
+
+            time = self._unit_base.get('time', self.time_unit)
+            time = self.quan(*time) if isinstance(time, tuple) else self.quan(time)
+            self.time_unit = time
+
+
     @staticmethod
     def _validate_header(filename):
         '''
         This method automatically detects whether the tipsy file is big/little endian
         and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
-        Valid is a boolean that is true if the file is a tipsy file, and endianswap is 
+        Valid is a boolean that is true if the file is a tipsy file, and endianswap is
         the endianness character '>' or '<'.
         '''
         try:
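
The _unit_base block above accepts either a bare value or a (value, unit) tuple per entry, expanding tuples through self.quan(*...). A stripped-down sketch of that dispatch, with quan standing in for yt's quantity constructor (illustrative, not the exact API):

    def quan(value, unit="dimensionless"):
        # Stand-in for ds.quan / YTQuantity in this sketch.
        return (float(value), unit)

    def as_quantity(entry):
        return quan(*entry) if isinstance(entry, tuple) else quan(entry)

    print(as_quantity((1.0, "Mpc")))   # (1.0, 'Mpc')
    print(as_quantity(3.0857e24))      # (3.0857e+24, 'dimensionless')

So a user-supplied unit_base such as {"length": (1.0, "kpc"), "mass": (1e10, "Msun")} (hypothetical values) would override the length and mass units, after which the time unit is rederived from 1/sqrt(G * density_unit), as in the diff.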

diff -r 53802d0ec9db96449ed04198190e3b92b87277b9 -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 yt/frontends/tipsy/fields.py
--- a/yt/frontends/tipsy/fields.py
+++ b/yt/frontends/tipsy/fields.py
@@ -38,7 +38,8 @@
         'FeMassFrac':("FeMassFrac", ("dimensionless", ["Fe_fraction"], None)),
         'c':("c", ("code_velocity", [""], None)),
         'acc':("acc", ("code_velocity / code_time", [""], None)),
-        'accg':("accg", ("code_velocity / code_time", [""], None))}
+        'accg':("accg", ("code_velocity / code_time", [""], None)),
+        'smoothlength':('smoothlength', ("code_length", ["smoothing_length"], None))}
 
     def __init__(self, ds, field_list, slice_info = None):
         for field in field_list:
@@ -60,15 +61,19 @@
 
     def setup_gas_particle_fields(self, ptype):
 
-        def _smoothing_length(field, data):
-            # For now, we hardcode num_neighbors.  We should make this configurable
-            # in the future.
-            num_neighbors = 64
-            fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors)
-            return data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors]
+        num_neighbors = 65
+        fn, = add_nearest_neighbor_field(ptype, "particle_position", self, num_neighbors)
+        def _func():
+            def _smoothing_length(field, data):
+                # For now, we hardcode num_neighbors.  We should make this configurable
+                # in the future.
+                rv = data[ptype, 'nearest_neighbor_distance_%d' % num_neighbors]
+                #np.maximum(rv, 0.5*data[ptype, "Epsilon"], rv)
+                return rv
+            return _smoothing_length
 
         self.add_field(
             (ptype, "smoothing_length"),
-            function=_smoothing_length,
+            function=_func(),
             particle_type=True,
             units="code_length")

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/fd6f87791a62/
Changeset:   fd6f87791a62
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 14:29:00+00:00
Summary:     Renaming to_dataset to save_as_dataset and to_yt_dataset to save_as_ytdata.
Affected #:  5 files

diff -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 -r fd6f87791a623ba21b513b9b6bb203a363697aca yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -32,7 +32,7 @@
 
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.frontends.ytdata.utilities import \
-    to_yt_dataset
+    save_as_ytdata
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -470,7 +470,7 @@
         df = pd.DataFrame(data)
         return df
 
-    def to_dataset(self, filename=None, fields=None):
+    def save_as_dataset(self, filename=None, fields=None):
         r"""Export a data object to a reloadable yt dataset.
 
         This function will take a data object and output a dataset 
@@ -498,10 +498,10 @@
         --------
 
         >>> dd = ds.all_data()
-        >>> fn1 = dd.to_dataset(["density", "temperature"])
+        >>> fn1 = dd.save_as_dataset(["density", "temperature"])
         >>> ds1 = yt.load(fn1)
         >>> dd["velocity_magnitude"]
-        >>> fn2 = dd.to_dataset()
+        >>> fn2 = dd.save_as_dataset()
         >>> ds2 = yt.load(fn2)
         """
 
@@ -567,8 +567,8 @@
         extra_attrs["data_type"] = "yt_data_container"
         extra_attrs["container_type"] = self._type_name
         extra_attrs["dimensionality"] = self._dimensionality
-        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
-                      extra_attrs=extra_attrs)
+        save_as_ytdata(self.ds, filename, data, field_types=ftypes,
+                       extra_attrs=extra_attrs)
 
         return filename
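
A hedged note on the docstring examples in this changeset (here and in the profiles.py hunk below): given the save_as_dataset(self, filename=None, fields=None) signature, dd.save_as_dataset(["density", "temperature"]) passes the field list positionally into filename; the keyword form is presumably what was intended:

    >>> fn1 = dd.save_as_dataset(fields=["density", "temperature"])
    >>> ds1 = yt.load(fn1)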
         

diff -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 -r fd6f87791a623ba21b513b9b6bb203a363697aca yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -17,7 +17,7 @@
 import numpy as np
 
 from yt.frontends.ytdata.utilities import \
-    to_yt_dataset
+    save_as_ytdata
 from yt.funcs import get_output_filename
 from yt.funcs import *
 from yt.units.yt_array import uconcatenate, array_like_field
@@ -951,7 +951,7 @@
         else:
             return np.linspace(mi, ma, n+1)
 
-    def to_dataset(self, filename=None):
+    def save_as_dataset(self, filename=None):
         r"""Export a data object to a reloadable yt dataset.
 
         This function will take a profile and output a dataset
@@ -975,10 +975,10 @@
         --------
 
         >>> dd = ds.all_data()
-        >>> fn1 = dd.to_dataset(["density", "temperature"])
+        >>> fn1 = dd.save_as_dataset(["density", "temperature"])
         >>> ds1 = yt.load(fn1)
         >>> dd["velocity_magnitude"]
-        >>> fn2 = dd.to_dataset()
+        >>> fn2 = dd.save_as_dataset()
         >>> ds2 = yt.load(fn2)
         """
 
@@ -1014,8 +1014,8 @@
 
         extra_attrs["dimensionality"] = dimensionality
         ftypes = dict([(field, "data") for field in data])
-        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
-                      extra_attrs=extra_attrs)
+        save_as_ytdata(self.ds, filename, data, field_types=ftypes,
+                       extra_attrs=extra_attrs)
 
         return filename
 

diff -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 -r fd6f87791a623ba21b513b9b6bb203a363697aca yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -28,4 +28,4 @@
     YTGridFieldInfo
 
 from .utilities import \
-    to_yt_dataset
+    save_as_ytdata

diff -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 -r fd6f87791a623ba21b513b9b6bb203a363697aca yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -22,8 +22,8 @@
 from yt.units.yt_array import \
     YTArray
 
-def to_yt_dataset(ds, filename, data, field_types=None,
-                  extra_attrs=None):
+def save_as_ytdata(ds, filename, data, field_types=None,
+                   extra_attrs=None):
     r"""Export a set of field arrays to a reloadable yt dataset.
 
     This function can be used to create a yt loadable dataset from a 
@@ -52,7 +52,7 @@
     --------
 
     >>> import yt
-    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> from yt.frontends.ytdata.api import save_as_ytdata
     >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
     >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
     >>> sphere_density = sphere["density"]
@@ -61,17 +61,17 @@
     >>> data = {}
     >>> data["sphere_density"] = sphere_density
     >>> data["region_density"] = region_density
-    >>> to_yt_dataset(ds, "density_data.h5", data)
+    >>> save_as_ytdata(ds, "density_data.h5", data)
 
     >>> import yt
-    >>> from yt.frontends.ytdata.api import to_yt_dataset
+    >>> from yt.frontends.ytdata.api import save_as_ytdata
     >>> from yt.units.yt_array import YTArray, YTQuantity
     >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
     ...         "temperature": YTArray(np.random.random(10), "K")}
     >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
     ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
     ...            "current_time": YTQuantity(10, "Myr")}
-    >>> to_yt_dataset(ds_data, "random_data.h5", data)
+    >>> save_as_ytdata(ds_data, "random_data.h5", data)
     
     """
 

diff -r 40163fdcfd13d122f163b4029f4d3f147fbe89b5 -r fd6f87791a623ba21b513b9b6bb203a363697aca yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.frontends.ytdata.utilities import \
-    to_yt_dataset
+    save_as_ytdata
 from yt.funcs import \
     get_output_filename, \
     mylog
@@ -383,7 +383,7 @@
                                  geometry=self.ds.geometry,
                                  nprocs=nprocs)
 
-    def to_dataset(self, filename=None, fields=None):
+    def save_as_dataset(self, filename=None, fields=None):
         r"""Export a fixed resolution buffer to a reloadable yt dataset.
 
         This function will take a fixed resolution buffer and output a 
@@ -413,7 +413,7 @@
         >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
         >>> proj = ds.proj("density", "x", weight_field="density")
         >>> frb = proj.to_frb(1.0, (800, 800))
-        >>> fn = frb.to_dataset(fields=["density"])
+        >>> fn = frb.save_as_dataset(fields=["density"])
         >>> ds2 = yt.load(fn)
         """
 
@@ -440,8 +440,8 @@
         extra_attrs["data_type"] = "yt_frb"
         extra_attrs["container_type"] = self.data_source._type_name
         extra_attrs["dimensionality"] = self.data_source._dimensionality
-        to_yt_dataset(self.ds, filename, data, field_types=ftypes,
-                      extra_attrs=extra_attrs)
+        save_as_ytdata(self.ds, filename, data, field_types=ftypes,
+                       extra_attrs=extra_attrs)
 
         return filename
 


https://bitbucket.org/yt_analysis/yt/commits/22398ff08091/
Changeset:   22398ff08091
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 14:34:12+00:00
Summary:     Particle fields now retain their exact particle type name instead of "<type>_particles". This removes the "all" union that would have combined grid cells and particles.
Affected #:  1 file

diff -r fd6f87791a623ba21b513b9b6bb203a363697aca -r 22398ff080918d6335a465b85851fadbfea59dd2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -532,7 +532,7 @@
             elif self.ds.field_info[field].particle_type:
                 if field[0] not in ptypes:
                     ptypes.append(field[0])
-                ftypes[field] = "%s_particles" % field[0]
+                ftypes[field] = field[0]
                 need_particle_positions = True
             else:
                 ftypes[field] = "grid"
@@ -547,7 +547,7 @@
                     p_field = (ptype, "particle_position_%s" % ax)
                     if p_field in self.ds.field_info and p_field not in data:
                         data_fields.append(field)
-                        ftypes[p_field] = "%s_particles" % p_field[0]
+                        ftypes[p_field] = p_field[0]
                         data[p_field] = self[p_field]
         if need_grid_positions:
             for ax in "xyz":


https://bitbucket.org/yt_analysis/yt/commits/25069b04ecd3/
Changeset:   25069b04ecd3
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 17:57:27+00:00
Summary:     FRBs now use ds.data instead of ds.frb.
Affected #:  1 file

diff -r 22398ff080918d6335a465b85851fadbfea59dd2 -r 25069b04ecd337ee81362ecb9d320de90c931616 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -235,9 +235,7 @@
 
     def __init__(self, filename):
         Dataset.__init__(self, filename, self._dataset_type)
-
-        if self.data_type == "yt_frb":
-            self.frb = self.index.grids[0]
+        self.data = self.index.grids[0]
 
     def _parse_parameter_file(self):
         self.refine_by = 2
@@ -497,10 +495,6 @@
     default_fluid_type = "data"
     fluid_types = ("data", "gas")
 
-    def __init__(self, filename):
-        super(YTProfileDataset, self).__init__(filename)
-        self.data = self.index.grids[0]
-
     def _parse_parameter_file(self):
         self.refine_by = 2
         self.unique_identifier = time.time()


https://bitbucket.org/yt_analysis/yt/commits/f21cf5ab687b/
Changeset:   f21cf5ab687b
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 18:06:41+00:00
Summary:     Removing some imports.
Affected #:  1 file

diff -r 25069b04ecd337ee81362ecb9d320de90c931616 -r f21cf5ab687b312e287d7c9477967de08aabadd0 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -18,11 +18,9 @@
 import numpy as np
 
 from yt.extern.six import \
-    u, b, iteritems
+    u
 from yt.funcs import \
     mylog
-from yt.geometry.oct_container import \
-    _ORDER_MAX
 from yt.utilities.exceptions import \
     YTDomainOverflow
 from yt.utilities.io_handler import \


https://bitbucket.org/yt_analysis/yt/commits/9a3299116a29/
Changeset:   9a3299116a29
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 18:25:38+00:00
Summary:     Removing the warning about unequally sized data.
Affected #:  1 file

diff -r f21cf5ab687b312e287d7c9477967de08aabadd0 -r 9a3299116a29468dc9d7e72b9f1f48b3b07ade47 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -125,12 +125,7 @@
         else:
             field_name = field
         dataset = _yt_array_hdf5(fh[field_type], field_name, data[field])
-        if "num_elements" in fh[field_type].attrs:
-            if fh[field_type].attrs["num_elements"] != data[field].size:
-                mylog.warn(
-                    "Datasets in %s group have different sizes." % fh[field_type] +
-                    "  This will probably not work right.")
-        else:
+        if "num_elements" not in fh[field_type].attrs:
             fh[field_type].attrs["num_elements"] = data[field].size
     fh.close()
 


https://bitbucket.org/yt_analysis/yt/commits/d85a33193637/
Changeset:   d85a33193637
Branch:      yt
User:        brittonsmith
Date:        2015-09-25 19:29:47+00:00
Summary:     Adding non-spatial dataset and some more refactoring.
Affected #:  2 files

diff -r 9a3299116a29468dc9d7e72b9f1f48b3b07ade47 -r d85a331936379fb5fa7313be76ab00f5a28f6f0e yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -111,7 +111,7 @@
             data_type = f.attrs.get("data_type", None)
             if data_type is None:
                 return False
-            if data_type in ["yt_light_ray", "yt_array_data"]:
+            if data_type in ["yt_light_ray"]:
                 return True
             if data_type == "yt_data_container" and \
               f.attrs.get("container_type", None) not in \
@@ -274,9 +274,6 @@
             self.domain_dimensions = \
               np.concatenate([self.ActiveDimensions, [1]])
 
-    def __repr__(self):
-        return "ytGrid: %s" % self.parameter_filename
-
     def create_field_info(self):
         self.field_info = self._field_info_class(self, self.field_list)
         for ftype, field in self.field_list:
@@ -487,10 +484,10 @@
             fields_to_read)
         return fields_to_return, fields_to_generate
 
-class YTProfileDataset(YTGridDataset):
+class YTNonspatialDataset(YTGridDataset):
     _index_class = YTNonspatialHierarchy
     _field_info_class = YTGridFieldInfo
-    _dataset_type = 'ytprofilehdf5'
+    _dataset_type = 'ytnonspatialhdf5'
     geometry = "cartesian"
     default_fluid_type = "data"
     fluid_types = ("data", "gas")
@@ -508,6 +505,23 @@
         self.particle_types = self.particle_types_raw
 
     def _set_derived_attrs(self):
+        pass
+
+    def _setup_classes(self):
+        # We don't allow geometric selection for non-spatial datasets
+        pass
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not args[0].endswith(".h5"): return False
+        with h5py.File(args[0], "r") as f:
+            data_type = f.attrs.get("data_type", None)
+            if data_type == "yt_array_data":
+                return True
+        return False
+
+class YTProfileDataset(YTNonspatialDataset):
+    def _set_derived_attrs(self):
         self.base_domain_left_edge = self.domain_left_edge
         self.base_domain_right_edge = self.domain_right_edge
         self.base_domain_dimensions = self.domain_dimensions
@@ -529,10 +543,6 @@
         self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
         self.domain_width = self.domain_right_edge - self.domain_left_edge
 
-    def _setup_classes(self):
-        # We don't allow geometric selection for non-spatial datasets
-        pass
-
     @parallel_root_only
     def print_key_parameters(self):
         mylog.info("YTProfileDataset")
@@ -545,13 +555,6 @@
         super(YTProfileDataset, self).print_key_parameters()
         mylog.warn("Geometric data selection not available for this dataset type.")
 
-    def __repr__(self):
-        return "ytProfile: %s" % self.parameter_filename
-
-    def create_field_info(self):
-        self.field_info = self._field_info_class(self, self.field_list)
-        super(YTProfileDataset, self).create_field_info()
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False

diff -r 9a3299116a29468dc9d7e72b9f1f48b3b07ade47 -r d85a331936379fb5fa7313be76ab00f5a28f6f0e yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -28,8 +28,8 @@
 from yt.utilities.lib.geometry_utils import \
     compute_morton
 
-class IOHandlerYTProfileHDF5(BaseIOHandler):
-    _dataset_type = "ytprofilehdf5"
+class IOHandlerYTNonspatialhdf5(BaseIOHandler):
+    _dataset_type = "ytnonspatialhdf5"
     _base = slice(None)
     _field_dtype = "float64"
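
Under this split, a plain array-data file is now picked up by the
non-spatial class rather than the profile class (a sketch; the filename
and field are hypothetical):

    import yt
    # an .h5 file whose "data_type" attribute is "yt_array_data" loads
    # as a YTNonspatialDataset; profiles get their own subclass
    ds = yt.load("random_data.h5")
    # fields live on the single backing "data" grid
    print ds.data["density"]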
 


https://bitbucket.org/yt_analysis/yt/commits/abf03fc01434/
Changeset:   abf03fc01434
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 11:47:36+00:00
Summary:     Updating header string.
Affected #:  1 file

diff -r d85a331936379fb5fa7313be76ab00f5a28f6f0e -r abf03fc0143400a832f88bd3b43cd08eb8dcf0ff yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1,7 +1,7 @@
 """
-Generalized Enzo output objects, both static and time-series.
+Dataset and related data structures.
 
-Presumably at some point EnzoRun will be absorbed into here.
+
 
 
 """


https://bitbucket.org/yt_analysis/yt/commits/136e32d085d6/
Changeset:   136e32d085d6
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 14:05:43+00:00
Summary:     Adding general YTDataset class to get rid of redundant code.
Affected #:  1 file

diff -r abf03fc0143400a832f88bd3b43cd08eb8dcf0ff -r 136e32d085d6cf4f8bde699dd1a9773419d5c72d yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -41,6 +41,8 @@
     GridIndex
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
+from yt.units.yt_array import \
+    YTQuantity
 from yt.utilities.logger import \
     ytLogger as mylog
 from yt.utilities.cosmology import \
@@ -49,13 +51,46 @@
     YTFieldTypeNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
-from yt.units.yt_array import \
-    YTQuantity
 
 _grid_data_containers = ["arbitrary_grid",
                          "covering_grid",
                          "smoothed_covering_grid"]
 
+class YTDataset(Dataset):
+    def _parse_parameter_file(self):
+        self.refine_by = 2
+        with h5py.File(self.parameter_filename, "r") as f:
+            self.parameters.update(
+                dict((key, f.attrs[key]) for key in f.attrs.keys()))
+            self.num_particles = \
+              dict([(group, f[group].attrs["num_elements"])
+                    for group in f if group != self.default_fluid_type])
+        for attr in ["cosmological_simulation", "current_time", "current_redshift",
+                     "hubble_constant", "omega_matter", "omega_lambda",
+                     "dimensionality", "domain_dimensions", "periodicity",
+                     "domain_left_edge", "domain_right_edge"]:
+            setattr(self, attr, self.parameters.get(attr))
+        self.unique_identifier = \
+          int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+    def _set_code_unit_attributes(self):
+        attrs = ('length_unit', 'mass_unit', 'time_unit',
+                 'velocity_unit', 'magnetic_unit')
+        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
+        base_units = np.ones(len(attrs))
+        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
+            if isinstance(unit, string_types):
+                uq = self.quan(1.0, unit)
+            elif isinstance(unit, numeric_type):
+                uq = self.quan(unit, cgs_unit)
+            elif isinstance(unit, YTQuantity):
+                uq = unit
+            elif isinstance(unit, tuple):
+                uq = self.quan(unit[0], unit[1])
+            else:
+                raise RuntimeError("%s (%s) is invalid." % (attr, unit))
+            setattr(self, attr, uq)
+
 class YTDataHDF5File(ParticleFile):
     def __init__(self, ds, io, filename, file_id):
         with h5py.File(filename, "r") as f:
@@ -64,7 +99,7 @@
 
         super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
 
-class YTDataContainerDataset(Dataset):
+class YTDataContainerDataset(YTDataset):
     _index_class = ParticleIndex
     _file_class = YTDataHDF5File
     _field_info_class = YTDataContainerFieldInfo
@@ -75,34 +110,16 @@
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         super(YTDataContainerDataset, self).__init__(filename, dataset_type,
-                                            units_override=units_override)
+            units_override=units_override)
 
     def _parse_parameter_file(self):
-        with h5py.File(self.parameter_filename, "r") as f:
-            hvals = dict((key, f.attrs[key]) for key in f.attrs.keys())
-            self.particle_types_raw = tuple(f.keys())
+        super(YTDataContainerDataset, self)._parse_parameter_file()
+        self.particle_types_raw = tuple(self.num_particles.keys())
         self.particle_types = self.particle_types_raw
-        self.refine_by = 2
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2])
         self.filename_template = self.parameter_filename
         self.file_count = 1
-        for attr in ["cosmological_simulation", "current_time", "current_redshift",
-                     "hubble_constant", "omega_matter", "omega_lambda",
-                     "dimensionality", "domain_left_edge", "domain_right_edge"]:
-            setattr(self, attr, hvals[attr])
-        self.periodicity = (True, True, True)
-
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
-        self.parameters.update(hvals)
-
-    def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.velocity_unit = self.quan(1.0, "cm / s")
-        self.time_unit = self.quan(1.0, "s")
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -225,7 +242,7 @@
                     self.ds.field_units[field_name] = \
                       f[group][field].attrs["units"]
 
-class YTGridDataset(Dataset):
+class YTGridDataset(YTDataset):
     _index_class = YTGridHierarchy
     _field_info_class = YTGridFieldInfo
     _dataset_type = 'ytgridhdf5'
@@ -234,18 +251,12 @@
     fluid_types = ("grid", "gas", "deposit", "index")
 
     def __init__(self, filename):
-        Dataset.__init__(self, filename, self._dataset_type)
+        super(YTGridDataset, self).__init__(filename, self._dataset_type)
         self.data = self.index.grids[0]
 
     def _parse_parameter_file(self):
-        self.refine_by = 2
-        self.unique_identifier = time.time()
-        with h5py.File(self.parameter_filename, "r") as f:
-            for attr, value in f.attrs.items():
-                setattr(self, attr, value)
-            self.num_particles = \
-              dict([(group, f[group].attrs["num_elements"])
-                    for group in f if group != self.default_fluid_type])
+        super(YTGridDataset, self)._parse_parameter_file()
+        self.num_particles.pop(self.default_fluid_type, None)
         self.particle_types_raw = tuple(self.num_particles.keys())
         self.particle_types = self.particle_types_raw
 
@@ -283,24 +294,6 @@
                     (self.default_fluid_type, field))
         super(YTGridDataset, self).create_field_info()
 
-    def _set_code_unit_attributes(self):
-        attrs = ('length_unit', 'mass_unit', 'time_unit',
-                 'velocity_unit', 'magnetic_unit')
-        cgs_units = ('cm', 'g', 's', 'cm/s', 'gauss')
-        base_units = np.ones(len(attrs))
-        for unit, attr, cgs_unit in zip(base_units, attrs, cgs_units):
-            if isinstance(unit, string_types):
-                uq = self.quan(1.0, unit)
-            elif isinstance(unit, numeric_type):
-                uq = self.quan(unit, cgs_unit)
-            elif isinstance(unit, YTQuantity):
-                uq = unit
-            elif isinstance(unit, tuple):
-                uq = self.quan(unit[0], unit[1])
-            else:
-                raise RuntimeError("%s (%s) is invalid." % (attr, unit))
-            setattr(self, attr, uq)
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
@@ -493,14 +486,8 @@
     fluid_types = ("data", "gas")
 
     def _parse_parameter_file(self):
-        self.refine_by = 2
-        self.unique_identifier = time.time()
-        with h5py.File(self.parameter_filename, "r") as f:
-            for attr, value in f.attrs.items():
-                setattr(self, attr, value)
-            self.num_particles = \
-              dict([(group, f[group].attrs["num_elements"])
-                    for group in f if group != self.default_fluid_type])
+        super(YTGridDataset, self)._parse_parameter_file()
+        self.num_particles.pop(self.default_fluid_type, None)
         self.particle_types_raw = tuple(self.num_particles.keys())
         self.particle_types = self.particle_types_raw
 
@@ -521,27 +508,39 @@
         return False
 
 class YTProfileDataset(YTNonspatialDataset):
-    def _set_derived_attrs(self):
+    def _parse_parameter_file(self):
+        super(YTGridDataset, self)._parse_parameter_file()
+        for a in ["profile_dimensions"] + \
+          ["%s_%s" % (ax, attr)
+           for ax in "xyz"[:self.dimensionality]
+           for attr in ["log"]]:
+            setattr(self, a, self.parameters[a])
+
         self.base_domain_left_edge = self.domain_left_edge
         self.base_domain_right_edge = self.domain_right_edge
         self.base_domain_dimensions = self.domain_dimensions
 
         self.domain_dimensions = np.ones(3, dtype="int")
-        self.domain_dimensions[:self.dimensionality] = self.profile_dimensions
+        self.domain_dimensions[:self.dimensionality] = \
+          self.profile_dimensions
         self.domain_left_edge = np.zeros(3)
         self.domain_right_edge = np.ones(3)
         for i, ax in enumerate("xyz"[:self.dimensionality]):
             range_name = "%s_range" % ax
-            my_edge = getattr(self, range_name)
+            my_range = self.parameters[range_name]
             if getattr(self, "%s_log" % ax, False):
-                my_edge = np.log10(my_edge)
-            self.domain_left_edge[i] = my_edge[0]
-            self.domain_right_edge[i] = my_edge[1]
+                my_range = np.log10(my_range)
+            self.domain_left_edge[i] = my_range[0]
+            self.domain_right_edge[i] = my_range[1]
             setattr(self, range_name,
-                    self.arr(getattr(self, range_name),
-                             getattr(self, range_name+"_units")))
-        self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
-        self.domain_width = self.domain_right_edge - self.domain_left_edge
+                    self.arr(self.parameters[range_name],
+                             self.parameters[range_name+"_units"]))
+
+    def _set_derived_attrs(self):
+        self.domain_center = 0.5 * (self.domain_right_edge +
+                                    self.domain_left_edge)
+        self.domain_width = self.domain_right_edge - \
+          self.domain_left_edge
 
     @parallel_root_only
     def print_key_parameters(self):
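
The unit handling that moved into the shared YTDataset base accepts
several spellings for each code-unit attribute. A standalone sketch of
that dispatch (illustrative only; the helper name is made up):

    def _interpret_unit(quan, unit, cgs_unit):
        # a unit name given as a string, e.g. "Mpc", becomes 1.0 of it
        if isinstance(unit, str):
            return quan(1.0, unit)
        # a bare number is assumed to be in the matching cgs unit
        elif isinstance(unit, (int, float)):
            return quan(unit, cgs_unit)
        # (value, "unit") tuples are unpacked; YTQuantity instances
        # pass through unchanged in the real code
        elif isinstance(unit, tuple):
            return quan(unit[0], unit[1])
        raise RuntimeError("%s is invalid." % (unit,))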


https://bitbucket.org/yt_analysis/yt/commits/9d13001b0fea/
Changeset:   9d13001b0fea
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 14:19:50+00:00
Summary:     Fix some bugs from my refactoring.
Affected #:  1 file

diff -r 136e32d085d6cf4f8bde699dd1a9773419d5c72d -r 9d13001b0fea07209a015cfdb64861b13954b825 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -68,7 +68,8 @@
         for attr in ["cosmological_simulation", "current_time", "current_redshift",
                      "hubble_constant", "omega_matter", "omega_lambda",
                      "dimensionality", "domain_dimensions", "periodicity",
-                     "domain_left_edge", "domain_right_edge"]:
+                     "domain_left_edge", "domain_right_edge",
+                     "container_type", "data_type"]:
             setattr(self, attr, self.parameters.get(attr))
         self.unique_identifier = \
           int(os.stat(self.parameter_filename)[stat.ST_CTIME])
@@ -266,11 +267,12 @@
         self.base_domain_dimensions = self.domain_dimensions
         if self.container_type in _grid_data_containers:
             dx = (self.domain_right_edge - self.domain_left_edge) / \
-              (self.domain_dimensions * self.refine_by**self.level)
-            self.domain_left_edge = self.left_edge
+              (self.domain_dimensions *
+               self.refine_by**self.parameters["level"])
+            self.domain_left_edge = self.parameters["left_edge"]
             self.domain_right_edge = self.domain_left_edge + \
-              self.ActiveDimensions * dx
-            self.domain_dimensions = self.ActiveDimensions
+              self.parameters["ActiveDimensions"] * dx
+            self.domain_dimensions = self.parameters["ActiveDimensions"]
             self.periodicity = \
               np.abs(self.domain_left_edge -
                      self.base_domain_left_edge) < 0.5 * dx
@@ -279,11 +281,11 @@
                    self.base_domain_right_edge) < 0.5 * dx
         elif self.data_type == "yt_frb":
             self.domain_left_edge = \
-              np.concatenate([self.left_edge, [0.]])
+              np.concatenate([self.parameters["left_edge"], [0.]])
             self.domain_right_edge = \
-              np.concatenate([self.right_edge, [1.]])
+              np.concatenate([self.parameters["right_edge"], [1.]])
             self.domain_dimensions = \
-              np.concatenate([self.ActiveDimensions, [1]])
+              np.concatenate([self.parameters["ActiveDimensions"], [1]])
 
     def create_field_info(self):
         self.field_info = self._field_info_class(self, self.field_list)


https://bitbucket.org/yt_analysis/yt/commits/239e35e120e4/
Changeset:   239e35e120e4
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 14:38:58+00:00
Summary:     Abstracting the hierarchy classes.
Affected #:  1 file

diff -r 9d13001b0fea07209a015cfdb64861b13954b825 -r 239e35e120e4752cdfaf5d735ff4cedff2487b48 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -194,15 +194,13 @@
     def Children(self):
         return []
 
-class YTGridHierarchy(GridIndex):
-    grid = YTGrid
-
+class YTDataHierarchy(GridIndex):
     def __init__(self, ds, dataset_type = None):
         self.dataset_type = dataset_type
         self.float_type = 'float64'
         self.dataset = weakref.proxy(ds)
         self.directory = os.getcwd()
-        GridIndex.__init__(self, ds, dataset_type)
+        super(YTDataHierarchy, self).__init__(ds, dataset_type)
 
     def _count_grids(self):
         self.num_grids = 1
@@ -227,11 +225,6 @@
             temp_grids[i] = grid
         self.grids = temp_grids
 
-    def _populate_grid_objects(self):
-        for g in self.grids:
-            g._setup_dx()
-        self.max_level = self.grid_levels.max()
-
     def _detect_output_fields(self):
         self.field_list = []
         self.ds.field_units = self.ds.field_units or {}
@@ -243,6 +236,14 @@
                     self.ds.field_units[field_name] = \
                       f[group][field].attrs["units"]
 
+class YTGridHierarchy(YTDataHierarchy):
+    grid = YTGrid
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._setup_dx()
+        self.max_level = self.grid_levels.max()
+
 class YTGridDataset(YTDataset):
     _index_class = YTGridHierarchy
     _field_info_class = YTGridFieldInfo
@@ -415,39 +416,9 @@
     def Children(self):
         return []
 
-class YTNonspatialHierarchy(GridIndex):
+class YTNonspatialHierarchy(YTDataHierarchy):
     grid = YTNonspatialGrid
 
-    def __init__(self, ds, dataset_type = None):
-        self.dataset_type = dataset_type
-        self.float_type = 'float64'
-        self.dataset = weakref.proxy(ds)
-        self.directory = os.getcwd()
-        GridIndex.__init__(self, ds, dataset_type)
-
-    def _count_grids(self):
-        self.num_grids = 1
-
-    def _parse_index(self):
-        self.grid_dimensions[:] = self.ds.domain_dimensions
-        self.grid_left_edge[:] = self.ds.domain_left_edge
-        self.grid_right_edge[:] = self.ds.domain_right_edge
-        self.grid_levels[:] = np.zeros(self.num_grids)
-        self.grid_procs = np.zeros(self.num_grids)
-        self.grid_particle_count[:] = sum(self.ds.num_particles.values())
-        self.grids = []
-        for gid in range(self.num_grids):
-            self.grids.append(self.grid(gid, self))
-            self.grids[gid].Level = self.grid_levels[gid, 0]
-        self.max_level = self.grid_levels.max()
-        temp_grids = np.empty(self.num_grids, dtype='object')
-        for i, grid in enumerate(self.grids):
-            grid.filename = self.ds.parameter_filename
-            grid._prepare_grid()
-            grid.proc_num = self.grid_procs[i]
-            temp_grids[i] = grid
-        self.grids = temp_grids
-
     def _populate_grid_objects(self):
         for g in self.grids:
             g._setup_dx()
@@ -456,17 +427,6 @@
             g.ActiveDimensions = self.ds.domain_dimensions
         self.max_level = self.grid_levels.max()
 
-    def _detect_output_fields(self):
-        self.field_list = []
-        self.ds.field_units = self.ds.field_units or {}
-        with h5py.File(self.ds.parameter_filename, "r") as f:
-            for group in f:
-                for field in f[group]:
-                    field_name = (str(group), str(field))
-                    self.field_list.append(field_name)
-                    self.ds.field_units[field_name] = \
-                      f[group][field].attrs["units"]
-
     def _read_fluid_fields(self, fields, dobj, chunk = None):
         if len(fields) == 0: return {}, []
         fields_to_read, fields_to_generate = self._split_fields(fields)
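
The resulting index-class layout, for reference:

    GridIndex
      YTDataHierarchy           shared: grid counting, index parsing,
                                and field detection from the HDF5 file
        YTGridHierarchy         spatial grids; sets up dx per grid
        YTNonspatialHierarchy   array data; fakes grid geometry and
                                reads fluid fields without selection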


https://bitbucket.org/yt_analysis/yt/commits/609254e8ac7e/
Changeset:   609254e8ac7e
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 14:45:16+00:00
Summary:     No need to open the file again to count particles.
Affected #:  1 file

diff -r 239e35e120e4752cdfaf5d735ff4cedff2487b48 -r 609254e8ac7e83a84fc0bc8e01c3b2a0e6cc7874 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -256,9 +256,7 @@
         return morton
 
     def _count_particles(self, data_file):
-        with h5py.File(data_file.filename, "r") as f:
-            return dict([(group, f[group].attrs["num_elements"])
-                         for group in f])
+        return self.ds.num_particles
 
     def _identify_fields(self, data_file):
         fields = []


https://bitbucket.org/yt_analysis/yt/commits/0d1578fef055/
Changeset:   0d1578fef055
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 14:55:20+00:00
Summary:     Adding save_as_ytdata to primary import.
Affected #:  1 file

diff -r 609254e8ac7e83a84fc0bc8e01c3b2a0e6cc7874 -r 0d1578fef0559413d1a59b1559989b0decd67c38 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -138,6 +138,9 @@
     load_particles, load_hexahedral_mesh, load_octree, \
     hexahedral_connectivity
 
+from yt.frontends.ytdata.api import \
+    save_as_ytdata
+
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset
 GadgetStaticOutput = deprecated_class(GadgetDataset)


https://bitbucket.org/yt_analysis/yt/commits/2774fcdde642/
Changeset:   2774fcdde642
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 15:15:40+00:00
Summary:     Changing save_as_ytdata to save_as_dataset.
Affected #:  6 files

diff -r 0d1578fef0559413d1a59b1559989b0decd67c38 -r 2774fcdde6429218b8103fb029589fe838062c01 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -139,7 +139,7 @@
     hexahedral_connectivity
 
 from yt.frontends.ytdata.api import \
-    save_as_ytdata
+    save_as_dataset
 
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset

diff -r 0d1578fef0559413d1a59b1559989b0decd67c38 -r 2774fcdde6429218b8103fb029589fe838062c01 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -32,7 +32,7 @@
 
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.frontends.ytdata.utilities import \
-    save_as_ytdata
+    save_as_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -567,8 +567,8 @@
         extra_attrs["data_type"] = "yt_data_container"
         extra_attrs["container_type"] = self._type_name
         extra_attrs["dimensionality"] = self._dimensionality
-        save_as_ytdata(self.ds, filename, data, field_types=ftypes,
-                       extra_attrs=extra_attrs)
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
 
         return filename
         

diff -r 0d1578fef0559413d1a59b1559989b0decd67c38 -r 2774fcdde6429218b8103fb029589fe838062c01 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -17,7 +17,7 @@
 import numpy as np
 
 from yt.frontends.ytdata.utilities import \
-    save_as_ytdata
+    save_as_dataset
 from yt.funcs import get_output_filename
 from yt.funcs import *
 from yt.units.yt_array import uconcatenate, array_like_field
@@ -1014,8 +1014,8 @@
 
         extra_attrs["dimensionality"] = dimensionality
         ftypes = dict([(field, "data") for field in data])
-        save_as_ytdata(self.ds, filename, data, field_types=ftypes,
-                       extra_attrs=extra_attrs)
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
 
         return filename
 

diff -r 0d1578fef0559413d1a59b1559989b0decd67c38 -r 2774fcdde6429218b8103fb029589fe838062c01 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -28,4 +28,4 @@
     YTGridFieldInfo
 
 from .utilities import \
-    save_as_ytdata
+    save_as_dataset

diff -r 0d1578fef0559413d1a59b1559989b0decd67c38 -r 2774fcdde6429218b8103fb029589fe838062c01 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -22,8 +22,8 @@
 from yt.units.yt_array import \
     YTArray
 
-def save_as_ytdata(ds, filename, data, field_types=None,
-                   extra_attrs=None):
+def save_as_dataset(ds, filename, data, field_types=None,
+                    extra_attrs=None):
     r"""Export a set of field arrays to a reloadable yt dataset.
 
     This function can be used to create a yt loadable dataset from a 
@@ -52,7 +52,7 @@
     --------
 
     >>> import yt
-    >>> from yt.frontends.ytdata.api import save_as_ytdata
+    >>> from yt.frontends.ytdata.api import save_as_dataset
     >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
     >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
     >>> sphere_density = sphere["density"]
@@ -61,17 +61,17 @@
     >>> data = {}
     >>> data["sphere_density"] = sphere_density
     >>> data["region_density"] = region_density
-    >>> save_as_ytdata(ds, "density_data.h5", data)
+    >>> save_as_dataset(ds, "density_data.h5", data)
 
     >>> import yt
-    >>> from yt.frontends.ytdata.api import save_as_ytdata
+    >>> from yt.frontends.ytdata.api import save_as_dataset
     >>> from yt.units.yt_array import YTArray, YTQuantity
     >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
     ...         "temperature": YTArray(np.random.random(10), "K")}
     >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
     ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
     ...            "current_time": YTQuantity(10, "Myr")}
-    >>> save_as_ytdata(ds_data, "random_data.h5", data)
+    >>> save_as_dataset(ds_data, "random_data.h5", data)
     
     """
 

diff -r 0d1578fef0559413d1a59b1559989b0decd67c38 -r 2774fcdde6429218b8103fb029589fe838062c01 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.frontends.ytdata.utilities import \
-    save_as_ytdata
+    save_as_dataset
 from yt.funcs import \
     get_output_filename, \
     mylog
@@ -440,8 +440,8 @@
         extra_attrs["data_type"] = "yt_frb"
         extra_attrs["container_type"] = self.data_source._type_name
         extra_attrs["dimensionality"] = self.data_source._dimensionality
-        save_as_ytdata(self.ds, filename, data, field_types=ftypes,
-                       extra_attrs=extra_attrs)
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
 
         return filename
 


https://bitbucket.org/yt_analysis/yt/commits/59dde4157232/
Changeset:   59dde4157232
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 15:21:56+00:00
Summary:     Adding imports.
Affected #:  1 file

diff -r 2774fcdde6429218b8103fb029589fe838062c01 -r 59dde41572328e3962721cf3d2fe99f9164b0024 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -16,9 +16,14 @@
 
 from .data_structures import \
     YTDataContainerDataset, \
+    YTSpatialPlotDataset, \
     YTGridDataset, \
     YTGridHierarchy, \
-    YTGrid
+    YTGrid, \
+    YTNonspatialDataset, \
+    YTNonspatialHierarchy, \
+    YTNonspatialGrid, \
+    YTProfileDataset
 
 from .io import \
     IOHandlerYTDataContainerHDF5


https://bitbucket.org/yt_analysis/yt/commits/a816434ed309/
Changeset:   a816434ed309
Branch:      yt
User:        brittonsmith
Date:        2015-09-26 15:29:23+00:00
Summary:     Adding a profile argument to supply a profile object to PhasePlot, as the docstring already advertised.
Affected #:  1 file

diff -r 59dde41572328e3962721cf3d2fe99f9164b0024 -r a816434ed30947084cc3a1eb59a10cea50bdd5a6 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -712,17 +712,18 @@
 
     def __init__(self, data_source, x_field, y_field, z_fields,
                  weight_field="cell_mass", x_bins=128, y_bins=128,
-                 accumulation=False, fractional=False,
+                 accumulation=False, fractional=False, profile=None,
                  fontsize=18, figure_size=8.0):
 
-        profile = create_profile(
-            data_source,
-            [x_field, y_field],
-            ensure_list(z_fields),
-            n_bins=[x_bins, y_bins],
-            weight_field=weight_field,
-            accumulation=accumulation,
-            fractional=fractional)
+        if profile is None:
+            profile = create_profile(
+                data_source,
+                [x_field, y_field],
+                ensure_list(z_fields),
+                n_bins=[x_bins, y_bins],
+                weight_field=weight_field,
+                accumulation=accumulation,
+                fractional=fractional)
 
         type(self)._initialize_instance(self, data_source, profile, fontsize,
                                         figure_size)
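
A sketch of the new keyword in use (assuming the usual sample dataset):

    import yt
    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()
    prof = yt.create_profile(ad, ["density", "temperature"],
                             ["cell_mass"], weight_field=None,
                             n_bins=[128, 128])
    # hand a pre-built profile to PhasePlot instead of letting it call
    # create_profile internally
    p = yt.PhasePlot(ad, "density", "temperature", "cell_mass",
                     weight_field=None, profile=prof)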


https://bitbucket.org/yt_analysis/yt/commits/6eea7c902226/
Changeset:   6eea7c902226
Branch:      yt
User:        brittonsmith
Date:        2015-10-01 20:08:28+00:00
Summary:     Adding profile objects for profile datasets.
Affected #:  2 files

diff -r a816434ed30947084cc3a1eb59a10cea50bdd5a6 -r 6eea7c9022267ee2e2bde6c8115a51e3cdcebfd8 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -982,12 +982,14 @@
         >>> ds2 = yt.load(fn2)
         """
 
-        keyword = "%s_profile" % str(self.ds)
+        keyword = "%s_%s" % (str(self.ds), self.__class__.__name__)
         filename = get_output_filename(filename, keyword, ".h5")
 
         args = ("field", "log")
         extra_attrs = {"data_type": "yt_profile",
-                       "profile_dimensions": self.size}
+                       "profile_dimensions": self.size,
+                       "weight_field": self.weight_field,
+                       "fractional": self.fractional}
         data = {}
         data.update(self.field_data)
         data["weight"] = self.weight
@@ -1178,6 +1180,46 @@
         return ((self.x_bins[0], self.x_bins[-1]),
                 (self.y_bins[0], self.y_bins[-1]))
 
+class ProfileNDFromDataset(ProfileND):
+    """
+    An ND profile object loaded from a ytdata dataset.
+    """
+    def __init__(self, ds):
+        ProfileND.__init__(self, ds.data, ds.parameters["weight_field"])
+        self.fractional = ds.parameters["fractional"]
+        exclude_fields = ["used", "weight"]
+        for ax in "xyz"[:ds.dimensionality]:
+            setattr(self, ax, ds.data[ax])
+            setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
+            setattr(self, "%s_field" % ax,
+                    tuple(ds.parameters["%s_field" % ax]))
+            setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
+            exclude_fields.extend([ax, "%s_bins" % ax,
+                                   ds.parameters["%s_field" % ax][1]])
+        self.weight = ds.data["weight"]
+        self.used = ds.data["used"].d.astype(bool)
+        profile_fields = [f for f in ds.field_list
+                          if f[1] not in exclude_fields]
+        for field in profile_fields:
+            self.field_map[field[1]] = field
+            self.field_data[field] = ds.data[field]
+            self.field_units[field] = ds.data[field].units
+
+class Profile2DFromDataset(ProfileNDFromDataset, Profile2D):
+    """
+    A 2D profile object loaded from a ytdata dataset.
+    """
+
+    def __init(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
+
+class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
+    """
+    A 1D profile object loaded from a ytdata dataset.
+    """
+
+    def __init(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
 
 class ParticleProfile(Profile2D):
     """An object that represents a *deposited* 2D profile. This is like a

diff -r a816434ed30947084cc3a1eb59a10cea50bdd5a6 -r 6eea7c9022267ee2e2bde6c8115a51e3cdcebfd8 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -31,6 +31,9 @@
 
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.data_objects.profiles import \
+    Profile1DFromDataset, \
+    Profile2DFromDataset
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
@@ -470,8 +473,27 @@
         return False
 
 class YTProfileDataset(YTNonspatialDataset):
+    def __init__(self, filename):
+        super(YTProfileDataset, self).__init__(filename)
+
+    @property
+    def profile(self):
+        if self.dimensionality == 1:
+            return Profile1DFromDataset(self)
+        if self.dimensionality == 2:
+            return Profile2DFromDataset(self)
+        return None
+
     def _parse_parameter_file(self):
         super(YTGridDataset, self)._parse_parameter_file()
+
+        if isinstance(self.parameters["weight_field"], str) and \
+          self.parameters["weight_field"] == "None":
+            self.parameters["weight_field"] = None
+        elif isinstance(self.parameters["weight_field"], np.ndarray):
+            self.parameters["weight_field"] = \
+              tuple(self.parameters["weight_field"])
+
         for a in ["profile_dimensions"] + \
           ["%s_%s" % (ax, attr)
            for ax in "xyz"[:self.dimensionality]
@@ -498,6 +520,16 @@
                     self.arr(self.parameters[range_name],
                              self.parameters[range_name+"_units"]))
 
+            bin_field = "%s_field" % ax
+            if isinstance(self.parameters[bin_field], str) and \
+              self.parameters[bin_field] == "None":
+                self.parameters[bin_field] = None
+            elif isinstance(self.parameters[bin_field], np.ndarray):
+                self.parameters[bin_field] = \
+                  tuple(self.parameters[bin_field])
+            setattr(self, bin_field, self.parameters[bin_field])
+
+
     def _set_derived_attrs(self):
         self.domain_center = 0.5 * (self.domain_right_edge +
                                     self.domain_left_edge)
@@ -510,7 +542,7 @@
         for a in ["dimensionality", "profile_dimensions"] + \
           ["%s_%s" % (ax, attr)
            for ax in "xyz"[:self.dimensionality]
-           for attr in ["range", "log"]]:
+           for attr in ["field", "range", "log"]]:
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
         super(YTProfileDataset, self).print_key_parameters()


https://bitbucket.org/yt_analysis/yt/commits/b131863943ad/
Changeset:   b131863943ad
Branch:      yt
User:        brittonsmith
Date:        2015-10-01 20:26:59+00:00
Summary:     ProfilePlot and PhasePlot now accept YTProfileDatasets.
Affected #:  1 file

diff -r 6eea7c9022267ee2e2bde6c8115a51e3cdcebfd8 -r b131863943ad314009eaede74939f531648f7f82 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -32,6 +32,8 @@
     validate_plot, invalidate_plot
 from yt.data_objects.profiles import \
     create_profile
+from yt.frontends.ytdata.data_structures import \
+    YTProfileDataset
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
 from yt.utilities.logger import ytLogger as mylog
@@ -204,13 +206,16 @@
         else:
             logs = {x_field:x_log}
 
-        profiles = [create_profile(data_source, [x_field],
-                                   n_bins=[n_bins],
-                                   fields=ensure_list(y_fields),
-                                   weight_field=weight_field,
-                                   accumulation=accumulation,
-                                   fractional=fractional,
-                                   logs=logs)]
+        if isinstance(data_source.ds, YTProfileDataset):
+            profiles = [data_source.ds.profile]
+        else:
+            profiles = [create_profile(data_source, [x_field],
+                                       n_bins=[n_bins],
+                                       fields=ensure_list(y_fields),
+                                       weight_field=weight_field,
+                                       accumulation=accumulation,
+                                       fractional=fractional,
+                                       logs=logs)]
 
         if plot_spec is None:
             plot_spec = [dict() for p in profiles]
@@ -716,14 +721,17 @@
                  fontsize=18, figure_size=8.0):
 
         if profile is None:
-            profile = create_profile(
-                data_source,
-                [x_field, y_field],
-                ensure_list(z_fields),
-                n_bins=[x_bins, y_bins],
-                weight_field=weight_field,
-                accumulation=accumulation,
-                fractional=fractional)
+            if isinstance(data_source.ds, YTProfileDataset):
+                profile = data_source.ds.profile
+            else:
+                profile = create_profile(
+                    data_source,
+                    [x_field, y_field],
+                    ensure_list(z_fields),
+                    n_bins=[x_bins, y_bins],
+                    weight_field=weight_field,
+                    accumulation=accumulation,
+                    fractional=fractional)
 
         type(self)._initialize_instance(self, data_source, profile, fontsize,
                                         figure_size)
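
A saved profile can now be plotted directly after reloading; the stored
profile is reused rather than recomputed (the filename is hypothetical):

    import yt
    prof_ds = yt.load("DD0046_Profile2D.h5")
    p = yt.PhasePlot(prof_ds.data, "density", "temperature",
                     "cell_mass", weight_field=None)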


https://bitbucket.org/yt_analysis/yt/commits/9de4925af097/
Changeset:   9de4925af097
Branch:      yt
User:        brittonsmith
Date:        2015-10-01 20:57:39+00:00
Summary:     Fixing up docstrings.
Affected #:  4 files

diff -r b131863943ad314009eaede74939f531648f7f82 -r 9de4925af0977e3824c2de257e03b9b14548b739 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -480,11 +480,11 @@
 
         Parameters
         ----------
-        filename : str
+        filename : str, optional
             The name of the file to be written.  If None, the name 
             will be a combination of the original dataset and the type 
             of data container.
-        fields : list of strings or tuples, default None
+        fields : list of strings or tuples, optional
             If this is supplied, it is the list of fields to be exported into
             the data frame.  If not supplied, whatever fields presently exist
             will be used.
@@ -497,12 +497,14 @@
         Examples
         --------
 
-        >>> dd = ds.all_data()
-        >>> fn1 = dd.save_as_dataset(["density", "temperature"])
-        >>> ds1 = yt.load(fn1)
-        >>> dd["velocity_magnitude"]
-        >>> fn2 = dd.save_as_dataset()
-        >>> ds2 = yt.load(fn2)
+        >>> import yt
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> sp = ds.sphere(ds.domain_center, (10, "Mpc"))
+        >>> fn = sp.save_as_dataset(["density", "temperature"])
+        >>> sphere_ds = yt.load(fn)
+        >>> ad = sphere_ds.all_data()
+        >>> print ad["temperature"]
+
         """
 
         keyword = "%s_%s" % (str(self.ds), self._type_name)

diff -r b131863943ad314009eaede74939f531648f7f82 -r 9de4925af0977e3824c2de257e03b9b14548b739 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -952,7 +952,7 @@
             return np.linspace(mi, ma, n+1)
 
     def save_as_dataset(self, filename=None):
-        r"""Export a data object to a reloadable yt dataset.
+        r"""Export a profile to a reloadable yt dataset.
 
         This function will take a profile and output a dataset
         containing either the fields presently existing or fields
@@ -961,10 +961,10 @@
 
         Parameters
         ----------
-        filename : str
+        filename : str, optional
             The name of the file to be written.  If None, the name
             will be a combination of the original dataset plus
-            "profile".
+            the type of object, e.g., Profile1D.
 
         Returns
         -------
@@ -974,12 +974,20 @@
         Examples
         --------
 
-        >>> dd = ds.all_data()
-        >>> fn1 = dd.save_as_dataset(["density", "temperature"])
-        >>> ds1 = yt.load(fn1)
-        >>> dd["velocity_magnitude"]
-        >>> fn2 = dd.save_as_dataset()
-        >>> ds2 = yt.load(fn2)
+        >>> import yt
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> ad = ds.all_data()
+        >>> profile = yt.create_profile(ad, ["density", "temperature"],
+        ...                            "cell_mass", weight_field=None,
+        ...                             n_bins=(128, 128))
+        >>> fn = profile.save_as_dataset()
+        >>> prof_ds = yt.load(fn)
+        >>> print prof_ds.data["cell_mass"]
+        >>> print prof_ds.data["x"]
+        >>> print prof_ds.data["density"]
+        >>> p = yt.PhasePlot(prof_ds.data, "density", "temperature",
+        ...                  "cell_mass", weight_field=None)
+
         """
 
         keyword = "%s_%s" % (str(self.ds), self.__class__.__name__)

diff -r b131863943ad314009eaede74939f531648f7f82 -r 9de4925af0977e3824c2de257e03b9b14548b739 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -40,7 +40,12 @@
         The name of the file to be written.
     data : dict
         A dictionary of field arrays to be saved.
-    extra_attrs: dict
+    field_types: dict, optional
+        A dictionary denoting the group name to which each field is to
+        be saved.  When the resulting dataset is reloaded, this will be
+        the field type for this field.  If not given, "data" will be
+        used.
+    extra_attrs: dict, optional
         A dictionary of additional attributes to be saved.
 
     Returns
@@ -52,7 +57,6 @@
     --------
 
     >>> import yt
-    >>> from yt.frontends.ytdata.api import save_as_dataset
     >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
     >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
     >>> sphere_density = sphere["density"]
@@ -61,18 +65,20 @@
     >>> data = {}
     >>> data["sphere_density"] = sphere_density
     >>> data["region_density"] = region_density
-    >>> save_as_dataset(ds, "density_data.h5", data)
+    >>> yt.save_as_dataset(ds, "density_data.h5", data)
+    >>> new_ds = yt.load("density_data.h5")
+    >>> print new_ds.data["region_density"]
+    >>> print new_ds.data["sphere_density"]
 
-    >>> import yt
-    >>> from yt.frontends.ytdata.api import save_as_dataset
-    >>> from yt.units.yt_array import YTArray, YTQuantity
-    >>> data = {"density": YTArray(np.random.random(10), "g/cm**3"),
-    ...         "temperature": YTArray(np.random.random(10), "K")}
-    >>> ds_data = {"domain_left_edge": YTArray(np.zeros(3), "cm"),
-    ...            "domain_right_edge": YTArray(np.ones(3), "cm"),
-    ...            "current_time": YTQuantity(10, "Myr")}
-    >>> save_as_dataset(ds_data, "random_data.h5", data)
-    
+    >>> data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
+    ...         "temperature": yt.YTArray(np.random.random(10), "K")}
+    >>> ds_data = {"domain_left_edge": yt.YTArray(np.zeros(3), "cm"),
+    ...            "domain_right_edge": yt.YTArray(np.ones(3), "cm"),
+    ...            "current_time": yt.YTQuantity(10, "Myr")}
+    >>> yt.save_as_dataset(ds_data, "random_data.h5", data)
+    >>> new_ds = yt.load("random_data.h5")
+    >>> print new_ds.data["temperature"]
+
     """
 
     mylog.info("Saving field data to yt dataset: %s." % filename)

diff -r b131863943ad314009eaede74939f531648f7f82 -r 9de4925af0977e3824c2de257e03b9b14548b739 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -393,11 +393,11 @@
 
         Parameters
         ----------
-        filename : str
+        filename : str, optional
             The name of the file to be written.  If None, the name 
             will be a combination of the original dataset and the type 
             of data container.
-        fields : list of strings or tuples, default None
+        fields : list of strings or tuples, optional
             If this is supplied, it is the list of fields to be exported into
             the data frame.  If not supplied, whatever fields presently exist
             will be used.
@@ -410,11 +410,14 @@
         Examples
         --------
 
+        >>> import yt
         >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
         >>> proj = ds.proj("density", "x", weight_field="density")
         >>> frb = proj.to_frb(1.0, (800, 800))
         >>> fn = frb.save_as_dataset(fields=["density"])
         >>> ds2 = yt.load(fn)
+        >>> print ds2.data["density"]
+
         """
 
         keyword = "%s_%s_frb" % (str(self.ds), self.data_source._type_name)


https://bitbucket.org/yt_analysis/yt/commits/2bed2a36b739/
Changeset:   2bed2a36b739
Branch:      yt
User:        brittonsmith
Date:        2015-10-01 21:10:31+00:00
Summary:     Adding a few more docstrings and a 3D profile from dataset.
Affected #:  2 files

diff -r 9de4925af0977e3824c2de257e03b9b14548b739 -r 2bed2a36b73928bd5e67bef83a45e0cbfbca46ec yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1029,6 +1029,31 @@
 
         return filename
 
+class ProfileNDFromDataset(ProfileND):
+    """
+    An ND profile object loaded from a ytdata dataset.
+    """
+    def __init__(self, ds):
+        ProfileND.__init__(self, ds.data, ds.parameters["weight_field"])
+        self.fractional = ds.parameters["fractional"]
+        exclude_fields = ["used", "weight"]
+        for ax in "xyz"[:ds.dimensionality]:
+            setattr(self, ax, ds.data[ax])
+            setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
+            setattr(self, "%s_field" % ax,
+                    tuple(ds.parameters["%s_field" % ax]))
+            setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
+            exclude_fields.extend([ax, "%s_bins" % ax,
+                                   ds.parameters["%s_field" % ax][1]])
+        self.weight = ds.data["weight"]
+        self.used = ds.data["used"].d.astype(bool)
+        profile_fields = [f for f in ds.field_list
+                          if f[1] not in exclude_fields]
+        for field in profile_fields:
+            self.field_map[field[1]] = field
+            self.field_data[field] = ds.data[field]
+            self.field_units[field] = ds.data[field].units
+
 class Profile1D(ProfileND):
     """An object that represents a 1D profile.
 
@@ -1091,6 +1116,14 @@
     def bounds(self):
         return ((self.x_bins[0], self.x_bins[-1]),)
 
+class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
+    """
+    A 1D profile object loaded from a ytdata dataset.
+    """
+
+    def __init(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
+
 class Profile2D(ProfileND):
     """An object that represents a 2D profile.
 
@@ -1188,31 +1221,6 @@
         return ((self.x_bins[0], self.x_bins[-1]),
                 (self.y_bins[0], self.y_bins[-1]))
 
-class ProfileNDFromDataset(ProfileND):
-    """
-    An ND profile object loaded from a ytdata dataset.
-    """
-    def __init__(self, ds):
-        ProfileND.__init__(self, ds.data, ds.parameters["weight_field"])
-        self.fractional = ds.parameters["fractional"]
-        exclude_fields = ["used", "weight"]
-        for ax in "xyz"[:ds.dimensionality]:
-            setattr(self, ax, ds.data[ax])
-            setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
-            setattr(self, "%s_field" % ax,
-                    tuple(ds.parameters["%s_field" % ax]))
-            setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
-            exclude_fields.extend([ax, "%s_bins" % ax,
-                                   ds.parameters["%s_field" % ax][1]])
-        self.weight = ds.data["weight"]
-        self.used = ds.data["used"].d.astype(bool)
-        profile_fields = [f for f in ds.field_list
-                          if f[1] not in exclude_fields]
-        for field in profile_fields:
-            self.field_map[field[1]] = field
-            self.field_data[field] = ds.data[field]
-            self.field_units[field] = ds.data[field].units
-
 class Profile2DFromDataset(ProfileNDFromDataset, Profile2D):
     """
     A 2D profile object loaded from a ytdata dataset.
@@ -1221,14 +1229,6 @@
     def __init(self, ds):
         ProfileNDFromDataset.__init__(self, ds)
 
-class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
-    """
-    A 1D profile object loaded from a ytdata dataset.
-    """
-
-    def __init(self, ds):
-        ProfileNDFromDataset.__init__(self, ds)
-
 class ParticleProfile(Profile2D):
     """An object that represents a *deposited* 2D profile. This is like a
     Profile2D, except that it is intended for particle data. Instead of just
@@ -1474,6 +1474,13 @@
         self.z_bins.convert_to_units(new_unit)
         self.z = 0.5*(self.z_bins[1:]+self.z_bins[:-1])
 
+class Profile3DFromDataset(ProfileNDFromDataset, Profile3D):
+    """
+    A 3D profile object loaded from a ytdata dataset.
+    """
+
+    def __init(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
 
 def sanitize_field_tuple_keys(input_dict, data_source):
     if input_dict is not None:

diff -r 9de4925af0977e3824c2de257e03b9b14548b739 -r 2bed2a36b73928bd5e67bef83a45e0cbfbca46ec yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -33,7 +33,8 @@
     AMRGridPatch
 from yt.data_objects.profiles import \
     Profile1DFromDataset, \
-    Profile2DFromDataset
+    Profile2DFromDataset, \
+    Profile3DFromDataset
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
@@ -60,6 +61,7 @@
                          "smoothed_covering_grid"]
 
 class YTDataset(Dataset):
+    """Base dataset class for all ytdata datasets."""
     def _parse_parameter_file(self):
         self.refine_by = 2
         with h5py.File(self.parameter_filename, "r") as f:
@@ -104,6 +106,7 @@
         super(YTDataHDF5File, self).__init__(ds, io, filename, file_id)
 
 class YTDataContainerDataset(YTDataset):
+    """Dataset for saved geometric data containers."""
     _index_class = ParticleIndex
     _file_class = YTDataHDF5File
     _field_info_class = YTDataContainerFieldInfo
@@ -141,6 +144,7 @@
         return False
 
 class YTSpatialPlotDataset(YTDataContainerDataset):
+    """Dataset for saved slices and projections."""
     _field_info_class = YTGridFieldInfo
 
     def __init__(self, *args, **kwargs):
@@ -248,6 +252,7 @@
         self.max_level = self.grid_levels.max()
 
 class YTGridDataset(YTDataset):
+    """Dataset for saved covering grids, arbitrary grids, and FRBs."""
     _index_class = YTGridHierarchy
     _field_info_class = YTGridFieldInfo
     _dataset_type = 'ytgridhdf5'
@@ -443,6 +448,7 @@
         return fields_to_return, fields_to_generate
 
 class YTNonspatialDataset(YTGridDataset):
+    """Dataset for general array data."""
     _index_class = YTNonspatialHierarchy
     _field_info_class = YTGridFieldInfo
     _dataset_type = 'ytnonspatialhdf5'
@@ -473,6 +479,7 @@
         return False
 
 class YTProfileDataset(YTNonspatialDataset):
+    """Dataset for saved profile objects."""
     def __init__(self, filename):
         super(YTProfileDataset, self).__init__(filename)
 
@@ -482,6 +489,8 @@
             return Profile1DFromDataset(self)
         if self.dimensionality == 2:
             return Profile2DFromDataset(self)
+        if self.dimensionality == 3:
+            return Profile3DFromDataset(self)
         return None
 
     def _parse_parameter_file(self):


https://bitbucket.org/yt_analysis/yt/commits/f29188e5cde6/
Changeset:   f29188e5cde6
Branch:      yt
User:        brittonsmith
Date:        2015-10-02 15:21:30+00:00
Summary:     Setting minimal defaults to make generic array data load.
Affected #:  1 file

diff -r 2bed2a36b73928bd5e67bef83a45e0cbfbca46ec -r f29188e5cde6b8a10ffe40b5ba349c940e8ae117 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -463,12 +463,37 @@
         self.particle_types = self.particle_types_raw
 
     def _set_derived_attrs(self):
-        pass
+        # set some defaults just to make things go
+        default_attrs = {
+            "dimensionality": 3,
+            "domain_dimensions": np.ones(3, dtype="int"),
+            "domain_left_edge": np.zeros(3),
+            "domain_right_edge": np.ones(3),
+            "periodicity": np.ones(3, dtype="bool")
+        }
+        for att, val in default_attrs.items():
+            if getattr(self, att, None) is None:
+                setattr(self, att, val)
 
     def _setup_classes(self):
         # We don't allow geometric selection for non-spatial datasets
         pass
 
+    @parallel_root_only
+    def print_key_parameters(self):
+        mylog.info("YTArrayDataset")
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge", "cosmological_simulation"]:
+            v = getattr(self, a)
+            if v is not None: mylog.info("Parameters: %-25s = %s", a, v)
+        if hasattr(self, "cosmological_simulation") and \
+           getattr(self, "cosmological_simulation"):
+            for a in ["current_redshift", "omega_lambda", "omega_matter",
+                      "hubble_constant"]:
+                v = getattr(self, a)
+                if v is not None: mylog.info("Parameters: %-25s = %s", a, v)
+        mylog.warn("Geometric data selection not available for this dataset type.")
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
@@ -538,7 +563,6 @@
                   tuple(self.parameters[bin_field])
             setattr(self, bin_field, self.parameters[bin_field])
 
-
     def _set_derived_attrs(self):
         self.domain_center = 0.5 * (self.domain_right_edge +
                                     self.domain_left_edge)
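
A sketch of what this permits, assuming save_as_dataset accepts a dict
of dataset attributes as its docstring examples show:

    import yt
    import numpy as np
    data = {"density": yt.YTArray(np.random.random(10), "g/cm**3")}
    # no domain metadata at all; the defaults above fill in
    yt.save_as_dataset({}, "bare_data.h5", data)
    bare_ds = yt.load("bare_data.h5")
    print bare_ds.domain_left_edge   # falls back to [0. 0. 0.]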


https://bitbucket.org/yt_analysis/yt/commits/9cc2ede36b06/
Changeset:   9cc2ede36b06
Branch:      yt
User:        brittonsmith
Date:        2015-10-02 15:24:43+00:00
Summary:     Remove this warning, because it will always be just fine.
Affected #:  1 file

diff -r f29188e5cde6b8a10ffe40b5ba349c940e8ae117 -r 9cc2ede36b069040fef1d8e8cdffd117e8bb4525 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -98,7 +98,6 @@
         else:
             my_val = getattr(ds, attr, None)
         if my_val is None:
-            mylog.warn("Skipping %s attribute, this may be just fine." % attr)
             continue
         if hasattr(my_val, "units"):
             my_val = my_val.in_cgs()


https://bitbucket.org/yt_analysis/yt/commits/378b014f93c6/
Changeset:   378b014f93c6
Branch:      yt
User:        brittonsmith
Date:        2015-10-03 15:07:06+00:00
Summary:     Adding periodicity to light ray output.
Affected #:  1 file

diff -r 9cc2ede36b069040fef1d8e8cdffd117e8bb4525 -r 378b014f93c670984c99f410a350a6ee6dbe451b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -537,12 +537,14 @@
             fh.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
             fh.attrs["cosmological_simulation"] = ds.cosmological_simulation
             fh.attrs["dimensionality"] = ds.dimensionality
+            fh.attrs["periodicity"] = ds.periodicity
         else:
             fh.attrs["current_redshift"] = self.near_redshift
             fh.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
             fh.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
             fh.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
             fh.attrs["dimensionality"] = self.simulation.dimensionality
+            fh.attrs["periodicity"] = (True, True, True)
         fh.attrs["current_time"] = self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
         fh.attrs["data_type"] = "yt_light_ray"
         group = fh.create_group("grid")
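
On disk the new entry is nothing more than an HDF5 file attribute; a minimal
h5py sketch of the write and read (the filename is illustrative):

.. code-block:: python

   import h5py

   with h5py.File("light_ray.h5", "w") as fh:
       # h5py stores the tuple of bools as a length-3 boolean array.
       fh.attrs["periodicity"] = (True, True, True)

   with h5py.File("light_ray.h5", "r") as fh:
       print fh.attrs["periodicity"]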


https://bitbucket.org/yt_analysis/yt/commits/d525d8451c7c/
Changeset:   d525d8451c7c
Branch:      yt
User:        brittonsmith
Date:        2015-10-04 19:53:36+00:00
Summary:     Adding initial testing for ytdata frontend.
Affected #:  3 files

diff -r 378b014f93c670984c99f410a350a6ee6dbe451b -r d525d8451c7c813f8a98d773321fb58a2c91720d yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -49,4 +49,5 @@
     config.add_subpackage("rockstar/tests")
     config.add_subpackage("stream/tests")
     config.add_subpackage("tipsy/tests")
+    config.add_subpackage("ytdata/tests")
     return config

diff -r 378b014f93c670984c99f410a350a6ee6dbe451b -r d525d8451c7c813f8a98d773321fb58a2c91720d yt/frontends/ytdata/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -0,0 +1,71 @@
+"""
+ytdata frontend tests using enzo_tiny_cosmology
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+import yt
+
+from yt.convenience import \
+    load
+from yt.frontends.ytdata.api import \
+    YTDataContainerDataset, \
+    YTSpatialPlotDataset, \
+    YTGridDataset, \
+    YTNonspatialDataset, \
+    YTProfileDataset
+from yt.testing import \
+    assert_allclose_units, \
+    assert_equal
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    data_dir_load, \
+    AnswerTestingTest
+
+class YTDataFieldTest(AnswerTestingTest):
+    _type_name = "YTDataTest"
+    _attrs = ("field_name", )
+
+    def __init__(self, ds_fn, field, decimals = 10):
+        super(YTDataFieldTest, self).__init__(ds_fn)
+        self.field = field
+        if isinstance(field, tuple):
+            self.field_name = field[1]
+        else:
+            self.field_name = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.ds.data
+        num_e = obj[self.field].size
+        avg = obj[self.field].mean()
+        return np.array([num_e, avg])
+
+    def compare(self, new_result, old_result):
+        err_msg = "YTData field values for %s not equal." % \
+          (self.field,)
+        if self.decimals is None:
+            assert_equal(new_result, old_result,
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_allclose_units(new_result, old_result, 
+                                  10.**(-self.decimals),
+                                  err_msg=err_msg, verbose=True)
+
+enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
+@requires_ds(enzotiny)
+def test_data_container_data():
+    ds = data_dir_load(enzotiny)
+    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+    fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+    new_ds = load(fn)
+    assert isinstance(new_ds, YTDataContainerDataset)
+    yield YTDataFieldTest(enzotiny, ("grid", "density"))
+    yield YTDataFieldTest(enzotiny, ("all", "particle_mass"))


https://bitbucket.org/yt_analysis/yt/commits/7841c7f75b2f/
Changeset:   7841c7f75b2f
Branch:      yt
User:        brittonsmith
Date:        2015-10-04 19:57:17+00:00
Summary:     Adding distinction between geometric and non-geometric dataset tests.
Affected #:  1 file

diff -r d525d8451c7c813f8a98d773321fb58a2c91720d -r 7841c7f75b2fb19811e7eac3c87d4921a1b22726 yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -33,7 +33,8 @@
     _type_name = "YTDataTest"
     _attrs = ("field_name", )
 
-    def __init__(self, ds_fn, field, decimals = 10):
+    def __init__(self, ds_fn, field, decimals = 10,
+                 geometric=True):
         super(YTDataFieldTest, self).__init__(ds_fn)
         self.field = field
         if isinstance(field, tuple):
@@ -41,9 +42,13 @@
         else:
             self.field_name = field
         self.decimals = decimals
+        self.geometric = geometric
 
     def run(self):
-        obj = self.ds.data
+        if self.geometric:
+            obj = self.ds.all_data()
+        else:
+            obj = self.ds.data
         num_e = obj[self.field].size
         avg = obj[self.field].mean()
         return np.array([num_e, avg])
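
The new ``geometric`` flag simply selects which data object the test queries.
The two access paths look like this (filenames are taken from or modeled on
the documentation in this series):

.. code-block:: python

   import yt

   # A saved data container supports geometric selection.
   sphere_ds = yt.load("DD0046_sphere.h5")
   obj = sphere_ds.all_data()

   # A non-spatial dataset (FRB, profile, generic array) exposes
   # its fields through the .data attribute instead.
   prof_ds = yt.load("DD0046_Profile2D.h5")
   obj = prof_ds.data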


https://bitbucket.org/yt_analysis/yt/commits/c3e865959d09/
Changeset:   c3e865959d09
Branch:      yt
User:        brittonsmith
Date:        2015-10-04 20:28:18+00:00
Summary:     Adding tests to cover all ytdata dataset types.
Affected #:  1 file

diff -r 7841c7f75b2fb19811e7eac3c87d4921a1b22726 -r c3e865959d09fc2eeea3256ce2f77cf0c7418541 yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -37,7 +37,7 @@
                  geometric=True):
         super(YTDataFieldTest, self).__init__(ds_fn)
         self.field = field
-        if isinstance(field, tuple):
+        if isinstance(field, tuple) and len(field) == 2:
             self.field_name = field[1]
         else:
             self.field_name = field
@@ -66,11 +66,83 @@
 
 enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
 @requires_ds(enzotiny)
-def test_data_container_data():
+def test_datacontainer_data():
     ds = data_dir_load(enzotiny)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
-    new_ds = load(fn)
-    assert isinstance(new_ds, YTDataContainerDataset)
-    yield YTDataFieldTest(enzotiny, ("grid", "density"))
-    yield YTDataFieldTest(enzotiny, ("all", "particle_mass"))
+    sphere_ds = load(fn)
+    assert isinstance(sphere_ds, YTDataContainerDataset)
+    yield YTDataFieldTest(fn, ("grid", "density"))
+    yield YTDataFieldTest(fn, ("all", "particle_mass"))
+
+@requires_ds(enzotiny)
+def test_grid_datacontainer_data():
+    ds = data_dir_load(enzotiny)
+    cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
+    fn = cg.save_as_dataset(fields=["density", "particle_mass"])
+    cg_ds = load(fn)
+    assert isinstance(cg_ds, YTGridDataset)
+    yield YTDataFieldTest(fn, ("grid", "density"))
+    yield YTDataFieldTest(fn, ("all", "particle_mass"))
+
+    my_proj = ds.proj("density", "x", weight_field="density")
+    frb = my_proj.to_frb(1.0, (800, 800))
+    fn = frb.save_as_dataset(fields=["density"])
+    frb_ds = load(fn)
+    assert isinstance(frb_ds, YTGridDataset)
+    yield YTDataFieldTest(fn, "density", geometric=False)
+
+@requires_ds(enzotiny)
+def test_spatial_data():
+    ds = data_dir_load(enzotiny)
+    proj = ds.proj("density", "x", weight_field="density")
+    fn = proj.save_as_dataset()
+    proj_ds = yt.load(fn)
+    assert isinstance(proj_ds, YTSpatialPlotDataset)
+    yield YTDataFieldTest(fn, ("grid", "density"), geometric=False)
+
+@requires_ds(enzotiny)
+def test_profile_data():
+    ds = data_dir_load(enzotiny)
+    ad = ds.all_data()
+
+    profile_1d = yt.create_profile(ad, "density", "temperature",
+                                   weight_field="cell_mass")
+    fn = profile_1d.save_as_dataset()
+    prof_1d_ds = load(fn)
+    assert isinstance(prof_1d_ds, YTProfileDataset)
+    yield YTDataFieldTest(fn, "temperature", geometric=False)
+    yield YTDataFieldTest(fn, "x", geometric=False)
+    yield YTDataFieldTest(fn, "density", geometric=False)
+
+    profile_2d = yt.create_profile(ad, ["density", "temperature"],
+                                   "cell_mass", weight_field=None,
+                                   n_bins=(128, 128))
+    fn = profile_2d.save_as_dataset()
+    prof_2d_ds = yt.load(fn)
+    assert isinstance(prof_2d_ds, YTProfileDataset)
+    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(fn, "x", geometric=False)
+    yield YTDataFieldTest(fn, "temperature", geometric=False)
+    yield YTDataFieldTest(fn, "y", geometric=False)
+    yield YTDataFieldTest(fn, "cell_mass", geometric=False)
+
+@requires_ds(enzotiny)
+def test_nonspatial_data():
+    ds = data_dir_load(enzotiny)
+    region = ds.box([0.25]*3, [0.75]*3)
+    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+    my_data = {}
+    my_data["region_density"] = region["density"]
+    my_data["sphere_density"] = sphere["density"]
+    fn = yt.save_as_dataset(ds, "test_data.h5", my_data)
+    array_ds = yt.load(fn)
+    assert isinstance(array_ds, YTNonspatialDataset)
+    yield YTDataFieldTest(fn, "region_density", geometric=False)
+    yield YTDataFieldTest(fn, "sphere_density", geometric=False)
+
+    my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3")}
+    fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
+    fn = yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
+    new_ds = yt.load(fn)
+    assert isinstance(new_ds, YTNonspatialDataset)
+    yield YTDataFieldTest(fn, "density", geometric=False)


https://bitbucket.org/yt_analysis/yt/commits/2cb6454d47bd/
Changeset:   2cb6454d47bd
Branch:      yt
User:        brittonsmith
Date:        2015-10-05 13:49:14+00:00
Summary:     Merging.
Affected #:  1 file

diff -r c3e865959d09fc2eeea3256ce2f77cf0c7418541 -r 2cb6454d47bda2d7cd94bd97903f50c22ee99692 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -484,8 +484,8 @@
                                   sub_data['velocity_los']
                 redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
                                 (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
-                sub_data['redshift_eff'] = (1 + redshift_dopp) * \
-                                           (1 + sub_data['redshift'])
+                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
+                                           (1 + sub_data['redshift'])) - 1
                 del velocity_los_cm, redshift_dopp
 
             # Remove empty lixels.
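
The change above fixes an off-by-one in the effective redshift: the previous
expression returned :math:`1 + z_\mathrm{eff}` rather than
:math:`z_\mathrm{eff}`. Written out, this is the standard relativistic
Doppler shift combined with the cosmological redshift:

.. math::

   1 + z_\mathrm{dopp} =
   \left(\frac{1 + v_\mathrm{los}/c}{1 - v_\mathrm{los}/c}\right)^{1/2},
   \qquad
   z_\mathrm{eff} = (1 + z_\mathrm{dopp})(1 + z) - 1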


https://bitbucket.org/yt_analysis/yt/commits/206b77e51554/
Changeset:   206b77e51554
Branch:      yt
User:        brittonsmith
Date:        2015-10-05 19:46:52+00:00
Summary:     Fixing logic in writing attributes for light ray.
Affected #:  1 file

diff -r 2cb6454d47bda2d7cd94bd97903f50c22ee99692 -r 206b77e5155456a6fed71ff63a2cf3e066a0271d yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -133,6 +133,7 @@
         if simulation_type is None:
             self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
+            self.cosmological_simulation = ds.cosmological_simulation
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
                 self.cosmology = Cosmology(
@@ -148,6 +149,7 @@
         # Make a light ray from a simulation time-series.
         else:
             # Get list of datasets for light ray solution.
+            self.cosmological_simulation = 1
             CosmologySplice.__init__(self, parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
@@ -528,8 +530,6 @@
         """
         mylog.info("Saving light ray data to %s." % filename)
         fh = h5py.File(filename, "w")
-        for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
-            fh.attrs[attr] = getattr(self.cosmology, attr)
         if self.simulation_type is None:
             ds = load(self.parameter_filename, **self.load_kwargs)
             fh.attrs["current_redshift"] = ds.current_redshift
@@ -545,7 +545,13 @@
             fh.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
             fh.attrs["dimensionality"] = self.simulation.dimensionality
             fh.attrs["periodicity"] = (True, True, True)
-        fh.attrs["current_time"] = self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
+        if self.cosmological_simulation:
+            for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+                fh.attrs[attr] = getattr(self.cosmology, attr)
+                fh.attrs["current_time"] = \
+                  self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
+        else:
+            fh.attrs["current_time"] = ds.current_time
         fh.attrs["data_type"] = "yt_light_ray"
         group = fh.create_group("grid")
         group.attrs["num_elements"] = data['x'].size


https://bitbucket.org/yt_analysis/yt/commits/64cc0adf6a6a/
Changeset:   64cc0adf6a6a
Branch:      yt
User:        brittonsmith
Date:        2015-10-05 20:02:29+00:00
Summary:     Refactoring write_light_ray to use save_as_dataset.
Affected #:  2 files

diff -r 206b77e5155456a6fed71ff63a2cf3e066a0271d -r 64cc0adf6a6a72f7ae80a6aacdafa0318872ab6a yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -13,19 +13,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
 import numpy as np
 
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
     CosmologySplice
 from yt.convenience import \
     load
-from yt.funcs import \
-    mylog
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.logger import \
+    ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only
@@ -133,7 +134,6 @@
         if simulation_type is None:
             self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
-            self.cosmological_simulation = ds.cosmological_simulation
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
                 self.cosmology = Cosmology(
@@ -149,7 +149,6 @@
         # Make a light ray from a simulation time-series.
         else:
             # Get list of datasets for light ray solution.
-            self.cosmological_simulation = 1
             CosmologySplice.__init__(self, parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
@@ -528,41 +527,24 @@
 
         Write light ray data to hdf5 file.
         """
-        mylog.info("Saving light ray data to %s." % filename)
-        fh = h5py.File(filename, "w")
         if self.simulation_type is None:
             ds = load(self.parameter_filename, **self.load_kwargs)
-            fh.attrs["current_redshift"] = ds.current_redshift
-            fh.attrs["domain_left_edge"] = ds.domain_left_edge.in_cgs()
-            fh.attrs["domain_right_edge"] = ds.domain_right_edge.in_cgs()
-            fh.attrs["cosmological_simulation"] = ds.cosmological_simulation
-            fh.attrs["dimensionality"] = ds.dimensionality
-            fh.attrs["periodicity"] = ds.periodicity
         else:
-            fh.attrs["current_redshift"] = self.near_redshift
-            fh.attrs["domain_left_edge"] = self.simulation.domain_left_edge.in_cgs()
-            fh.attrs["domain_right_edge"] = self.simulation.domain_right_edge.in_cgs()
-            fh.attrs["cosmological_simulation"] = self.simulation.cosmological_simulation
-            fh.attrs["dimensionality"] = self.simulation.dimensionality
-            fh.attrs["periodicity"] = (True, True, True)
-        if self.cosmological_simulation:
+            ds = {}
+            ds["dimensionality"] = self.simulation.dimensionality
+            ds["domain_left_edge"] = self.simulation.domain_left_edge
+            ds["domain_right_edge"] = self.simulation.domain_right_edge
+            ds["cosmological_simulation"] = self.simulation.cosmological_simulation
+            ds["periodicity"] = (True, True, True)
+            ds["current_redshift"] = self.near_redshift
             for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
-                fh.attrs[attr] = getattr(self.cosmology, attr)
-                fh.attrs["current_time"] = \
-                  self.cosmology.t_from_z(fh.attrs["current_redshift"]).in_cgs()
-        else:
-            fh.attrs["current_time"] = ds.current_time
-        fh.attrs["data_type"] = "yt_light_ray"
-        group = fh.create_group("grid")
-        group.attrs["num_elements"] = data['x'].size
-        for field in data.keys():
-            if isinstance(field, tuple):
-                fieldname = field[1]
-            else:
-                fieldname = field
-            group.create_dataset(fieldname, data=data[field])
-            group[fieldname].attrs["units"] = str(data[field].units)
-        fh.close()
+                ds[attr] = getattr(self.cosmology, attr)
+            ds["current_time"] = \
+              self.cosmology.t_from_z(fh.attrs["current_redshift"])
+        extra_attrs = {"data_type": "yt_light_ray"}
+        field_types = dict([(field, "grid") for field in data.keys()])
+        save_as_dataset(ds, filename, data, field_types=field_types,
+                        extra_attrs=extra_attrs)
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):

diff -r 206b77e5155456a6fed71ff63a2cf3e066a0271d -r 64cc0adf6a6a72f7ae80a6aacdafa0318872ab6a yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -17,10 +17,10 @@
 import h5py
 import numpy as np
 
-from yt.funcs import \
-    mylog
 from yt.units.yt_array import \
     YTArray
+from yt.utilities.logger import \
+    ytLogger as mylog
 
 def save_as_dataset(ds, filename, data, field_types=None,
                     extra_attrs=None):
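
The refactor funnels all light ray output through ``save_as_dataset``; its
call pattern, as used above, with a plain dict standing in for a loaded
dataset (the filename and array contents are illustrative):

.. code-block:: python

   import numpy as np
   import yt
   from yt.frontends.ytdata.utilities import save_as_dataset

   ds = {"current_time": yt.YTQuantity(10, "Myr")}
   data = {"x": yt.YTArray(np.random.random(10), "cm")}

   # Tag each field as grid data and mark the file as a light ray.
   field_types = dict((field, "grid") for field in data)
   save_as_dataset(ds, "lightray.h5", data,
                   field_types=field_types,
                   extra_attrs={"data_type": "yt_light_ray"})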


https://bitbucket.org/yt_analysis/yt/commits/172909d8d0b9/
Changeset:   172909d8d0b9
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 01:21:15+00:00
Summary:     Adding section on saving data.
Affected #:  2 files

diff -r 64cc0adf6a6a72f7ae80a6aacdafa0318872ab6a -r 172909d8d0b99aff8405f94bfa520e5a56393e43 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -20,5 +20,6 @@
    units/index
    filtering
    generating_processed_data
+   saving_data
    time_series_analysis
    parallel_computation

diff -r 64cc0adf6a6a72f7ae80a6aacdafa0318872ab6a -r 172909d8d0b99aff8405f94bfa520e5a56393e43 doc/source/analyzing/saving_data.rst
--- /dev/null
+++ b/doc/source/analyzing/saving_data.rst
@@ -0,0 +1,5 @@
+.. _saving_data:
+
+Saving Reloadable Data
+======================
+


https://bitbucket.org/yt_analysis/yt/commits/3e7d4fd598e5/
Changeset:   3e7d4fd598e5
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 03:47:17+00:00
Summary:     Adding section on geometric data containers.
Affected #:  1 file

diff -r 172909d8d0b99aff8405f94bfa520e5a56393e43 -r 3e7d4fd598e55a0817269f8877d2ac746896dbd1 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -3,3 +3,66 @@
 Saving Reloadable Data
 ======================
 
+Most of the data loaded into or generated with yt can be saved to a
+format that can be reloaded as a first-class dataset.  This includes
+the following:
+
+  * geometric data containers (regions, spheres, disks, rays, etc.)
+
+  * grid data containers (covering grids, arbitrary grids, fixed
+    resolution buffers)
+
+  * spatial plots (projections, slices, cutting planes)
+
+  * profiles
+
+  * generic array data
+
+In the case of projections, slices, and profiles, reloaded data can be
+used to remake plots using the methods described in :ref:`how-to-make-plots`.
+
+Geometric Data Containers
+-------------------------
+
+Data from geometric data containers can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+   fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+
+This function will return the name of the file to which the dataset
+was saved.  The filename will be a combination of the name of the
+original dataset and the type of data container.  Optionally, a
+specific filename can be given with the ``filename`` keyword.  If no
+fields are given, the fields that have previously been queried will
+be saved.
+
+The newly created dataset can be loaded like all other supported
+data through ``yt.load``.  Once loaded, field data can be accessed
+through the traditional data containers.  Grid data is accessed by
+the ``grid`` data type and particle data is accessed with the
+original particle type.  As with the original dataset, grid
+positions and cell sizes are accessible with, for example,
+("grid", "x") and ("grid", "dx").  Particle positions are
+accessible as (<particle_type>, "particle_position_x").  All original
+simulation parameters are accessible in the ``parameters``
+dictionary that is normally associated with all datasets.
+
+.. code-block:: python
+
+   sphere_ds = yt.load("DD0046_sphere.h5")
+   ad = sphere_ds.all_data()
+
+   # grid data
+   print ad["grid", "density"]
+   print ad["grid", "x"]
+   print ad["grid", "dx"]
+
+   # particle data
+   print ad["all", "particle_mass"]
+   print ad["all", "particle_position_x"]


https://bitbucket.org/yt_analysis/yt/commits/755b84735d7c/
Changeset:   755b84735d7c
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 04:09:11+00:00
Summary:     Adding grid data container section.
Affected #:  1 file

diff -r 3e7d4fd598e55a0817269f8877d2ac746896dbd1 -r 755b84735d7c7ec1e1fe50102ffa12f281d47735 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -66,3 +66,45 @@
    # particle data
    print ad["all", "particle_mass"]
    print ad["all", "particle_position_x"]
+
+Note that because field data queried from geometric containers is
+returned as unordered 1D arrays, data container datasets are treated,
+effectively, as particle data.  Thus, 3D indexing of grid data from
+these datasets is not possible.
+
+Grid Data Containers
+--------------------
+
+Data containers that return field data as multidimensional arrays
+can be saved so as to preserve this type of access.  This includes
+covering grids, arbitrary grids, and fixed resolution buffers.
+Saving data from these containers works just as with geometric data
+containers.  Once reloaded, field data can still be accessed through
+geometric data containers.
+
+.. code-block:: python
+
+   cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
+   fn = cg.save_as_dataset(fields=["density", "particle_mass"])
+
+   cg_ds = yt.load(fn)
+   ad = cg_ds.all_data()
+   print ad["grid", "density"]
+
+Multidimensional indexing of field data is also available through
+the ``data`` attribute.
+
+.. code-block:: python
+
+   print cg_ds.data["grid", "density"]
+
+Fixed resolution buffers work just the same.
+
+.. code-block:: python
+
+   my_proj = ds.proj("density", "x", weight_field="density")
+   frb = my_proj.to_frb(1.0, (800, 800))
+   fn = frb.save_as_dataset(fields=["density"])
+   frb_ds = yt.load(fn)
+   print frb_ds.data["density"]
+


https://bitbucket.org/yt_analysis/yt/commits/93b00eb99cff/
Changeset:   93b00eb99cff
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 04:20:23+00:00
Summary:     Adding spatial plot section.
Affected #:  1 file

diff -r 755b84735d7c7ec1e1fe50102ffa12f281d47735 -r 93b00eb99cffe732b48a48a3fbedf160644a91b3 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -108,3 +108,26 @@
    frb_ds = yt.load(fn)
    print frb_ds.data["density"]
 
+.. _saving-spatial-plots:
+
+Spatial Plots
+-------------
+
+Spatial plots, such as projections, slices, and off-axis slices
+(cutting planes) can also be saved and reloaded.
+
+.. code-block:: python
+
+   proj = ds.proj("density", "x", weight_field="density")
+   proj.save_as_dataset()
+
+Once reloaded, they can be handed to their associated plotting
+functions to make images.
+
+.. code-block::
+
+   proj_ds = yt.load("DD0046_proj.h5")
+   p = yt.ProjectionPlot(proj_ds, "x", "density",
+                         weight_field="density")
+   p.save()
+


https://bitbucket.org/yt_analysis/yt/commits/3a9652a43877/
Changeset:   3a9652a43877
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 04:37:04+00:00
Summary:     Adding profile section.
Affected #:  1 file

diff -r 93b00eb99cffe732b48a48a3fbedf160644a91b3 -r 3a9652a43877cf18ce30351368bbd8b749219fef doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -124,10 +124,62 @@
 Once reloaded, they can be handed to their associated plotting
 functions to make images.
 
-.. code-block::
+.. code-block:: python
 
    proj_ds = yt.load("DD0046_proj.h5")
    p = yt.ProjectionPlot(proj_ds, "x", "density",
                          weight_field="density")
    p.save()
 
+Profiles
+--------
+
+Profiles created with :func:`~yt.data_objects.profiles.create_profile`,
+:class:`~yt.visualization.profile_plotter.ProfilePlot`, and
+:class:`~yt.visualization.profile_plotter.PhasePlot` can be saved with
+the :func:`~yt.data_objects.profiles.save_as_dataset` function, which
+works just as above.  Profile datasets are a type of non-spatial grid
+dataset.  Geometric selection is not possible, but data can be
+accessed through the ``.data`` attribute.
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+
+   profile_2d = yt.create_profile(ad, ["density", "temperature"],
+                                  "cell_mass", weight_field=None,
+                                  n_bins=(128, 128))
+   profile_2d.save_as_dataset()
+
+   prof_2d_ds = yt.load("DD0046_Profile2D.h5")
+   print prof_2d_ds.data["cell_mass"]
+
+The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D
+arrays with "x", "y", and "z".
+
+.. code-block:: python
+
+   print prof_2d_ds.data["x"]
+
+The bin fields can also be returned with the same shape as the profile
+data by accessing them with their original names.  This allows for
+boolean masking of profile data using the bin fields.
+
+.. code-block:: python
+
+   # density is the x bin field
+   print prof_2d_ds.data["density"]
+
+For 1, 2, and 3D profile datasets, a fake profile object will be
+constructed by accessing the ``.profile`` attribute.  This is used
+primarily in the case of 1 and 2D profiles to create figures using
+:class:`~yt.visualization.profile_plotter.ProfilePlot` and
+:class:`~yt.visualization.profile_plotter.PhasePlot`.
+
+.. code-block:: python
+
+   p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature",
+                    "cell_mass", weight_field=None)
+   p.save()


https://bitbucket.org/yt_analysis/yt/commits/0d97cc218cdd/
Changeset:   0d97cc218cdd
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 04:46:19+00:00
Summary:     Adding section on saving array data.
Affected #:  1 file

diff -r 3a9652a43877cf18ce30351368bbd8b749219fef -r 0d97cc218cdd94c764d2d4ce9be57a1f93519ee6 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -131,6 +131,8 @@
                          weight_field="density")
    p.save()
 
+.. _saving-profile-data:
+
 Profiles
 --------
 
@@ -183,3 +185,41 @@
    p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature",
                     "cell_mass", weight_field=None)
    p.save()
+
+.. _saving-array-data:
+
+Generic Array Data
+------------------
+
+Generic arrays can be saved and reloaded as non-spatial data using
+the :func:`~yt.frontends.ytdata.utilities.save_as_dataset` function,
+also available as ``yt.save_as_dataset``.  As with profiles, geometric
+selection is not possible, but the data can be accessed through the
+``.data`` attribute.
+
+.. code-block:: python
+
+   region = ds.box([0.25]*3, [0.75]*3)
+   sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+   my_data = {}
+   my_data["region_density"] = region["density"]
+   my_data["sphere_density"] = sphere["density"]
+   yt.save_as_dataset(ds, "test_data.h5", my_data)
+
+   array_ds = yt.load("test_data.h5")
+   print array_ds.data["region_density"]
+   print array_ds.data["sphere_density"]
+
+Array data can be saved with or without a dataset loaded.  If no
+dataset has been loaded, a fake dataset can be provided as a
+dictionary.
+
+.. code-block:: python
+
+   my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
+              "temperature": yt.YTArray(np.random.random(10), "K")}
+   fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
+   yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
+
+   new_ds = yt.load("random_data.h5")
+   print new_ds.data["density"]


https://bitbucket.org/yt_analysis/yt/commits/c6ae082e0467/
Changeset:   c6ae082e0467
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 04:55:35+00:00
Summary:     Adding labels and mention of save_as_dataset in FRB section.
Affected #:  2 files

diff -r 0d97cc218cdd94c764d2d4ce9be57a1f93519ee6 -r c6ae082e04679908c71451bccebb90df6c38bfee doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -54,10 +54,13 @@
  
 .. code-block:: python
 
-   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.save_as_dataset("my_images.h5", fields=["density","temperature"])
    frb.export_fits("my_images.fits", fields=["density","temperature"],
                    clobber=True, units="kpc")
 
+In the HDF5 case, the created file can be reloaded just like a regular dataset with
+``yt.load`` and will, itself, be a first-class dataset.  For more information on
+this, see :ref:`saving-grid-data-containers`.
 In the FITS case, there is an option for setting the ``units`` of the coordinate system in
 the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
 

diff -r 0d97cc218cdd94c764d2d4ce9be57a1f93519ee6 -r c6ae082e04679908c71451bccebb90df6c38bfee doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -21,6 +21,8 @@
 In the case of projections, slices, and profiles, reloaded data can be
 used to remake plots using the methods decribed in :ref:`how-to-make-plots`.
 
+.. _saving-data-containers:
+
 Geometric Data Containers
 -------------------------
 
@@ -72,6 +74,8 @@
 effectively, as particle data.  Thus, 3D indexing of grid data from
 these datasets is not possible.
 
+.. _saving-grid-data-containers:
+
 Grid Data Containers
 --------------------
 


https://bitbucket.org/yt_analysis/yt/commits/61566e85ae1e/
Changeset:   61566e85ae1e
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 05:03:16+00:00
Summary:     Redoing section on saving object to use new functionality.
Affected #:  1 file

diff -r c6ae082e04679908c71451bccebb90df6c38bfee -r 61566e85ae1edf3ebee822d4da8dc7cae6d0ffa8 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -457,69 +457,9 @@
 ---------------------------
 
 Often, when operating interactively or via the scripting interface, it is
-convenient to save an object or multiple objects out to disk and then restart
-the calculation later.  For example, this is useful after clump finding 
-(:ref:`clump_finding`), which can be very time consuming.  
-Typically, the save and load operations are used on 3D data objects.  yt
-has a separate set of serialization operations for 2D objects such as
-projections.
-
-yt will save out objects to disk under the presupposition that the
-construction of the objects is the difficult part, rather than the generation
-of the data -- this means that you can save out an object as a description of
-how to recreate it in space, but not the actual data arrays affiliated with
-that object.  The information that is saved includes the dataset off of
-which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a dataset
-from whatever limited information it has in the save file.
-
-You can save objects to an output file using the function 
-:func:`~yt.data_objects.index.save_object`: 
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
-   sp.save_object("sphere_name", "save_file.cpkl")
-
-This will store the object as ``sphere_name`` in the file
-``save_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  
-
-To re-load an object saved this way, you can use the shelve module directly:
-
-.. code-block:: python
-
-   import yt
-   import shelve
-   ds = yt.load("my_data") 
-   saved_fn = shelve.open("save_file.cpkl")
-   ds, sp = saved_fn["sphere_name"]
-
-Additionally, we can store multiple objects in a single shelve file, so we 
-have to call the sphere by name.
-
-For certain data objects such as projections, serialization can be performed
-automatically if ``serialize`` option is set to ``True`` in :ref:`the
-configuration file <configuration-file>` or set directly in the script:
-
-.. code-block:: python
-
-   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"
-
-.. note:: Use serialization with caution. Enabling serialization means that
-   once a projection of a dataset has been created (and stored in the .yt file
-   in the same directory), any subsequent changes to that dataset will be
-   ignored when attempting to create the same projection. So if you take a
-   density projection of your dataset in the 'x' direction, then somehow tweak
-   that dataset significantly, and take the density projection again, yt will
-   default to finding the original projection and 
-   :ref:`not your new one <faq-old-data>`.
-
-.. note:: It's also possible to use the standard :mod:`cPickle` module for
-          loading and storing objects -- so in theory you could even save a
-          list of objects!
-
-This method works for clumps, as well, and the entire clump index will be
-stored and restored upon load.
+convenient to save an object to disk and then restart the calculation later or
+transfer the data from a container to another filesystem.  This can be
+particularly useful when working with extremely large datasets.  Field data
+can be saved to disk in a format that allows for it to be reloaded just like
+a regular dataset.  For information on how to do this, see
+:ref:`saving-data-containers`.
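
The workflow the rewritten section now points to looks roughly like this
(the dataset path is the one used in the new documentation; the field choice
is illustrative):

.. code-block:: python

   import yt
   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")

   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, "kpc"))
   fn = sp.save_as_dataset(fields=["density"])

   # The saved container reloads as a first-class dataset.
   sp_ds = yt.load(fn)
   print sp_ds.all_data()["grid", "density"]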


https://bitbucket.org/yt_analysis/yt/commits/f33c72992a57/
Changeset:   f33c72992a57
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 16:35:53+00:00
Summary:     Moving saving ytarrays to hdf5 attributes to a function.
Affected #:  1 file

diff -r 61566e85ae1edf3ebee822d4da8dc7cae6d0ffa8 -r f33c72992a5727d828e1ffd73fee8ac0aa91f04a yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -34,8 +34,9 @@
 
     Parameters
     ----------
-    ds : dataset
-        The dataset associated with the fields.  
+    ds : dataset or dict
+        The dataset associated with the fields or a dictionary of
+        parameters.
     filename : str
         The name of the file to be written.
     data : dict
@@ -72,9 +73,7 @@
 
     >>> data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
     ...         "temperature": yt.YTArray(np.random.random(10), "K")}
-    >>> ds_data = {"domain_left_edge": yt.YTArray(np.zeros(3), "cm"),
-    ...            "domain_right_edge": yt.YTArray(np.ones(3), "cm"),
-    ...            "current_time": yt.YTQuantity(10, "Myr")}
+    >>> ds_data = {"current_time": yt.YTQuantity(10, "Myr")}
     >>> yt.save_as_dataset(ds_data, "random_data.h5", data)
     >>> new_ds = yt.load("random_data.h5")
     >>> print new_ds.data["temperature"]
@@ -92,6 +91,7 @@
                    "omega_matter", "hubble_constant"]
 
     fh = h5py.File(filename, "w")
+    if ds is None: ds = {}
     for attr in base_attrs:
         if isinstance(ds, dict):
             my_val = ds.get(attr, None)
@@ -99,19 +99,11 @@
             my_val = getattr(ds, attr, None)
         if my_val is None:
             continue
-        if hasattr(my_val, "units"):
-            my_val = my_val.in_cgs()
-            fh.attrs["%s_units" % attr] = str(my_val.units)
-        fh.attrs[attr] = my_val
+        _yt_array_hdf5_attr(fh, attr, my_val)
 
     for attr in extra_attrs:
         my_val = extra_attrs[attr]
-        if hasattr(my_val, "units"):
-            my_val = my_val.in_cgs()
-            fh.attrs["%s_units" % attr] = str(my_val.units)
-        if my_val is None:
-            my_val = "None"
-        fh.attrs[attr] = my_val
+        _yt_array_hdf5_attr(fh, attr, my_val)
     if "data_type" not in extra_attrs:
         fh.attrs["data_type"] = "yt_array_data"
 
@@ -178,7 +170,7 @@
         The hdf5 file or group to which the data will be written.
     field : str
         The name of the field to be saved.
-    ddata : YTArray
+    data : YTArray
         The data array to be saved.
 
     Returns
@@ -194,3 +186,27 @@
         units = str(data.units)
     dataset.attrs["units"] = units
     return dataset
+
+def _yt_array_hdf5_attr(fh, att, val):
+    r"""Save a YTArray or YTQuantity as an hdf5 attribute.
+
+    Save an hdf5 attribute.  If it has units, save an
+    additional attribute with the units.
+
+    Parameters
+    ----------
+    fh : an open hdf5 file, group, or dataset
+        The hdf5 file, group, or dataset to which the
+        attribute will be written.
+    att : str
+        The name of the attribute to be saved.
+    val : anything
+        The value to be saved.
+
+    """
+
+    if val is None: val = "None"
+    if hasattr(val, "units"):
+        val = val.in_cgs()
+        fh.attrs["%s_units" % att] = str(val.units)
+    fh.attrs[str(att)] = val
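
For a value carrying units, the helper leaves behind a paired
``<attr>_units`` attribute; the on-disk result is equivalent to this sketch
(the filename and attribute name are illustrative):

.. code-block:: python

   import h5py
   import yt

   val = yt.YTQuantity(10, "Myr").in_cgs()
   with h5py.File("example.h5", "w") as fh:
       # The units travel as a companion string attribute.
       fh.attrs["current_time_units"] = str(val.units)
       fh.attrs["current_time"] = val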


https://bitbucket.org/yt_analysis/yt/commits/4bffe408325b/
Changeset:   4bffe408325b
Branch:      yt
User:        brittonsmith
Date:        2015-10-07 16:45:25+00:00
Summary:     Writing out simulation parameters like I always said I would.
Affected #:  1 file

diff -r f33c72992a5727d828e1ffd73fee8ac0aa91f04a -r 4bffe408325bb594912cfeee8f7d4ac1cba68820 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -92,6 +92,11 @@
 
     fh = h5py.File(filename, "w")
     if ds is None: ds = {}
+
+    if hasattr(ds, "parameters") and isinstance(ds.parameters, dict):
+        for attr, val in ds.parameters.items():
+            _yt_array_hdf5_attr(fh, attr, val)
+
     for attr in base_attrs:
         if isinstance(ds, dict):
             my_val = ds.get(attr, None)
@@ -187,7 +192,7 @@
     dataset.attrs["units"] = units
     return dataset
 
-def _yt_array_hdf5_attr(fh, att, val):
+def _yt_array_hdf5_attr(fh, attr, val):
     r"""Save a YTArray or YTQuantity as an hdf5 attribute.
 
     Save an hdf5 attribute.  If it has units, save an
@@ -198,7 +203,7 @@
     fh : an open hdf5 file, group, or dataset
         The hdf5 file, group, or dataset to which the
         attribute will be written.
-    att : str
+    attr : str
         The name of the attribute to be saved.
     val : anything
         The value to be saved.
@@ -208,5 +213,5 @@
     if val is None: val = "None"
     if hasattr(val, "units"):
         val = val.in_cgs()
-        fh.attrs["%s_units" % att] = str(val.units)
-    fh.attrs[str(att)] = val
+        fh.attrs["%s_units" % attr] = str(val.units)
+    fh.attrs[str(attr)] = val


https://bitbucket.org/yt_analysis/yt/commits/3d66ace92d50/
Changeset:   3d66ace92d50
Branch:      yt
User:        brittonsmith
Date:        2015-10-08 11:30:39+00:00
Summary:     Make sure to alias weight field for profile datasets and don't create the field info container over and over.
Affected #:  2 files

diff -r 4bffe408325bb594912cfeee8f7d4ac1cba68820 -r 3d66ace92d502a7722be66cf6d88965579b00326 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -366,9 +366,7 @@
         self.field_dependencies = {}
         self.derived_field_list = []
         self.filtered_particle_types = []
-        if not hasattr(self, "field_info"):
-            self.field_info = \
-              self._field_info_class(self, self.field_list)
+        self.field_info = self._field_info_class(self, self.field_list)
         self.coordinates.setup_fields(self.field_info)
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:

diff -r 4bffe408325bb594912cfeee8f7d4ac1cba68820 -r 3d66ace92d502a7722be66cf6d88965579b00326 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -297,13 +297,12 @@
               np.concatenate([self.parameters["ActiveDimensions"], [1]])
 
     def create_field_info(self):
-        self.field_info = self._field_info_class(self, self.field_list)
+        super(YTGridDataset, self).create_field_info()
         for ftype, field in self.field_list:
             if ftype == self.default_fluid_type:
                 self.field_info.alias(
                     ("gas", field),
                     (self.default_fluid_type, field))
-        super(YTGridDataset, self).create_field_info()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -563,6 +562,11 @@
                   tuple(self.parameters[bin_field])
             setattr(self, bin_field, self.parameters[bin_field])
 
+    def create_field_info(self):
+        super(YTProfileDataset, self).create_field_info()
+        self.field_info.alias(self.parameters["weight_field"],
+                              (self.default_fluid_type, "weight"))
+
     def _set_derived_attrs(self):
         self.domain_center = 0.5 * (self.domain_right_edge +
                                     self.domain_left_edge)
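
With the alias in place, a reloaded profile answers to the original weight
field name as well as the stored ``weight`` field. A sketch, assuming
``cell_mass`` was the weight field and a filename following the convention
used elsewhere in this series:

.. code-block:: python

   import yt

   prof_ds = yt.load("DD0046_Profile1D.h5")

   # Both of these return the same stored array.
   print prof_ds.data["weight"]
   print prof_ds.data["cell_mass"]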


https://bitbucket.org/yt_analysis/yt/commits/7801033d04e6/
Changeset:   7801033d04e6
Branch:      yt
User:        brittonsmith
Date:        2015-10-08 14:55:53+00:00
Summary:     Merging with tip.
Affected #:  72 files

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
    building_the_docs
    testing
    debugdrive
+   releasing
    creating_datatypes
    creating_derived_fields
    creating_derived_quantities

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac doc/source/developing/releasing.rst
--- /dev/null
+++ b/doc/source/developing/releasing.rst
@@ -0,0 +1,208 @@
+How to Do a Release
+-------------------
+
+Periodically, the yt development community issues new releases. Since yt follows
+`semantic versioning <http://semver.org/>`_, the type of release can be read off
+from the version number used. Version numbers should follow the scheme
+``MAJOR.MINOR.PATCH``. There are three kinds of possible releases:
+
+* Bugfix releases
+
+  These releases are regularly scheduled and will optimally happen approximately
+  once a month. These releases should contain only fixes for bugs discovered in
+  earlier releases and should not contain new features or API changes. Bugfix
+  releases should increment the ``PATCH`` version number. Bugfix releases should
+  *not* be generated by merging from the ``yt`` branch, instead bugfix pull
+  requests should be manually backported using the PR backport script, described
+  below. Version ``3.2.2`` is a bugfix release.
+
+* Minor releases
+
+  These releases happen when new features are deemed ready to be merged into the
+  ``stable`` branch and should not happen on a regular schedule. Minor releases
+  can also include fixes for bugs if the fix is determined to be too invasive
+  for a bugfix release. Minor releases should *not* include
+  backwards-incompatible changes and should not change APIs.  If an API change
+  is deemed to be necessary, the old API should continue to function but might
+  trigger deprecation warnings. Minor releases should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+  ``MINOR`` version number and reset the ``PATCH`` version number to zero.
+  Version ``3.3.0`` is a minor release.
+
+* Major releases
+
+  These releases happen when the development community decides to make major
+  backwards-incompatible changes. In principle a major version release could
+  include arbitrary changes to the library. Major version releases should only
+  happen after extensive discussion and vetting among the developer and user
+  community. Like minor releases, a major release should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Major releases should increment the
+  ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
+  to zero. If it ever happens, version ``4.0.0`` will be a major release.
+
+The job of doing a release differs depending on the kind of release. Below, we
+describe the necessary steps for each kind of release in detail.
+
+Doing a Bugfix Release
+~~~~~~~~~~~~~~~~~~~~~~
+
+As described above, bugfix releases are regularly scheduled updates for minor
+releases to ensure fixes for bugs make their way out to users in a timely
+manner. Since bugfix releases should not include new features, we do not issue
+bugfix releases by simply merging from the development ``yt`` branch into the
+``stable`` branch.  Instead, we make use of the ``pr_backport.py`` script to
+manually cherry-pick bugfixes from the ``yt`` branch onto the ``stable``
+branch.
+
+The backport script issues interactive prompts to backport individual pull
+requests to the ``stable`` branch in a temporary clone of the main yt mercurial
+repository on bitbucket. The script is written this way to avoid editing
+history in a clone of the repository that a developer uses for day-to-day work
+and to avoid mixing work-in-progress changes with changes that have made their
+way to the "canonical" yt repository on bitbucket.
+
+Rather than automatically manipulating the temporary repository by scripting
+mercurial commands using ``python-hglib``, the script must be "operated" by a
+human who is ready to think carefully about what the script is telling them
+to do. Most operations will merely require copy/pasting a suggested mercurial
+command. However, some changes will require manual backporting.
+
+To run the backport script, first open two terminal windows. The first window
+will be used to run the backport script. The second terminal will be used to
+manipulate a temporary clone of the yt mercurial repository. In the first
+window, navigate to the ``scripts`` directory at the root of the yt repository
+and run the backport script,
+
+.. code-block:: bash
+
+   $ cd $YT_HG/scripts
+   $ python pr_backport.py
+
+You will then need to wait for about a minute (depending on the speed of your
+internet connection and bitbucket's servers) while the script makes a clone of
+the main yt repository and then gathers information about pull requests that
+have been merged since the last tagged release. Once this step finishes, you
+will be prompted to navigate to the temporary folder in a new separate terminal
+session. Do so, and then hit the enter key in the original terminal session.
+
+For each pull request in the set of pull requests that were merged since the
+last tagged release that were pointed at the "main" line of development
+(e.g. not the ``experimental`` bookmark), you will be prompted by the script
+with the PR number, title, description, and a suggested mercurial
+command to use to backport the pull request. If the pull request consists of a
+single changeset, you will be prompted to use ``hg graft``. If it contains more
+than one changeset, you will be prompted to use ``hg rebase``. Note that
+``rebase`` is an optional extension for mercurial that is not turned on by
+default. To enable it, add a section like the following in your ``.hgrc`` file:
+
+.. code-block:: none
+
+   [extensions]
+   rebase=
+
+Since ``rebase`` is bundled with core mercurial, you do not need to specify a
+path to the rebase extension, just say ``rebase=`` and mercurial will find the
+version of ``rebase`` bundled with mercurial. Note also that mercurial does not
+automatically update to the tip of the rebased head after executing ``hg
+rebase`` so you will need to manually issue ``hg update stable`` to move your
+working directory to the new head of the stable branch. The backport script
+should prompt you with a suggestion to update as well.
+
+If the pull request contains merge commits, you must take care to *not* backport
+commits that merge with the main line of development on the ``yt`` branch. Doing
+so may bring unrelated changes, including new features, into a bugfix
+release. If the pull request you'd like to backport contains merge commits, the
+backport script should warn you to be extra careful.
+
+Once you've finished backporting, the script will let you know that you are done
+and warn you to push your work. The temporary repository you have been working
+with will be deleted as soon as the script exits, so take care to push your work
+on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
+fork, you will be able to issue a pull request containing the backported fixes
+just like any other yt pull request.
+
+Doing a Minor or Major Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is much simpler than a bugfix release.  All that needs to happen is the
+``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
+happen must be resolved, almost certainly in favor of the yt branch. This can
+happen either using a merge tool like ``vimdiff`` or ``kdiff3`` or by telling
+mercurial to write merge markers. If you prefer merge markers, the following
+configuration options should be turned on in your ``hgrc`` to get more detail
+during the merge:
+
+.. code-block:: none
+
+   [ui]
+   merge = internal:merge3
+   mergemarkers = detailed
+
+The first option tells mercurial to write merge markers that show the state of
+the conflicted region of the code on both sides of the merge as well as the
+"base" most recent common ancestor changeset. The second option tells mercurial
+to add extra information about the code near the merge markers.
+
+
+Incrementing Version Numbers and Tagging a Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating the tag for the release, you must increment the version numbers
+that are hard-coded in a few files in the yt source so that version metadata
+for the code is generated correctly. This includes things like ``yt.__version__``
+and the version that gets read by the Python Package Index (PyPI) infrastructure.
+
+The paths relative to the root of the repository for the three files that need
+to be edited are:
+
+* ``doc/source/conf.py``
+
+  The ``version`` and ``release`` variables need to be updated.
+
+* ``setup.py``
+
+  The ``VERSION`` variable needs to be updated.
+
+* ``yt/__init__.py``
+
+  The ``__version__`` variable must be updated.
+
+Once these files have been updated, commit these updates. This is the commit we
+will tag for the release.
+
+To actually create the tag, issue the following command:
+
+.. code-block:: bash
+
+   hg tag <tag-name>
+
+Where ``<tag-name>`` follows the project's naming scheme for tags
+(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
+release to pypi.
+
+If you are doing a minor or major version number release, you will also need to
+update back to the development branch and update the development version numbers
+in the same files.
+
+
+Uploading to PyPI
+~~~~~~~~~~~~~~~~~
+
+To actually upload the release to the Python Package Index, you just need to
+issue the following command:
+
+.. code-block:: bash
+
+   python setup.py sdist upload -r https://pypi.python.org/pypi
+
+You will be prompted for your PyPI credentials and then the package should
+upload. Note that for this to complete successfully, you will need an account on
+PyPI and that account will need to be registered as an "owner" of the yt
+package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
+Goldbaum.
+
+After the release is uploaded to PyPI, you should send out an announcement
+e-mail to the yt mailing lists as well as other possibly interested mailing
+lists for all but bugfix releases. In addition, you should contact John ZuHone
+about uploading binary wheels to PyPI for Windows and OS X users and contact
+Nathan Goldbaum about getting the Anaconda packages updated.
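
As a concrete illustration of the three version-string edits described above
(the values shown are hypothetical):

.. code-block:: python

   # doc/source/conf.py
   version = "3.2"
   release = "3.2.2"

   # setup.py
   VERSION = "3.2.2"

   # yt/__init__.py
   __version__ = "3.2.2"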

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -22,7 +22,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
+| Castro                |     Y      |     N     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
@@ -42,7 +42,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | MOAB                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Nyx                   |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Orion                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac scripts/pr_backport.py
--- /dev/null
+++ b/scripts/pr_backport.py
@@ -0,0 +1,311 @@
+import hglib
+import requests
+import shutil
+import tempfile
+
+from datetime import datetime
+from distutils.version import LooseVersion
+from time import strptime, mktime
+
+MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
+                      "yt/pullrequests/?state=MERGED")
+
+YT_REPO = "https://bitbucket.org/yt_analysis/yt"
+
+
+def clone_new_repo(source=None):
+    """Clones a new copy of yt_analysis/yt and returns a path to it"""
+    path = tempfile.mkdtemp()
+    dest_repo_path = path+'/yt-backport'
+    if source is None:
+        source = YT_REPO
+    hglib.clone(source=source, dest=dest_repo_path)
+    with hglib.open(dest_repo_path) as client:
+        # Changesets that are on the yt branch but aren't topological ancestors
+        # of whichever changeset the experimental bookmark is pointing at
+        client.update('heads(branch(yt) - ::bookmark(experimental))')
+    return dest_repo_path
+
+
+def get_first_commit_after_last_major_release(repo_path):
+    """Returns the SHA1 hash of the first commit to the yt branch that wasn't
+    included in the last tagged release.
+    """
+    with hglib.open(repo_path) as client:
+        tags = client.log("reverse(tag())")
+        tags = sorted([LooseVersion(t[2]) for t in tags])
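+        # LooseVersion splits a tag name like 'yt-3.2.1' into the components
+        # ['yt', '-', 3, 2, 1], so version[0:2] identifies yt release tags and
+        # version[4] is the bugfix number when present.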
+        for t in tags[::-1]:
+            if t.version[0:2] != ['yt', '-']:
+                continue
+            if len(t.version) == 4 or t.version[4] == 0:
+                last_major_tag = t
+                break
+        last_before_release = client.log(
+            "last(ancestors(%s) and branch(yt))" % str(last_major_tag))
+        first_after_release = client.log(
+            "first(descendants(%s) and branch(yt) and not %s)"
+            % (last_before_release[0][1], last_before_release[0][1]))
+    return str(first_after_release[0][1][:12])
+
+
+def get_branch_tip(repo_path, branch, exclude=None):
+    """Returns the SHA1 hash of the most recent commit on the given branch"""
+    revset = "head() and branch(%s)" % branch
+    if exclude is not None:
+        revset += "and not %s" % exclude
+    with hglib.open(repo_path) as client:
+        change = client.log(revset)[0][1][:12]
+    return change
+
+
+def get_lineage_between_release_and_tip(repo_path, first, last):
+    """Returns the lineage of changesets that were at one point the public tip"""
+    with hglib.open(repo_path) as client:
+        lineage = client.log("'%s'::'%s' and p1('%s'::'%s') + '%s'"
+                             % (first, last, first, last, last))
+        return lineage
+
+
+def get_pull_requests_since_last_release(repo_path):
+    """Returns a list of pull requests made since the last tagged release"""
+    r = requests.get(MERGED_PR_ENDPOINT)
+    done = False
+    merged_prs = []
+    with hglib.open(repo_path) as client:
+        last_tag = client.log("reverse(tag())")[0]
+    while not done:
+        if r.status_code != 200:
+            raise RuntimeError
+        data = r.json()
+        prs = data['values']
+        for pr in prs:
+            activity = requests.get(pr['links']['activity']['href']).json()
+            merge_date = None
+            for action in activity['values']:
+                if 'update' in action and action['update']['state'] == 'MERGED':
+                    merge_date = action['update']['date']
+                    merge_date = merge_date.split('.')[0]
+                    timestamp = mktime(strptime(merge_date, "%Y-%m-%dT%H:%M:%S"))
+                    merge_date = datetime.fromtimestamp(timestamp)
+                    break
+            if merge_date is None:
+                break
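+            # last_tag[6] is the commit datetime of the tagged changeset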
+            if merge_date < last_tag[6]:
+                done = True
+                break
+            merged_prs.append(pr)
+        r = requests.get(data['next'])
+    return merged_prs
+
+
+def cache_commit_data(prs):
+    """Avoid repeated calls to bitbucket API to get the list of commits per PR"""
+    commit_data = {}
+    for pr in prs:
+        data = requests.get(pr['links']['commits']['href']).json()
+        if data.keys() == [u'error']:
+            # this happens when commits have been stripped, e.g.
+            # https://bitbucket.org/yt_analysis/yt/pull-requests/1641
+            continue
+        done = False
+        commits = []
+        while not done:
+            commits.extend(data['values'])
+            if 'next' not in data:
+                done = True
+            else:
+                data = requests.get(data['next']).json()
+        commit_data[pr['id']] = commits
+    return commit_data
+
+
+def find_commit_in_prs(needle, commit_data, prs):
+    """Finds the commit `needle` PR in the commit_data dictionary
+
+    If found, returns the pr the needle commit is in. If the commit was not
+    part of the PRs in the dictionary, returns None.
+    """
+    for pr_id in commit_data:
+        commits = commit_data[pr_id]
+        for commit in commits:
+            if commit['hash'] == needle[1]:
+                pr = [pr for pr in prs if pr['id'] == pr_id][0]
+                return pr
+    return None
+
+
+def find_merge_commit_in_prs(needle, prs):
+    """Find the merge commit `needle` in the list of `prs`
+
+    If found, returns the PR the merge commit comes from. If not found, returns
+    None.
+    """
+    for pr in prs[::-1]:
+        if pr['merge_commit'] is not None:
+            if pr['merge_commit']['hash'] == needle[1][:12]:
+                return pr
+    return None
+
+
+def create_commits_to_prs_mapping(lineage, prs):
+    """Create a mapping from commits to the pull requests each commit is
+    part of.
+    """
+    commits_to_prs = {}
+    # make a copy of this list to avoid side effects from calling this function
+    my_prs = list(prs)
+    commit_data = cache_commit_data(my_prs)
+    for commit in lineage:
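+        # hglib log entries are tuples of
+        # (rev, node, tags, branch, author, desc, date)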
+        cset_hash = commit[1]
+        message = commit[5]
+        if message.startswith('Merged in') and '(pull request #' in message:
+            pr = find_merge_commit_in_prs(commit, my_prs)
+            if pr is None:
+                continue
+            commits_to_prs[cset_hash] = pr
+            # Since we know this PR won't have another commit associated with it,
+            # remove from global list to reduce number of network accesses
+            my_prs.remove(commits_to_prs[cset_hash])
+        else:
+            pr = find_commit_in_prs(commit, commit_data, my_prs)
+            commits_to_prs[cset_hash] = pr
+    return commits_to_prs
+
+
+def invert_commits_to_prs_mapping(commits_to_prs):
+    """invert the mapping from individual commits to pull requests"""
+    inv_map = {}
+    for k, v in commits_to_prs.iteritems():
+        # can't save v itself in inv_map since it's an unhashable dictionary
+        if v is not None:
+            created_date = v['created_on'].split('.')[0]
+            timestamp = mktime(strptime(created_date, "%Y-%m-%dT%H:%M:%S"))
+            created_date = datetime.fromtimestamp(timestamp)
+            pr_desc = (v['id'], v['title'], created_date,
+                       v['links']['html']['href'], v['description'])
+        else:
+            pr_desc = None
+        inv_map[pr_desc] = inv_map.get(pr_desc, [])
+        inv_map[pr_desc].append(k)
+    return inv_map
+
+
+def get_last_descendant(repo_path, commit):
+    """get the most recent descendant of a commit"""
+    with hglib.open(repo_path) as client:
+        com = client.log('last(%s::)' % commit)
+    return com[0][1][:12]
+
+def screen_already_backported(repo_path, inv_map):
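+    """Remove pull requests from inv_map that have already been backported"""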
+    with hglib.open(repo_path) as client:
+        tags = client.log("reverse(tag())")
+        major_tags = [t for t in tags if t[2].endswith('.0')]
+        most_recent_major_tag_name = major_tags[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_major_tag_name)
+        prs_to_screen = []
+        for pr in inv_map:
+            for commit in lineage:
+                if commit[5].startswith('Backporting PR #%s' % pr[0]):
+                    prs_to_screen.append(pr)
+        for pr in prs_to_screen:
+            del inv_map[pr]
+        return inv_map
+
+def commit_already_on_stable(repo_path, commit):
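+    """Check whether a commit has already been grafted onto the stable branch"""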
+    with hglib.open(repo_path) as client:
+        commit_info = client.log(commit)[0]
+        most_recent_tag_name = client.log("reverse(tag())")[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_tag_name)
+        # if there is a stable commit with the same commit message,
+        # it's been grafted
+        if any([commit_info[5] == c[5] for c in lineage]):
+            return True
+        return False
+
+def backport_pr_commits(repo_path, inv_map, last_stable, prs):
+    """backports pull requests to the stable branch.
+
+    Accepts a dictionary mapping pull requests to a list of commits that
+    are in the pull request.
+    """
+    pr_list = inv_map.keys()
+    pr_list = sorted(pr_list, key=lambda x: x[2])
+    for pr_desc in pr_list:
+        merge_warn = False
+        merge_commits = []
+        pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
+        data = requests.get(pr['links']['commits']['href']).json()
+        commits = data['values']
+        while 'next' in data:
+            data = requests.get(data['next']).json()
+            commits.extend(data['values'])
+        commits = [com['hash'][:12] for com in commits]
+        with hglib.open(repo_path) as client:
+            for com in commits:
+                if client.log('merge() and %s' % com) != []:
+                    merge_warn = True
+                    merge_commits.append(com)
+        if len(commits) > 1:
+            revset = " | ".join(commits)
+            revset = '"%s"' % revset
+            message = "Backporting PR #%s %s" % \
+                (pr['id'], pr['links']['html']['href'])
+            dest = get_last_descendant(repo_path, last_stable)
+            message = \
+                "hg rebase -r %s --keep --collapse -m \"%s\" -d %s\n" % \
+                (revset, message, dest)
+            message += "hg update stable\n\n"
+            if merge_warn is True:
+                if len(merge_commits) > 1:
+                    merge_commits = ", ".join(merge_commits)
+                else:
+                    merge_commits = merge_commits[0]
+                message += \
+                    "WARNING, PULL REQUEST CONTAINS MERGE COMMITS, CONSIDER\n" \
+                    "BACKPORTING BY HAND TO AVOID BACKPORTING UNWANTED CHANGES\n"
+                message += \
+                    "Merge commits are %s\n\n" % merge_commits
+        else:
+            if commit_already_on_stable(repo_path, commits[0]) is True:
+                continue
+            message = "hg graft %s\n" % commits[0]
+        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
+        print "To backport, issue the following command(s):\n"
+        print message
+        raw_input('Press any key to continue')
+
+
+if __name__ == "__main__":
+    print ""
+    print "Gathering PR information, this may take a minute."
+    print "Don't worry, yt loves you."
+    print ""
+    repo_path = clone_new_repo()
+    try:
+        last_major_release = get_first_commit_after_last_major_release(repo_path)
+        last_dev = get_branch_tip(repo_path, 'yt', 'experimental')
+        last_stable = get_branch_tip(repo_path, 'stable')
+        lineage = get_lineage_between_release_and_tip(
+            repo_path, last_major_release, last_dev)
+        prs = get_pull_requests_since_last_release(repo_path)
+        commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
+        inv_map = invert_commits_to_prs_mapping(commits_to_prs)
+        # for now, ignore commits that aren't part of a pull request since
+        # the last bugfix release. These are mostly commits in pull requests
+        # from before the last bugfix release but might include commits that
+        # were pushed directly to the repo.
+        del inv_map[None]
+
+        inv_map = screen_already_backported(repo_path, inv_map)
+        print "In another terminal window, navigate to the following path:"
+        print "%s" % repo_path
+        raw_input("Press any key to continue")
+        backport_pr_commits(repo_path, inv_map, last_stable, prs)
+        raw_input(
+            "Now you need to push your backported changes. The temporary\n"
+            "repository currently being used will be deleted as soon as you\n"
+            "press any key.")
+    finally:
+        shutil.rmtree(repo_path)

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac scripts/yt_lodgeit.py
--- a/scripts/yt_lodgeit.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-    LodgeIt!
-    ~~~~~~~~
-
-    A script that pastes stuff into the yt-project pastebin on
-    paste.yt-project.org.
-
-    Modified (very, very slightly) from the original script by the authors
-    below.
-
-    .lodgeitrc / _lodgeitrc
-    -----------------------
-
-    Under UNIX create a file called ``~/.lodgeitrc``, under Windows
-    create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
-
-        language=default_language
-        clipboard=true/false
-        open_browser=true/false
-        encoding=fallback_charset
-
-    :authors: 2007-2008 Georg Brandl <georg at python.org>,
-              2006 Armin Ronacher <armin.ronacher at active-4.com>,
-              2006 Matt Good <matt at matt-good.net>,
-              2005 Raphael Slinckx <raphael at slinckx.net>
-"""
-import os
-import sys
-from optparse import OptionParser
-
-
-SCRIPT_NAME = os.path.basename(sys.argv[0])
-VERSION = '0.3'
-SERVICE_URL = 'http://paste.yt-project.org/'
-SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
-                'open_browser']
-
-# global server proxy
-_xmlrpc_service = None
-
-
-def fail(msg, code):
-    """Bail out with an error message."""
-    print >> sys.stderr, 'ERROR: %s' % msg
-    sys.exit(code)
-
-
-def load_default_settings():
-    """Load the defaults from the lodgeitrc file."""
-    settings = {
-        'language':     None,
-        'clipboard':    True,
-        'open_browser': False,
-        'encoding':     'iso-8859-15'
-    }
-    rcfile = None
-    if os.name == 'posix':
-        rcfile = os.path.expanduser('~/.lodgeitrc')
-    elif os.name == 'nt' and 'APPDATA' in os.environ:
-        rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
-    if rcfile:
-        try:
-            f = open(rcfile)
-            for line in f:
-                if line.strip()[:1] in '#;':
-                    continue
-                p = line.split('=', 1)
-                if len(p) == 2:
-                    key = p[0].strip().lower()
-                    if key in settings:
-                        if key in ('clipboard', 'open_browser'):
-                            settings[key] = p[1].strip().lower() in \
-                                            ('true', '1', 'on', 'yes')
-                        else:
-                            settings[key] = p[1].strip()
-            f.close()
-        except IOError:
-            pass
-    settings['tags'] = []
-    settings['title'] = None
-    return settings
-
-
-def make_utf8(text, encoding):
-    """Convert a text to UTF-8, brute-force."""
-    try:
-        u = unicode(text, 'utf-8')
-        uenc = 'utf-8'
-    except UnicodeError:
-        try:
-            u = unicode(text, encoding)
-            uenc = 'utf-8'
-        except UnicodeError:
-            u = unicode(text, 'iso-8859-15', 'ignore')
-            uenc = 'iso-8859-15'
-    try:
-        import chardet
-    except ImportError:
-        return u.encode('utf-8')
-    d = chardet.detect(text)
-    if d['encoding'] == uenc:
-        return u.encode('utf-8')
-    return unicode(text, d['encoding'], 'ignore').encode('utf-8')
-
-
-def get_xmlrpc_service():
-    """Create the XMLRPC server proxy and cache it."""
-    global _xmlrpc_service
-    import xmlrpclib
-    if _xmlrpc_service is None:
-        try:
-            _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
-                                                    allow_none=True)
-        except Exception, err:
-            fail('Could not connect to Pastebin: %s' % err, -1)
-    return _xmlrpc_service
-
-
-def copy_url(url):
-    """Copy the url into the clipboard."""
-    # try windows first
-    try:
-        import win32clipboard
-    except ImportError:
-        # then give pbcopy a try.  do that before gtk because
-        # gtk might be installed on os x but nobody is interested
-        # in the X11 clipboard there.
-        from subprocess import Popen, PIPE
-        try:
-            client = Popen(['pbcopy'], stdin=PIPE)
-        except OSError:
-            try:
-                import pygtk
-                pygtk.require('2.0')
-                import gtk
-                import gobject
-            except ImportError:
-                return
-            gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
-            gobject.idle_add(gtk.main_quit)
-            gtk.main()
-        else:
-            client.stdin.write(url)
-            client.stdin.close()
-            client.wait()
-    else:
-        win32clipboard.OpenClipboard()
-        win32clipboard.EmptyClipboard()
-        win32clipboard.SetClipboardText(url)
-        win32clipboard.CloseClipboard()
-
-
-def open_webbrowser(url):
-    """Open a new browser window."""
-    import webbrowser
-    webbrowser.open(url)
-
-
-def language_exists(language):
-    """Check if a language alias exists."""
-    xmlrpc = get_xmlrpc_service()
-    langs = xmlrpc.pastes.getLanguages()
-    return language in langs
-
-
-def get_mimetype(data, filename):
-    """Try to get MIME type from data."""
-    try:
-        import gnomevfs
-    except ImportError:
-        from mimetypes import guess_type
-        if filename:
-            return guess_type(filename)[0]
-    else:
-        if filename:
-            return gnomevfs.get_mime_type(os.path.abspath(filename))
-        return gnomevfs.get_mime_type_for_data(data)
-
-
-def print_languages():
-    """Print a list of all supported languages, with description."""
-    xmlrpc = get_xmlrpc_service()
-    languages = xmlrpc.pastes.getLanguages().items()
-    languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
-    print 'Supported Languages:'
-    for alias, name in languages:
-        print '    %-30s%s' % (alias, name)
-
-
-def download_paste(uid):
-    """Download a paste given by ID."""
-    xmlrpc = get_xmlrpc_service()
-    paste = xmlrpc.pastes.getPaste(uid)
-    if not paste:
-        fail('Paste "%s" does not exist.' % uid, 5)
-    print paste['code'].encode('utf-8')
-
-
-def create_paste(code, language, filename, mimetype, private):
-    """Create a new paste."""
-    xmlrpc = get_xmlrpc_service()
-    rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
-                                private)
-    if not rv:
-        fail('Could not create paste. Something went wrong '
-             'on the server side.', 4)
-    return rv
-
-
-def compile_paste(filenames, langopt):
-    """Create a single paste out of zero, one or multiple files."""
-    def read_file(f):
-        try:
-            return f.read()
-        finally:
-            f.close()
-    mime = ''
-    lang = langopt or ''
-    if not filenames:
-        data = read_file(sys.stdin)
-        if not langopt:
-            mime = get_mimetype(data, '') or ''
-        fname = ""
-    elif len(filenames) == 1:
-        fname = filenames[0]
-        data = read_file(open(filenames[0], 'rb'))
-        if not langopt:
-            mime = get_mimetype(data, filenames[0]) or ''
-    else:
-        result = []
-        for fname in filenames:
-            data = read_file(open(fname, 'rb'))
-            if langopt:
-                result.append('### %s [%s]\n\n' % (fname, langopt))
-            else:
-                result.append('### %s\n\n' % fname)
-            result.append(data)
-            result.append('\n\n')
-        data = ''.join(result)
-        lang = 'multi'
-    return data, lang, fname, mime
-
-
-def main():
-    """Main script entry point."""
-
-    usage = ('Usage: %%prog [options] [FILE ...]\n\n'
-             'Read the files and paste their contents to %s.\n'
-             'If no file is given, read from standard input.\n'
-             'If multiple files are given, they are put into a single paste.'
-             % SERVICE_URL)
-    parser = OptionParser(usage=usage)
-
-    settings = load_default_settings()
-
-    parser.add_option('-v', '--version', action='store_true',
-                      help='Print script version')
-    parser.add_option('-L', '--languages', action='store_true', default=False,
-                      help='Retrieve a list of supported languages')
-    parser.add_option('-l', '--language', default=settings['language'],
-                      help='Used syntax highlighter for the file')
-    parser.add_option('-e', '--encoding', default=settings['encoding'],
-                      help='Specify the encoding of a file (default is '
-                           'utf-8 or guessing if available)')
-    parser.add_option('-b', '--open-browser', dest='open_browser',
-                      action='store_true',
-                      default=settings['open_browser'],
-                      help='Open the paste in a web browser')
-    parser.add_option('-p', '--private', action='store_true', default=False,
-                      help='Paste as private')
-    parser.add_option('--no-clipboard', dest='clipboard',
-                      action='store_false',
-                      default=settings['clipboard'],
-                      help="Don't copy the url into the clipboard")
-    parser.add_option('--download', metavar='UID',
-                      help='Download a given paste')
-
-    opts, args = parser.parse_args()
-
-    # special modes of operation:
-    # - paste script version
-    if opts.version:
-        print '%s: version %s' % (SCRIPT_NAME, VERSION)
-        sys.exit()
-    # - print list of languages
-    elif opts.languages:
-        print_languages()
-        sys.exit()
-    # - download Paste
-    elif opts.download:
-        download_paste(opts.download)
-        sys.exit()
-
-    # check language if given
-    if opts.language and not language_exists(opts.language):
-        fail('Language %s is not supported.' % opts.language, 3)
-
-    # load file(s)
-    try:
-        data, language, filename, mimetype = compile_paste(args, opts.language)
-    except Exception, err:
-        fail('Error while reading the file(s): %s' % err, 2)
-    if not data:
-        fail('Aborted, no content to paste.', 4)
-
-    # create paste
-    code = make_utf8(data, opts.encoding)
-    pid = create_paste(code, language, filename, mimetype, opts.private)
-    url = '%sshow/%s/' % (SERVICE_URL, pid)
-    print url
-    if opts.open_browser:
-        open_webbrowser(url)
-    if opts.clipboard:
-        copy_url(url)
-
-
-if __name__ == '__main__':
-    sys.exit(main())

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac setup.py
--- a/setup.py
+++ b/setup.py
@@ -164,7 +164,7 @@
     config.make_config_py()
     # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
-    config.add_scripts("scripts/*")
+    config.add_scripts("scripts/iyt")
 
     return config
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -15,7 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from .absorption_line import tau_profile

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,4 +1,4 @@
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.analysis_modules.absorption_spectrum.absorption_line import \

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
@@ -34,7 +34,7 @@
 
 
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import glob
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import gc
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import math
 import numpy as np
 import glob

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -22,7 +22,7 @@
 from collections import OrderedDict
 
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 
 class ParticleTrajectories(object):
     r"""A collection of particle trajectories in time over a series of

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -199,15 +199,24 @@
                 ei = start_e
                 for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
                     if cn == 0: continue
+                    # The rather verbose form of the next few statements is a
+                    # result of code optimization and shouldn't be changed
+                    # without checking for performance degradation. See
+                    # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
+                    # for details.
                     if self.method == "invert_cdf":
-                        cumspec = cumspec_c + Z*cumspec_m
-                        cumspec /= cumspec[-1]
+                        cumspec = cumspec_c
+                        cumspec += Z * cumspec_m
+                        norm_factor = 1.0 / cumspec[-1]
+                        cumspec *= norm_factor
                         randvec = np.random.uniform(size=cn)
                         randvec.sort()
                         cell_e = np.interp(randvec, cumspec, ebins)
                     elif self.method == "accept_reject":
-                        tot_spec = cspec.d+Z*mspec.d
-                        tot_spec /= tot_spec.sum()
+                        tot_spec = cspec.d
+                        tot_spec += Z * mspec.d
+                        norm_factor = 1.0 / tot_spec.sum()
+                        tot_spec *= norm_factor
                         eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
                         cell_e = emid[eidxs]
                     energies[ei:ei+cn] = cell_e

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -35,7 +35,7 @@
     communication_system, parallel_root_only, get_mpi_type, \
     parallel_capable
 from yt.units.yt_array import YTQuantity, YTArray, uconcatenate
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 from yt.utilities.on_demand_imports import _astropy
 import warnings
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -11,7 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -16,7 +16,7 @@
 
 import os
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import math
 
 from yt.config import ytcfg

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.funcs import mylog

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.frontends.ytdata.utilities import \

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/data_objects/tests/test_compose.py
--- a/yt/data_objects/tests/test_compose.py
+++ b/yt/data_objects/tests/test_compose.py
@@ -20,7 +20,6 @@
     yi = y / min_dx
     zi = z / min_dx
     index = xi + delta[0] * (yi + delta[1] * zi)
-    index = index.astype('int64')
     return index
 
 def test_compose_no_overlap():

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -83,7 +83,11 @@
     ("CuttingPlaneBy",                   "cutting_plane_by"),
     ("MeanMolecularWeight",              "mean_molecular_weight"),
     ("particle_density",                 "particle_density"),
+    ("ThermalEnergy",                    "thermal_energy"),
+    ("TotalEnergy",                      "total_energy"),
     ("MagneticEnergy",                   "magnetic_energy"),
+    ("GasEnergy",                        "thermal_energy"),
+    ("Gas_Energy",                       "thermal_energy"),
     ("BMagnitude",                       "b_magnitude"),
     ("PlasmaBeta",                       "plasma_beta"),
     ("MagneticPressure",                 "magnetic_pressure"),

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -806,6 +806,7 @@
     registry.add_field(field_name, function = _vol_weight,
                        validators = [ValidateSpatial(0)],
                        units = field_units)
+    registry.find_dependencies((field_name,))
     return [field_name]
 
 def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64):

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -193,7 +193,7 @@
         particle header, star files, etc.
         """
         base_prefix, base_suffix = filename_pattern['amr']
-        aexpstr = 'a'+file_amr.rsplit('a',1)[1].replace(base_suffix,'')
+        numericstr = file_amr.rsplit('_',1)[1].replace(base_suffix,'')
         possibles = glob.glob(os.path.dirname(os.path.abspath(file_amr))+"/*")
         for filetype, (prefix, suffix) in filename_pattern.items():
             # if this attribute is already set skip it
@@ -201,7 +201,10 @@
                 continue
             match = None
             for possible in possibles:
-                if possible.endswith(aexpstr+suffix):
+                if possible.endswith(numericstr+suffix):
+                    if os.path.basename(possible).startswith(prefix):
+                        match = possible
+                elif possible.endswith(suffix):
                     if os.path.basename(possible).startswith(prefix):
                         match = possible
             if match is not None:

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import weakref
 import glob #ST 9/12

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import re
 import os
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -88,7 +88,7 @@
         field_dict = {}
         for key, val in self._handle.attrs.items():
             if key.startswith('particle_'):
-                comp_number = int(re.match('particle_component_(\d)', key).groups()[0])
+                comp_number = int(re.match('particle_component_(\d+)', key).groups()[0])
                 field_dict[val.decode("ascii")] = comp_number
         self._particle_field_index = field_dict
         return self._particle_field_index

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/eagle/data_structures.py
--- a/yt/frontends/eagle/data_structures.py
+++ b/yt/frontends/eagle/data_structures.py
@@ -15,7 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import types
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -78,16 +78,17 @@
 
     def __call__(self):
         # Read in the ds
-        ds = load(self.data_file)  
-        exact = self.get_analytical_solution() 
+        ds = load(self.data_file)
+        ds.setup_deprecated_fields()
+        exact = self.get_analytical_solution()
 
         ad = ds.all_data()
         position = ad['x']
         for k in self.fields:
-            field = ad[k]
+            field = ad[k].d
             for xmin, xmax in zip(self.left_edges, self.right_edges):
                 mask = (position >= xmin)*(position <= xmax)
-                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                exact_field = np.interp(position[mask], exact['pos'], exact[k])
                 myname = "ShockTubeTest_%s" % k
                 # yield test vs analytical solution 
                 yield AssertWrapper(myname, assert_allclose, field[mask], 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import weakref
 import numpy as np
 import os
@@ -636,7 +636,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions[:,:1] = ei
-        self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
+        self.grid_dimensions[:,:1] -= np.array(si, dtype='i4')
         self.grid_dimensions += 1
         self.grid_left_edge[:,:1] = LE
         self.grid_right_edge[:,:1] = RE
@@ -651,7 +651,7 @@
 
     def _fill_arrays(self, ei, si, LE, RE, npart, nap):
         self.grid_dimensions[:,:2] = ei
-        self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
+        self.grid_dimensions[:,:2] -= np.array(si, dtype='i4')
         self.grid_dimensions += 1
         self.grid_left_edge[:,:2] = LE
         self.grid_right_edge[:,:2] = RE

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -22,7 +22,7 @@
 from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.selection_routines import mask_fill, AlwaysSelector
 from yt.extern.six import u, b, iteritems
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 
 import numpy as np
 from yt.funcs import *

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import stat
 import numpy as np
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 from yt.utilities.math_utils import prec_accum
 from itertools import groupby
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -15,7 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import stat
 import struct

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -60,10 +60,12 @@
 def test_iso_collapse():
     for test in sph_answer(isothermal_h5, 'snap_505', 2**17,
                            iso_fields, ds_kwargs=iso_kwargs):
+        test_iso_collapse.__name__ = test.description
         yield test
 
 @requires_ds(gdg, big_data=True)
 def test_gadget_disk_galaxy():
     for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
                            ds_kwargs=gdg_kwargs):
+        test_gadget_disk_galaxy.__name__ = test.description
         yield test

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -15,7 +15,7 @@
 #-----------------------------------------------------------------------------
 
 from collections import defaultdict
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import stat
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.utilities.exceptions import *

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import types
 import numpy as np
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 from yt.funcs import \
     mylog
 from yt.utilities.io_handler import \

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import stat
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/halo_catalog/io.py
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.utilities.exceptions import *

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import os
 import numpy as np
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/owls/data_structures.py
--- a/yt/frontends/owls/data_structures.py
+++ b/yt/frontends/owls/data_structures.py
@@ -15,7 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import types
 
 import yt.units

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/owls/io.py
--- a/yt/frontends/owls/io.py
+++ b/yt/frontends/owls/io.py
@@ -15,7 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/owls/owls_ion_tables.py
--- a/yt/frontends/owls/owls_ion_tables.py
+++ b/yt/frontends/owls/owls_ion_tables.py
@@ -17,7 +17,7 @@
 #-----------------------------------------------------------------------------
 
 import sys
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -46,6 +46,7 @@
 @requires_ds(os33, big_data=True)
 def test_snapshot_033():
     for test in sph_answer(os33, 'snap_033', 2*128**3, _fields):
+        test_snapshot_033.__name__ = test.description
         yield test
 
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -15,7 +15,7 @@
 #-----------------------------------------------------------------------------
 
 from collections import defaultdict
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import stat
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/owls_subfind/io.py
--- a/yt/frontends/owls_subfind/io.py
+++ b/yt/frontends/owls_subfind/io.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.utilities.exceptions import *

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/rockstar/io.py
--- a/yt/frontends/rockstar/io.py
+++ b/yt/frontends/rockstar/io.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.utilities.exceptions import *

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -15,7 +15,7 @@
 #-----------------------------------------------------------------------------
 
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import stat
 import weakref

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -111,6 +111,7 @@
 @requires_ds(tipsy_gal)
 def test_tipsy_galaxy():
     for test in sph_answer(tipsy_gal, 'galaxy.00300', 315372, tg_fields):
+        test_tipsy_galaxy.__name__ = test.description
         yield test
 
 @requires_file(gasoline_dmonly)

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -17,7 +17,7 @@
 import os
 from yt.extern.six.moves import cPickle
 import weakref
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.config import ytcfg

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -14,7 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import weakref
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -467,6 +467,13 @@
     yield assert_equal, str(em3.in_mks().units), 'kg/(m*s**2)'
     yield assert_equal, str(em3.in_cgs().units), 'g/(cm*s**2)'
 
+    dimless = YTQuantity(1.0, "")
+    yield assert_equal, dimless.in_cgs(), dimless
+    yield assert_equal, dimless.in_cgs(), 1.0
+    yield assert_equal, dimless.in_mks(), dimless
+    yield assert_equal, dimless.in_mks(), 1.0
+    yield assert_equal, str(dimless.in_cgs().units), "dimensionless"
+
 def test_temperature_conversions():
     """
     Test conversions between various supported temperatue scales.

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -394,9 +394,15 @@
         # Use sympy to factor the dimensions into base CGS unit symbols.
         units = []
         my_dims = self.dimensions.expand()
-        for dim in base_units:
+        if my_dims is dimensionless:
+            return ""
+        for factor in my_dims.as_ordered_factors():
+            dim = list(factor.free_symbols)[0]
             unit_string = base_units[dim]
-            power_string = "**(%s)" % my_dims.as_coeff_exponent(dim)[1]
+            if factor.is_Pow:
+                power_string = "**(%s)" % factor.as_base_exp()[1]
+            else:
+                power_string = ""
             units.append("".join([unit_string, power_string]))
         return " * ".join(units)
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -16,7 +16,7 @@
 
 from yt.funcs import *
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 from .amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -826,18 +826,21 @@
     if ds_kwargs is None:
         ds_kwargs = {}
     ds = data_dir_load(ds_fn, kwargs=ds_kwargs)
-    yield assert_equal, str(ds), ds_str_repr
+    yield AssertWrapper("%s_string_representation" % str(ds), assert_equal,
+                        str(ds), ds_str_repr)
     dso = [None, ("sphere", ("c", (0.1, 'unitary')))]
     dd = ds.all_data()
-    yield assert_equal, dd["particle_position"].shape, (ds_nparticles, 3)
+    yield AssertWrapper("%s_all_data_part_shape" % str(ds), assert_equal,
+                        dd["particle_position"].shape, (ds_nparticles, 3))
     tot = sum(dd[ptype, "particle_position"].shape[0]
               for ptype in ds.particle_types if ptype != "all")
-    yield assert_equal, tot, ds_nparticles
+    yield AssertWrapper("%s_all_data_part_total" % str(ds), assert_equal,
+                        tot, ds_nparticles)
     for dobj_name in dso:
         dobj = create_obj(ds, dobj_name)
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
-        yield assert_equal, s1, s2
+        yield AssertWrapper("%s_mask_test" % str(ds), assert_equal, s1, s2)
         for field, weight_field in fields.items():
             if field[0] in ds.particle_types:
                 particle_type = True
@@ -850,7 +853,6 @@
                         dobj_name)
             yield FieldValuesTest(ds_fn, field, dobj_name,
                                   particle_type=particle_type)
-    return
 
 def create_obj(ds, obj_type):
     # obj_type should be tuple of

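``AssertWrapper`` exists so that each yielded assertion carries its own name: nose reads a ``description`` attribute when reporting generator tests, which turns an anonymous ``assert_equal`` failure into an identifiable one. A minimal sketch of the pattern (the real class lives elsewhere in yt/utilities/answer_testing/framework.py and may differ in detail):

.. code-block:: python

   class AssertWrapper(object):
       """Wrap an assertion callable with a readable name for nose."""
       def __init__(self, description, *args):
           # nose reads the ``description`` attribute when naming
           # tests yielded from a generator.
           self.description = description
           self.func = args[0]
           self.args = args[1:]

       def __call__(self):
           self.func(*self.args)
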
diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 
 from distutils.version import LooseVersion
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -3,7 +3,7 @@
 import os
 import weakref
 import numpy as np
-import h5py as h5
+from yt.utilities.on_demand_imports import _h5py as h5
 from .conversion_abc import *
 from glob import glob
 from collections import \

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/grid_data_format/tests/test_writer.py
--- a/yt/utilities/grid_data_format/tests/test_writer.py
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -15,7 +15,7 @@
 import tempfile
 import shutil
 import os
-import h5py as h5
+from yt.utilities.on_demand_imports import _h5py as h5
 from yt.testing import \
     fake_random_ds, assert_equal
 from yt.utilities.grid_data_format.writer import \

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -15,7 +15,7 @@
 
 import os
 import sys
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 from contextlib import contextmanager
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -19,7 +19,7 @@
 from yt.funcs import mylog
 from yt.extern.six.moves import cPickle
 import os
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 from yt.extern.six import add_metaclass
 

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -17,6 +17,7 @@
 
         # Get compiler invocation
         compiler = os.getenv('CC', 'cc')
+        compiler = compiler.split(' ')
 
         # Attempt to compile a test script.
         # See http://openmp.org/wp/openmp-compilers/
@@ -32,11 +33,13 @@
             )
         file.flush()
         with open(os.devnull, 'w') as fnull:
-            exit_code = subprocess.call([compiler, '-fopenmp', filename],
+            exit_code = subprocess.call(compiler + ['-fopenmp', filename],
                                         stdout=fnull, stderr=fnull)
 
         # Clean up
         file.close()
+    except OSError:
+        return False
     finally:
         os.chdir(curdir)
         shutil.rmtree(tmpdir)

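Splitting the ``CC`` value means compound invocations such as ``CC="ccache gcc"`` still work when handed to ``subprocess.call``, and catching ``OSError`` turns a missing compiler into a clean "no OpenMP" answer instead of an aborted build. A condensed sketch of the whole probe, assuming the surrounding setup code looks roughly like this:

.. code-block:: python

   import os
   import shutil
   import subprocess
   import tempfile

   def check_for_openmp():
       """Return True if the configured C compiler accepts -fopenmp."""
       compiler = os.getenv("CC", "cc").split(" ")
       tmpdir = tempfile.mkdtemp()
       curdir = os.getcwd()
       try:
           os.chdir(tmpdir)
           with open("test.c", "w") as f:
               f.write("#include <omp.h>\n"
                       "int main(void) { return omp_get_num_threads() > 64; }\n")
           with open(os.devnull, "w") as fnull:
               exit_code = subprocess.call(compiler + ["-fopenmp", "test.c"],
                                           stdout=fnull, stderr=fnull)
           return exit_code == 0
       except OSError:
           # e.g. the compiler binary does not exist at all
           return False
       finally:
           os.chdir(curdir)
           shutil.rmtree(tmpdir)
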
diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -17,7 +17,7 @@
 import abc
 import json
 import sys
-import h5py as h5
+from yt.utilities.on_demand_imports import _h5py as h5
 import os
 from uuid import uuid4
 from yt.extern.six.moves import urllib

diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -166,3 +166,97 @@
         return self._spatial
 
 _scipy = scipy_imports()
+
+class h5py_imports:
+    _name = "h5py"
+
+    _File = None
+    @property
+    def File(self):
+        if self._File is None:
+            try:
+                from h5py import File
+            except ImportError:
+                File = NotAModule(self._name)
+            self._File = File
+        return self._File
+
+    _Group = None
+    @property
+    def Group(self):
+        if self._Group is None:
+            try:
+                from h5py import Group
+            except ImportError:
+                Group = NotAModule(self._name)
+            self._Group = Group
+        return self._Group
+
+    ___version__ = None
+    @property
+    def __version__(self):
+        if self.___version__ is None:
+            try:
+                from h5py import __version__
+            except ImportError:
+                __version__ = NotAModule(self._name)
+            self.___version__ = __version__
+        return self.___version__
+
+    _get_config = None
+    @property
+    def get_config(self):
+        if self._get_config is None:
+            try:
+                from h5py import get_config
+            except ImportError:
+                get_config = NotAModule(self._name)
+            self._get_config = get_config
+        return self._get_config
+
+    _h5f = None
+    @property
+    def h5f(self):
+        if self._h5f is None:
+            try:
+                import h5py.h5f as h5f
+            except ImportError:
+                h5f = NotAModule(self._name)
+            self._h5f = h5f
+        return self._h5f
+
+    _h5d = None
+    @property
+    def h5d(self):
+        if self._h5d is None:
+            try:
+                import h5py.h5d as h5d
+            except ImportError:
+                h5d = NotAModule(self._name)
+            self._h5d = h5d
+        return self._h5d
+
+    _h5s = None
+    @property
+    def h5s(self):
+        if self._h5s is None:
+            try:
+                import h5py.h5s as h5s
+            except ImportError:
+                h5s = NotAModule(self._name)
+            self._h5s = h5s
+        return self._h5s
+
+    _version = None
+    @property
+    def version(self):
+        if self._version is None:
+            try:
+                import h5py.version as version
+            except ImportError:
+                version = NotAModule(self._name)
+            self._version = version
+        return self._version
+
+_h5py = h5py_imports()
+

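Each property imports its target lazily on first access, caches the result, and substitutes a placeholder when h5py is absent, so ``import yt`` never hard-requires h5py and the ImportError only surfaces at the point of use. A self-contained sketch of the pattern (the ``NotAModule`` stand-in here is simplified; yt's version raises a more informative error):

.. code-block:: python

   class NotAModule(object):
       """Placeholder that raises only when the missing module is used."""
       def __init__(self, name):
           self.name = name

       def __getattr__(self, item):
           raise ImportError("%s is not installed" % self.name)

   class h5py_imports(object):
       _name = "h5py"

       _File = None
       @property
       def File(self):
           # Import lazily on first access and cache the result.
           if self._File is None:
               try:
                   from h5py import File
               except ImportError:
                   File = NotAModule(self._name)
               self._File = File
           return self._File

   _h5py = h5py_imports()
   # _h5py.File("data.h5", "r") works if h5py is installed; otherwise
   # the ImportError is raised only at this point of use.
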
diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/utilities/png_writer.py
--- a/yt/utilities/png_writer.py
+++ b/yt/utilities/png_writer.py
@@ -10,20 +10,31 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import matplotlib
 import matplotlib._png as _png
 from yt.extern.six.moves import cStringIO
+from distutils.version import LooseVersion
+
+MPL_VERSION = LooseVersion(matplotlib.__version__)
+MPL_API_2_VERSION = LooseVersion("1.5.0")
+
+if MPL_VERSION < MPL_API_2_VERSION:
+    def call_png_write_png(buffer, width, height, filename, dpi):
+        _png.write_png(buffer, width, height, filename, dpi)
+else:
+    def call_png_write_png(buffer, width, height, filename, dpi):
+        _png.write_png(buffer, filename, dpi)
 
 def write_png(buffer, filename, dpi=100):
     width = buffer.shape[1]
     height = buffer.shape[0]
-    _png.write_png(buffer, width, height, filename, dpi)
+    call_png_write_png(buffer, width, height, filename, dpi)
 
 def write_png_to_string(buffer, dpi=100, gray=0):
     width = buffer.shape[1]
     height = buffer.shape[0]
     fileobj = cStringIO()
-    _png.write_png(buffer, width, height, fileobj, dpi)
+    call_png_write_png(buffer, width, height, fileobj, dpi)
     png_str = fileobj.getvalue()
     fileobj.close()
     return png_str
-

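matplotlib 1.5 changed ``_png.write_png`` to take the output target directly rather than explicit width and height, so the module now selects one adapter at import time and every call site keeps a single uniform signature. The shim in isolation (a sketch assuming a pre-3.0 matplotlib, where the private ``_png`` module still exists):

.. code-block:: python

   from distutils.version import LooseVersion

   import matplotlib
   import matplotlib._png as _png

   MPL_VERSION = LooseVersion(matplotlib.__version__)
   MPL_API_2_VERSION = LooseVersion("1.5.0")

   # Branch once at import time rather than on every call.
   if MPL_VERSION < MPL_API_2_VERSION:
       def call_png_write_png(buffer, width, height, filename, dpi):
           # old API: image dimensions passed explicitly
           _png.write_png(buffer, width, height, filename, dpi)
   else:
       def call_png_write_png(buffer, width, height, filename, dpi):
           # new API: dimensions inferred from the buffer
           _png.write_png(buffer, filename, dpi)
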
diff -r 3d66ace92d502a7722be66cf6d88965579b00326 -r 7801033d04e69d5429d591b176b752c00ae5a4ac yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -12,7 +12,7 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.funcs import mylog


https://bitbucket.org/yt_analysis/yt/commits/fb875d746266/
Changeset:   fb875d746266
Branch:      yt
User:        brittonsmith
Date:        2015-10-08 14:59:19+00:00
Summary:     Switching h5py to on-demand import.
Affected #:  2 files

diff -r 7801033d04e69d5429d591b176b752c00ae5a4ac -r fb875d7462660e21a2470abee4daa4fe9d73b987 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -16,7 +16,6 @@
 
 from collections import \
     defaultdict
-import h5py
 from numbers import \
     Number as numeric_type
 import numpy as np
@@ -53,6 +52,8 @@
     Cosmology
 from yt.utilities.exceptions import \
     YTFieldTypeNotFound
+from yt.utilities.on_demand_imports import \
+    _h5py as h5py
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 

diff -r 7801033d04e69d5429d591b176b752c00ae5a4ac -r fb875d7462660e21a2470abee4daa4fe9d73b987 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -21,6 +21,8 @@
     YTArray
 from yt.utilities.logger import \
     ytLogger as mylog
+from yt.utilities.on_demand_imports import \
+    _h5py as h5py
 
 def save_as_dataset(ds, filename, data, field_types=None,
                     extra_attrs=None):


https://bitbucket.org/yt_analysis/yt/commits/feac7a7e7be7/
Changeset:   feac7a7e7be7
Branch:      yt
User:        brittonsmith
Date:        2015-10-09 08:30:16+00:00
Summary:     Had one reference to open h5py file left in there.
Affected #:  1 file

diff -r fb875d7462660e21a2470abee4daa4fe9d73b987 -r feac7a7e7be74b8e542450439944ae5f35b62ca4 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -540,7 +540,7 @@
             for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
                 ds[attr] = getattr(self.cosmology, attr)
             ds["current_time"] = \
-              self.cosmology.t_from_z(fh.attrs["current_redshift"])
+              self.cosmology.t_from_z(ds["current_redshift"])
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
         save_as_dataset(ds, filename, data, field_types=field_types,


https://bitbucket.org/yt_analysis/yt/commits/47963a85c82b/
Changeset:   47963a85c82b
Branch:      yt
User:        brittonsmith
Date:        2015-10-10 21:06:43+00:00
Summary:     Adding section in plotting docs on saving datasets.
Affected #:  1 file

diff -r feac7a7e7be74b8e542450439944ae5f35b62ca4 -r 47963a85c82b5a1d3c316846388b3be1df1c51cf doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1284,6 +1284,79 @@
    bananas_Slice_z_kT.eps
    bananas_Slice_z_density.eps
 
+Remaking Figures from Plot Datasets
+-----------------------------------
+
+When working with datasets that are too large to be stored locally,
+making figures just right can be cumbersome as it requires continuously
+moving images somewhere they can be viewed.  However, image creation is
+actually a two-step process of first creating the projection, slice,
+or profile object, and then converting that object into an actual image.
+Fortunately, the hard part (creating slices, projections, profiles) can
+be separated from the easy part (generating images).  The intermediate
+slice, projection, and profile objects can be saved as reloadable
+datasets, then handed back to the plotting machinery discussed here.
+
+For slices and projections, the savable object is associated with the
+plot object as ``data_source``.  This can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.  For
+more information, see :ref:`saving_data`.
+
+.. code-block:: python
+
+   p = yt.ProjectionPlot(ds, "x", "density",
+                         weight_field="density")
+   fn = p.data_source.save_as_dataset()
+
+This function will optionally take a ``filename`` keyword that follows
+the same logic as discussed above in :ref:`saving_plots`.  The filename
+to which the dataset was written will be returned.
+
+Once saved, this file can be reloaded completely independently of the
+original dataset and given back to the plot function with the same
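Restoring the ``input_file`` name keeps the public signature stable while still accepting an already-loaded ray dataset; the body simply normalizes either form to a dataset before use. The string-or-object pattern in isolation (a sketch; ``load`` stands in for ``yt.load``):

.. code-block:: python

   def normalize_input(input_file):
       """Accept a filename or a loaded dataset; always return a dataset."""
       from yt import load
       if isinstance(input_file, str):
           # a path was given: load it
           return load(input_file)
       # already a dataset: pass it through
       return input_file
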
+arguments.  One can now continue to tweak the figure to one's liking.
+
+.. code-block:: python
+
+   new_ds = yt.load(fn)
+   new_p = yt.ProjectionPlot(new_ds, "x", "density",
+                             weight_field="density")
+   new_p.save()
+
+The same functionality is available for profile and phase plots.  In
+each case, a special data container, ``data``, is given to the plotting
+functions.
+
+For ``ProfilePlot``:
+
+.. code-block:: python
+
+   ad = ds.all_data()
+   p1 = yt.ProfilePlot(ad, "density", "temperature",
+                       weight_field="cell_mass")
+
+   # note that ProfilePlots can hold a list of profiles
+   fn = p1.profiles[0].save_as_dataset()
+
+   new_ds = yt.load(fn)
+   p2 = yt.ProfilePlot(new_ds.data, "density", "temperature",
+                       weight_field="cell_mass")
+   p2.save()
+
+For ``PhasePlot``:
+
+.. code-block:: python
+
+   ad = ds.all_data()
+   p1 = yt.PhasePlot(ad, "density", "temperature",
+                     "cell_mass", weight_field=None)
+   fn = p1.profile.save_as_dataset()
+
+   new_ds = yt.load(fn)
+   p2 = yt.PhasePlot(new_ds.data, "density", "temperature",
+                     "cell_mass", weight_field=None)
+   p2.save()
+
 .. _eps-writer:
 
 Publication-ready Figures


https://bitbucket.org/yt_analysis/yt/commits/628ae583cadb/
Changeset:   628ae583cadb
Branch:      yt
User:        brittonsmith
Date:        2015-10-15 15:56:07+00:00
Summary:     Converting some code-blocks to notebook-cells in ytdata docs and fixing two typos.
Affected #:  1 file

diff -r 47963a85c82b5a1d3c316846388b3be1df1c51cf -r 628ae583cadb7fe392804d84586f7e290abbc0e1 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -29,13 +29,14 @@
 Data from geometric data containers can be saved with the
 :func:`~yt.data_objects.data_containers.save_as_dataset` function.
 
-.. code-block:: python
+.. notebook-cell:: python
 
    import yt
    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
 
    sphere = ds.sphere([0.5]*3, (10, "Mpc"))
    fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+   print fn
 
 This function will return the name of the file to which the dataset
 was saved.  The filename will be a combination of the name of the
@@ -148,7 +149,7 @@
 datasets.  Geometric selection is not possible, but data can be
 accessed through the ``.data`` attribute.
 
-.. code-block:: python
+.. notebook-cell:: python
 
    import yt
    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
@@ -167,7 +168,7 @@
 
 .. code-block:: python
 
-   print prof_2d_ds.data.data["x"]
+   print prof_2d_ds.data["x"]
 
 The bin fields can also be returned with the same shape as the profile
 data by accessing them with their original names.  This allows for
@@ -176,7 +177,7 @@
 .. code-block:: python
 
    # density is the x bin field
-   print prof_2d_ds.data.data["density"]
+   print prof_2d_ds.data["density"]
 
 For 1, 2, and 3D profile datasets, a fake profile object will be
 constructed by accessing the ".profile" attribute.  This is used
@@ -201,7 +202,10 @@
 selection is not possible, but the data can be accessed through the
 ``.data`` attribute.
 
-.. code-block:: python
+.. notebook-cell:: python
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
 
    region = ds.box([0.25]*3, [0.75]*3)
    sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
@@ -218,7 +222,9 @@
 dataset has been loaded, a fake dataset can be provided as a
 dictionary.
 
-.. code-block:: python
+.. notebook-cell:: python
+
+   import yt
 
    my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
               "temperature": yt.YTArray(np.random.random(10), "K")}


https://bitbucket.org/yt_analysis/yt/commits/9ddc25cc4835/
Changeset:   9ddc25cc4835
Branch:      yt
User:        brittonsmith
Date:        2015-10-15 16:03:47+00:00
Summary:     Adding output to docstring.
Affected #:  1 file

diff -r 628ae583cadb7fe392804d84586f7e290abbc0e1 -r 9ddc25cc4835e148fd5f98dc4663cfd69cc8a75c yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -61,7 +61,7 @@
 
     >>> import yt
     >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc")
+    >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
     >>> sphere_density = sphere["density"]
     >>> region = ds.box([0.]*3, [0.25]*3)
     >>> region_density = region["density"]
@@ -71,14 +71,18 @@
     >>> yt.save_as_dataset(ds, "density_data.h5", data)
     >>> new_ds = yt.load("density_data.h5")
     >>> print new_ds.data["region_density"]
+    [  7.47650434e-32   7.70370740e-32   9.74692941e-32 ...,   1.22384547e-27
+       5.13889063e-28   2.91811974e-28] g/cm**3
     >>> print new_ds.data["sphere_density"]
-
-    >>> data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
-    ...         "temperature": yt.YTArray(np.random.random(10), "K")}
+    [  4.46237613e-32   4.86830178e-32   4.46335118e-32 ...,   6.43956165e-30
+       3.57339907e-30   2.83150720e-30] g/cm**3
+    >>> data = {"density": yt.YTArray(1e-24 * np.ones(10), "g/cm**3"),
+    ...         "temperature": yt.YTArray(1000. * np.ones(10), "K")}
     >>> ds_data = {"current_time": yt.YTQuantity(10, "Myr")}
     >>> yt.save_as_dataset(ds_data, "random_data.h5", data)
     >>> new_ds = yt.load("random_data.h5")
     >>> print new_ds.data["temperature"]
+    [ 1000.  1000.  1000.  1000.  1000.  1000.  1000.  1000.  1000.  1000.] K
 
     """
 


https://bitbucket.org/yt_analysis/yt/commits/5babb9d0a8de/
Changeset:   5babb9d0a8de
Branch:      yt
User:        brittonsmith
Date:        2015-10-15 16:13:40+00:00
Summary:     Adding output to more docstrings.
Affected #:  3 files

diff -r 9ddc25cc4835e148fd5f98dc4663cfd69cc8a75c -r 5babb9d0a8de152e27a09afbde03fad2af1fa4cb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -500,10 +500,12 @@
         >>> import yt
         >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
         >>> sp = ds.sphere(ds.domain_center, (10, "Mpc"))
-        >>> fn = sp.save_as_dataset(["density", "temperature"])
+        >>> fn = sp.save_as_dataset(fields=["density", "temperature"])
         >>> sphere_ds = yt.load(fn)
         >>> ad = sphere_ds.all_data()
         >>> print ad["temperature"]
+        [  1.00000000e+00   1.00000000e+00   1.00000000e+00 ...,   4.40108359e+04
+           4.54380547e+04   4.72560117e+04] K
 
         """
 

diff -r 9ddc25cc4835e148fd5f98dc4663cfd69cc8a75c -r 5babb9d0a8de152e27a09afbde03fad2af1fa4cb yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -983,10 +983,14 @@
         >>> fn = profile.save_as_dataset()
         >>> prof_ds = yt.load(fn)
         >>> print prof_ds.data["cell_mass"].shape
-        >>> print prof_ds.data["x"]
-        >>> print prof_ds.data["density"]
+        (128, 128)
+        >>> print prof_ds.data["x"].shape # x bins as 1D array
+        (128,)
+        >>> print prof_ds.data["density"].shape # x bins as 2D array
+        (128, 128)
         >>> p = yt.PhasePlot(prof_ds.data, "density", "temperature",
         ...                  "cell_mass", weight_field=None)
+        >>> p.save()
 
         """
 

diff -r 9ddc25cc4835e148fd5f98dc4663cfd69cc8a75c -r 5babb9d0a8de152e27a09afbde03fad2af1fa4cb yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -417,6 +417,19 @@
         >>> fn = frb.save_as_dataset(fields=["density"])
         >>> ds2 = yt.load(fn)
         >>> print ds2.data["density"]
+        [[  1.25025353e-30   1.25025353e-30   1.25025353e-30 ...,   7.90820691e-31
+            7.90820691e-31   7.90820691e-31]
+         [  1.25025353e-30   1.25025353e-30   1.25025353e-30 ...,   7.90820691e-31
+            7.90820691e-31   7.90820691e-31]
+         [  1.25025353e-30   1.25025353e-30   1.25025353e-30 ...,   7.90820691e-31
+            7.90820691e-31   7.90820691e-31]
+         ...,
+         [  1.55834239e-30   1.55834239e-30   1.55834239e-30 ...,   8.51353199e-31
+            8.51353199e-31   8.51353199e-31]
+         [  1.55834239e-30   1.55834239e-30   1.55834239e-30 ...,   8.51353199e-31
+            8.51353199e-31   8.51353199e-31]
+         [  1.55834239e-30   1.55834239e-30   1.55834239e-30 ...,   8.51353199e-31
+            8.51353199e-31   8.51353199e-31]] g/cm**3
 
         """
 


https://bitbucket.org/yt_analysis/yt/commits/ce996df25c88/
Changeset:   ce996df25c88
Branch:      yt
User:        brittonsmith
Date:        2015-10-21 13:52:57+00:00
Summary:     Removing unnecessary variables that assumed the presence of grid data and therefore broke particle-only datasets.
Affected #:  1 file

diff -r 5babb9d0a8de152e27a09afbde03fad2af1fa4cb -r ce996df25c88deaa8273919164646a48c5572ea7 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -203,7 +203,6 @@
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
             all_count = self._count_particles(data_file)
-            pcount = all_count["grid"]
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     x = _get_position_array(ptype, f, "x")
@@ -299,7 +298,6 @@
                 data_files.update(obj.data_files)
         for data_file in sorted(data_files):
             all_count = self._count_particles(data_file)
-            pcount = all_count["grid"]
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     x = _get_position_array(ptype, f, "px")


https://bitbucket.org/yt_analysis/yt/commits/fed0dd1e488b/
Changeset:   fed0dd1e488b
Branch:      yt
User:        brittonsmith
Date:        2015-10-21 14:03:43+00:00
Summary:     Linking docs to specific section.
Affected #:  2 files

diff -r ce996df25c88deaa8273919164646a48c5572ea7 -r fed0dd1e488bc86598251d2f8a10a9f02c5d267a doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -19,7 +19,7 @@
   * generic array data
 
 In the case of projections, slices, and profiles, reloaded data can be
-used to remake plots using the methods decribed in :ref:`how-to-make-plots`.
+used to remake plots.  For information on this, see :ref:`remaking-plots`.
 
 .. _saving-data-containers:
 

diff -r ce996df25c88deaa8273919164646a48c5572ea7 -r fed0dd1e488bc86598251d2f8a10a9f02c5d267a doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1284,6 +1284,8 @@
    bananas_Slice_z_kT.eps
    bananas_Slice_z_density.eps
 
+.. _remaking-plots:
+
 Remaking Figures from Plot Datasets
 -----------------------------------
 


https://bitbucket.org/yt_analysis/yt/commits/55816e188eef/
Changeset:   55816e188eef
Branch:      yt
User:        brittonsmith
Date:        2015-10-21 14:11:19+00:00
Summary:     Clarifying docstring.
Affected #:  1 file

diff -r fed0dd1e488bc86598251d2f8a10a9f02c5d267a -r 55816e188eefbe7bd911828d4aff29e5f3a8abdf yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -398,9 +398,9 @@
             will be a combination of the original dataset and the type 
             of data container.
         fields : list of strings or tuples, optional
-            If this is supplied, it is the list of fields to be exported into
-            the data frame.  If not supplied, whatever fields presently exist
-            will be used.
+            If this is supplied, it is the list of fields to be saved to
+            disk.  If not supplied, all the fields that have been queried
+            will be saved.
 
         Returns
         -------


https://bitbucket.org/yt_analysis/yt/commits/ff4115e47720/
Changeset:   ff4115e47720
Branch:      yt
User:        brittonsmith
Date:        2015-10-21 14:41:03+00:00
Summary:     Clarifying docstring.
Affected #:  1 file

diff -r 55816e188eefbe7bd911828d4aff29e5f3a8abdf -r ff4115e477207a1db437cee482c7259381eb1eed yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -485,9 +485,9 @@
             will be a combination of the original dataset and the type 
             of data container.
         fields : list of strings or tuples, optional
-            If this is supplied, it is the list of fields to be exported into
-            the data frame.  If not supplied, whatever fields presently exist
-            will be used.
+            If this is supplied, it is the list of fields to be saved to
+            disk.  If not supplied, all the fields that have been queried
+            will be saved.
 
         Returns
         -------


https://bitbucket.org/yt_analysis/yt/commits/22b25c3f4e2d/
Changeset:   22b25c3f4e2d
Branch:      yt
User:        brittonsmith
Date:        2015-10-21 15:48:57+00:00
Summary:     Making gas fields alias grid fields for all ytdatasets.
Affected #:  1 file

diff -r ff4115e477207a1db437cee482c7259381eb1eed -r 22b25c3f4e2dc2b6ef3d04f3eb5e278ba8a48a8f yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -30,6 +30,8 @@
 
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.data_objects.particle_unions import \
+    ParticleUnion
 from yt.data_objects.profiles import \
     Profile1DFromDataset, \
     Profile2DFromDataset, \
@@ -80,6 +82,32 @@
         self.unique_identifier = \
           int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
+    def create_field_info(self):
+        self.field_dependencies = {}
+        self.derived_field_list = []
+        self.filtered_particle_types = []
+        self.field_info = self._field_info_class(self, self.field_list)
+        self.coordinates.setup_fields(self.field_info)
+        self.field_info.setup_fluid_fields()
+        for ptype in self.particle_types:
+            self.field_info.setup_particle_fields(ptype)
+
+        for ftype, field in self.field_list:
+            if ftype == self.default_fluid_type:
+                self.field_info.alias(
+                    ("gas", field),
+                    (self.default_fluid_type, field))
+
+        if "all" not in self.particle_types:
+            mylog.debug("Creating Particle Union 'all'")
+            pu = ParticleUnion("all", list(self.particle_types_raw))
+            self.add_particle_union(pu)
+        self.field_info.setup_extra_union_fields()
+        mylog.info("Loading field plugins.")
+        self.field_info.load_all_plugins()
+        deps, unloaded = self.field_info.check_derived_fields()
+        self.field_dependencies.update(deps)
+
     def _set_code_unit_attributes(self):
         attrs = ('length_unit', 'mass_unit', 'time_unit',
                  'velocity_unit', 'magnetic_unit')
@@ -297,14 +325,6 @@
             self.domain_dimensions = \
               np.concatenate([self.parameters["ActiveDimensions"], [1]])
 
-    def create_field_info(self):
-        super(YTGridDataset, self).create_field_info()
-        for ftype, field in self.field_list:
-            if ftype == self.default_fluid_type:
-                self.field_info.alias(
-                    ("gas", field),
-                    (self.default_fluid_type, field))
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
@@ -481,7 +501,6 @@
 
     @parallel_root_only
     def print_key_parameters(self):
-        mylog.info("YTArrayDataset")
         for a in ["current_time", "domain_dimensions", "domain_left_edge",
                   "domain_right_edge", "cosmological_simulation"]:
             v = getattr(self, a)


https://bitbucket.org/yt_analysis/yt/commits/bdf9487cb73e/
Changeset:   bdf9487cb73e
Branch:      yt
User:        brittonsmith
Date:        2015-10-22 11:30:19+00:00
Summary:     Making sure gas gets aliases to grid properly for both grid and particle datasets.
Affected #:  1 file

diff -r 22b25c3f4e2dc2b6ef3d04f3eb5e278ba8a48a8f -r bdf9487cb73e2ba389e2084c392d18614beb1e2e yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -92,11 +92,7 @@
         for ptype in self.particle_types:
             self.field_info.setup_particle_fields(ptype)
 
-        for ftype, field in self.field_list:
-            if ftype == self.default_fluid_type:
-                self.field_info.alias(
-                    ("gas", field),
-                    (self.default_fluid_type, field))
+        self._setup_gas_alias()
 
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")
@@ -157,6 +153,14 @@
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
 
+    def _setup_gas_alias(self):
+        "Alias the grid type to gas by making a particle union."
+
+        if "grid" in self.particle_types and \
+          "gas" not in self.particle_types:
+            pu = ParticleUnion("gas", ["grid"])
+            self.add_particle_union(pu)
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
@@ -325,6 +329,13 @@
             self.domain_dimensions = \
               np.concatenate([self.parameters["ActiveDimensions"], [1]])
 
+    def _setup_gas_alias(self):
+        "Alias the grid type to gas with a field alias."
+
+        for ftype, field in self.field_list:
+            if ftype == "grid":
+                self.field_info.alias(("gas", field), ("grid", field))
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False


https://bitbucket.org/yt_analysis/yt/commits/6e14f058bb13/
Changeset:   6e14f058bb13
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 07:38:36+00:00
Summary:     Fixing docs to use notebook-cell properly and making them work in python3.
Affected #:  1 file

diff -r bdf9487cb73e2ba389e2084c392d18614beb1e2e -r 6e14f058bb135c822330d5233d97c3c36132d48b doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -29,14 +29,14 @@
 Data from geometric data containers can be saved with the
 :func:`~yt.data_objects.data_containers.save_as_dataset` function.
 
-.. notebook-cell:: python
+.. notebook-cell::
 
    import yt
    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
 
    sphere = ds.sphere([0.5]*3, (10, "Mpc"))
    fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
-   print fn
+   print (fn)
 
 This function will return the name of the file to which the dataset
 was saved.  The filename will be a combination of the name of the
@@ -62,13 +62,13 @@
    ad = sphere_ds.all_data()
 
    # grid data
-   print ad["grid", "density"]
-   print ad["grid", "x"]
-   print ad["grid", "dx"]
+   print (ad["grid", "density"])
+   print (ad["grid", "x"])
+   print (ad["grid", "dx"])
 
    # particle data
-   print ad["all", "particle_mass"]
-   print ad["all", "particle_position_x"]
+   print (ad["all", "particle_mass"])
+   print (ad["all", "particle_position_x"])
 
 Note that because field data queried from geometric containers is
 returned as unordered 1D arrays, data container datasets are treated,
@@ -94,14 +94,14 @@
 
    cg_ds = yt.load(fn)
    ad = cg_ds.all_data()
-   print ad["grid", "density"]
+   print (ad["grid", "density"])
 
 Multidimensional indexing of field data is also available through
 the ``data`` attribute.
 
 .. code-block:: python
 
-   print cg_ds.data["grid", "density"]
+   print (cg_ds.data["grid", "density"])
 
 Fixed resolution buffers work just the same.
 
@@ -111,7 +111,7 @@
    frb = my_proj.to_frb(1.0, (800, 800))
    fn = frb.save_as_dataset(fields=["density"])
    frb_ds = yt.load(fn)
-   print frb_ds.data["density"]
+   print (frb_ds.data["density"])
 
 .. _saving-spatial-plots:
 
@@ -149,7 +149,7 @@
 datasets.  Geometric selection is not possible, but data can be
 accessed through the ``.data`` attribute.
 
-.. notebook-cell:: python
+.. notebook-cell::
 
    import yt
    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
@@ -161,14 +161,14 @@
    profile_2d.save_as_dataset()
 
    prof_2d_ds = yt.load("DD0046_Profile2D.h5")
-   print prof_2d_ds.data["cell_mass"]
+   print (prof_2d_ds.data["cell_mass"])
 
 The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D
 arrays with "x", "y", and "z".
 
 .. code-block:: python
 
-   print prof_2d_ds.data["x"]
+   print (prof_2d_ds.data["x"])
 
 The bin fields can also be returned with the same shape as the profile
 data by accessing them with their original names.  This allows for
@@ -177,7 +177,7 @@
 .. code-block:: python
 
    # density is the x bin field
-   print prof_2d_ds.data["density"]
+   print (prof_2d_ds.data["density"])
 
 For 1, 2, and 3D profile datasets, a fake profile object will be
 constructed by accessing the ".profile" attribute.  This is used
@@ -202,7 +202,7 @@
 selection is not possible, but the data can be accessed through the
 ``.data`` attribute.
 
-.. notebook-cell:: python
+.. notebook-cell::
 
    import yt
    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
@@ -215,15 +215,16 @@
    yt.save_as_dataset(ds, "test_data.h5", my_data)
 
    array_ds = yt.load("test_data.h5")
-   print array_ds.data["region_density"]
-   print array_ds.data["sphere_density"]
+   print (array_ds.data["region_density"])
+   print (array_ds.data["sphere_density"])
 
 Array data can be saved with or without a dataset loaded.  If no
 dataset has been loaded, a fake dataset can be provided as a
 dictionary.
 
-.. notebook-cell:: python
+.. notebook-cell::
 
+   import numpy as np
    import yt
 
    my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
@@ -232,4 +233,4 @@
    yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
 
    new_ds = yt.load("random_data.h5")
-   print new_ds.data["density"]
+   print (new_ds.data["density"])


https://bitbucket.org/yt_analysis/yt/commits/cda05f1dbec5/
Changeset:   cda05f1dbec5
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 07:42:49+00:00
Summary:     Making docstrings compatible with python3.
Affected #:  3 files

diff -r 6e14f058bb135c822330d5233d97c3c36132d48b -r cda05f1dbec51a2bdb7224cd6b0e0d1a788d70a7 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -982,11 +982,11 @@
         ...                             n_bins=(128, 128))
         >>> fn = profile.save_as_dataset()
         >>> prof_ds = yt.load(fn)
-        >>> print prof_ds.data["cell_mass"].shape
+        >>> print (prof_ds.data["cell_mass"].shape)
         (128, 128)
-        >>> print prof_ds.data["x"].shape # x bins as 1D array
+        >>> print (prof_ds.data["x"].shape) # x bins as 1D array
         (128,)
-        >>> print prof_ds.data["density"].shape # x bins as 2D array
+        >>> print (prof_ds.data["density"].shape) # x bins as 2D array
         (128, 128)
         >>> p = yt.PhasePlot(prof_ds.data, "density", "temperature",
         ...                  "cell_mass", weight_field=None)

diff -r 6e14f058bb135c822330d5233d97c3c36132d48b -r cda05f1dbec51a2bdb7224cd6b0e0d1a788d70a7 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -59,6 +59,7 @@
     Examples
     --------
 
+    >>> import numpy as np
     >>> import yt
     >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
     >>> sphere = ds.sphere([0.5]*3, (10, "Mpc"))
@@ -70,10 +71,10 @@
     >>> data["region_density"] = region_density
     >>> yt.save_as_dataset(ds, "density_data.h5", data)
     >>> new_ds = yt.load("density_data.h5")
-    >>> print new_ds.data["region_density"]
+    >>> print (new_ds.data["region_density"])
     [  7.47650434e-32   7.70370740e-32   9.74692941e-32 ...,   1.22384547e-27
        5.13889063e-28   2.91811974e-28] g/cm**3
-    >>> print new_ds.data["sphere_density"]
+    >>> print (new_ds.data["sphere_density"])
     [  4.46237613e-32   4.86830178e-32   4.46335118e-32 ...,   6.43956165e-30
        3.57339907e-30   2.83150720e-30] g/cm**3
     >>> data = {"density": yt.YTArray(1e-24 * np.ones(10), "g/cm**3"),
@@ -81,7 +82,7 @@
     >>> ds_data = {"current_time": yt.YTQuantity(10, "Myr")}
     >>> yt.save_as_dataset(ds_data, "random_data.h5", data)
     >>> new_ds = yt.load("random_data.h5")
-    >>> print new_ds.data["temperature"]
+    >>> print (new_ds.data["temperature"])
     [ 1000.  1000.  1000.  1000.  1000.  1000.  1000.  1000.  1000.  1000.] K
 
     """

diff -r 6e14f058bb135c822330d5233d97c3c36132d48b -r cda05f1dbec51a2bdb7224cd6b0e0d1a788d70a7 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -416,7 +416,7 @@
         >>> frb = proj.to_frb(1.0, (800, 800))
         >>> fn = frb.save_as_dataset(fields=["density"])
         >>> ds2 = yt.load(fn)
-        >>> print ds2.data["density"]
+        >>> print (ds2.data["density"])
         [[  1.25025353e-30   1.25025353e-30   1.25025353e-30 ...,   7.90820691e-31
             7.90820691e-31   7.90820691e-31]
          [  1.25025353e-30   1.25025353e-30   1.25025353e-30 ...,   7.90820691e-31


https://bitbucket.org/yt_analysis/yt/commits/48651908938f/
Changeset:   48651908938f
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 10:40:44+00:00
Summary:     Fixing a couple docstring print statements to be python3 compatible.
Affected #:  2 files

diff -r cda05f1dbec51a2bdb7224cd6b0e0d1a788d70a7 -r 48651908938fb7591108e477ea2a3b52e03c7457 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -503,7 +503,7 @@
         >>> fn = sp.save_as_dataset(fields=["density", "temperature"])
         >>> sphere_ds = yt.load(fn)
         >>> ad = sphere_ds.all_data()
-        >>> print ad["temperature"]
+        >>> print (ad["temperature"])
         [  1.00000000e+00   1.00000000e+00   1.00000000e+00 ...,   4.40108359e+04
            4.54380547e+04   4.72560117e+04] K
 

diff -r cda05f1dbec51a2bdb7224cd6b0e0d1a788d70a7 -r 48651908938fb7591108e477ea2a3b52e03c7457 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1560,8 +1560,8 @@
     >>> profile = create_profile(ad, [("gas", "density")],
     ...                              [("gas", "temperature"),
     ...                               ("gas", "velocity_x")])
-    >>> print profile.x
-    >>> print profile["gas", "temperature"]
+    >>> print (profile.x)
+    >>> print (profile["gas", "temperature"])
 
     """
     bin_fields = data_source._determine_fields(bin_fields)


https://bitbucket.org/yt_analysis/yt/commits/90ebe7f324bf/
Changeset:   90ebe7f324bf
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 11:20:55+00:00
Summary:     Adding data property to recreate the original data container when possible.
Affected #:  2 files

diff -r 48651908938fb7591108e477ea2a3b52e03c7457 -r 90ebe7f324bff1e0ce8baff1f3496401473ed880 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -568,6 +568,7 @@
 
         extra_attrs = dict([(arg, getattr(self, arg, None))
                             for arg in self._con_args + self._tds_attrs])
+        extra_attrs["con_args"] = self._con_args
         extra_attrs["data_type"] = "yt_data_container"
         extra_attrs["container_type"] = self._type_name
         extra_attrs["dimensionality"] = self._dimensionality

diff -r 48651908938fb7591108e477ea2a3b52e03c7457 -r 90ebe7f324bff1e0ce8baff1f3496401473ed880 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -161,6 +161,33 @@
             pu = ParticleUnion("gas", ["grid"])
             self.add_particle_union(pu)
 
+    @property
+    def data(self):
+        """
+        Return a data container configured like the original used to
+        create this dataset.
+        """
+
+        # Some data containers can't be reconstructed in the same way
+        # since this is now particle-like data.
+        if self.parameters["container_type"] in \
+          ["cutting", "proj", "ray", "slice"]:
+            mylog.info("Returning an all_data data container.")
+            return self.all_data()
+
+        my_obj = getattr(self, self.parameters["container_type"])
+        my_args = []
+        for con_arg in self.parameters["con_args"]:
+            my_arg = self.parameters[con_arg]
+            my_units = self.parameters.get("%s_units" % con_arg)
+            if my_units is not None:
+                if isinstance(my_arg, np.ndarray):
+                    my_arg = self.arr(my_arg, my_units)
+                else:
+                    my_arg = self.quan(my_arg, my_units)
+            my_args.append(my_arg)
+        return my_obj(*my_args)
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False

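With ``con_args`` now stored alongside the other attributes, a reloaded container dataset can rebuild the original geometric object from its saved constructor arguments; projections, slices, rays, and cutting planes fall back to ``all_data`` since the saved data is particle-like. A usage sketch, assuming the enzo_tiny_cosmology sample dataset used throughout these docstrings:

.. code-block:: python

   import yt

   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
   sp = ds.sphere(ds.domain_center, (10, "Mpc"))
   fn = sp.save_as_dataset(fields=["density"])

   sphere_ds = yt.load(fn)
   # .data reconstructs a sphere from the saved con_args
   # (center and radius, restored with their stored units)
   print(sphere_ds.data["grid", "density"])
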

https://bitbucket.org/yt_analysis/yt/commits/a0daa2f42d17/
Changeset:   a0daa2f42d17
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 12:29:15+00:00
Summary:     Updating docstring.
Affected #:  1 file

diff -r 90ebe7f324bff1e0ce8baff1f3496401473ed880 -r a0daa2f42d1789b1c3288034065b6ae317226055 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -502,6 +502,10 @@
         >>> sp = ds.sphere(ds.domain_center, (10, "Mpc"))
         >>> fn = sp.save_as_dataset(fields=["density", "temperature"])
         >>> sphere_ds = yt.load(fn)
+        >>> # the original data container is available as the data attribute
+        >>> print (sphere_ds.data["density"])
+        [  4.46237613e-32   4.86830178e-32   4.46335118e-32 ...,   6.43956165e-30
+           3.57339907e-30   2.83150720e-30] g/cm**3
         >>> ad = sphere_ds.all_data()
         >>> print (ad["temperature"])
         [  1.00000000e+00   1.00000000e+00   1.00000000e+00 ...,   4.40108359e+04


https://bitbucket.org/yt_analysis/yt/commits/7f1d23987ae0/
Changeset:   7f1d23987ae0
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 12:45:22+00:00
Summary:     Updating narrative docs.
Affected #:  1 file

diff -r a0daa2f42d1789b1c3288034065b6ae317226055 -r 7f1d23987ae0663498cb178dfe402a6b35837436 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -47,9 +47,11 @@
 
 The newly created dataset can be loaded like all other supported
 data through ``yt.load``.  Once loaded, field data can be accessed
-through the traditional data containers.  Grid data is accessed by
-the ``grid`` data type and particle data is accessed with the
-original particle type.  As with the original dataset, grid
+through the traditional data containers or through the ``data``
+attribute, which will be a data container configured like the
+original data container used to make the dataset.  Grid data is
+accessed by the ``grid`` data type and particle data is accessed
+with the original particle type.  As with the original dataset, grid
 positions and cell sizes are accessible with, for example,
 ("grid", "x") and ("grid", "dx").  Particle positions are
 accessible as (<particle_type>, "particle_position_x").  All original
@@ -59,6 +61,11 @@
 .. code-block:: python
 
    sphere_ds = yt.load("DD0046_sphere.h5")
+
+   # use the original data container
+   print (sphere_ds.data["grid", "density"])
+
+   # create a new data container
    ad = sphere_ds.all_data()
 
    # grid data


https://bitbucket.org/yt_analysis/yt/commits/a9af378ec553/
Changeset:   a9af378ec553
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 12:51:29+00:00
Summary:     Removing redundant print statement.
Affected #:  1 file

diff -r 7f1d23987ae0663498cb178dfe402a6b35837436 -r a9af378ec55373bcac559d876f5354abb28cbd81 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -641,7 +641,6 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
         super(YTProfileDataset, self).print_key_parameters()
-        mylog.warn("Geometric data selection not available for this dataset type.")
 
     @classmethod
     def _is_valid(self, *args, **kwargs):


https://bitbucket.org/yt_analysis/yt/commits/cc3b15dc6d07/
Changeset:   cc3b15dc6d07
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 15:54:08+00:00
Summary:     Clarifying docstring.
Affected #:  1 file

diff -r a9af378ec55373bcac559d876f5354abb28cbd81 -r cc3b15dc6d0799ce0e60b685fc89a7ee779b9eb8 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -475,8 +475,8 @@
 
         This function will take a data object and output a dataset 
         containing either the fields presently existing or fields 
-        given in a list.  The resulting dataset can be reloaded as 
-        a yt dataset.
+        given in the ``fields`` list.  The resulting dataset can be
+        reloaded as a yt dataset.
 
         Parameters
         ----------


https://bitbucket.org/yt_analysis/yt/commits/a4484ed8c02b/
Changeset:   a4484ed8c02b
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 15:56:16+00:00
Summary:     Clarifying docstrings.
Affected #:  2 files

diff -r cc3b15dc6d0799ce0e60b685fc89a7ee779b9eb8 -r a4484ed8c02ba395774362c1e21a245a8d649196 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -955,9 +955,8 @@
         r"""Export a profile to a reloadable yt dataset.
 
         This function will take a profile and output a dataset
-        containing either the fields presently existing or fields
-        given in a list.  The resulting dataset can be reloaded as
-        a yt dataset.
+        containing all relevant fields.  The resulting dataset
+        can be reloaded as a yt dataset.
 
         Parameters
         ----------

diff -r cc3b15dc6d0799ce0e60b685fc89a7ee779b9eb8 -r a4484ed8c02ba395774362c1e21a245a8d649196 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -388,8 +388,8 @@
 
         This function will take a fixed resolution buffer and output a 
         dataset containing either the fields presently existing or fields 
-        given in a list.  The resulting dataset can be reloaded as 
-        a yt dataset.
+        given in the ``fields`` list.  The resulting dataset can be
+        reloaded as a yt dataset.
 
         Parameters
         ----------


https://bitbucket.org/yt_analysis/yt/commits/5af81f44829d/
Changeset:   5af81f44829d
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 16:01:55+00:00
Summary:     Returning to original arg name.
Affected #:  1 file

diff -r a4484ed8c02ba395774362c1e21a245a8d649196 -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -113,7 +113,7 @@
                                     'normalization': normalization,
                                     'index': index})
 
-    def make_spectrum(self, input_ds, output_file="spectrum.h5",
+    def make_spectrum(self, input_file, output_file="spectrum.h5",
                       line_list_file="lines.txt",
                       use_peculiar_velocity=True, njobs="auto"):
         """
@@ -122,7 +122,7 @@
         Parameters
         ----------
 
-        input_ds : string or dataset
+        input_file : string or dataset
            path to input ray data or a loaded ray dataset
         output_file : optional, string
            path for output file.  File formats are chosen based on the
@@ -167,8 +167,10 @@
                 input_fields.append(feature['field_name'])
                 field_units[feature["field_name"]] = "cm**-3"
 
-        if isinstance(input_ds, str):
-            input_ds = load(input_ds)
+        if isinstance(input_file, str):
+            input_ds = load(input_file)
+        else:
+            input_ds = input_file
         field_data = input_ds.all_data()
 
         self.tau_field = np.zeros(self.lambda_bins.size)


https://bitbucket.org/yt_analysis/yt/commits/7d263b8427ff/
Changeset:   7d263b8427ff
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 16:08:01+00:00
Summary:     Merging.
Affected #:  141 files

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -10,6 +10,7 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/analysis_modules/ppv_cube/ppv_utils.c
+yt/analysis_modules/photon_simulator/utils.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -132,7 +132,7 @@
 
 .. code:: python
 
-    apec_model = TableApecModel("atomdb_v2.0.2",
+    apec_model = TableApecModel("$SPECTRAL_DATA/spectral",
                                 0.01, 20.0, 20000,
                                 thermal_broad=False,
                                 apec_vers="2.0.2")

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -23,7 +23,7 @@
 ----------------------
 
 Mesh fields can be filtered by two methods: cut region objects 
-(:class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`) 
+(:class:`~yt.data_objects.selection_data_containers.YTCutRegion`) 
 and NumPy boolean masks.  Boolean masks are simpler, but they only work
 for examining datasets, whereas cut regions objects create wholly new
 data objects suitable for full analysis (data examination, image generation, 

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -176,7 +176,7 @@
 ---------------------------------
 
 To calculate the values along a line connecting two points in a simulation, you
-can use the object :class:`~yt.data_objects.selection_data_containers.YTRayBase`,
+can use the object :class:`~yt.data_objects.selection_data_containers.YTRay`,
 accessible as the ``ray`` property on a index.  (See :ref:`data-objects`
 for more information on this.)  To do so, you can supply two points and access
 fields within the returned object.  For instance, this code will generate a ray

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -1,4 +1,4 @@
-.. _data-objects:
+.. _Data-objects:
 
 Data Objects
 ============
@@ -97,7 +97,7 @@
 """"""""""
 
 **Point** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTPointBase`    
+    | Class :class:`~yt.data_objects.selection_data_containers.YTPoint`    
     | Usage: ``point(coord, ds=None, field_parameters=None, data_source=None)``
     | A point defined by a single cell at specified coordinates.
 
@@ -105,7 +105,7 @@
 """"""""""
 
 **Ray (Axis-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`
     | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) stretching through the full domain 
       aligned with one of the x,y,z axes.  Defined by an axis and a point
@@ -113,7 +113,7 @@
       :ref:`note about ray data value ordering <ray-data-ordering>`.
 
 **Ray (Arbitrarily-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTRayBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTRay`
     | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) defined by arbitrary start and end coordinates. 
       Please see this 
@@ -123,13 +123,13 @@
 """"""""""
 
 **Slice (Axis-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTSliceBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTSlice`
     | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to one of the axes and intersecting a particular 
       coordinate.
 
 **Slice (Arbitrarily-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlaneBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlane`
     | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
@@ -145,7 +145,7 @@
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
 **Box Region** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTRegionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A box-like region aligned with the grid axis orientation.  It is 
@@ -156,14 +156,14 @@
       is assumed to be the midpoint between the left and right edges.
 
 **Disk/Cylinder** 
-    | Class: :class:`~yt.data_objects.selection_data_containers.YTDiskBase`
+    | Class: :class:`~yt.data_objects.selection_data_containers.YTDisk`
     | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A cylinder defined by a point at the center of one of the circular bases,
       a normal vector to it defining the orientation of the length of the
       cylinder, and radius and height values for the cylinder's dimensions.
 
 **Ellipsoid** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoidBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoid`
     | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None, data_source=None)``
     | An ellipsoid with axis magnitudes set by semi_major_axis_length, 
      semi_medium_axis_length, and semi_minor_axis_length.  semi_major_vector 
@@ -171,7 +171,7 @@
      of the semi-medium and semi_minor axes.
 
 **Sphere** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTSphereBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTSphere`
     | Usage: ``sphere(center, radius, ds=None, field_parameters=None, data_source=None)``
     | A sphere defined by a central coordinate and a radius.
 
@@ -194,7 +194,7 @@
     | See :ref:`boolean_data_objects`.
 
 **Filter** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTCutRegion`
     | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
     | A ``cut_region`` is a filter which can be applied to any other data 
       object.  The filter is defined by the conditionals present, which 
@@ -203,7 +203,7 @@
       For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTDataCollectionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTDataCollection`
     | Usage: ``data_collection(center, obj_list, ds=None, field_parameters=None)``
     | A ``data_collection`` is a list of data objects that can be 
       sampled and processed as a whole in a single data object.
@@ -214,13 +214,13 @@
 ^^^^^^^^^^^^^^^^^^^^
 
 **Fixed-Resolution Region** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTCoveringGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTCoveringGrid`
     | Usage: ``covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted to a single, specified resolution.
       See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region with Smoothing** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTSmoothedCoveringGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTSmoothedCoveringGrid`
     | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted and interpolated to a single, 
       specified resolution.  Identical to covering_grid, except that it 
@@ -228,7 +228,7 @@
       :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region for Particle Deposition** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
    | When particles are deposited onto mesh fields, they use the existing
       mesh structure, but this may have too much or too little resolution
@@ -238,7 +238,7 @@
       information.
 
 **Projection** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`
     | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
@@ -248,14 +248,14 @@
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTStreamlineBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTStreamline`
     | Usage: ``streamline(coord_list, length, fields=None, ds=None, field_parameters=None)``
     | A ``streamline`` can be traced out by identifying a starting coordinate (or 
       list of coordinates) and allowing it to trace a vector field, like gas
       velocity.  See :ref:`streamlines` for more information.
 
 **Surface** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTSurfaceBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTSurface`
     | Usage: ``surface(data_source, field, field_value)``
    | The surface defined by an isocontour in any mesh field.  An existing
       data object must be provided as the source, as well as a mesh field
@@ -358,7 +358,7 @@
 holdover from the time when yt was used exclusively for data that came in
 regularly structured grid patches, and does not necessarily work as well for
 data that is composed of discrete objects like particles.  To augment this, the
-:class:`~yt.data_objects.construction_data_containers.YTArbitraryGridBase` object 
+:class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid` object 
 was created, which enables construction of meshes (onto which particles can be
 deposited or smoothed) in arbitrary regions.  This eliminates any assumptions
 on yt's part about how the data is organized, and will allow for more
@@ -444,7 +444,7 @@
 set of level sets.  The second (``connected_sets``) will be a dict of dicts.
 The key for the first (outer) dict is the level of the contour, corresponding
 to ``contour_values``.  The inner dict returned is keyed by the contour ID.  It
-contains :class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`
+contains :class:`~yt.data_objects.selection_data_containers.YTCutRegion`
 objects.  These can be queried just as any other data object.  The clump finder 
 (:ref:`clump_finding`) differs from the above method in that the contour 
 identification is performed recursively within each individual structure, and 

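For context: the renamed classes above are still created through the same
factory methods on a dataset; only the ``Base`` suffix is dropped from the
class names.  A minimal sketch, assuming a loadable sample dataset (the
path below is hypothetical):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path

    # These factory calls are unchanged; they now return YTSphere and
    # YTRegion instances rather than YTSphereBase and YTRegionBase.
    sp = ds.sphere(ds.domain_center, (10.0, "kpc"))
    reg = ds.region(ds.domain_center, ds.domain_left_edge,
                    ds.domain_right_edge)
    print(sp["density"].max(), reg["density"].max())
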
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -302,18 +302,19 @@
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy frontends.tipsy
 
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can be, but does not have to be, the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file). To run the tipsy frontend's answer tests using a different
-yt changeset, update to that changeset, recompile if necessary, and run the
-tests using the following command:
+``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+frontend's answer tests using a different yt changeset, update to that
+changeset, recompile if necessary, and run the tests using the following
+command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy frontends.tipsy
 
 The results from a nose testing session are pretty straightforward to
 understand; the results for each test are printed directly to STDOUT.  If a test

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -329,8 +329,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Using the Ray objects 
-(:class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase` and 
-:class:`~yt.data_objects.selection_data_containers.YTRayBase`) with AMR data 
+(:class:`~yt.data_objects.selection_data_containers.YTOrthoRay` and 
+:class:`~yt.data_objects.selection_data_containers.YTRay`) with AMR data 
 gives non-contiguous cell information in the Ray's data array. The 
 higher-resolution cells are appended to the end of the array.  Unfortunately, 
 due to how data is loaded by chunks for data containers, there is really no 

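A common workaround for the ordering issue described above (a sketch, not
part of this changeset; the dataset path is hypothetical) is to sort the
ray's data by the "t" field, which parameterizes position along the ray:

    import numpy as np
    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path
    ray = ds.ray([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])

    # Cells arrive ordered by AMR chunk, not by position along the ray;
    # sorting on "t" (normalized distance along the ray) restores order.
    srt = np.argsort(ray["t"])
    density_along_ray = ray["density"][srt]
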
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -87,17 +87,17 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.selection_data_containers.YTPointBase
-   ~yt.data_objects.selection_data_containers.YTOrthoRayBase
-   ~yt.data_objects.selection_data_containers.YTRayBase
-   ~yt.data_objects.selection_data_containers.YTSliceBase
-   ~yt.data_objects.selection_data_containers.YTCuttingPlaneBase
-   ~yt.data_objects.selection_data_containers.YTDiskBase
-   ~yt.data_objects.selection_data_containers.YTRegionBase
-   ~yt.data_objects.selection_data_containers.YTDataCollectionBase
-   ~yt.data_objects.selection_data_containers.YTSphereBase
-   ~yt.data_objects.selection_data_containers.YTEllipsoidBase
-   ~yt.data_objects.selection_data_containers.YTCutRegionBase
+   ~yt.data_objects.selection_data_containers.YTPoint
+   ~yt.data_objects.selection_data_containers.YTOrthoRay
+   ~yt.data_objects.selection_data_containers.YTRay
+   ~yt.data_objects.selection_data_containers.YTSlice
+   ~yt.data_objects.selection_data_containers.YTCuttingPlane
+   ~yt.data_objects.selection_data_containers.YTDisk
+   ~yt.data_objects.selection_data_containers.YTRegion
+   ~yt.data_objects.selection_data_containers.YTDataCollection
+   ~yt.data_objects.selection_data_containers.YTSphere
+   ~yt.data_objects.selection_data_containers.YTEllipsoid
+   ~yt.data_objects.selection_data_containers.YTCutRegion
    ~yt.data_objects.grid_patch.AMRGridPatch
 
 Construction Objects
@@ -110,12 +110,12 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.construction_data_containers.YTStreamlineBase
-   ~yt.data_objects.construction_data_containers.YTQuadTreeProjBase
-   ~yt.data_objects.construction_data_containers.YTCoveringGridBase
-   ~yt.data_objects.construction_data_containers.YTArbitraryGridBase
-   ~yt.data_objects.construction_data_containers.YTSmoothedCoveringGridBase
-   ~yt.data_objects.construction_data_containers.YTSurfaceBase
+   ~yt.data_objects.construction_data_containers.YTStreamline
+   ~yt.data_objects.construction_data_containers.YTQuadTreeProj
+   ~yt.data_objects.construction_data_containers.YTCoveringGrid
+   ~yt.data_objects.construction_data_containers.YTArbitraryGrid
+   ~yt.data_objects.construction_data_containers.YTSmoothedCoveringGrid
+   ~yt.data_objects.construction_data_containers.YTSurface
 
 Time Series Objects
 ^^^^^^^^^^^^^^^^^^^
@@ -211,8 +211,6 @@
    ~yt.frontends.boxlib.data_structures.OrionDataset
    ~yt.frontends.boxlib.fields.BoxlibFieldInfo
    ~yt.frontends.boxlib.io.IOHandlerBoxlib
-   ~yt.frontends.boxlib.io.IOHandlerCastro
-   ~yt.frontends.boxlib.io.IOHandlerNyx
    ~yt.frontends.boxlib.io.IOHandlerOrion
 
 Chombo

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -677,9 +677,13 @@
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.RayCallback`.)
 
-    Adds a line representing the projected path of a ray across the plot.
-    The ray can be either a YTOrthoRayBase, YTRayBase, or a LightRay object.
-    annotate_ray() will properly account for periodic rays across the volume.
+    Adds a line representing the projected path of a ray across the plot.  The
+    ray can be either a
+    :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`,
+    :class:`~yt.data_objects.selection_data_containers.YTRay`, or a
+    :class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+    object.  annotate_ray() will properly account for periodic rays across the
+    volume.
 
 .. python-script::
 

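A minimal usage sketch for the callback described above (the dataset path
is hypothetical):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path
    ray = ds.ray([0.1, 0.2, 0.3], [0.6, 0.7, 0.8])  # a YTRay object

    p = yt.SlicePlot(ds, "z", "density")
    p.annotate_ray(ray)  # draws the projected ray path on the slice
    p.save()
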
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -125,7 +125,7 @@
 This is perhaps the simplest thing to do. yt provides a number of one
 dimensional objects, and these return a 1-D numpy array of their contents with
 direct dictionary access. As a simple example, take a
-:class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase` object, which can be
+:class:`~yt.data_objects.selection_data_containers.YTOrthoRay` object, which can be
 created from an index by calling ``pf.ortho_ray(axis, center)``.
 
 .. python-script::

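A sketch of what such a script might look like (the matplotlib usage and
dataset path are assumptions, not part of the diff):

    import matplotlib.pyplot as plt
    import numpy as np
    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path

    # An ortho_ray along the x axis (axis 0) through (y, z) = (0.5, 0.5).
    ray = ds.ortho_ray(0, (0.5, 0.5))
    srt = np.argsort(ray["x"])

    plt.semilogy(np.array(ray["x"][srt]), np.array(ray["density"][srt]))
    plt.xlabel("x")
    plt.ylabel("density")
    plt.savefig("ortho_ray_density.png")
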
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -192,7 +192,7 @@
 
 Off axis slice plots can be generated in much the same way as
 grid-aligned slices.  Off axis slices use
-:class:`~yt.data_objects.selection_data_containers.YTCuttingPlaneBase` to slice
+:class:`~yt.data_objects.selection_data_containers.YTCuttingPlane` to slice
 through simulation domains at an arbitrary oblique angle.  A
 :class:`~yt.visualization.plot_window.OffAxisSlicePlot` can be
 instantiated by specifying a dataset, the normal to the cutting
@@ -670,7 +670,7 @@
    plot = yt.ProfilePlot(my_galaxy, "density", ["temperature"])
    plot.save()
 
-This will create a :class:`~yt.data_objects.selection_data_containers.YTDiskBase`
+This will create a :class:`~yt.data_objects.selection_data_containers.YTDisk`
 centered at [0.5, 0.5, 0.5], with a normal vector of [0.0, 0.0, 1.0], radius of
 10 kiloparsecs and height of 3 kiloparsecs and will then make a plot of the
 mass-weighted average temperature as a function of density for all of the gas

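The ``my_galaxy`` object referenced above would be constructed along these
lines (a sketch; center, normal, radius, and height follow the surrounding
text, and the dataset path is hypothetical):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path

    # A YTDisk: center, normal vector, radius, height.
    my_galaxy = ds.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0],
                        (10.0, "kpc"), (3.0, "kpc"))

    plot = yt.ProfilePlot(my_galaxy, "density", ["temperature"])
    plot.save()
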
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -47,7 +47,7 @@
 both of these operations will run in parallel.  For more information on enabling
 parallelism in yt, see :ref:`parallel-computation`.
 
-Alternatively, you can make an object called ``YTSurfaceBase`` that makes
+Alternatively, you can make an object called ``YTSurface`` that makes
 this process much easier.  You can create one of these objects by specifying a
 source data object and a field over which to identify a surface at a given
 value.  For example:
@@ -101,7 +101,7 @@
 discuss morphological properties of a dataset with collaborators.  It's also
 just plain cool.
 
-The ``YTSurfaceBase`` object includes a method to upload directly to Sketchfab,
+The ``YTSurface`` object includes a method to upload directly to Sketchfab,
 but it requires that you get an API key first.  You can get this API key by
 creating an account and then going to your "dashboard," where it will be listed
 on the right hand side.  Once you've obtained it, put it into your

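Creating such an object is a one-liner on a dataset (a sketch; the field
value and dataset path are arbitrary assumptions):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path
    sphere = ds.sphere("max", (1.0, "Mpc"))

    # A YTSurface: the density isocontour at 1e-27 g/cm**3 within the sphere.
    surf = ds.surface(sphere, "density", 1e-27)
    print(surf["temperature"].min(), surf["temperature"].max())
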
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -19,7 +19,7 @@
 returned a set of 3D positions that can, in turn, be used to visualize
 the 3D path of the streamlines.  Additionally, individual streamlines
 can be converted into
-:class:`~yt.data_objects.construction_data_containers.YTStreamlineBase` objects,
+:class:`~yt.data_objects.construction_data_containers.YTStreamline` objects,
 and queried for all the available fields along the streamline.
 
 The implementation of streamlining in yt is described below.
@@ -100,7 +100,7 @@
     let us know on the yt-dev mailing list.
 
 Once the streamlines are found, a
-:class:`~yt.data_objects.construction_data_containers.YTStreamlineBase` object can
+:class:`~yt.data_objects.construction_data_containers.YTStreamline` object can
 be created using the
 :meth:`~yt.visualization.streamlines.Streamlines.path` function, which
 takes as input the index of the streamline requested. This conversion

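A sketch of the conversion described above (seed positions and field names
are assumptions; the dataset path is hypothetical):

    import numpy as np
    import yt
    from yt.visualization.api import Streamlines

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path

    # Seed a handful of streamlines near the domain center.
    seeds = np.random.random((8, 3)) * 0.2 + 0.4
    streamlines = Streamlines(ds, seeds, "velocity_x", "velocity_y",
                              "velocity_z", length=1.0)
    streamlines.integrate_through_volume()

    # Convert one streamline into a YTStreamline object and query a field.
    stream = streamlines.path(0)
    print(stream["density"])
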
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -295,7 +295,7 @@
 Previously, projections were inconsistent with the other data objects.
 (The API for Plot Windows is the same.)  The argument order is now ``field``
 then ``axis`` as seen here: 
-:class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`.
+:class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`.
 
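Concretely (a sketch; assumes a gas "density" field and a hypothetical
dataset path):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path

    # yt 3.x: field first, then axis.
    prj = ds.proj("density", "x")
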
 Field Parameters
 ^^^^^^^^^^^^^^^^

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -299,7 +299,7 @@
 
         Returns
         -------
-        sphere : `yt.data_objects.api.YTSphereBase`
+        sphere : `yt.data_objects.api.YTSphere`
             The empty data source.
 
         Examples
@@ -668,7 +668,7 @@
 
         Returns
         -------
-        ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
+        ellipsoid : `yt.data_objects.data_containers.YTEllipsoid`
             The ellipsoidal data object.
 
         Examples
@@ -861,7 +861,7 @@
 
         Returns
         -------
-        ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
+        ellipsoid : `yt.data_objects.data_containers.YTEllipsoid`
             The ellipsoidal data object.
 
         Examples
@@ -890,7 +890,7 @@
 
         Returns
         -------
-        sphere : `yt.data_objects.api.YTSphereBase`
+        sphere : `yt.data_objects.api.YTSphere`
             The empty data source.
 
         Examples

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -536,6 +536,21 @@
         D_A0 = self.parameters["FiducialAngularDiameterDistance"]
         scale_factor = 1.0
 
+        # If we use an RMF, figure out where the response matrix actually is.
+        if "RMF" in parameters:
+            rmf = _astropy.pyfits.open(parameters["RMF"])
+            if "MATRIX" in rmf:
+                mat_key = "MATRIX"
+            elif "SPECRESP MATRIX" in rmf:
+                mat_key = "SPECRESP MATRIX"
+            else:
+                raise RuntimeError("Cannot find the response matrix in the RMF "
+                                   "file %s! " % parameters["RMF"]+"It should "
+                                   "be named \"MATRIX\" or \"SPECRESP MATRIX\".")
+            rmf.close()
+        else:
+            mat_key = None
+
         if (exp_time_new is None and area_new is None and
             redshift_new is None and dist_new is None):
             my_n_obs = n_ph_tot
@@ -552,18 +567,17 @@
                 if comm.rank == 0:
                     mylog.info("Using energy-dependent effective area: %s" % (parameters["ARF"]))
                 f = _astropy.pyfits.open(area_new)
-                elo = f["SPECRESP"].data.field("ENERG_LO")
-                ehi = f["SPECRESP"].data.field("ENERG_HI")
+                earf = 0.5*(f["SPECRESP"].data.field("ENERG_LO")+f["SPECRESP"].data.field("ENERG_HI"))
                 eff_area = np.nan_to_num(f["SPECRESP"].data.field("SPECRESP"))
                 if "RMF" in parameters:
-                    weights = self._normalize_arf(parameters["RMF"])
+                    weights = self._normalize_arf(parameters["RMF"], mat_key)
                     eff_area *= weights
                 else:
                     mylog.warning("You specified an ARF but not an RMF. This is ok if the "+
                                   "responses are normalized properly. If not, you may "+
                                   "get inconsistent results.")
                 f.close()
-                Aratio = eff_area.max()/self.parameters["FiducialArea"]
+                Aratio = eff_area.max()/self.parameters["FiducialArea"].v
             else:
                 mylog.info("Using constant effective area.")
                 Aratio = parse_value(area_new, "cm**2")/self.parameters["FiducialArea"]
@@ -583,7 +597,12 @@
                          (D_A*D_A*(1.+zobs)**3)
             fak = Aratio*Tratio*Dratio
             if fak > 1:
-                raise ValueError("Spectrum scaling factor = %g, cannot be greater than unity." % fak)
+                raise ValueError("This combination of requested parameters results in "
+                                 "%g%% more photons collected than are " % (100.*(fak-1.)) +
+                                 "available in the sample. Please reduce the collecting "
+                                 "area, exposure time, or increase the distance/redshift "
+                                 "of the object. Alternatively, generate a larger sample "
+                                 "of photons.")
             my_n_obs = np.uint64(n_ph_tot*fak)
 
         n_obs_all = comm.mpi_allreduce(my_n_obs)
@@ -652,7 +671,6 @@
             detected = np.ones(eobs.shape, dtype='bool')
         else:
             mylog.info("Applying energy-dependent effective area.")
-            earf = 0.5*(elo+ehi)
             earea = np.interp(eobs, earf, eff_area, left=0.0, right=0.0)
             randvec = eff_area.max()*np.random.random(eobs.shape)
             detected = randvec < earea
@@ -676,11 +694,13 @@
 
         num_events = len(events["xpix"])
 
-        if comm.rank == 0: mylog.info("Total number of observed photons: %d" % num_events)
+        if comm.rank == 0:
+            mylog.info("Total number of observed photons: %d" % num_events)
 
         if "RMF" in parameters and convolve_energies:
-            events, info = self._convolve_with_rmf(parameters["RMF"], events)
-            for k, v in info.items(): parameters[k] = v
+            events, info = self._convolve_with_rmf(parameters["RMF"], events, mat_key)
+            for k, v in info.items():
+                parameters[k] = v
 
         if exp_time_new is None:
             parameters["ExposureTime"] = self.parameters["FiducialExposureTime"]
@@ -698,23 +718,22 @@
 
         return EventList(events, parameters)
 
-    def _normalize_arf(self, respfile):
+    def _normalize_arf(self, respfile, mat_key):
         rmf = _astropy.pyfits.open(respfile)
-        table = rmf["MATRIX"]
+        table = rmf[mat_key]
         weights = np.array([w.sum() for w in table.data["MATRIX"]])
         rmf.close()
         return weights
 
-    def _convolve_with_rmf(self, respfile, events):
+    def _convolve_with_rmf(self, respfile, events, mat_key):
         """
         Convolve the events with a RMF file.
         """
-        mylog.warning("This routine has not been tested to work with all RMFs. YMMV.")
         mylog.info("Reading response matrix file (RMF): %s" % (respfile))
 
         hdulist = _astropy.pyfits.open(respfile)
 
-        tblhdu = hdulist["MATRIX"]
+        tblhdu = hdulist[mat_key]
         n_de = len(tblhdu.data["ENERG_LO"])
         mylog.info("Number of energy bins in RMF: %d" % (n_de))
         mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
@@ -727,21 +746,19 @@
         eidxs = np.argsort(events["eobs"])
 
         phEE = events["eobs"][eidxs].d
-        phXX = events["xpix"][eidxs]
-        phYY = events["ypix"][eidxs]
 
         detectedChannels = []
 
         # run through all photon energies and find which bin they go in
         k = 0
         fcurr = 0
-        last = len(phEE)-1
+        last = len(phEE)
 
-        pbar = get_pbar("Scattering energies with RMF:", n_de)
+        pbar = get_pbar("Scattering energies with RMF:", last)
 
         for low,high in zip(tblhdu.data["ENERG_LO"],tblhdu.data["ENERG_HI"]):
             # weight function for probabilities from RMF
-            weights = np.nan_to_num(tblhdu.data[k]["MATRIX"][:])
+            weights = np.nan_to_num(np.float64(tblhdu.data[k]["MATRIX"][:]))
             weights /= weights.sum()
             # build channel number list associated to array value,
             # there are groups of channels in rmfs with nonzero probabilities
@@ -764,18 +781,18 @@
                 for q in range(fcurr,last):
                     if phEE[q] >= low and phEE[q] < high:
                         channelInd = np.random.choice(len(weights), p=weights)
-                        fcurr +=1
+                        fcurr += 1
                         detectedChannels.append(trueChannel[channelInd])
                     if phEE[q] >= high:
                         break
-            pbar.update(k)
-            k+=1
+            pbar.update(fcurr)
+            k += 1
         pbar.finish()
 
         dchannel = np.array(detectedChannels)
 
-        events["xpix"] = phXX
-        events["ypix"] = phYY
+        events["xpix"] = events["xpix"][eidxs]
+        events["ypix"] = events["ypix"][eidxs]
         events["eobs"] = YTArray(phEE, "keV")
         events[tblhdu.header["CHANTYPE"]] = dchannel.astype(int)
 
@@ -790,7 +807,6 @@
 class EventList(object) :
 
     def __init__(self, events, parameters):
-
         self.events = events
         self.parameters = parameters
         self.num_events = events["xpix"].shape[0]
@@ -800,9 +816,6 @@
         self.wcs.wcs.cdelt = [-parameters["dtheta"].value, parameters["dtheta"].value]
         self.wcs.wcs.ctype = ["RA---TAN","DEC--TAN"]
         self.wcs.wcs.cunit = ["deg"]*2
-        x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
-        self.events["xsky"] = YTArray(x, "degree")
-        self.events["ysky"] = YTArray(y, "degree")
 
     def keys(self):
         return self.events.keys()
@@ -817,11 +830,19 @@
         return self.events.values()
 
     def __getitem__(self,key):
+        if key not in self.events:
+            if key == "xsky" or key == "ysky":
+                x,y = self.wcs.wcs_pix2world(self.events["xpix"], self.events["ypix"], 1)
+                self.events["xsky"] = YTArray(x, "degree")
+                self.events["ysky"] = YTArray(y, "degree")
         return self.events[key]
 
     def __repr__(self):
         return self.events.__repr__()
 
+    def __contains__(self, key):
+        return key in self.events
+
     def __add__(self, other):
         assert_same_wcs(self.wcs, other.wcs)
         keys1 = list(self.parameters.keys())
@@ -860,11 +881,11 @@
             reg = pyregion.parse(region)
         r = reg.as_imagecoord(header=self.wcs.to_header())
         f = r.get_filter()
-        idxs = f.inside_x_y(self.events["xpix"], self.events["ypix"])
+        idxs = f.inside_x_y(self["xpix"], self["ypix"])
         if idxs.sum() == 0:
             raise RuntimeError("No events are inside this region!")
         new_events = {}
-        for k, v in self.events.items():
+        for k, v in self.items():
             new_events[k] = v[idxs]
         return EventList(new_events, self.parameters)
 
@@ -964,28 +985,46 @@
         Set *clobber* to True if you need to overwrite a previous file.
         """
         pyfits = _astropy.pyfits
+        Time = _astropy.time.Time
+        TimeDelta = _astropy.time.TimeDelta
+
+        exp_time = float(self.parameters["ExposureTime"])
+
+        t_begin = Time.now()
+        dt = TimeDelta(exp_time, format='sec')
+        t_end = t_begin + dt
 
         cols = []
 
-        col1 = pyfits.Column(name='ENERGY', format='E', unit='eV',
-                             array=self.events["eobs"].in_units("eV").d)
-        col2 = pyfits.Column(name='X', format='D', unit='pixel',
-                             array=self.events["xpix"])
-        col3 = pyfits.Column(name='Y', format='D', unit='pixel',
-                             array=self.events["ypix"])
+        col_e = pyfits.Column(name='ENERGY', format='E', unit='eV',
+                              array=self["eobs"].in_units("eV").d)
+        col_x = pyfits.Column(name='X', format='D', unit='pixel',
+                              array=self["xpix"])
+        col_y = pyfits.Column(name='Y', format='D', unit='pixel',
+                              array=self["ypix"])
 
-        cols = [col1, col2, col3]
+        cols = [col_e, col_x, col_y]
 
         if "ChannelType" in self.parameters:
              chantype = self.parameters["ChannelType"]
              if chantype == "PHA":
-                  cunit="adu"
+                  cunit = "adu"
              elif chantype == "PI":
-                  cunit="Chan"
-             col4 = pyfits.Column(name=chantype.upper(), format='1J',
-                                  unit=cunit, array=self.events[chantype])
-             cols.append(col4)
+                  cunit = "Chan"
+             col_ch = pyfits.Column(name=chantype.upper(), format='1J',
+                                    unit=cunit, array=self.events[chantype])
+             cols.append(col_ch)
 
+             mylog.info("Generating times for events assuming uniform time "
+                        "distribution. In future versions this will be made "
+                        "more general.")
+
+             time = np.random.uniform(size=self.num_events, low=0.0,
+                                      high=float(self.parameters["ExposureTime"]))
+             col_t = pyfits.Column(name="TIME", format='1D', unit='s', 
+                                   array=time)
+             cols.append(col_t)
+        
         coldefs = pyfits.ColDefs(cols)
         tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("EVENTS")
@@ -1006,7 +1045,9 @@
         tbhdu.header["TLMIN3"] = 0.5
         tbhdu.header["TLMAX2"] = 2.*self.parameters["pix_center"][0]-0.5
         tbhdu.header["TLMAX3"] = 2.*self.parameters["pix_center"][1]-0.5
-        tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+        tbhdu.header["EXPOSURE"] = exp_time
+        tbhdu.header["TSTART"] = 0.0
+        tbhdu.header["TSTOP"] = exp_time
         if isinstance(self.parameters["Area"], string_types):
             tbhdu.header["AREA"] = self.parameters["Area"]
         else:
@@ -1016,8 +1057,18 @@
         tbhdu.header["HDUVERS"] = "1.1.0"
         tbhdu.header["RADECSYS"] = "FK5"
         tbhdu.header["EQUINOX"] = 2000.0
+        tbhdu.header["HDUCLASS"] = "OGIP"
+        tbhdu.header["HDUCLAS1"] = "EVENTS"
+        tbhdu.header["HDUCLAS2"] = "ACCEPTED"
+        tbhdu.header["DATE"] = t_begin.tt.isot
+        tbhdu.header["DATE-OBS"] = t_begin.tt.isot
+        tbhdu.header["DATE-END"] = t_end.tt.isot
         if "RMF" in self.parameters:
             tbhdu.header["RESPFILE"] = self.parameters["RMF"]
+            f = pyfits.open(self.parameters["RMF"])
+            nchan = int(f["EBOUNDS"].header["DETCHANS"])
+            tbhdu.header["PHA_BINS"] = nchan
+            f.close()
         if "ARF" in self.parameters:
             tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
         if "ChannelType" in self.parameters:
@@ -1029,7 +1080,30 @@
         if "Instrument" in self.parameters:
             tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
 
-        tbhdu.writeto(fitsfile, clobber=clobber)
+        hdulist = [pyfits.PrimaryHDU(), tbhdu]
+
+        if "ChannelType" in self.parameters:
+            start = pyfits.Column(name='START', format='1D', unit='s',
+                                  array=np.array([0.0]))
+            stop = pyfits.Column(name='STOP', format='1D', unit='s',
+                                 array=np.array([exp_time]))
+
+            tbhdu_gti = pyfits.BinTableHDU.from_columns([start,stop])
+            tbhdu_gti.update_ext_name("STDGTI")
+            tbhdu_gti.header["TSTART"] = 0.0
+            tbhdu_gti.header["TSTOP"] = exp_time
+            tbhdu_gti.header["HDUCLASS"] = "OGIP"
+            tbhdu_gti.header["HDUCLAS1"] = "GTI"
+            tbhdu_gti.header["HDUCLAS2"] = "STANDARD"
+            tbhdu_gti.header["RADECSYS"] = "FK5"
+            tbhdu_gti.header["EQUINOX"] = 2000.0
+            tbhdu_gti.header["DATE"] = t_begin.tt.isot
+            tbhdu_gti.header["DATE-OBS"] = t_begin.tt.isot
+            tbhdu_gti.header["DATE-END"] = t_end.tt.isot
+
+            hdulist.append(tbhdu_gti)
+
+        pyfits.HDUList(hdulist).writeto(fitsfile, clobber=clobber)
 
     @parallel_root_only
     def write_simput_file(self, prefix, clobber=False, emin=None, emax=None):
@@ -1055,21 +1129,17 @@
              raise TypeError("Writing SIMPUT files is only supported if you didn't convolve with responses.")
 
         if emin is None:
-            emin = self.events["eobs"].min().value
+            emin = self["eobs"].min().value
         if emax is None:
-            emax = self.events["eobs"].max().value
+            emax = self["eobs"].max().value
 
-        idxs = np.logical_and(self.events["eobs"].d >= emin,
-                              self.events["eobs"].d <= emax)
-        flux = np.sum(self.events["eobs"][idxs].in_units("erg")) / \
+        idxs = np.logical_and(self["eobs"].d >= emin, self["eobs"].d <= emax)
+        flux = np.sum(self["eobs"][idxs].in_units("erg")) / \
                self.parameters["ExposureTime"]/self.parameters["Area"]
 
-        col1 = pyfits.Column(name='ENERGY', format='E',
-                             array=self["eobs"].d)
-        col2 = pyfits.Column(name='DEC', format='D',
-                             array=self["ysky"].d)
-        col3 = pyfits.Column(name='RA', format='D',
-                             array=self["xsky"].d)
+        col1 = pyfits.Column(name='ENERGY', format='E', array=self["eobs"].d)
+        col2 = pyfits.Column(name='DEC', format='D', array=self["ysky"].d)
+        col3 = pyfits.Column(name='RA', format='D', array=self["xsky"].d)
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
 
@@ -1091,8 +1161,8 @@
         tbhdu.writeto(phfile, clobber=clobber)
 
         col1 = pyfits.Column(name='SRC_ID', format='J', array=np.array([1]).astype("int32"))
-        col2 = pyfits.Column(name='RA', format='D', array=np.array([float(self.parameters["sky_center"][0])]))
-        col3 = pyfits.Column(name='DEC', format='D', array=np.array([float(self.parameters["sky_center"][1])]))
+        col2 = pyfits.Column(name='RA', format='D', array=np.array([0.0]))
+        col3 = pyfits.Column(name='DEC', format='D', array=np.array([0.0]))
         col4 = pyfits.Column(name='E_MIN', format='D', array=np.array([float(emin)]))
         col5 = pyfits.Column(name='E_MAX', format='D', array=np.array([float(emax)]))
         col6 = pyfits.Column(name='FLUX', format='D', array=np.array([flux.value]))
@@ -1147,15 +1217,15 @@
         if "Instrument" in self.parameters:
             f.create_dataset("/instrument", data=self.parameters["Instrument"])
 
-        f.create_dataset("/xpix", data=self.events["xpix"])
-        f.create_dataset("/ypix", data=self.events["ypix"])
-        f.create_dataset("/xsky", data=self.events["xsky"].d)
-        f.create_dataset("/ysky", data=self.events["ysky"].d)
-        f.create_dataset("/eobs", data=self.events["eobs"].d)
-        if "PI" in self.events:
-            f.create_dataset("/pi", data=self.events["PI"])
-        if "PHA" in self.events:
-            f.create_dataset("/pha", data=self.events["PHA"])                  
+        f.create_dataset("/xpix", data=self["xpix"])
+        f.create_dataset("/ypix", data=self["ypix"])
+        f.create_dataset("/xsky", data=self["xsky"].d)
+        f.create_dataset("/ysky", data=self["ysky"].d)
+        f.create_dataset("/eobs", data=self["eobs"].d)
+        if "PI" in self:
+            f.create_dataset("/pi", data=self["PI"])
+        if "PHA" in self:
+            f.create_dataset("/pha", data=self["PHA"])                  
         f.create_dataset("/sky_center", data=self.parameters["sky_center"].d)
         f.create_dataset("/pix_center", data=self.parameters["pix_center"])
         f.create_dataset("/dtheta", data=float(self.parameters["dtheta"]))
@@ -1181,13 +1251,13 @@
             The maximum energy of the photons to put in the image, in keV.
         """
         if emin is None:
-            mask_emin = np.ones((self.num_events), dtype='bool')
+            mask_emin = np.ones(self.num_events, dtype='bool')
         else:
-            mask_emin = self.events["eobs"].d > emin
+            mask_emin = self["eobs"].d > emin
         if emax is None:
-            mask_emax = np.ones((self.num_events), dtype='bool')
+            mask_emax = np.ones(self.num_events, dtype='bool')
         else:
-            mask_emax = self.events["eobs"].d < emax
+            mask_emax = self["eobs"].d < emax
 
         mask = np.logical_and(mask_emin, mask_emax)
 
@@ -1197,8 +1267,8 @@
         xbins = np.linspace(0.5, float(nx)+0.5, nx+1, endpoint=True)
         ybins = np.linspace(0.5, float(ny)+0.5, ny+1, endpoint=True)
 
-        H, xedges, yedges = np.histogram2d(self.events["xpix"][mask],
-                                           self.events["ypix"][mask],
+        H, xedges, yedges = np.histogram2d(self["xpix"][mask],
+                                           self["ypix"][mask],
                                            bins=[xbins,ybins])
 
         hdu = _astropy.pyfits.PrimaryHDU(H.T)
@@ -1252,21 +1322,27 @@
         if bin_type == "channel" and "ChannelType" in self.parameters:
             spectype = self.parameters["ChannelType"]
             f = pyfits.open(self.parameters["RMF"])
-            nchan = int(f[1].header["DETCHANS"])
-            try:
-                cmin = int(f[1].header["TLMIN4"])
-            except KeyError:
+            nchan = int(f["EBOUNDS"].header["DETCHANS"])
+            num = 0
+            for i in range(1,len(f["EBOUNDS"].columns)+1):
+                if f["EBOUNDS"].header["TTYPE%d" % i] == "CHANNEL":
+                    num = i
+                    break
+            if num > 0:
+                tlmin = "TLMIN%d" % num
+                cmin = int(f["EBOUNDS"].header[tlmin])
+            else:
                 mylog.warning("Cannot determine minimum allowed value for channel. " +
                               "Setting to 0, which may be wrong.")
-                cmin = int(0)
+                cmin = 0
             f.close()
             minlength = nchan
             if cmin == 1: minlength += 1
-            spec = np.bincount(self.events[spectype],minlength=minlength)
+            spec = np.bincount(self[spectype],minlength=minlength)
             if cmin == 1: spec = spec[1:]
             bins = (np.arange(nchan)+cmin).astype("int32")
         else:
-            espec = self.events["eobs"].d
+            espec = self["eobs"].d
             erange = (emin, emax)
             spec, ee = np.histogram(espec, bins=nchan, range=erange)
             if bin_type == "energy":
@@ -1283,7 +1359,7 @@
 
         coldefs = pyfits.ColDefs([col1, col2, col3, col4])
 
-        tbhdu = pyfits.new_table(coldefs)
+        tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
         tbhdu.update_ext_name("SPECTRUM")
 
         tbhdu.header["DETCHANS"] = spec.shape[0]

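One pattern worth noting in the ``EventList`` changes above: ``xsky`` and
``ysky`` are no longer computed eagerly in ``__init__`` but on first access
in ``__getitem__``.  A standalone sketch of that lazy-evaluation pattern
(simplified from the diff; the WCS call mirrors the one used there):

    class LazyEvents(object):
        """Minimal sketch of the lazy sky-coordinate pattern above."""
        def __init__(self, events, wcs):
            self.events = events  # dict of per-event arrays
            self.wcs = wcs

        def __getitem__(self, key):
            if key not in self.events and key in ("xsky", "ysky"):
                # Compute both sky coordinates once, on first access.
                x, y = self.wcs.wcs_pix2world(self.events["xpix"],
                                              self.events["ypix"], 1)
                self.events["xsky"] = x
                self.events["ysky"] = y
            return self.events[key]

        def __contains__(self, key):
            return key in self.events
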
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/analysis_modules/photon_simulator/setup.py
--- a/yt/analysis_modules/photon_simulator/setup.py
+++ b/yt/analysis_modules/photon_simulator/setup.py
@@ -4,6 +4,8 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('photon_simulator', parent_package, top_path)
+    config.add_extension("utils",
+                         ["yt/analysis_modules/photon_simulator/utils.pyx"])
     config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -17,8 +17,10 @@
 
 from yt.funcs import mylog
 from yt.units.yt_array import YTArray, YTQuantity
-from yt.utilities.on_demand_imports import _astropy, _scipy
-from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
+from yt.utilities.on_demand_imports import _astropy
+from yt.utilities.physical_constants import hcgs, clight
+from yt.utilities.physical_ratios import erg_per_keV, amu_grams
+from yt.analysis_modules.photon_simulator.utils import broaden_lines
 
 hc = (hcgs*clight).in_units("keV*angstrom").v
 cl = clight.v
@@ -30,7 +32,7 @@
         self.emin = YTQuantity(emin, "keV")
         self.emax = YTQuantity(emax, "keV")
         self.nchan = nchan
-        self.ebins = np.linspace(self.emin, self.emax, nchan+1)
+        self.ebins = YTArray(np.linspace(self.emin, self.emax, nchan+1), "keV")
         self.de = np.diff(self.ebins)
         self.emid = 0.5*(self.ebins[1:]+self.ebins[:-1])
 
@@ -63,7 +65,7 @@
     --------
     >>> mekal_model = XSpecThermalModel("mekal", 0.05, 50.0, 1000)
     """
-    def __init__(self, model_name, emin, emax, nchan, 
+    def __init__(self, model_name, emin, emax, nchan,
                  thermal_broad=False, settings=None):
         self.model_name = model_name
         self.thermal_broad = thermal_broad
@@ -133,7 +135,7 @@
     --------
     >>> abs_model = XSpecAbsorbModel("wabs", 0.1)
     """
-    def __init__(self, model_name, nH, emin=0.01, emax=50.0, 
+    def __init__(self, model_name, nH, emin=0.01, emax=50.0,
                  nchan=100000, settings=None):
         self.model_name = model_name
         self.nH = nH
@@ -191,8 +193,8 @@
 
     Examples
     --------
-    >>> apec_model = TableApecModel("/Users/jzuhone/Data/atomdb_v2.0.2/", 0.05, 50.0,
-    ...                             1000, thermal_broad=True)
+    >>> apec_model = TableApecModel("$SPECTRAL_DATA/spectral/", 0.05, 50.0,
+    ...                             1000, apec_vers="3.0", thermal_broad=True)
     """
     def __init__(self, apec_root, emin, emax, nchan,
                  apec_vers="2.0.2", thermal_broad=False):
@@ -202,7 +204,7 @@
                                      self.apec_prefix+"_coco.fits")
         self.linefile = os.path.join(self.apec_root,
                                      self.apec_prefix+"_line.fits")
-        super(TableApecModel, self).__init__(emin, emax, nchan)  
+        super(TableApecModel, self).__init__(emin, emax, nchan)
         self.wvbins = hc/self.ebins[::-1].d
         # H, He, and trace elements
         self.cosmic_elem = [1,2,3,4,5,9,11,15,17,19,21,22,23,24,25,27,29,30]
@@ -237,47 +239,47 @@
         self.maxlam = self.wvbins.max()
         self.scale_factor = 1.0/(1.+zobs)
 
-    def _make_spectrum(self, element, tindex):
+    def _make_spectrum(self, kT, element, tindex):
 
         tmpspec = np.zeros(self.nchan)
 
-        i = np.where((self.line_handle[tindex].data.field('element') == element) &
-                     (self.line_handle[tindex].data.field('lambda') > self.minlam) &
-                     (self.line_handle[tindex].data.field('lambda') < self.maxlam))[0]
+        line_data = self.line_handle[tindex].data
+        coco_data = self.coco_handle[tindex].data
 
-        vec = np.zeros(self.nchan)
-        E0 = hc/self.line_handle[tindex].data.field('lambda')[i]*self.scale_factor
-        amp = self.line_handle[tindex].data.field('epsilon')[i]
+        i = np.where((line_data.field('element') == element) &
+                     (line_data.field('lambda') > self.minlam) &
+                     (line_data.field('lambda') < self.maxlam))[0]
+
+        E0 = hc/line_data.field('lambda')[i].astype("float64")*self.scale_factor
+        amp = line_data.field('epsilon')[i].astype("float64")
         ebins = self.ebins.d
+        de = self.de.d
+        emid = self.emid.d
         if self.thermal_broad:
-            vec = np.zeros(self.nchan)
-            sigma = E0*np.sqrt(self.Tvals[tindex]*erg_per_keV/(self.A[element]*amu_cgs))/cl
-            for E, sig, a in zip(E0, sigma, amp):
-                cdf = _scipy.stats.norm(E,sig).cdf(ebins)
-                vec += np.diff(cdf)*a
+            sigma = E0*np.sqrt(2.*kT*erg_per_keV/(self.A[element]*amu_grams))/cl
+            vec = broaden_lines(E0, sigma, amp, emid)*de
         else:
-            ie = np.searchsorted(ebins, E0, side='right')-1
-            for i, a in zip(ie, amp): vec[i] += a
+            vec = np.histogram(E0, ebins, weights=amp)[0]
         tmpspec += vec
 
-        ind = np.where((self.coco_handle[tindex].data.field('Z') == element) &
-                       (self.coco_handle[tindex].data.field('rmJ') == 0))[0]
+        ind = np.where((coco_data.field('Z') == element) &
+                       (coco_data.field('rmJ') == 0))[0]
         if len(ind) == 0:
             return tmpspec
         else:
             ind = ind[0]
 
-        n_cont = self.coco_handle[tindex].data.field('N_Cont')[ind]
-        e_cont = self.coco_handle[tindex].data.field('E_Cont')[ind][:n_cont]*self.scale_factor
-        continuum = self.coco_handle[tindex].data.field('Continuum')[ind][:n_cont]
+        n_cont = coco_data.field('N_Cont')[ind]
+        e_cont = coco_data.field('E_Cont')[ind][:n_cont]
+        continuum = coco_data.field('Continuum')[ind][:n_cont]
 
-        tmpspec += np.interp(self.emid.d, e_cont, continuum)*self.de.d
+        tmpspec += np.interp(emid, e_cont*self.scale_factor, continuum)*de/self.scale_factor
 
-        n_pseudo = self.coco_handle[tindex].data.field('N_Pseudo')[ind]
-        e_pseudo = self.coco_handle[tindex].data.field('E_Pseudo')[ind][:n_pseudo]*self.scale_factor
-        pseudo = self.coco_handle[tindex].data.field('Pseudo')[ind][:n_pseudo]
+        n_pseudo = coco_data.field('N_Pseudo')[ind]
+        e_pseudo = coco_data.field('E_Pseudo')[ind][:n_pseudo]
+        pseudo = coco_data.field('Pseudo')[ind][:n_pseudo]
 
-        tmpspec += np.interp(self.emid.d, e_pseudo, pseudo)*self.de.d
+        tmpspec += np.interp(emid, e_pseudo*self.scale_factor, pseudo)*de/self.scale_factor
 
         return tmpspec
 
@@ -295,12 +297,12 @@
         dT = (kT-self.Tvals[tindex])/self.dTvals[tindex]
         # First do H,He, and trace elements
         for elem in self.cosmic_elem:
-            cspec_l += self._make_spectrum(elem, tindex+2)
-            cspec_r += self._make_spectrum(elem, tindex+3)            
+            cspec_l += self._make_spectrum(kT, elem, tindex+2)
+            cspec_r += self._make_spectrum(kT, elem, tindex+3)
         # Next do the metals
         for elem in self.metal_elem:
-            mspec_l += self._make_spectrum(elem, tindex+2)
-            mspec_r += self._make_spectrum(elem, tindex+3)
+            mspec_l += self._make_spectrum(kT, elem, tindex+2)
+            mspec_r += self._make_spectrum(kT, elem, tindex+3)
         cosmic_spec = YTArray(cspec_l*(1.-dT)+cspec_r*dT, "cm**3/s")
         metal_spec = YTArray(mspec_l*(1.-dT)+mspec_r*dT, "cm**3/s")
         return cosmic_spec, metal_spec

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -0,0 +1,44 @@
+from yt.analysis_modules.photon_simulator.api import \
+    TableApecModel, XSpecThermalModel
+import numpy as np
+from yt.testing import requires_module, fake_random_ds
+from yt.utilities.answer_testing.framework import \
+    GenericArrayTest, data_dir_load
+from yt.config import ytcfg
+
+def setup():
+    ytcfg["yt", "__withintesting"] = "True"
+
+test_data_dir = ytcfg.get("yt", "test_data_dir")
+
+ds = fake_random_ds(64)
+
+@requires_module("xspec")
+@requires_module("astropy")
+def test_apec():
+
+    settings = {"APECROOT":test_data_dir+"/xray_data/apec_v2.0.2"}
+    xmod = XSpecThermalModel("apec", 0.1, 10.0, 10000, thermal_broad=True,
+                             settings=settings)
+    xmod.prepare_spectrum(0.2)
+
+    xcspec, xmspec = xmod.get_spectrum(6.0)
+    spec1 = xcspec+0.3*xmspec
+
+    amod = TableApecModel(test_data_dir+"/xray_data", 0.1, 10.0, 
+                          10000, thermal_broad=True)
+    amod.prepare_spectrum(0.2)
+    
+    acspec, amspec = amod.get_spectrum(6.0)
+    spec2 = acspec+0.3*amspec
+
+    def spec1_test():
+        return spec1.v
+    def spec2_test():
+        return spec2.v
+
+    for test in [GenericArrayTest(ds, spec1_test),
+                 GenericArrayTest(ds, spec2_test)]:
+        test_apec.__name__ = test.description
+        yield test
+

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/analysis_modules/photon_simulator/utils.pyx
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/utils.pyx
@@ -0,0 +1,31 @@
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.math cimport exp
+
+cdef double gfac = 1.0/np.sqrt(np.pi)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def broaden_lines(np.ndarray[np.float64_t, ndim=1] E0,
+                  np.ndarray[np.float64_t, ndim=1] sigma,
+                  np.ndarray[np.float64_t, ndim=1] amp,
+                  np.ndarray[np.float64_t, ndim=1] E):
+
+    cdef int i, j, n, m
+    cdef double x, isigma, iamp
+    cdef np.ndarray[np.float64_t, ndim=1] lines
+
+    n = E0.shape[0]
+    m = E.shape[0]
+    lines = np.zeros(m)
+
+    for i in range(n):
+        isigma = 1.0/sigma[i]
+        iamp = gfac*amp[i]*isigma
+        for j in range(m):
+            x = (E[j]-E0[i])*isigma
+            lines[j] += iamp*exp(-x*x)
+
+    return lines

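For readers without Cython handy, an equivalent pure-NumPy sketch of what
``broaden_lines`` computes (a sum of normalized Gaussians sampled at the
bin centers):

    import numpy as np

    def broaden_lines_numpy(E0, sigma, amp, E):
        """Pure-NumPy equivalent of the Cython broaden_lines above."""
        # Each line contributes amp/(sqrt(pi)*sigma)*exp(-((E-E0)/sigma)**2).
        x = (E[np.newaxis, :] - E0[:, np.newaxis]) / sigma[:, np.newaxis]
        gauss = np.exp(-x * x) / (np.sqrt(np.pi) * sigma[:, np.newaxis])
        return (amp[:, np.newaxis] * gauss).sum(axis=0)
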
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -58,7 +58,7 @@
 from yt.fields.derived_field import \
     TranslationFunc
 
-class YTStreamlineBase(YTSelectionContainer1D):
+class YTStreamline(YTSelectionContainer1D):
     """
     This is a streamline, which is a set of points defined as
     being parallel to some vector field.
@@ -152,7 +152,7 @@
         return mask
 
 
-class YTQuadTreeProjBase(YTSelectionContainer2D):
+class YTQuadTreeProj(YTSelectionContainer2D):
     """
     This is a data object corresponding to a line integral through the
     simulation domain.
@@ -455,7 +455,7 @@
         pw = self._get_pw(fields, center, width, origin, 'Projection')
         return pw
 
-class YTCoveringGridBase(YTSelectionContainer3D):
+class YTCoveringGrid(YTSelectionContainer3D):
     """A 3D region with all data extracted to a single, specified
     resolution.  Left edge should align with a cell boundary, but
     defaults to the closest cell boundary.
@@ -736,7 +736,7 @@
                                sim_time=self.ds.current_time.v)
         write_to_gdf(ds, gdf_path, **kwargs)
 
-class YTArbitraryGridBase(YTCoveringGridBase):
+class YTArbitraryGrid(YTCoveringGrid):
     """A 3D region with arbitrary bounds and dimensions.
 
     In contrast to the Covering Grid, this object accepts a left edge, a right
@@ -806,7 +806,7 @@
     base_dx = None
     dds = None
 
-class YTSmoothedCoveringGridBase(YTCoveringGridBase):
+class YTSmoothedCoveringGrid(YTCoveringGrid):
     """A 3D region with all data extracted and interpolated to a
     single, specified resolution. (Identical to covering_grid,
     except that it interpolates.)
@@ -834,13 +834,13 @@
     """
     _type_name = "smoothed_covering_grid"
     filename = None
-    @wraps(YTCoveringGridBase.__init__)
+    @wraps(YTCoveringGrid.__init__)
     def __init__(self, *args, **kwargs):
-        self._base_dx = (
-              (self.ds.domain_right_edge - self.ds.domain_left_edge) /
-               self.ds.domain_dimensions.astype("float64"))
+        ds = kwargs['ds']
+        self._base_dx = ((ds.domain_right_edge - ds.domain_left_edge) /
+                         ds.domain_dimensions.astype("float64"))
         self.global_endindex = None
-        YTCoveringGridBase.__init__(self, *args, **kwargs)
+        YTCoveringGrid.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _setup_data_source(self, level_state = None):
@@ -958,7 +958,7 @@
         level_state.fields = new_fields
         self._setup_data_source(ls)
 
-class YTSurfaceBase(YTSelectionContainer3D):
+class YTSurface(YTSelectionContainer3D):
     r"""This surface object identifies isocontours on a cell-by-cell basis,
     with no consideration of global connectedness, and returns the vertices
     of the Triangles in that isocontour.
@@ -1006,14 +1006,13 @@
                          ("index", "y"),
                          ("index", "z"))
     vertices = None
-    def __init__(self, data_source, surface_field, field_value):
+    def __init__(self, data_source, surface_field, field_value, ds=None):
         self.data_source = data_source
         self.surface_field = surface_field
         self.field_value = field_value
         self.vertex_samples = YTFieldData()
         center = data_source.get_field_parameter("center")
-        super(YTSurfaceBase, self).__init__(center = center, ds =
-                    data_source.ds )
+        super(YTSurface, self).__init__(center = center, ds=ds)
 
     def _generate_container_field(self, field):
         self.get_data(field)

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1142,7 +1142,7 @@
 
     def cut_region(self, field_cuts, field_parameters=None):
         """
-        Return an YTCutRegionBase, where the a cell is identified as being inside
+        Return a YTCutRegion, where a cell is identified as being inside
         the cut region based on the value of one or more fields.  Note that in
         previous versions of yt the name 'grid' was used to represent the data
         object used to construct the field cut; as of yt 3.0, this has been

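A usage sketch for the docstring above (the density threshold and dataset
path are arbitrary assumptions):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # hypothetical path
    ad = ds.all_data()

    # Keep only cells above a density threshold; conditionals refer to the
    # parent object as "obj".
    dense = ad.cut_region(['obj["density"] > 1e-26'])
    print(dense["temperature"].mean())
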
diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -37,7 +37,7 @@
 from yt.units.yt_array import YTQuantity
 
 
-class YTPointBase(YTSelectionContainer0D):
+class YTPoint(YTSelectionContainer0D):
     """
     A 0-dimensional object defined by a single point
 
@@ -68,10 +68,10 @@
     _type_name = "point"
     _con_args = ('p',)
     def __init__(self, p, ds=None, field_parameters=None, data_source=None):
-        super(YTPointBase, self).__init__(ds, field_parameters, data_source)
+        super(YTPoint, self).__init__(ds, field_parameters, data_source)
         self.p = p
 
-class YTOrthoRayBase(YTSelectionContainer1D):
+class YTOrthoRay(YTSelectionContainer1D):
     """
     This is an orthogonal ray cast through the entire domain, at a specific
     coordinate.
@@ -123,7 +123,7 @@
     _con_args = ('axis', 'coords')
     def __init__(self, axis, coords, ds=None, 
                  field_parameters=None, data_source=None):
-        super(YTOrthoRayBase, self).__init__(ds, field_parameters, data_source)
+        super(YTOrthoRay, self).__init__(ds, field_parameters, data_source)
         self.axis = axis
         xax = self.ds.coordinates.x_axis[self.axis]
         yax = self.ds.coordinates.y_axis[self.axis]
@@ -139,7 +139,7 @@
     def coords(self):
         return (self.px, self.py)
 
-class YTRayBase(YTSelectionContainer1D):
+class YTRay(YTSelectionContainer1D):
     """
     This is an arbitrarily-aligned ray cast through the entire domain, at a
     specific coordinate.
@@ -191,7 +191,7 @@
     _container_fields = ("t", "dts")
     def __init__(self, start_point, end_point, ds=None,
                  field_parameters=None, data_source=None):
-        super(YTRayBase, self).__init__(ds, field_parameters, data_source)
+        super(YTRay, self).__init__(ds, field_parameters, data_source)
         self.start_point = self.ds.arr(start_point,
                             'code_length', dtype='float64')
         self.end_point = self.ds.arr(end_point,
@@ -211,7 +211,7 @@
         else:
             raise KeyError(field)
 
-class YTSliceBase(YTSelectionContainer2D):
+class YTSlice(YTSelectionContainer2D):
     """
     This is a data object corresponding to a slice through the simulation
     domain.
@@ -295,7 +295,7 @@
         pw = self._get_pw(fields, center, width, origin, 'Slice')
         return pw
 
-class YTCuttingPlaneBase(YTSelectionContainer2D):
+class YTCuttingPlane(YTSelectionContainer2D):
     """
     This is a data object corresponding to an oblique slice through the
     simulation domain.
@@ -499,7 +499,7 @@
                                            periodic=periodic)
         return frb
 
-class YTDiskBase(YTSelectionContainer3D):
+class YTDisk(YTSelectionContainer3D):
     """
     By providing a *center*, a *normal*, a *radius* and a *height* we
     can define a cylinder of any proportion.  Only cells whose centers are
@@ -549,7 +549,7 @@
         self.radius = fix_length(radius, self.ds)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
-class YTRegionBase(YTSelectionContainer3D):
+class YTRegion(YTSelectionContainer3D):
     """A 3D region of data with an arbitrary center.
 
     Takes an array of three *left_edge* coordinates, three
@@ -582,7 +582,7 @@
         else:
             self.right_edge = right_edge
 
-class YTDataCollectionBase(YTSelectionContainer3D):
+class YTDataCollection(YTSelectionContainer3D):
     """
     By selecting an arbitrary *object_list*, we can act on those grids.
     Child cells are not returned.
@@ -597,7 +597,7 @@
                                 dtype="int64")
         self._obj_list = obj_list
 
-class YTSphereBase(YTSelectionContainer3D):
+class YTSphere(YTSelectionContainer3D):
     """
     A sphere of points defined by a *center* and a *radius*.
 
@@ -620,7 +620,7 @@
     _con_args = ('center', 'radius')
     def __init__(self, center, radius, ds=None,
                  field_parameters=None, data_source=None):
-        super(YTSphereBase, self).__init__(center, ds,
+        super(YTSphere, self).__init__(center, ds,
                                            field_parameters, data_source)
         # Unpack the radius, if necessary
         radius = fix_length(radius, self.ds)
@@ -631,7 +631,7 @@
         self.set_field_parameter("center", self.center)
         self.radius = radius
 
-class YTEllipsoidBase(YTSelectionContainer3D):
+class YTEllipsoid(YTSelectionContainer3D):
     """
     By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
     can define a ellipsoid of any proportion.  Only cells whose
@@ -711,7 +711,7 @@
         self.set_field_parameter('e1', e1)
         self.set_field_parameter('e2', e2)
 
-class YTCutRegionBase(YTSelectionContainer3D):
+class YTCutRegion(YTSelectionContainer3D):
     """
     This is a data object designed to allow individuals to apply logical
     operations to fields and filter as a result of those cuts.
@@ -745,7 +745,7 @@
                 raise RuntimeError(
                     "Cannot use both base_object and data_source")
             data_source=base_object
-        super(YTCutRegionBase, self).__init__(
+        super(YTCutRegion, self).__init__(
             data_source.center, ds, field_parameters, data_source=data_source)
         self.conditionals = ensure_list(conditionals)
         self.base_object = data_source

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -554,7 +554,7 @@
                 continue
             cname = cls.__name__
             if cname.endswith("Base"): cname = cname[:-4]
-            self._add_object_class(name, cname, cls, {'ds':weakref.proxy(self)})
+            self._add_object_class(name, cls)
         if self.refine_by != 2 and hasattr(self, 'proj') and \
             hasattr(self, 'overlap_proj'):
             mylog.warning("Refine by something other than two: reverting to"
@@ -567,10 +567,9 @@
             self.proj = self.overlap_proj
         self.object_types.sort()
 
-    def _add_object_class(self, name, class_name, base, dd):
+    def _add_object_class(self, name, base):
         self.object_types.append(name)
-        dd.update({'__doc__': base.__doc__})
-        obj = type(class_name, (base,), dd)
+        obj = functools.partial(base, ds=weakref.proxy(self))
         setattr(self, name, obj)
 
     def find_max(self, field):
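
For reference, the _add_object_class change above stops minting a new
subclass per dataset and instead binds the dataset with
functools.partial, so ds.sphere(...) calls the real container class
with ds pre-filled. A minimal sketch of the pattern; the Sphere and
Dataset classes here are hypothetical stand-ins, not yt's own:

    import functools
    import weakref

    class Sphere:
        # Stand-in for a container that takes a dataset keyword, like
        # the renamed YTSphere above.
        def __init__(self, center, radius, ds=None):
            self.center, self.radius, self.ds = center, radius, ds

    class Dataset:
        def __init__(self):
            # Bind this dataset as the default ds argument; the weakref
            # proxy avoids a reference cycle between the dataset and
            # the containers it hands out.
            self.sphere = functools.partial(Sphere, ds=weakref.proxy(self))

    ds = Dataset()
    sp = ds.sphere([0.5, 0.5, 0.5], 0.1)
    assert sp.ds.sphere is ds.sphere  # attribute access goes through the proxy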

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,6 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import os
+
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -13,8 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-from yt.funcs import mylog
 from yt.fields.field_info_container import \
     FieldInfoContainer
 

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/_skeleton/setup.py
--- a/yt/frontends/_skeleton/setup.py
+++ b/yt/frontends/_skeleton/setup.py
@@ -1,8 +1,4 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
 
 
 def configuration(parent_package='', top_path=None):

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -9,48 +9,47 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+import glob
 import numpy as np
 import os
 import stat
+import struct
 import weakref
-from yt.extern.six.moves import cStringIO
-import difflib
-import glob
 
-from yt.funcs import *
 from yt.geometry.oct_geometry_handler import \
     OctreeIndex
 from yt.geometry.geometry_handler import \
-    Index, YTDataChunk
+    YTDataChunk
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
 from yt.data_objects.octree_subset import \
     OctreeSubset
+from yt.funcs import \
+    mylog
 from yt.geometry.oct_container import \
     ARTOctreeContainer
-from .fields import ARTFieldInfo
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.lib.misc_utilities import \
-    get_box_grids_level
+from yt.frontends.art.definitions import \
+    fluid_fields, \
+    particle_fields, \
+    filename_pattern, \
+    particle_header_struct, \
+    amr_header_struct, \
+    dmparticle_header_struct, \
+    constants, \
+    seek_extras
+from yt.frontends.art.fields import ARTFieldInfo
 from yt.data_objects.particle_unions import \
     ParticleUnion
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
-from yt.utilities.lib.geometry_utils import compute_morton
 
-from yt.frontends.art.definitions import *
 import yt.utilities.fortran_utils as fpu
-from .io import _read_art_level_info
-from .io import _read_child_level
-from .io import _read_root_level
-from .io import b2t
-from .io import a2b
-
-from yt.utilities.io_handler import \
-    io_registry
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc
+from yt.frontends.art.io import \
+    _read_art_level_info, \
+    _read_child_level, \
+    _read_root_level, \
+    b2t, \
+    a2b
 
 
 class ARTIndex(OctreeIndex):
@@ -181,8 +180,6 @@
         self.max_level = limit_level
         self.force_max_level = force_max_level
         self.spread_age = spread_age
-        self.domain_left_edge = np.zeros(3, dtype='float')
-        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
         Dataset.__init__(self, filename, dataset_type,
                          units_override=units_override)
         self.storage_filename = storage_filename
@@ -231,7 +228,6 @@
         aexpn = self.parameters["aexpn"]
 
         # all other units
-        wmu = self.parameters["wmu"]
         Om0 = self.parameters['Om0']
         ng = self.parameters['ng']
         boxh = self.parameters['boxh']
@@ -255,6 +251,8 @@
         """
         Get the various simulation parameters & constants.
         """
+        self.domain_left_edge = np.zeros(3, dtype='float')
+        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
         self.dimensionality = 3
         self.refine_by = 2
         self.periodicity = (True, True, True)
@@ -268,7 +266,7 @@
         with open(self._file_amr, 'rb') as f:
             amr_header_vals = fpu.read_attrs(f, amr_header_struct, '>')
             for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
-                skipped = fpu.skip(f, endian='>')
+                fpu.skip(f, endian='>')
             (self.ncell) = fpu.read_vector(f, 'i', '>')[0]
             # Try to figure out the root grid dimensions
             est = int(np.rint(self.ncell**(1.0/3.0)))
@@ -383,7 +381,7 @@
             return False
         with open(f, 'rb') as fh:
             try:
-                amr_header_vals = fpu.read_attrs(fh, amr_header_struct, '>')
+                fpu.read_attrs(fh, amr_header_struct, '>')
                 return True
             except:
                 return False
@@ -425,8 +423,6 @@
         self.parameter_filename = filename
         self.skip_stars = skip_stars
         self.spread_age = spread_age
-        self.domain_left_edge = np.zeros(3, dtype='float')
-        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
 
@@ -470,7 +466,6 @@
         aexpn = self.parameters["aexpn"]
 
         # all other units
-        wmu = self.parameters["wmu"]
         Om0 = self.parameters['Om0']
         ng = self.parameters['ng']
         boxh = self.parameters['boxh']
@@ -494,6 +489,8 @@
         """
         Get the various simulation parameters & constants.
         """
+        self.domain_left_edge = np.zeros(3, dtype='float')
+        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
         self.dimensionality = 3
         self.refine_by = 2
         self.periodicity = (True, True, True)
@@ -633,32 +630,32 @@
             try:
                 seek = 4
                 fh.seek(seek)
-                headerstr = np.fromfile(fh, count=1, dtype=(str,45))
-                aexpn = np.fromfile(fh, count=1, dtype='>f4')
-                aexp0 = np.fromfile(fh, count=1, dtype='>f4')
-                amplt = np.fromfile(fh, count=1, dtype='>f4')
-                astep = np.fromfile(fh, count=1, dtype='>f4')
-                istep = np.fromfile(fh, count=1, dtype='>i4')
-                partw = np.fromfile(fh, count=1, dtype='>f4')
-                tintg = np.fromfile(fh, count=1, dtype='>f4')
-                ekin = np.fromfile(fh, count=1, dtype='>f4')
-                ekin1 = np.fromfile(fh, count=1, dtype='>f4')
-                ekin2 = np.fromfile(fh, count=1, dtype='>f4')
-                au0 = np.fromfile(fh, count=1, dtype='>f4')
-                aeu0 = np.fromfile(fh, count=1, dtype='>f4')
-                nrowc = np.fromfile(fh, count=1, dtype='>i4')
-                ngridc = np.fromfile(fh, count=1, dtype='>i4')
-                nspecs = np.fromfile(fh, count=1, dtype='>i4')
-                nseed = np.fromfile(fh, count=1, dtype='>i4')
-                Om0 = np.fromfile(fh, count=1, dtype='>f4')
-                Oml0 = np.fromfile(fh, count=1, dtype='>f4')
-                hubble = np.fromfile(fh, count=1, dtype='>f4')
-                Wp5 = np.fromfile(fh, count=1, dtype='>f4')
-                Ocurv = np.fromfile(fh, count=1, dtype='>f4')
-                wspecies = np.fromfile(fh, count=10, dtype='>f4')
-                lspecies = np.fromfile(fh, count=10, dtype='>i4')
-                extras = np.fromfile(fh, count=79, dtype='>f4')
-                boxsize = np.fromfile(fh, count=1, dtype='>f4')
+                headerstr = np.fromfile(fh, count=1, dtype=(str,45))  # NOQA
+                aexpn = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                aexp0 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                amplt = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                astep = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                istep = np.fromfile(fh, count=1, dtype='>i4')  # NOQA
+                partw = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                tintg = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                ekin = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                ekin1 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                ekin2 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                au0 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                aeu0 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                nrowc = np.fromfile(fh, count=1, dtype='>i4')  # NOQA
+                ngridc = np.fromfile(fh, count=1, dtype='>i4')  # NOQA
+                nspecs = np.fromfile(fh, count=1, dtype='>i4')  # NOQA
+                nseed = np.fromfile(fh, count=1, dtype='>i4')  # NOQA
+                Om0 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                Oml0 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                hubble = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                Wp5 = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                Ocurv = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
+                wspecies = np.fromfile(fh, count=10, dtype='>f4')  # NOQA
+                lspecies = np.fromfile(fh, count=10, dtype='>i4')  # NOQA
+                extras = np.fromfile(fh, count=79, dtype='>f4')  # NOQA
+                boxsize = np.fromfile(fh, count=1, dtype='>f4')  # NOQA
                 return True
             except:
                 return False
@@ -702,7 +699,7 @@
         oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
         del source
         # Now we continue with the additional levels.
-        for level in range(1, self.ds.max_level + 1):
+        for level in range(1, self.ds.index.max_level + 1):
             no = self.domain.level_count[level]
             noct_range = [0, no]
             source = _read_child_level(
@@ -789,9 +786,7 @@
             Level[Lev], iNOLL[Lev], iHOLL[Lev] = fpu.read_vector(f, 'i', '>')
             # print 'Level %i : '%Lev, iNOLL
             # print 'offset after level record:',f.tell()
-            iOct = iHOLL[Lev] - 1
             nLevel = iNOLL[Lev]
-            nLevCells = nLevel * nchild
             ntot = ntot + nLevel
 
             # Skip all the oct hierarchy data
@@ -834,11 +829,9 @@
 
     def _read_amr_root(self, oct_handler):
         self.level_offsets
-        f = open(self.ds._file_amr, "rb")
         # add the root *cell* not *oct* mesh
         root_octs_side = self.ds.domain_dimensions[0]/2
         NX = np.ones(3)*root_octs_side
-        octs_side = NX*2 # Level == 0
         LE = np.array([0.0, 0.0, 0.0], dtype='float64')
         RE = np.array([1.0, 1.0, 1.0], dtype='float64')
         root_dx = (RE - LE) / NX
@@ -849,7 +842,7 @@
                            LL[1]:RL[1]:NX[1]*1j,
                            LL[2]:RL[2]:NX[2]*1j]
         root_fc = np.vstack([p.ravel() for p in root_fc]).T
-        nocts_check = oct_handler.add(self.domain_id, 0, root_fc)
+        oct_handler.add(self.domain_id, 0, root_fc)
         assert(oct_handler.nocts == root_fc.shape[0])
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
                     root_octs_side**3, 0, oct_handler.nocts)

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -13,13 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
 from yt.fields.field_info_container import \
     FieldInfoContainer
-from yt.units.yt_array import \
-    YTArray
-from yt.frontends.art.definitions import *
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
@@ -68,7 +63,7 @@
             tr *= data.ds.parameters['wmu'] * data.ds.parameters['Om0']
             tr *= (data.ds.parameters['gamma'] - 1.)
             tr /= data.ds.parameters['aexpn']**2
-            return  tr * data['art', 'GasEnergy'] / data['art', 'Density']
+            return tr * data['art', 'GasEnergy'] / data['art', 'Density']
         self.add_field(('gas', 'temperature'),
                        function=_temperature, 
                        units='K')
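
The hunk above is only whitespace cleanup, but the _temperature
function shows yt's derived-field pattern in miniature: a function of
(field, data) registered together with its units. A generic, hedged
version of the same pattern (the field name here is made up):

    import yt

    def _density_squared(field, data):
        # Same (field, data) signature as _temperature above.
        return data["gas", "density"]**2

    yt.add_field(("gas", "density_squared"), function=_density_squared,
                 units="g**2/cm**6")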

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -15,24 +15,30 @@
 
 
 import numpy as np
-import struct
 import os
 import os.path
 import sys
+
+from collections import defaultdict
+
 if sys.version_info >= (3,0,0):
     long = int
-    
-from yt.funcs import *
+
+from yt.frontends.art.definitions import \
+    particle_star_fields, \
+    particle_fields, \
+    star_struct, \
+    hydro_struct
 from yt.utilities.io_handler import \
     BaseIOHandler
-from yt.utilities.fortran_utils import *
 from yt.utilities.logger import ytLogger as mylog
-from yt.frontends.art.definitions import *
-from yt.utilities.physical_constants import sec_per_year
 from yt.utilities.lib.geometry_utils import compute_morton
-from yt.geometry.oct_container import _ORDER_MAX
-from yt.units.yt_array import YTQuantity
-
+from yt.utilities.fortran_utils import \
+    read_vector, \
+    skip
+from yt.units.yt_array import \
+    YTQuantity, \
+    YTArray
 
 class IOHandlerART(BaseIOHandler):
     _dataset_type = "art"
@@ -80,7 +86,6 @@
         key = (selector, ftype)
         if key in self.masks.keys() and self.caching:
             return self.masks[key]
-        ds = self.ds
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
         mask = selector.select_points(x, y, z, 0.0)
@@ -120,7 +125,7 @@
         tr = {}
         ftype, fname = field
         ptmax = self.ws[-1]
-        pbool, idxa, idxb = _determine_field_size(self.ds, ftype, 
+        pbool, idxa, idxb = _determine_field_size(self.ds, ftype,
                                                   self.ls, ptmax)
         npa = idxb - idxa
         sizes = np.diff(np.concatenate(([0], self.ls)))
@@ -178,7 +183,7 @@
             # dark_matter -- stars are regular matter.
             tr[field] /= self.ds.domain_dimensions.prod()
         if tr == {}:
-            tr = dict((f, np.array([])) for f in fields)
+            tr = dict((f, np.array([])) for f in [field])
         if self.caching:
             self.cache[field] = tr[field]
             return self.cache[field]
@@ -195,7 +200,6 @@
         count = data_file.ds.parameters['lspecies'][-1]
         DLE = data_file.ds.domain_left_edge
         DRE = data_file.ds.domain_right_edge
-        dx = (DRE - DLE) / 2**_ORDER_MAX
         with open(data_file.filename, "rb") as f:
             # The first total_particles * 3 values are positions
             pp = np.fromfile(f, dtype = '>f4', count = totcount*3)
@@ -209,7 +213,6 @@
 
     def _identify_fields(self, domain):
         field_list = []
-        tp = domain.total_particles
         self.particle_field_list = [f for f in particle_fields]
         for ptype in self.ds.particle_types_raw:
             for pfield in self.particle_field_list:
@@ -225,7 +228,7 @@
         tr = {}
         ftype, fname = field
         ptmax = self.ws[-1]
-        pbool, idxa, idxb = _determine_field_size(self.ds, ftype, 
+        pbool, idxa, idxb = _determine_field_size(self.ds, ftype,
                                                   self.ls, ptmax)
         npa = idxb - idxa
         sizes = np.diff(np.concatenate(([0], self.ls)))
@@ -258,17 +261,6 @@
                     data[a: a + size] = i
                     a += size
             tr[field] = data
-        if fname == "particle_creation_time":
-            self.tb, self.ages, data = interpolate_ages(
-                tr[field][-nstars:],
-                self.file_stars,
-                self.tb,
-                self.ages,
-                self.ds.current_time)
-            temp = tr.get(field, np.zeros(npa, 'f8'))
-            temp[-nstars:] = data
-            tr[field] = temp
-            del data
         # We check again, after it's been filled
         if fname.startswith("particle_mass"):
             # We now divide by NGrid in order to make this match up.  Note that
@@ -356,7 +348,6 @@
     # ioct always represents the index of the next variable
     # not the current, so shift forward one index
     # the last index isn't used
-    ioctso = iocts.copy()
     iocts[1:] = iocts[:-1]  # shift
     iocts = iocts[:nLevel]  # chop off the last, unused, index
     iocts[0] = iOct  # starting value
@@ -400,11 +391,11 @@
     # Posy   = d_x * (iOctPs(2,iO) + sign ( id , idelta(j,2) ))
     # Posz   = d_x * (iOctPs(3,iO) + sign ( id , idelta(j,3) ))
     # idelta = [[-1,  1, -1,  1, -1,  1, -1,  1],
-              #[-1, -1,  1,  1, -1, -1,  1,  1],
-              #[-1, -1, -1, -1,  1,  1,  1,  1]]
+    #           [-1, -1,  1,  1, -1, -1,  1,  1],
+    #           [-1, -1, -1, -1,  1,  1,  1,  1]]
     # idelta = np.array(idelta)
     # if ncell0 is None:
-        # ncell0 = coarse_grid**3
+    #     ncell0 = coarse_grid**3
     # nchild = 8
     # ndim = 3
     # nshift = nchild -1
@@ -424,15 +415,13 @@
     f.seek(pos)
     return unitary_center, fl, iocts, nLevel, root_level
 
-def get_ranges(skip, count, field, words=6, real_size=4, np_per_page=4096**2, 
+def get_ranges(skip, count, field, words=6, real_size=4, np_per_page=4096**2,
                   num_pages=1):
     #translate every particle index into a file position ranges
     ranges = []
     arr_size = np_per_page * real_size
-    page_size = words * np_per_page * real_size
     idxa, idxb = 0, 0
     posa, posb = 0, 0
-    left = count
     for page in range(num_pages):
         idxb += np_per_page
         for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
@@ -462,7 +451,7 @@
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
-    kwargs = dict(words=words, real_size=real_size, 
+    kwargs = dict(words=words, real_size=real_size,
                   np_per_page=np_per_page, num_pages=num_pages)
     arrs = []
     for field in fields:
@@ -495,7 +484,6 @@
 
 def _read_child_mask_level(f, level_child_offsets, level, nLevel, nhydro_vars):
     f.seek(level_child_offsets[level])
-    nvals = nLevel * (nhydro_vars + 6)  # 2 vars, 2 pads
     ioctch = np.zeros(nLevel, dtype='uint8')
     idc = np.zeros(nLevel, dtype='int32')
 
@@ -639,8 +627,6 @@
         return a2t(b2a(tb))
     if len(tb) < n:
         n = len(tb)
-    age_min = a2t(b2a(tb.max(), **kwargs), **kwargs)
-    age_max = a2t(b2a(tb.min(), **kwargs), **kwargs)
     tbs = -1.*np.logspace(np.log10(-tb.min()),
                           np.log10(-tb.max()), n)
     ages = []

diff -r 5af81f44829d344784ea71d1e3bbd6ecf0d86050 -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
-import setuptools
-import os, sys, os.path
+
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/915fc24899ac/
Changeset:   915fc24899ac
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 16:13:04+00:00
Summary:     Removing profile kwarg from PhasePlot.
Affected #:  1 file

diff -r 7d263b8427ff2f415133a4f8b9b9464715fcd783 -r 915fc24899ac3e161641c037d04cf0cd043060de yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -682,10 +682,6 @@
     fractional : If True the profile values are divided by the sum of all 
         the profile data such that the profile represents a probability 
         distribution function.
-    profile : profile object
-        If not None, a profile object created with 
-        `yt.data_objects.profiles.create_profile`.
-        Default: None.
     fontsize: int
         Font size for all text in the plot.
         Default: 18.
@@ -717,21 +713,20 @@
 
     def __init__(self, data_source, x_field, y_field, z_fields,
                  weight_field="cell_mass", x_bins=128, y_bins=128,
-                 accumulation=False, fractional=False, profile=None,
+                 accumulation=False, fractional=False,
                  fontsize=18, figure_size=8.0):
 
-        if profile is None:
-            if isinstance(data_source.ds, YTProfileDataset):
-                profile = data_source.ds.profile
-            else:
-                profile = create_profile(
-                    data_source,
-                    [x_field, y_field],
-                    ensure_list(z_fields),
-                    n_bins=[x_bins, y_bins],
-                    weight_field=weight_field,
-                    accumulation=accumulation,
-                    fractional=fractional)
+        if isinstance(data_source.ds, YTProfileDataset):
+            profile = data_source.ds.profile
+        else:
+            profile = create_profile(
+                data_source,
+                [x_field, y_field],
+                ensure_list(z_fields),
+                n_bins=[x_bins, y_bins],
+                weight_field=weight_field,
+                accumulation=accumulation,
+                fractional=fractional)
 
         type(self)._initialize_instance(self, data_source, profile, fontsize,
                                         figure_size)
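
With the profile keyword gone, PhasePlot always builds its profile
internally from the binning keywords (or reuses the one attached to a
YTProfileDataset). A hedged usage sketch, using the enzo_tiny_cosmology
sample from the tests later in this digest:

    import yt

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()
    # The constructor now derives the 2D profile itself; a pre-built
    # profile object can no longer be passed in.
    plot = yt.PhasePlot(ad, "density", "temperature", ["cell_mass"],
                        weight_field=None, x_bins=128, y_bins=128)
    plot.save()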


https://bitbucket.org/yt_analysis/yt/commits/d85c3f4e94ce/
Changeset:   d85c3f4e94ce
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 16:38:22+00:00
Summary:     Adding a couple more imports.
Affected #:  2 files

diff -r 915fc24899ac3e161641c037d04cf0cd043060de -r d85c3f4e94ce597cb31ff843fe44ca00fa4c0315 yt/frontends/ytdata/api.py
--- a/yt/frontends/ytdata/api.py
+++ b/yt/frontends/ytdata/api.py
@@ -26,7 +26,10 @@
     YTProfileDataset
 
 from .io import \
-    IOHandlerYTDataContainerHDF5
+    IOHandlerYTDataContainerHDF5, \
+    IOHandlerYTGridHDF5, \
+    IOHandlerYTSpatialPlotHDF5, \
+    IOHandlerYTNonspatialhdf5
 
 from .fields import \
     YTDataContainerFieldInfo, \

diff -r 915fc24899ac3e161641c037d04cf0cd043060de -r d85c3f4e94ce597cb31ff843fe44ca00fa4c0315 yt/frontends/ytdata/io.py
--- a/yt/frontends/ytdata/io.py
+++ b/yt/frontends/ytdata/io.py
@@ -268,7 +268,7 @@
                                    for field in f[ptype]]))
         return fields, units
 
-class IOHandlerSpatialPlotHDF5(IOHandlerYTDataContainerHDF5):
+class IOHandlerYTSpatialPlotHDF5(IOHandlerYTDataContainerHDF5):
     _dataset_type = "ytspatialplot_hdf5"
 
     def _read_particle_coords(self, chunks, ptf):


https://bitbucket.org/yt_analysis/yt/commits/e9f7f140d82e/
Changeset:   e9f7f140d82e
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 16:38:49+00:00
Summary:     Adding to reference docs.
Affected #:  1 file

diff -r d85c3f4e94ce597cb31ff843fe44ca00fa4c0315 -r e9f7f140d82eb2939a1cf3db0038f9cc86df9027 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -72,6 +72,7 @@
 .. autosummary::
    :toctree: generated/
 
+   ~yt.data_objects.data_containers.YTDataContainer
    ~yt.data_objects.data_containers.YTSelectionContainer
    ~yt.data_objects.data_containers.YTSelectionContainer0D
    ~yt.data_objects.data_containers.YTSelectionContainer1D
@@ -383,6 +384,28 @@
    ~yt.frontends.stream.io.IOHandlerStreamOctree
    ~yt.frontends.stream.io.StreamParticleIOHandler
 
+ytdata
+^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.ytdata.data_structures.YTDataContainerDataset
+   ~yt.frontends.ytdata.data_structures.YTSpatialPlotDataset
+   ~yt.frontends.ytdata.data_structures.YTGridDataset
+   ~yt.frontends.ytdata.data_structures.YTGridHierarchy
+   ~yt.frontends.ytdata.data_structures.YTGrid
+   ~yt.frontends.ytdata.data_structures.YTNonspatialDataset
+   ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
+   ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
+   ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
+   ~yt.frontends.ytdata.fields.YTGridFieldInfo
+   ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTGridHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTSpatialPlotHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTNonspatialhdf5
+
 Loading Data
 ------------
 
@@ -416,7 +439,6 @@
    ~yt.data_objects.profiles.Profile2D
    ~yt.data_objects.profiles.Profile3D
    ~yt.data_objects.profiles.ParticleProfile
-   ~yt.data_objects.profiles.create_profile
 
 .. _halo_analysis_ref:
 
@@ -739,8 +761,12 @@
    :toctree: generated/
 
    ~yt.convenience.load
+   ~yt.frontends.ytdata.utilities.save_as_dataset
    ~yt.data_objects.static_output.Dataset.all_data
    ~yt.data_objects.static_output.Dataset.box
+   ~yt.data_objects.static_output.Dataset.disk
+   ~yt.data_objects.static_output.Dataset.ray
+   ~yt.data_objects.static_output.Dataset.sphere
    ~yt.funcs.deprecate
    ~yt.funcs.ensure_list
    ~yt.funcs.get_pbar


https://bitbucket.org/yt_analysis/yt/commits/62efba1a94f0/
Changeset:   62efba1a94f0
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 16:41:25+00:00
Summary:     Never meant to get rid of that entry.
Affected #:  1 file

diff -r e9f7f140d82eb2939a1cf3db0038f9cc86df9027 -r 62efba1a94f0ee32fc929b121fb69cb5f2bce200 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -439,6 +439,7 @@
    ~yt.data_objects.profiles.Profile2D
    ~yt.data_objects.profiles.Profile3D
    ~yt.data_objects.profiles.ParticleProfile
+   ~yt.data_objects.profiles.create_profile
 
 .. _halo_analysis_ref:
 


https://bitbucket.org/yt_analysis/yt/commits/754e08c17816/
Changeset:   754e08c17816
Branch:      yt
User:        brittonsmith
Date:        2015-10-23 21:09:56+00:00
Summary:     On second thought, we don't want those in there.
Affected #:  1 file

diff -r 62efba1a94f0ee32fc929b121fb69cb5f2bce200 -r 754e08c17816db382304107a0d4b80af017f99c9 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -765,9 +765,6 @@
    ~yt.frontends.ytdata.utilities.save_as_dataset
    ~yt.data_objects.static_output.Dataset.all_data
    ~yt.data_objects.static_output.Dataset.box
-   ~yt.data_objects.static_output.Dataset.disk
-   ~yt.data_objects.static_output.Dataset.ray
-   ~yt.data_objects.static_output.Dataset.sphere
    ~yt.funcs.deprecate
    ~yt.funcs.ensure_list
    ~yt.funcs.get_pbar


https://bitbucket.org/yt_analysis/yt/commits/5846d4353d09/
Changeset:   5846d4353d09
Branch:      yt
User:        brittonsmith
Date:        2015-10-27 12:22:54+00:00
Summary:     Adding cell_volume and cell_mass fields for data container datasets.
Affected #:  1 file

diff -r 754e08c17816db382304107a0d4b80af017f99c9 -r 5846d4353d098533086a31a87dde032500df3b2c yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -48,6 +48,31 @@
         (v_units, "particle_velocity_z"),
     )
 
+    def __init__(self, ds, field_list):
+        super(YTDataContainerFieldInfo, self).__init__(ds, field_list)
+        self.add_fake_grid_fields()
+
+    def add_fake_grid_fields(self):
+        """
+        Add cell volume and mass fields that use the dx, dy, and dz
+        fields that come with the dataset instead of the index fields
+        which correspond to the oct tree.  We need to do this for now
+        since we're treating the grid data like particles until we
+        implement exporting AMR hierarchies.
+        """
+
+        def _cell_volume(field, data):
+            return data["grid", "dx"] * \
+              data["grid", "dy"] * \
+              data["grid", "dz"]
+        self.add_field(("grid", "cell_volume"), function=_cell_volume,
+                       units="cm**3", particle_type=True)
+
+        def _cell_mass(field, data):
+            return data["grid", "density"] * data["grid", "cell_volume"]
+        self.add_field(("grid", "cell_mass"), function=_cell_mass,
+                       units="g", particle_type=True)
+
 class YTGridFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )
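
The two fields above rebuild cell geometry from the stored dx, dy, and
dz columns, since exported grid data is treated as particles for now.
A hedged sketch of what that enables once a saved container is
reloaded; the filename is hypothetical:

    import yt

    ds = yt.load("DD0046_sphere.h5")  # e.g. output of save_as_dataset
    ad = ds.all_data()
    # cell_volume and cell_mass are synthesized from the saved
    # dx/dy/dz and density columns rather than from index fields.
    print(ad["grid", "cell_volume"].sum())
    print(ad["grid", "cell_mass"].sum().in_units("Msun"))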


https://bitbucket.org/yt_analysis/yt/commits/6aff260d8a6d/
Changeset:   6aff260d8a6d
Branch:      yt
User:        brittonsmith
Date:        2015-10-27 13:06:23+00:00
Summary:     Adding a patch for yt data container datasets to override the cell_mass field.
Affected #:  1 file

diff -r 5846d4353d098533086a31a87dde032500df3b2c -r 6aff260d8a6de0f6554d6b22ef84a4767758036d yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -101,9 +101,18 @@
         self.field_info.setup_extra_union_fields()
         mylog.info("Loading field plugins.")
         self.field_info.load_all_plugins()
+
+        self._setup_override_fields()
+
         deps, unloaded = self.field_info.check_derived_fields()
         self.field_dependencies.update(deps)
 
+    def _setup_gas_alias(self):
+        pass
+
+    def _setup_override_fields(self):
+        pass
+
     def _set_code_unit_attributes(self):
         attrs = ('length_unit', 'mass_unit', 'time_unit',
                  'velocity_unit', 'magnetic_unit')
@@ -161,6 +170,16 @@
             pu = ParticleUnion("gas", ["grid"])
             self.add_particle_union(pu)
 
+    def _setup_override_fields(self):
+        """
+        Override some derived fields to use frontend-specific fields.
+        We need to do this because we are treating grid data like particles.
+        This will be fixed eventually when grid data can be exported properly.
+        """
+
+        del self.field_info[("gas", "cell_mass")]
+        self.field_info.alias(("gas", "cell_mass"), ("grid", "cell_mass"))
+
     @property
     def data(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/ded8366fcde5/
Changeset:   ded8366fcde5
Branch:      yt
User:        jzuhone
Date:        2015-10-25 17:30:23+00:00
Summary:     Fixing Python 3-related bugs
Affected #:  2 files

diff -r 754e08c17816db382304107a0d4b80af017f99c9 -r ded8366fcde5f18312dce7adb0fa59a0867e4583 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -527,7 +527,7 @@
         for f in [f for f in self._container_fields + tds_fields \
                   if f not in data]:
             data[f] = self[f]
-        data_fields = data.keys()
+        data_fields = list(data.keys())
 
         need_grid_positions = False
         need_particle_positions = False
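
The list() wrapper is the actual Python 3 fix here: dict.keys() now
returns a live view, which breaks code that mutates the dict while
iterating (or that expects a list). A standalone illustration:

    data = {"density": 1.0, "temperature": 2.0}
    data_fields = list(data.keys())  # snapshot of the keys
    for f in data_fields:
        # Safe: we loop over the snapshot while growing the dict.
        # Iterating data.keys() directly here would raise
        # "RuntimeError: dictionary changed size during iteration".
        data[f + "_units"] = "cgs"
    print(sorted(data))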

diff -r 754e08c17816db382304107a0d4b80af017f99c9 -r ded8366fcde5f18312dce7adb0fa59a0867e4583 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -221,4 +221,8 @@
     if hasattr(val, "units"):
         val = val.in_cgs()
         fh.attrs["%s_units" % attr] = str(val.units)
+    # The following is a crappy workaround
+    # for h5py in Python 3
+    if attr is 'con_args':
+        val = np.array(val, dtype='|S9')
     fh.attrs[str(attr)] = val


https://bitbucket.org/yt_analysis/yt/commits/ebaf5593d215/
Changeset:   ebaf5593d215
Branch:      yt
User:        jzuhone
Date:        2015-10-26 20:34:51+00:00
Summary:     Some work on getting the tests up and running--not there yet
Affected #:  1 file

diff -r ded8366fcde5f18312dce7adb0fa59a0867e4583 -r ebaf5593d2154e93e9f3023ca79a8138ee36fb3a yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -20,7 +20,8 @@
     YTSpatialPlotDataset, \
     YTGridDataset, \
     YTNonspatialDataset, \
-    YTProfileDataset
+    YTProfileDataset, \
+    save_as_dataset
 from yt.testing import \
     assert_allclose_units, \
     assert_equal
@@ -28,6 +29,15 @@
     requires_ds, \
     data_dir_load, \
     AnswerTestingTest
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+from yt.data_objects.api import \
+    create_profile
+import numpy as np
+import tempfile
+import os
+import shutil
 
 class YTDataFieldTest(AnswerTestingTest):
     _type_name = "YTDataTest"
@@ -67,6 +77,9 @@
 enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
 @requires_ds(enzotiny)
 def test_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
@@ -74,9 +87,14 @@
     assert isinstance(sphere_ds, YTDataContainerDataset)
     yield YTDataFieldTest(fn, ("grid", "density"))
     yield YTDataFieldTest(fn, ("all", "particle_mass"))
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_grid_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
     fn = cg.save_as_dataset(fields=["density", "particle_mass"])
@@ -91,22 +109,32 @@
     frb_ds = load(fn)
     assert isinstance(frb_ds, YTGridDataset)
     yield YTDataFieldTest(fn, "density", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_spatial_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     proj = ds.proj("density", "x", weight_field="density")
     fn = proj.save_as_dataset()
-    proj_ds = yt.load(fn)
+    proj_ds = load(fn)
     assert isinstance(proj_ds, YTSpatialPlotDataset)
     yield YTDataFieldTest(fn, ("grid", "density"), geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_profile_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
-
-    profile_1d = yt.create_profile(ad, "density", "temperature",
-                               weight_field="cell_mass")
+    ad = ds.all_data()
+    profile_1d = create_profile(ad, "density", "temperature",
+                                weight_field="cell_mass")
     fn = profile_1d.save_as_dataset()
     prof_1d_ds = load(fn)
     assert isinstance(prof_1d_ds, YTProfileDataset)
@@ -114,35 +142,42 @@
     yield YTDataFieldTest(fn, "x", geometric=False)
     yield YTDataFieldTest(fn, "density", geometric=False)
 
-    profile_2d = yt.create_profile(ad, ["density", "temperature"],
+    profile_2d = create_profile(ad, ["density", "temperature"],
                                "cell_mass", weight_field=None,
                                n_bins=(128, 128))
     fn = profile_2d.save_as_dataset()
-    prof_2d_ds = yt.load(fn)
+    prof_2d_ds = load(fn)
     assert isinstance(prof_2d_ds, YTProfileDataset)
     yield YTDataFieldTest(fn, "density", geometric=False)
     yield YTDataFieldTest(fn, "x", geometric=False)
     yield YTDataFieldTest(fn, "temperature", geometric=False)
     yield YTDataFieldTest(fn, "y", geometric=False)
     yield YTDataFieldTest(fn, "cell_mass", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_nonspatial_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     region = ds.box([0.25]*3, [0.75]*3)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     my_data = {}
     my_data["region_density"] = region["density"]
     my_data["sphere_density"] = sphere["density"]
-    fn = yt.save_as_dataset(ds, "test_data.h5", my_data)
-    array_ds = yt.load(fn)
+    fn = save_as_dataset(ds, "test_data.h5", my_data)
+    array_ds = load(fn)
     assert isinstance(array_ds, YTNonspatialDataset)
     yield YTDataFieldTest(fn, "region_density", geometric=False)
     yield YTDataFieldTest(fn, "sphere_density", geometric=False)
 
-    my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3")}
-    fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
-    fn = yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
-    new_ds = yt.load(fn)
+    my_data = {"density": YTArray(np.random.random(10), "g/cm**3")}
+    fake_ds = {"current_time": YTQuantity(10, "Myr")}
+    fn = save_as_dataset(fake_ds, "random_data.h5", my_data)
+    new_ds = load(fn)
     assert isinstance(new_ds, YTNonspatialDataset)
     yield YTDataFieldTest(fn, "density", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
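
All of these tests now follow the same sandboxing pattern: create a
scratch directory, chdir into it so save_as_dataset writes there, then
restore the working directory and remove the scratch space. The bare
pattern, independent of yt:

    import os
    import shutil
    import tempfile

    tmpdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tmpdir)
    try:
        # Stand-in for the save_as_dataset calls in the tests above.
        open("test_data.h5", "w").close()
    finally:
        os.chdir(curdir)
        shutil.rmtree(tmpdir)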


https://bitbucket.org/yt_analysis/yt/commits/55bf5e28d238/
Changeset:   55bf5e28d238
Branch:      yt
User:        jzuhone
Date:        2015-10-26 21:01:41+00:00
Summary:     Better way to handle these unicode strings
Affected #:  1 file

diff -r ebaf5593d2154e93e9f3023ca79a8138ee36fb3a -r 55bf5e28d2384e3871ecef3fd526c1138367fc8b yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -17,6 +17,7 @@
 import h5py
 import numpy as np
 
+from yt.funcs import iterable
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.logger import \
@@ -221,8 +222,10 @@
     if hasattr(val, "units"):
         val = val.in_cgs()
         fh.attrs["%s_units" % attr] = str(val.units)
-    # The following is a crappy workaround
-    # for h5py in Python 3
-    if attr is 'con_args':
-        val = np.array(val, dtype='|S9')
+    # The following is a crappy workaround for getting
+    # Unicode strings into HDF5 attributes in Python 3
+    if iterable(val):
+        val = np.array(val)
+        if val.dtype.kind == 'U':
+            val = val.astype('|S40')
     fh.attrs[str(attr)] = val
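
The conversion above is needed because h5py cannot store NumPy unicode
('U' kind) arrays as HDF5 attributes under Python 3, while fixed-width
byte strings work on both interpreters. A standalone illustration; the
filename is arbitrary:

    import h5py
    import numpy as np

    vals = np.array(["sphere", "center"])  # dtype kind 'U' on Python 3
    with h5py.File("attrs_demo.h5", "w") as fh:
        # Writing the unicode array directly raises a TypeError, so
        # convert to fixed-width byte strings first.
        if vals.dtype.kind == "U":
            vals = vals.astype("|S40")
        fh.attrs["con_args"] = vals

    with h5py.File("attrs_demo.h5", "r") as fh:
        print(fh.attrs["con_args"])  # e.g. [b'sphere' b'center']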


https://bitbucket.org/yt_analysis/yt/commits/49955afe212e/
Changeset:   49955afe212e
Branch:      yt
User:        jzuhone
Date:        2015-10-26 21:34:46+00:00
Summary:     Trying to get _is_valid to work properly on Python 3; not there yet
Affected #:  1 file

diff -r 55bf5e28d2384e3871ecef3fd526c1138367fc8b -r 49955afe212e6f4a0f8cc5e4f7de3168b255d4a5 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -58,6 +58,10 @@
     _h5py as h5py
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
+from yt.fields.field_exceptions import \
+    NeedsGridType
+from yt.data_objects.data_containers import \
+    GenerationInProgress
 
 _grid_data_containers = ["arbitrary_grid",
                          "covering_grid",
@@ -192,14 +196,14 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = f.attrs.get("data_type", b"").decode("utf8")
+            cont_type = f.attrs.get("container_type", b"").decode("utf8")
             if data_type is None:
                 return False
             if data_type in ["yt_light_ray"]:
                 return True
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) not in \
-              _grid_data_containers:
+                cont_type not in _grid_data_containers:
                 return True
         return False
 
@@ -225,10 +229,10 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = f.attrs.get("data_type", b"").decode("utf8")
+            cont_type = f.attrs.get("container_type", b"").decode("utf8")
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) in \
-              ["cutting", "proj", "slice"]:
+                cont_type in ["cutting", "proj", "slice"]:
                 return True
         return False
 
@@ -367,12 +371,12 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = f.attrs.get("data_type", b"").decode("utf8")
+            cont_type = f.attrs.get("container_type", b"").decode("utf8")
             if data_type == "yt_frb":
                 return True
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) in \
-              _grid_data_containers:
+                cont_type in _grid_data_containers:
                 return True
         return False
 
@@ -555,7 +559,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = f.attrs.get("data_type", b"").decode("utf8")
             if data_type == "yt_array_data":
                 return True
         return False
@@ -646,7 +650,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = f.attrs.get("data_type", b"").decode("utf8")
             if data_type == "yt_profile":
                 return True
         return False


https://bitbucket.org/yt_analysis/yt/commits/ed09f24dabe6/
Changeset:   ed09f24dabe6
Branch:      yt
User:        jzuhone
Date:        2015-10-26 22:13:46+00:00
Summary:     More work on the tests
Affected #:  1 file

diff -r 49955afe212e6f4a0f8cc5e4f7de3168b255d4a5 -r ed09f24dabe694256685adc08848e10fb8880675 yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -59,8 +59,8 @@
             obj = self.ds.all_data()
         else:
             obj = self.ds.data
-        num_e = obj[field].size
-        avg = obj[field].mean()
+        num_e = obj[self.field].size
+        avg = obj[self.field].mean()
         return np.array([num_e, avg])
 
     def compare(self, new_result, old_result):
@@ -83,10 +83,11 @@
     ds = data_dir_load(enzotiny)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
-    sphere_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    sphere_ds = load(full_fn)
     assert isinstance(sphere_ds, YTDataContainerDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"))
-    yield YTDataFieldTest(fn, ("all", "particle_mass"))
+    yield YTDataFieldTest(full_fn, ("grid", "density"))
+    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
@@ -98,17 +99,18 @@
     ds = data_dir_load(enzotiny)
     cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
     fn = cg.save_as_dataset(fields=["density", "particle_mass"])
-    cg_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    cg_ds = load(full_fn)
     assert isinstance(cg_ds, YTGridDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"))
-    yield YTDataFieldTest(fn, ("all", "particle_mass"))
 
+    yield YTDataFieldTest(full_fn, ("grid", "density"))
+    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
     my_proj = ds.proj("density", "x", weight_field="density")
     frb = my_proj.to_frb(1.0, (800, 800))
     fn = frb.save_as_dataset(fields=["density"])
     frb_ds = load(fn)
     assert isinstance(frb_ds, YTGridDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
@@ -120,9 +122,10 @@
     ds = data_dir_load(enzotiny)
     proj = ds.proj("density", "x", weight_field="density")
     fn = proj.save_as_dataset()
-    proj_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    proj_ds = load(full_fn)
     assert isinstance(proj_ds, YTSpatialPlotDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"), geometric=False)
+    yield YTDataFieldTest(full_fn, ("grid", "density"), geometric=False)
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
@@ -136,23 +139,25 @@
     profile_1d = create_profile(ad, "density", "temperature",
                                 weight_field="cell_mass")
     fn = profile_1d.save_as_dataset()
-    prof_1d_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    prof_1d_ds = load(full_fn)
     assert isinstance(prof_1d_ds, YTProfileDataset)
-    yield YTDataFieldTest(fn, "temperature", geometric=False)
-    yield YTDataFieldTest(fn, "x", geometric=False)
-    yield YTDataFieldTest(fn, "density", geometric=False)
 
+    yield YTDataFieldTest(full_fn, "temperature", geometric=False)
+    yield YTDataFieldTest(full_fn, "x", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
     profile_2d = create_profile(ad, ["density", "temperature"],
                                "cell_mass", weight_field=None,
                                n_bins=(128, 128))
     fn = profile_2d.save_as_dataset()
-    prof_2d_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    prof_2d_ds = load(full_fn)
     assert isinstance(prof_2d_ds, YTProfileDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
-    yield YTDataFieldTest(fn, "x", geometric=False)
-    yield YTDataFieldTest(fn, "temperature", geometric=False)
-    yield YTDataFieldTest(fn, "y", geometric=False)
-    yield YTDataFieldTest(fn, "cell_mass", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "x", geometric=False)
+    yield YTDataFieldTest(full_fn, "temperature", geometric=False)
+    yield YTDataFieldTest(full_fn, "y", geometric=False)
+    yield YTDataFieldTest(full_fn, "cell_mass", geometric=False)
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
@@ -168,16 +173,18 @@
     my_data["region_density"] = region["density"]
     my_data["sphere_density"] = sphere["density"]
     fn = save_as_dataset(ds, "test_data.h5", my_data)
-    array_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    array_ds = load(full_fn)
     assert isinstance(array_ds, YTNonspatialDataset)
-    yield YTDataFieldTest(fn, "region_density", geometric=False)
-    yield YTDataFieldTest(fn, "sphere_density", geometric=False)
+    yield YTDataFieldTest(full_fn, "region_density", geometric=False)
+    yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)
 
     my_data = {"density": YTArray(np.random.random(10), "g/cm**3")}
     fake_ds = {"current_time": YTQuantity(10, "Myr")}
     fn = save_as_dataset(fake_ds, "random_data.h5", my_data)
-    new_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    new_ds = load(full_fn)
     assert isinstance(new_ds, YTNonspatialDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
     os.chdir(curdir)
     shutil.rmtree(tmpdir)


https://bitbucket.org/yt_analysis/yt/commits/8a4a6ee19037/
Changeset:   8a4a6ee19037
Branch:      yt
User:        jzuhone
Date:        2015-10-26 22:23:17+00:00
Summary:     Tests pass under Python 2 now
Affected #:  1 file

diff -r ed09f24dabe694256685adc08848e10fb8880675 -r 8a4a6ee190376a7e11e9b77458edc63360d2cb01 yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -172,16 +172,18 @@
     my_data = {}
     my_data["region_density"] = region["density"]
     my_data["sphere_density"] = sphere["density"]
-    fn = save_as_dataset(ds, "test_data.h5", my_data)
+    fn = "test_data.h5"
+    save_as_dataset(ds, fn, my_data)
     full_fn = os.path.join(tmpdir, fn)
     array_ds = load(full_fn)
     assert isinstance(array_ds, YTNonspatialDataset)
     yield YTDataFieldTest(full_fn, "region_density", geometric=False)
     yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)
 
-    my_data = {"density": YTArray(np.random.random(10), "g/cm**3")}
+    my_data = {"density": YTArray(np.linspace(1.,20.,10), "g/cm**3")}
     fake_ds = {"current_time": YTQuantity(10, "Myr")}
-    fn = save_as_dataset(fake_ds, "random_data.h5", my_data)
+    fn = "random_data.h5"
+    save_as_dataset(fake_ds, fn, my_data)
     full_fn = os.path.join(tmpdir, fn)
     new_ds = load(full_fn)
     assert isinstance(new_ds, YTNonspatialDataset)


https://bitbucket.org/yt_analysis/yt/commits/4d2c376eef78/
Changeset:   4d2c376eef78
Branch:      yt
User:        jzuhone
Date:        2015-10-27 02:15:16+00:00
Summary:     Tests now work in py2 and py3
Affected #:  1 file

diff -r 8a4a6ee190376a7e11e9b77458edc63360d2cb01 -r 4d2c376eef789a5ac55d23da0e3cc6181f7c1e44 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -67,13 +67,23 @@
                          "covering_grid",
                          "smoothed_covering_grid"]
 
+def parse_h5_attr(f, attr):
+    val = f.attrs.get(attr, None)
+    if isinstance(val, bytes):
+        return val.decode('utf8')
+    else:
+        return val
+
 class YTDataset(Dataset):
     """Base dataset class for all ytdata datasets."""
     def _parse_parameter_file(self):
         self.refine_by = 2
         with h5py.File(self.parameter_filename, "r") as f:
-            self.parameters.update(
-                dict((key, f.attrs[key]) for key in f.attrs.keys()))
+            for key in f.attrs.keys():
+                v = f.attrs[key]
+                if isinstance(v, bytes):
+                    v = v.decode("utf8")
+                self.parameters[key] = v
             self.num_particles = \
               dict([(group, f[group].attrs["num_elements"])
                     for group in f if group != self.default_fluid_type])
@@ -196,8 +206,8 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", b"").decode("utf8")
-            cont_type = f.attrs.get("container_type", b"").decode("utf8")
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type is None:
                 return False
             if data_type in ["yt_light_ray"]:
@@ -229,8 +239,8 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", b"").decode("utf8")
-            cont_type = f.attrs.get("container_type", b"").decode("utf8")
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type == "yt_data_container" and \
                 cont_type in ["cutting", "proj", "slice"]:
                 return True
@@ -371,8 +381,8 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", b"").decode("utf8")
-            cont_type = f.attrs.get("container_type", b"").decode("utf8")
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type == "yt_frb":
                 return True
             if data_type == "yt_data_container" and \
@@ -559,7 +569,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", b"").decode("utf8")
+            data_type = parse_h5_attr(f, "data_type")
             if data_type == "yt_array_data":
                 return True
         return False
@@ -650,7 +660,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", b"").decode("utf8")
+            data_type = parse_h5_attr(f, "data_type")
             if data_type == "yt_profile":
                 return True
         return False
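
Under Python 3, h5py returns string attributes as bytes, which is exactly what
parse_h5_attr above normalizes.  The same pattern in isolation (a sketch; the
filename is illustrative):

    import h5py

    with h5py.File("DD0046_sphere.h5", "r") as f:
        raw = f.attrs.get("data_type", None)
        # bytes under py3, str under py2, None if the attribute is absent
        data_type = raw.decode("utf8") if isinstance(raw, bytes) else raw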


https://bitbucket.org/yt_analysis/yt/commits/0064b22f46f4/
Changeset:   0064b22f46f4
Branch:      yt
User:        jzuhone
Date:        2015-10-27 02:56:41+00:00
Summary:     Don't convert to cgs unless the units are actually made up of at least one code unit
Affected #:  1 file

diff -r 4d2c376eef789a5ac55d23da0e3cc6181f7c1e44 -r 0064b22f46f427b3a8e41e2592ca17470c26d27b yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -129,7 +129,10 @@
             fh.create_group(field_type)
         # for now, let's avoid writing "code" units
         if hasattr(data[field], "units"):
-            data[field].convert_to_cgs()
+            for atom in data[field].units.expr.atoms():
+                if str(atom).startswith("code"):
+                    data[field].convert_to_cgs()
+                    break
         if isinstance(field, tuple):
             field_name = field[1]
         else:
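
The check above walks the sympy expression behind a field's units; standalone,
the same test looks like this (a sketch; the values are illustrative, and with
a real dataset you would build the array via ds.arr so the code units carry
proper conversion factors):

    from yt.units.yt_array import YTArray

    a = YTArray([1.0, 2.0], "code_length")
    # convert only when at least one symbol in the unit expression is a
    # code unit, leaving data already in physical units untouched
    if any(str(atom).startswith("code") for atom in a.units.expr.atoms()):
        a.convert_to_cgs()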


https://bitbucket.org/yt_analysis/yt/commits/adde82086741/
Changeset:   adde82086741
Branch:      yt
User:        jzuhone
Date:        2015-10-27 12:57:27+00:00
Summary:     Catch con_args and convert it to string
Affected #:  1 file

diff -r 0064b22f46f427b3a8e41e2592ca17470c26d27b -r adde82086741a4e4a2a78dee79ffbe61a27f222f yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -83,6 +83,8 @@
                 v = f.attrs[key]
                 if isinstance(v, bytes):
                     v = v.decode("utf8")
+                if key == "con_args":
+                    v = v.astype("str")
                 self.parameters[key] = v
             self.num_particles = \
               dict([(group, f[group].attrs["num_elements"])
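
The con_args handling leans on numpy's dtype conversion; its effect in
isolation (illustrative values):

    import numpy as np

    con_args = np.array([b"center", b"radius"])  # read back as bytes in py3
    con_args = con_args.astype("str")            # unicode: ['center', 'radius']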


https://bitbucket.org/yt_analysis/yt/commits/97685cfc2d55/
Changeset:   97685cfc2d55
Branch:      yt
User:        brittonsmith
Date:        2015-10-27 14:46:30+00:00
Summary:     Merging with John ZuHone's work.
Affected #:  4 files

diff -r 6aff260d8a6de0f6554d6b22ef84a4767758036d -r 97685cfc2d55bc6786a35c817f4e57be9dbf44ff yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -527,7 +527,7 @@
         for f in [f for f in self._container_fields + tds_fields \
                   if f not in data]:
             data[f] = self[f]
-        data_fields = data.keys()
+        data_fields = list(data.keys())
 
         need_grid_positions = False
         need_particle_positions = False

diff -r 6aff260d8a6de0f6554d6b22ef84a4767758036d -r 97685cfc2d55bc6786a35c817f4e57be9dbf44ff yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -58,18 +58,34 @@
     _h5py as h5py
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
+from yt.fields.field_exceptions import \
+    NeedsGridType
+from yt.data_objects.data_containers import \
+    GenerationInProgress
 
 _grid_data_containers = ["abritrary_grid",
                          "covering_grid",
                          "smoothed_covering_grid"]
 
+def parse_h5_attr(f, attr):
+    val = f.attrs.get(attr, None)
+    if isinstance(val, bytes):
+        return val.decode('utf8')
+    else:
+        return val
+
 class YTDataset(Dataset):
     """Base dataset class for all ytdata datasets."""
     def _parse_parameter_file(self):
         self.refine_by = 2
         with h5py.File(self.parameter_filename, "r") as f:
-            self.parameters.update(
-                dict((key, f.attrs[key]) for key in f.attrs.keys()))
+            for key in f.attrs.keys():
+                v = f.attrs[key]
+                if isinstance(v, bytes):
+                    v = v.decode("utf8")
+                if key == "con_args":
+                    v = v.astype("str")
+                self.parameters[key] = v
             self.num_particles = \
               dict([(group, f[group].attrs["num_elements"])
                     for group in f if group != self.default_fluid_type])
@@ -211,14 +227,14 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type is None:
                 return False
             if data_type in ["yt_light_ray"]:
                 return True
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) not in \
-              _grid_data_containers:
+                cont_type not in _grid_data_containers:
                 return True
         return False
 
@@ -244,10 +260,10 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) in \
-              ["cutting", "proj", "slice"]:
+                cont_type in ["cutting", "proj", "slice"]:
                 return True
         return False
 
@@ -386,12 +402,12 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type == "yt_frb":
                 return True
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) in \
-              _grid_data_containers:
+                cont_type in _grid_data_containers:
                 return True
         return False
 
@@ -574,7 +590,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
             if data_type == "yt_array_data":
                 return True
         return False
@@ -665,7 +681,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
             if data_type == "yt_profile":
                 return True
         return False

diff -r 6aff260d8a6de0f6554d6b22ef84a4767758036d -r 97685cfc2d55bc6786a35c817f4e57be9dbf44ff yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -20,7 +20,8 @@
     YTSpatialPlotDataset, \
     YTGridDataset, \
     YTNonspatialDataset, \
-    YTProfileDataset
+    YTProfileDataset, \
+    save_as_dataset
 from yt.testing import \
     assert_allclose_units, \
     assert_equal
@@ -28,6 +29,15 @@
     requires_ds, \
     data_dir_load, \
     AnswerTestingTest
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+from yt.data_objects.api import \
+    create_profile
+import numpy as np
+import tempfile
+import os
+import shutil
 
 class YTDataFieldTest(AnswerTestingTest):
     _type_name = "YTDataTest"
@@ -49,8 +59,8 @@
             obj = self.ds.all_data()
         else:
             obj = self.ds.data
-        num_e = obj[field].size
-        avg = obj[field].mean()
+        num_e = obj[self.field].size
+        avg = obj[self.field].mean()
         return np.array([num_e, avg])
 
     def compare(self, new_result, old_result):
@@ -67,82 +77,116 @@
 enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
 @requires_ds(enzotiny)
 def test_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
-    sphere_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    sphere_ds = load(full_fn)
     assert isinstance(sphere_ds, YTDataContainerDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"))
-    yield YTDataFieldTest(fn, ("all", "particle_mass"))
+    yield YTDataFieldTest(full_fn, ("grid", "density"))
+    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_grid_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
     fn = cg.save_as_dataset(fields=["density", "particle_mass"])
-    cg_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    cg_ds = load(full_fn)
     assert isinstance(cg_ds, YTGridDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"))
-    yield YTDataFieldTest(fn, ("all", "particle_mass"))
 
+    yield YTDataFieldTest(full_fn, ("grid", "density"))
+    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
     my_proj = ds.proj("density", "x", weight_field="density")
     frb = my_proj.to_frb(1.0, (800, 800))
     fn = frb.save_as_dataset(fields=["density"])
     frb_ds = load(fn)
     assert isinstance(frb_ds, YTGridDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_spatial_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     proj = ds.proj("density", "x", weight_field="density")
     fn = proj.save_as_dataset()
-    proj_ds = yt.load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    proj_ds = load(full_fn)
     assert isinstance(proj_ds, YTSpatialPlotDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"), geometric=False)
+    yield YTDataFieldTest(full_fn, ("grid", "density"), geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_profile_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
+    ad = ds.all_data()
+    profile_1d = create_profile(ad, "density", "temperature",
+                                weight_field="cell_mass")
+    fn = profile_1d.save_as_dataset()
+    full_fn = os.path.join(tmpdir, fn)
+    prof_1d_ds = load(full_fn)
+    assert isinstance(prof_1d_ds, YTProfileDataset)
 
-    profile_1d = yt.create_profile(ad, "density", "temperature",
-                               weight_field="cell_mass")
-    fn = profile_1d.save_as_dataset()
-    prof_1d_ds = load(fn)
-    assert isinstance(prof_1d_ds, YTProfileDataset)
-    yield YTDataFieldTest(fn, "temperature", geometric=False)
-    yield YTDataFieldTest(fn, "x", geometric=False)
-    yield YTDataFieldTest(fn, "density", geometric=False)
-
-    profile_2d = yt.create_profile(ad, ["density", "temperature"],
+    yield YTDataFieldTest(full_fn, "temperature", geometric=False)
+    yield YTDataFieldTest(full_fn, "x", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    profile_2d = create_profile(ad, ["density", "temperature"],
                                "cell_mass", weight_field=None,
                                n_bins=(128, 128))
     fn = profile_2d.save_as_dataset()
-    prof_2d_ds = yt.load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    prof_2d_ds = load(full_fn)
     assert isinstance(prof_2d_ds, YTProfileDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
-    yield YTDataFieldTest(fn, "x", geometric=False)
-    yield YTDataFieldTest(fn, "temperature", geometric=False)
-    yield YTDataFieldTest(fn, "y", geometric=False)
-    yield YTDataFieldTest(fn, "cell_mass", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "x", geometric=False)
+    yield YTDataFieldTest(full_fn, "temperature", geometric=False)
+    yield YTDataFieldTest(full_fn, "y", geometric=False)
+    yield YTDataFieldTest(full_fn, "cell_mass", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_nonspatial_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     region = ds.box([0.25]*3, [0.75]*3)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     my_data = {}
     my_data["region_density"] = region["density"]
     my_data["sphere_density"] = sphere["density"]
-    fn = yt.save_as_dataset(ds, "test_data.h5", my_data)
-    array_ds = yt.load(fn)
+    fn = "test_data.h5"
+    save_as_dataset(ds, fn, my_data)
+    full_fn = os.path.join(tmpdir, fn)
+    array_ds = load(full_fn)
     assert isinstance(array_ds, YTNonspatialDataset)
-    yield YTDataFieldTest(fn, "region_density", geometric=False)
-    yield YTDataFieldTest(fn, "sphere_density", geometric=False)
+    yield YTDataFieldTest(full_fn, "region_density", geometric=False)
+    yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)
 
-    my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3")}
-    fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
-    fn = yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
-    new_ds = yt.load(fn)
+    my_data = {"density": YTArray(np.linspace(1.,20.,10), "g/cm**3")}
+    fake_ds = {"current_time": YTQuantity(10, "Myr")}
+    fn = "random_data.h5"
+    save_as_dataset(fake_ds, fn, my_data)
+    full_fn = os.path.join(tmpdir, fn)
+    new_ds = load(full_fn)
     assert isinstance(new_ds, YTNonspatialDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 6aff260d8a6de0f6554d6b22ef84a4767758036d -r 97685cfc2d55bc6786a35c817f4e57be9dbf44ff yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -17,6 +17,7 @@
 import h5py
 import numpy as np
 
+from yt.funcs import iterable
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.logger import \
@@ -128,7 +129,10 @@
             fh.create_group(field_type)
         # for now, let's avoid writing "code" units
         if hasattr(data[field], "units"):
-            data[field].convert_to_cgs()
+            for atom in data[field].units.expr.atoms():
+                if str(atom).startswith("code"):
+                    data[field].convert_to_cgs()
+                    break
         if isinstance(field, tuple):
             field_name = field[1]
         else:
@@ -221,4 +225,10 @@
     if hasattr(val, "units"):
         val = val.in_cgs()
         fh.attrs["%s_units" % attr] = str(val.units)
+    # The following is a crappy workaround for getting
+    # Unicode strings into HDF5 attributes in Python 3
+    if iterable(val):
+        val = np.array(val)
+        if val.dtype.kind == 'U':
+            val = val.astype('|S40')
     fh.attrs[str(attr)] = val
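
The workaround at the end of that hunk sidesteps h5py's trouble with unicode
string arrays as attributes under Python 3; standalone it amounts to (a
sketch; the filename and values are illustrative):

    import numpy as np
    import h5py

    val = np.array(["cutting", "proj", "slice"])
    if val.dtype.kind == "U":         # unicode arrays fail as HDF5 attrs in py3
        val = val.astype("|S40")      # fixed-width bytes round-trip cleanly
    with h5py.File("attrs.h5", "w") as fh:
        fh.attrs["con_args"] = val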


https://bitbucket.org/yt_analysis/yt/commits/eb416b5847db/
Changeset:   eb416b5847db
Branch:      yt
User:        MatthewTurk
Date:        2015-10-29 18:25:10+00:00
Summary:     Adding aliases for index to fluid types
Affected #:  3 files

diff -r 754e08c17816db382304107a0d4b80af017f99c9 -r eb416b5847db9298e4514b02eb0d52580c590dda yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -371,6 +371,7 @@
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:
             self.field_info.setup_particle_fields(ptype)
+        self.field_info.setup_fluid_index_fields()
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))

diff -r 754e08c17816db382304107a0d4b80af017f99c9 -r eb416b5847db9298e4514b02eb0d52580c590dda yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -63,6 +63,16 @@
     def setup_fluid_fields(self):
         pass
 
+    def setup_fluid_index_fields(self):
+        # Now we get all our index types and set up aliases to them
+        if self.ds is None: return
+        index_fields = set([f for _, f in self if _ == "index"])
+        for ftype in self.ds.fluid_types + tuple(self.ds.particle_types_raw):
+            if ftype in ("index", "deposit"): continue
+            if (ftype, f) in self.field_list: continue
+            for f in index_fields:
+                self.alias((ftype, f), ("index", f))
+
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         skip_output_units = ("code_length",)
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):

diff -r 754e08c17816db382304107a0d4b80af017f99c9 -r eb416b5847db9298e4514b02eb0d52580c590dda yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -52,7 +52,7 @@
     create_vector_fields(registry, "velocity", "cm / s", ftype, slice_info)
 
     def _cell_mass(field, data):
-        return data[ftype, "density"] * data["index", "cell_volume"]
+        return data[ftype, "density"] * data[ftype, "cell_volume"]
 
     registry.add_field((ftype, "cell_mass"),
         function=_cell_mass,
@@ -89,11 +89,11 @@
             units = "")
 
     def _courant_time_step(field, data):
-        t1 = data["index", "dx"] / (data[ftype, "sound_speed"]
+        t1 = data[ftype, "dx"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_x"]))
-        t2 = data["index", "dy"] / (data[ftype, "sound_speed"]
+        t2 = data[ftype, "dy"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_y"]))
-        t3 = data["index", "dz"] / (data[ftype, "sound_speed"]
+        t3 = data[ftype, "dz"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_z"]))
         tr = np.minimum(np.minimum(t1, t2), t3)
         return tr
@@ -140,7 +140,7 @@
              units="Zsun")
 
     def _metal_mass(field, data):
-        return data[ftype, "metal_density"] * data["index", "cell_volume"]
+        return data[ftype, "metal_density"] * data[ftype, "cell_volume"]
     registry.add_field((ftype, "metal_mass"),
                        function=_metal_mass,
                        units="g")
@@ -188,7 +188,7 @@
         slice_3dl[axi] = sl_left
         slice_3dr[axi] = sl_right
         def func(field, data):
-            ds = div_fac * data["index", "d%s" % ax]
+            ds = div_fac * data[ftype, "d%s" % ax]
             f  = data[grad_field][slice_3dr]/ds[slice_3d]
             f -= data[grad_field][slice_3dl]/ds[slice_3d]
             new_field = data.ds.arr(np.zeros_like(data[grad_field], dtype=np.float64),
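
In practice the new aliases mean index fields resolve through any fluid type;
roughly (a sketch, reusing the enzo_tiny_cosmology dataset from the tests in
this series):

    import yt

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()
    # ("gas", "dx") is now an alias for ("index", "dx")
    assert (ad["gas", "dx"] == ad["index", "dx"]).all()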


https://bitbucket.org/yt_analysis/yt/commits/a6d7182fe986/
Changeset:   a6d7182fe986
Branch:      yt
User:        brittonsmith
Date:        2015-10-30 15:44:16+00:00
Summary:     Making sure we check the right field names in the loop.
Affected #:  1 file

diff -r eb416b5847db9298e4514b02eb0d52580c590dda -r a6d7182fe986121f3cd4d830a384ec2bab213d59 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -69,8 +69,8 @@
         index_fields = set([f for _, f in self if _ == "index"])
         for ftype in self.ds.fluid_types + tuple(self.ds.particle_types_raw):
             if ftype in ("index", "deposit"): continue
-            if (ftype, f) in self.field_list: continue
             for f in index_fields:
+                if (ftype, f) in self: continue
                 self.alias((ftype, f), ("index", f))
 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):


https://bitbucket.org/yt_analysis/yt/commits/d64af678d1b3/
Changeset:   d64af678d1b3
Branch:      yt
User:        brittonsmith
Date:        2015-10-30 15:44:40+00:00
Summary:     Adding setup of index fields.
Affected #:  1 file

diff -r a6d7182fe986121f3cd4d830a384ec2bab213d59 -r d64af678d1b31ee0867c768c0057e7b7e61c950a yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -93,6 +93,7 @@
             self.field_info.setup_particle_fields(ptype)
 
         self._setup_gas_alias()
+        self.field_info.setup_fluid_index_fields()
 
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")


https://bitbucket.org/yt_analysis/yt/commits/77e3587426b1/
Changeset:   77e3587426b1
Branch:      yt
User:        brittonsmith
Date:        2015-10-30 15:45:33+00:00
Summary:     Merging back with ytdata.
Affected #:  5 files

diff -r d64af678d1b31ee0867c768c0057e7b7e61c950a -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -527,7 +527,7 @@
         for f in [f for f in self._container_fields + tds_fields \
                   if f not in data]:
             data[f] = self[f]
-        data_fields = data.keys()
+        data_fields = list(data.keys())
 
         need_grid_positions = False
         need_particle_positions = False

diff -r d64af678d1b31ee0867c768c0057e7b7e61c950a -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -58,18 +58,34 @@
     _h5py as h5py
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
+from yt.fields.field_exceptions import \
+    NeedsGridType
+from yt.data_objects.data_containers import \
+    GenerationInProgress
 
 _grid_data_containers = ["abritrary_grid",
                          "covering_grid",
                          "smoothed_covering_grid"]
 
+def parse_h5_attr(f, attr):
+    val = f.attrs.get(attr, None)
+    if isinstance(val, bytes):
+        return val.decode('utf8')
+    else:
+        return val
+
 class YTDataset(Dataset):
     """Base dataset class for all ytdata datasets."""
     def _parse_parameter_file(self):
         self.refine_by = 2
         with h5py.File(self.parameter_filename, "r") as f:
-            self.parameters.update(
-                dict((key, f.attrs[key]) for key in f.attrs.keys()))
+            for key in f.attrs.keys():
+                v = f.attrs[key]
+                if isinstance(v, bytes):
+                    v = v.decode("utf8")
+                if key == "con_args":
+                    v = v.astype("str")
+                self.parameters[key] = v
             self.num_particles = \
               dict([(group, f[group].attrs["num_elements"])
                     for group in f if group != self.default_fluid_type])
@@ -102,9 +118,18 @@
         self.field_info.setup_extra_union_fields()
         mylog.info("Loading field plugins.")
         self.field_info.load_all_plugins()
+
+        self._setup_override_fields()
+
         deps, unloaded = self.field_info.check_derived_fields()
         self.field_dependencies.update(deps)
 
+    def _setup_gas_alias(self):
+        pass
+
+    def _setup_override_fields(self):
+        pass
+
     def _set_code_unit_attributes(self):
         attrs = ('length_unit', 'mass_unit', 'time_unit',
                  'velocity_unit', 'magnetic_unit')
@@ -162,6 +187,16 @@
             pu = ParticleUnion("gas", ["grid"])
             self.add_particle_union(pu)
 
+    def _setup_override_fields(self):
+        """
+        Override some derived fields to use frontend-specific fields.
+        We need to do this because we are treating grid data like particles.
+        This will be fixed eventually when grid data can be exported properly.
+        """
+
+        del self.field_info[("gas", "cell_mass")]
+        self.field_info.alias(("gas", "cell_mass"), ("grid", "cell_mass"))
+
     @property
     def data(self):
         """
@@ -193,14 +228,14 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type is None:
                 return False
             if data_type in ["yt_light_ray"]:
                 return True
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) not in \
-              _grid_data_containers:
+                cont_type not in _grid_data_containers:
                 return True
         return False
 
@@ -226,10 +261,10 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) in \
-              ["cutting", "proj", "slice"]:
+                cont_type in ["cutting", "proj", "slice"]:
                 return True
         return False
 
@@ -368,12 +403,12 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
+            cont_type = parse_h5_attr(f, "container_type")
             if data_type == "yt_frb":
                 return True
             if data_type == "yt_data_container" and \
-              f.attrs.get("container_type", None) in \
-              _grid_data_containers:
+                cont_type in _grid_data_containers:
                 return True
         return False
 
@@ -556,7 +591,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
             if data_type == "yt_array_data":
                 return True
         return False
@@ -647,7 +682,7 @@
     def _is_valid(self, *args, **kwargs):
         if not args[0].endswith(".h5"): return False
         with h5py.File(args[0], "r") as f:
-            data_type = f.attrs.get("data_type", None)
+            data_type = parse_h5_attr(f, "data_type")
             if data_type == "yt_profile":
                 return True
         return False

diff -r d64af678d1b31ee0867c768c0057e7b7e61c950a -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -48,6 +48,31 @@
         (v_units, "particle_velocity_z"),
     )
 
+    def __init__(self, ds, field_list):
+        super(YTDataContainerFieldInfo, self).__init__(ds, field_list)
+        self.add_fake_grid_fields()
+
+    def add_fake_grid_fields(self):
+        """
+        Add cell volume and mass fields that use the dx, dy, and dz
+        fields that come with the dataset instead of the index fields
+        which correspond to the oct tree.  We need to do this for now
+        since we're treating the grid data like particles until we
+        implement exporting AMR hierarchies.
+        """
+
+        def _cell_volume(field, data):
+            return data["grid", "dx"] * \
+              data["grid", "dy"] * \
+              data["grid", "dz"]
+        self.add_field(("grid", "cell_volume"), function=_cell_volume,
+                       units="cm**3", particle_type=True)
+
+        def _cell_mass(field, data):
+            return data["grid", "density"] * data["grid", "cell_volume"]
+        self.add_field(("grid", "cell_mass"), function=_cell_mass,
+                       units="g", particle_type=True)
+
 class YTGridFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )

diff -r d64af678d1b31ee0867c768c0057e7b7e61c950a -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 yt/frontends/ytdata/tests/test_outputs.py
--- a/yt/frontends/ytdata/tests/test_outputs.py
+++ b/yt/frontends/ytdata/tests/test_outputs.py
@@ -20,7 +20,8 @@
     YTSpatialPlotDataset, \
     YTGridDataset, \
     YTNonspatialDataset, \
-    YTProfileDataset
+    YTProfileDataset, \
+    save_as_dataset
 from yt.testing import \
     assert_allclose_units, \
     assert_equal
@@ -28,6 +29,15 @@
     requires_ds, \
     data_dir_load, \
     AnswerTestingTest
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
+from yt.data_objects.api import \
+    create_profile
+import numpy as np
+import tempfile
+import os
+import shutil
 
 class YTDataFieldTest(AnswerTestingTest):
     _type_name = "YTDataTest"
@@ -49,8 +59,8 @@
             obj = self.ds.all_data()
         else:
             obj = self.ds.data
-        num_e = obj[field].size
-        avg = obj[field].mean()
+        num_e = obj[self.field].size
+        avg = obj[self.field].mean()
         return np.array([num_e, avg])
 
     def compare(self, new_result, old_result):
@@ -67,82 +77,116 @@
 enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
 @requires_ds(enzotiny)
 def test_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
-    sphere_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    sphere_ds = load(full_fn)
     assert isinstance(sphere_ds, YTDataContainerDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"))
-    yield YTDataFieldTest(fn, ("all", "particle_mass"))
+    yield YTDataFieldTest(full_fn, ("grid", "density"))
+    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_grid_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
     fn = cg.save_as_dataset(fields=["density", "particle_mass"])
-    cg_ds = load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    cg_ds = load(full_fn)
     assert isinstance(cg_ds, YTGridDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"))
-    yield YTDataFieldTest(fn, ("all", "particle_mass"))
 
+    yield YTDataFieldTest(full_fn, ("grid", "density"))
+    yield YTDataFieldTest(full_fn, ("all", "particle_mass"))
     my_proj = ds.proj("density", "x", weight_field="density")
     frb = my_proj.to_frb(1.0, (800, 800))
     fn = frb.save_as_dataset(fields=["density"])
     frb_ds = load(fn)
     assert isinstance(frb_ds, YTGridDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_spatial_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     proj = ds.proj("density", "x", weight_field="density")
     fn = proj.save_as_dataset()
-    proj_ds = yt.load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    proj_ds = load(full_fn)
     assert isinstance(proj_ds, YTSpatialPlotDataset)
-    yield YTDataFieldTest(fn, ("grid", "density"), geometric=False)
+    yield YTDataFieldTest(full_fn, ("grid", "density"), geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_profile_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
+    ad = ds.all_data()
+    profile_1d = create_profile(ad, "density", "temperature",
+                                weight_field="cell_mass")
+    fn = profile_1d.save_as_dataset()
+    full_fn = os.path.join(tmpdir, fn)
+    prof_1d_ds = load(full_fn)
+    assert isinstance(prof_1d_ds, YTProfileDataset)
 
-    profile_1d = yt.create_profile(ad, "density", "temperature",
-                               weight_field="cell_mass")
-    fn = profile_1d.save_as_dataset()
-    prof_1d_ds = load(fn)
-    assert isinstance(prof_1d_ds, YTProfileDataset)
-    yield YTDataFieldTest(fn, "temperature", geometric=False)
-    yield YTDataFieldTest(fn, "x", geometric=False)
-    yield YTDataFieldTest(fn, "density", geometric=False)
-
-    profile_2d = yt.create_profile(ad, ["density", "temperature"],
+    yield YTDataFieldTest(full_fn, "temperature", geometric=False)
+    yield YTDataFieldTest(full_fn, "x", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    profile_2d = create_profile(ad, ["density", "temperature"],
                                "cell_mass", weight_field=None,
                                n_bins=(128, 128))
     fn = profile_2d.save_as_dataset()
-    prof_2d_ds = yt.load(fn)
+    full_fn = os.path.join(tmpdir, fn)
+    prof_2d_ds = load(full_fn)
     assert isinstance(prof_2d_ds, YTProfileDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
-    yield YTDataFieldTest(fn, "x", geometric=False)
-    yield YTDataFieldTest(fn, "temperature", geometric=False)
-    yield YTDataFieldTest(fn, "y", geometric=False)
-    yield YTDataFieldTest(fn, "cell_mass", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "x", geometric=False)
+    yield YTDataFieldTest(full_fn, "temperature", geometric=False)
+    yield YTDataFieldTest(full_fn, "y", geometric=False)
+    yield YTDataFieldTest(full_fn, "cell_mass", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_ds(enzotiny)
 def test_nonspatial_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
     ds = data_dir_load(enzotiny)
     region = ds.box([0.25]*3, [0.75]*3)
     sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
     my_data = {}
     my_data["region_density"] = region["density"]
     my_data["sphere_density"] = sphere["density"]
-    fn = yt.save_as_dataset(ds, "test_data.h5", my_data)
-    array_ds = yt.load(fn)
+    fn = "test_data.h5"
+    save_as_dataset(ds, fn, my_data)
+    full_fn = os.path.join(tmpdir, fn)
+    array_ds = load(full_fn)
     assert isinstance(array_ds, YTNonspatialDataset)
-    yield YTDataFieldTest(fn, "region_density", geometric=False)
-    yield YTDataFieldTest(fn, "sphere_density", geometric=False)
+    yield YTDataFieldTest(full_fn, "region_density", geometric=False)
+    yield YTDataFieldTest(full_fn, "sphere_density", geometric=False)
 
-    my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3")}
-    fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
-    fn = yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
-    new_ds = yt.load(fn)
+    my_data = {"density": YTArray(np.linspace(1.,20.,10), "g/cm**3")}
+    fake_ds = {"current_time": YTQuantity(10, "Myr")}
+    fn = "random_data.h5"
+    save_as_dataset(fake_ds, fn, my_data)
+    full_fn = os.path.join(tmpdir, fn)
+    new_ds = load(full_fn)
     assert isinstance(new_ds, YTNonspatialDataset)
-    yield YTDataFieldTest(fn, "density", geometric=False)
+    yield YTDataFieldTest(full_fn, "density", geometric=False)
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r d64af678d1b31ee0867c768c0057e7b7e61c950a -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 yt/frontends/ytdata/utilities.py
--- a/yt/frontends/ytdata/utilities.py
+++ b/yt/frontends/ytdata/utilities.py
@@ -17,6 +17,7 @@
 import h5py
 import numpy as np
 
+from yt.funcs import iterable
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.logger import \
@@ -128,7 +129,10 @@
             fh.create_group(field_type)
         # for now, let's avoid writing "code" units
         if hasattr(data[field], "units"):
-            data[field].convert_to_cgs()
+            for atom in data[field].units.expr.atoms():
+                if str(atom).startswith("code"):
+                    data[field].convert_to_cgs()
+                    break
         if isinstance(field, tuple):
             field_name = field[1]
         else:
@@ -221,4 +225,10 @@
     if hasattr(val, "units"):
         val = val.in_cgs()
         fh.attrs["%s_units" % attr] = str(val.units)
+    # The following is a crappy workaround for getting
+    # Unicode strings into HDF5 attributes in Python 3
+    if iterable(val):
+        val = np.array(val)
+        if val.dtype.kind == 'U':
+            val = val.astype('|S40')
     fh.attrs[str(attr)] = val


https://bitbucket.org/yt_analysis/yt/commits/f14de51e9252/
Changeset:   f14de51e9252
Branch:      yt
User:        brittonsmith
Date:        2015-10-30 17:03:56+00:00
Summary:     Explicitly aliasing cell_volume field.  Now ('gas', 'cell_mass') works.
Affected #:  2 files

diff -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 -r f14de51e92523929aca16158c20c89ee7ba1b440 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -118,9 +118,6 @@
         self.field_info.setup_extra_union_fields()
         mylog.info("Loading field plugins.")
         self.field_info.load_all_plugins()
-
-        self._setup_override_fields()
-
         deps, unloaded = self.field_info.check_derived_fields()
         self.field_dependencies.update(deps)
 
@@ -162,6 +159,7 @@
     _file_class = YTDataHDF5File
     _field_info_class = YTDataContainerFieldInfo
     _suffix = ".h5"
+    fluid_types = ("grid", "gas", "deposit", "index")
 
     def __init__(self, filename, dataset_type="ytdatacontainer_hdf5",
                  n_ref = 16, over_refine_factor = 1, units_override=None):
@@ -186,16 +184,9 @@
           "gas" not in self.particle_types:
             pu = ParticleUnion("gas", ["grid"])
             self.add_particle_union(pu)
-
-    def _setup_override_fields(self):
-        """
-        Override some derived fields to use frontend-specific fields.
-        We need to do this because we are treating grid data like particles.
-        This will be fixed eventually when grid data can be exported properly.
-        """
-
-        del self.field_info[("gas", "cell_mass")]
-        self.field_info.alias(("gas", "cell_mass"), ("grid", "cell_mass"))
+        # We have to alias this because particle unions only
+        # cover the field_list.
+        self.field_info.alias(("gas", "cell_volume"), ("grid", "cell_volume"))
 
     @property
     def data(self):

diff -r 77e3587426b14d9526123aba4e7c4e1fb4cbf423 -r f14de51e92523929aca16158c20c89ee7ba1b440 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -68,11 +68,6 @@
         self.add_field(("grid", "cell_volume"), function=_cell_volume,
                        units="cm**3", particle_type=True)
 
-        def _cell_mass(field, data):
-            return data["grid", "density"] * data["grid", "cell_volume"]
-        self.add_field(("grid", "cell_mass"), function=_cell_mass,
-                       units="g", particle_type=True)
-
 class YTGridFieldInfo(FieldInfoContainer):
     known_other_fields = (
     )
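
With the explicit alias in place, mass fields work on reloaded containers;
roughly (a sketch; the filename follows the container naming convention
described in the docs below):

    import yt

    sphere_ds = yt.load("DD0046_sphere.h5")
    ad = sphere_ds.all_data()
    # ("gas", "cell_volume") aliases the fake ("grid", "cell_volume"),
    # so ("gas", "cell_mass") = density * cell_volume now resolves
    print(ad["gas", "cell_mass"])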


https://bitbucket.org/yt_analysis/yt/commits/01e16c9fd8a3/
Changeset:   01e16c9fd8a3
Branch:      yt
User:        brittonsmith
Date:        2015-10-31 08:22:00+00:00
Summary:     Adding a field type to position and velocity in obtain_rvec and obtain_rv_vec.
Affected #:  1 file

diff -r f14de51e92523929aca16158c20c89ee7ba1b440 -r 01e16c9fd8a3eefbc466a054f8b399e72dea4ed8 yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -72,7 +72,7 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def obtain_rvec(data):
+def obtain_rvec(data, ftype="gas"):
     # This is just to let the pointers exist and whatnot.  We can't cdef them
     # inside conditionals.
     cdef np.ndarray[np.float64_t, ndim=1] xf
@@ -87,11 +87,11 @@
     cdef int i, j, k
     center = data.get_field_parameter("center")
     c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
-    if len(data['x'].shape) == 1:
+    if len(data[ftype, 'x'].shape) == 1:
         # One dimensional data
-        xf = data['x']
-        yf = data['y']
-        zf = data['z']
+        xf = data[ftype, 'x']
+        yf = data[ftype, 'y']
+        zf = data[ftype, 'z']
         rf = np.empty((3, xf.shape[0]), 'float64')
         for i in range(xf.shape[0]):
             rf[0, i] = xf[i] - c[0]
@@ -100,9 +100,9 @@
         return rf
     else:
         # Three dimensional data
-        xg = data['x']
-        yg = data['y']
-        zg = data['z']
+        xg = data[ftype, 'x']
+        yg = data[ftype, 'y']
+        zg = data[ftype, 'z']
         rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
         for i in range(xg.shape[0]):
             for j in range(xg.shape[1]):
@@ -432,7 +432,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def obtain_rv_vec(data):
+def obtain_rv_vec(data, ftype="gas"):
     # This is just to let the pointers exist and whatnot.  We can't cdef them
     # inside conditionals.
     cdef np.ndarray[np.float64_t, ndim=1] vxf
@@ -449,11 +449,11 @@
     if bulk_velocity == None:
         bulk_velocity = np.zeros(3)
     bv[0] = bulk_velocity[0]; bv[1] = bulk_velocity[1]; bv[2] = bulk_velocity[2]
-    if len(data['velocity_x'].shape) == 1:
+    if len(data[ftype, 'velocity_x'].shape) == 1:
         # One dimensional data
-        vxf = data['velocity_x'].astype("float64")
-        vyf = data['velocity_y'].astype("float64")
-        vzf = data['velocity_z'].astype("float64")
+        vxf = data[ftype, 'velocity_x'].astype("float64")
+        vyf = data[ftype, 'velocity_y'].astype("float64")
+        vzf = data[ftype, 'velocity_z'].astype("float64")
         rvf = np.empty((3, vxf.shape[0]), 'float64')
         for i in range(vxf.shape[0]):
             rvf[0, i] = vxf[i] - bv[0]
@@ -462,9 +462,9 @@
         return rvf
     else:
         # Three dimensional data
-        vxg = data['velocity_x'].astype("float64")
-        vyg = data['velocity_y'].astype("float64")
-        vzg = data['velocity_z'].astype("float64")
+        vxg = data[ftype, 'velocity_x'].astype("float64")
+        vyg = data[ftype, 'velocity_y'].astype("float64")
+        vzg = data[ftype, 'velocity_z'].astype("float64")
         rvg = np.empty((3, vxg.shape[0], vxg.shape[1], vxg.shape[2]), 'float64')
         for i in range(vxg.shape[0]):
             for j in range(vxg.shape[1]):
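
The new ftype argument lets callers point these routines at whichever field
type carries the positions and velocities; for instance (a sketch; the data
object must carry a "center" field parameter, as spheres do):

    import yt
    from yt.utilities.lib.geometry_utils import obtain_rvec

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    sp = ds.sphere(ds.domain_center, (10, "Mpc"))
    rvec = obtain_rvec(sp, ftype="gas")  # positions read as ("gas", "x"), etc.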


https://bitbucket.org/yt_analysis/yt/commits/b7005f9fa277/
Changeset:   b7005f9fa277
Branch:      yt
User:        brittonsmith
Date:        2015-10-31 08:22:22+00:00
Summary:     Adding generic field type to some derived quantities.
Affected #:  1 file

diff -r 01e16c9fd8a3eefbc466a054f8b399e72dea4ed8 -r b7005f9fa27745751d653512c90b4c2ff3a3ea88 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -256,7 +256,8 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", ax] *
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
             vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
@@ -657,7 +658,7 @@
         m = data.ds.quan(0., "g")
         if use_gas:
             e += (data["gas", "kinetic_energy"] *
-                  data["index", "cell_volume"]).sum(dtype=np.float64)
+                  data["gas", "cell_volume"]).sum(dtype=np.float64)
             j += data["gas", "angular_momentum_magnitude"].sum(dtype=np.float64)
             m += data["gas", "cell_mass"].sum(dtype=np.float64)
         if use_particles:
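
Those tuple-qualified lookups matter for datasets whose gas fields live under
a non-default field type; the calling convention is unchanged (a sketch):

    import yt

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
    sp = ds.sphere(ds.domain_center, (10, "Mpc"))
    # internally sums data["gas", ax] * data["gas", "cell_mass"] over xyz
    print(sp.quantities.center_of_mass(use_gas=True, use_particles=True))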


https://bitbucket.org/yt_analysis/yt/commits/7ebc614dd59f/
Changeset:   7ebc614dd59f
Branch:      yt
User:        brittonsmith
Date:        2015-10-31 08:27:00+00:00
Summary:     Only create cell_volume field if it doesn't exist.
Affected #:  1 file

diff -r b7005f9fa27745751d653512c90b4c2ff3a3ea88 -r 7ebc614dd59f457374e2d220e22d9d2edfc7f844 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -61,12 +61,13 @@
         implement exporting AMR hierarchies.
         """
 
-        def _cell_volume(field, data):
-            return data["grid", "dx"] * \
-              data["grid", "dy"] * \
-              data["grid", "dz"]
-        self.add_field(("grid", "cell_volume"), function=_cell_volume,
-                       units="cm**3", particle_type=True)
+        if ("grid", "cell_volume") not in self.field_list:
+            def _cell_volume(field, data):
+                return data["grid", "dx"] * \
+                  data["grid", "dy"] * \
+                  data["grid", "dz"]
+            self.add_field(("grid", "cell_volume"), function=_cell_volume,
+                           units="cm**3", particle_type=True)
 
 class YTGridFieldInfo(FieldInfoContainer):
     known_other_fields = (


https://bitbucket.org/yt_analysis/yt/commits/3914e655c6b1/
Changeset:   3914e655c6b1
Branch:      yt
User:        brittonsmith
Date:        2015-10-31 08:30:29+00:00
Summary:     Removing unnecessary aliases.
Affected #:  1 file

diff -r 7ebc614dd59f457374e2d220e22d9d2edfc7f844 -r 3914e655c6b1bc70f4fc58b5b15286a1ec6a0a07 yt/frontends/ytdata/fields.py
--- a/yt/frontends/ytdata/fields.py
+++ b/yt/frontends/ytdata/fields.py
@@ -30,22 +30,6 @@
     )
 
     known_particle_fields = (
-        ("x", (p_units, ["particle_position_x"], None)),
-        ("y", (p_units, ["particle_position_y"], None)),
-        ("z", (p_units, ["particle_position_z"], None)),
-        ("velocity_x", (v_units, ["particle_velocity_x"], None)),
-        ("velocity_y", (v_units, ["particle_velocity_y"], None)),
-        ("velocity_z", (v_units, ["particle_velocity_z"], None)),
-    )
-
-    # these are extra fields to be created for the "all" particle type
-    extra_union_fields = (
-        (p_units, "particle_position_x"),
-        (p_units, "particle_position_y"),
-        (p_units, "particle_position_z"),
-        (v_units, "particle_velocity_x"),
-        (v_units, "particle_velocity_y"),
-        (v_units, "particle_velocity_z"),
     )
 
     def __init__(self, ds, field_list):


https://bitbucket.org/yt_analysis/yt/commits/785d2b7ed0ae/
Changeset:   785d2b7ed0ae
Branch:      yt
User:        brittonsmith
Date:        2015-10-31 14:37:38+00:00
Summary:     Do not need to alias index fields to particle fields.
Affected #:  1 file

diff -r 3914e655c6b1bc70f4fc58b5b15286a1ec6a0a07 -r 785d2b7ed0aee66246bec0cfcbaae96ba6dbd01d yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -67,7 +67,7 @@
         # Now we get all our index types and set up aliases to them
         if self.ds is None: return
         index_fields = set([f for _, f in self if _ == "index"])
-        for ftype in self.ds.fluid_types + tuple(self.ds.particle_types_raw):
+        for ftype in self.ds.fluid_types:
             if ftype in ("index", "deposit"): continue
             for f in index_fields:
                 if (ftype, f) in self: continue


https://bitbucket.org/yt_analysis/yt/commits/c04772e65e24/
Changeset:   c04772e65e24
Branch:      yt
User:        brittonsmith
Date:        2015-10-31 17:18:30+00:00
Summary:     Making sure the aliased x/dx fields have the same units as the index fields.
Affected #:  1 file

diff -r 785d2b7ed0aee66246bec0cfcbaae96ba6dbd01d -r c04772e65e24336710edf96bf66905909d53c7a3 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -72,6 +72,10 @@
             for f in index_fields:
                 if (ftype, f) in self: continue
                 self.alias((ftype, f), ("index", f))
+                # Different field types have different default units.
+                # We want to make sure the aliased field will have
+                # the same units as the "index" field.
+                self[(ftype, f)].units = self["index", f].units
 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         skip_output_units = ("code_length",)


https://bitbucket.org/yt_analysis/yt/commits/697ca7baf306/
Changeset:   697ca7baf306
Branch:      yt
User:        bwkeller
Date:        2015-11-02 19:32:46+00:00
Summary:     Merged in brittonsmith/yt (pull request #1788)

Adding ytdata frontend
Affected #:  35 files

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -54,10 +54,13 @@
  
 .. code-block:: python
 
-   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.save_as_dataset("my_images.h5", fields=["density","temperature"])
    frb.export_fits("my_images.fits", fields=["density","temperature"],
                    clobber=True, units="kpc")
 
+In the HDF5 case, the created file can be reloaded just like a regular dataset with
+``yt.load`` and will, itself, be a first-class dataset.  For more information on
+this, see :ref:`saving-grid-data-containers`.
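+
+For example, the reload is just another ``yt.load`` call (a sketch using the
+file written above):
+
+.. code-block:: python
+
+   ds_imgs = yt.load("my_images.h5")
+   print (ds_imgs.data["density"])
+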
 In the FITS case, there is an option for setting the ``units`` of the coordinate system in
 the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
 

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -20,5 +20,6 @@
    units/index
    filtering
    generating_processed_data
+   saving_data
    time_series_analysis
    parallel_computation

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -457,69 +457,9 @@
 ---------------------------
 
 Often, when operating interactively or via the scripting interface, it is
-convenient to save an object or multiple objects out to disk and then restart
-the calculation later.  For example, this is useful after clump finding 
-(:ref:`clump_finding`), which can be very time consuming.  
-Typically, the save and load operations are used on 3D data objects.  yt
-has a separate set of serialization operations for 2D objects such as
-projections.
-
-yt will save out objects to disk under the presupposition that the
-construction of the objects is the difficult part, rather than the generation
-of the data -- this means that you can save out an object as a description of
-how to recreate it in space, but not the actual data arrays affiliated with
-that object.  The information that is saved includes the dataset off of
-which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a dataset
-from whatever limited information it has in the save file.
-
-You can save objects to an output file using the function 
-:func:`~yt.data_objects.index.save_object`: 
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
-   sp.save_object("sphere_name", "save_file.cpkl")
-
-This will store the object as ``sphere_name`` in the file
-``save_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  
-
-To re-load an object saved this way, you can use the shelve module directly:
-
-.. code-block:: python
-
-   import yt
-   import shelve
-   ds = yt.load("my_data") 
-   saved_fn = shelve.open("save_file.cpkl")
-   ds, sp = saved_fn["sphere_name"]
-
-Additionally, we can store multiple objects in a single shelve file, so we 
-have to call the sphere by name.
-
-For certain data objects such as projections, serialization can be performed
-automatically if ``serialize`` option is set to ``True`` in :ref:`the
-configuration file <configuration-file>` or set directly in the script:
-
-.. code-block:: python
-
-   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"
-
-.. note:: Use serialization with caution. Enabling serialization means that
-   once a projection of a dataset has been created (and stored in the .yt file
-   in the same directory), any subsequent changes to that dataset will be
-   ignored when attempting to create the same projection. So if you take a
-   density projection of your dataset in the 'x' direction, then somehow tweak
-   that dataset significantly, and take the density projection again, yt will
-   default to finding the original projection and 
-   :ref:`not your new one <faq-old-data>`.
-
-.. note:: It's also possible to use the standard :mod:`cPickle` module for
-          loading and storing objects -- so in theory you could even save a
-          list of objects!
-
-This method works for clumps, as well, and the entire clump index will be
-stored and restored upon load.
+convenient to save an object to disk and then restart the calculation later or
+transfer the data from a container to another filesystem.  This can be
+particularly useful when working with extremely large datasets.  Field data
+can be saved to disk in a format that allows for it to be reloaded just like
+a regular dataset.  For information on how to do this, see
+:ref:`saving-data-containers`.

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 doc/source/analyzing/saving_data.rst
--- /dev/null
+++ b/doc/source/analyzing/saving_data.rst
@@ -0,0 +1,243 @@
+.. _saving_data:
+
+Saving Reloadable Data
+======================
+
+Most of the data loaded into or generated with yt can be saved to a
+format that can be reloaded as a first-class dataset.  This includes
+the following:
+
+  * geometric data containers (regions, spheres, disks, rays, etc.)
+
+  * grid data containers (covering grids, arbitrary grids, fixed
+    resolution buffers)
+
+  * spatial plots (projections, slices, cutting planes)
+
+  * profiles
+
+  * generic array data
+
+In the case of projections, slices, and profiles, reloaded data can be
+used to remake plots.  For information on this, see :ref:`remaking-plots`.
+
+.. _saving-data-containers:
+
+Geometric Data Containers
+-------------------------
+
+Data from geometric data containers can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+   fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+   print (fn)
+
+This function will return the name of the file to which the dataset
+was saved.  The filename will be a combination of the name of the
+original dataset and the type of data container.  Optionally, a
+specific filename can be given with the ``filename`` keyword.  If no
+fields are given, the fields that have previously been queried will
+be saved.
+
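+For example, a minimal sketch combining both keywords (the filename
+here is purely illustrative):
+
+.. code-block:: python
+
+   fn = sphere.save_as_dataset(filename="my_sphere.h5",
+                               fields=["density"])
+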
+The newly created dataset can be loaded like all other supported
+data through ``yt.load``.  Once loaded, field data can be accessed
+through the traditional data containers or through the ``data``
+attribute, which will be a data container configured like the
+original data container used to make the dataset.  Grid data is
+accessed by the ``grid`` data type and particle data is accessed
+with the original particle type.  As with the original dataset, grid
+positions and cell sizes are accessible with, for example,
+("grid", "x") and ("grid", "dx").  Particle positions are
+accessible as (<particle_type>, "particle_position_x").  All original
+simulation parameters are accessible in the ``parameters``
+dictionary, normally associated with all datasets.
+
+.. code-block:: python
+
+   sphere_ds = yt.load("DD0046_sphere.h5")
+
+   # use the original data container
+   print (sphere_ds.data["grid", "density"])
+
+   # create a new data container
+   ad = sphere_ds.all_data()
+
+   # grid data
+   print (ad["grid", "density"])
+   print (ad["grid", "x"])
+   print (ad["grid", "dx"])
+
+   # particle data
+   print (ad["all", "particle_mass"])
+   print (ad["all", "particle_position_x"])
+
+Note that because field data queried from geometric containers is
+returned as unordered 1D arrays, data container datasets are treated,
+effectively, as particle data.  Thus, 3D indexing of grid data from
+these datasets is not possible.
+
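+As a quick check of this (a sketch using the sphere dataset reloaded
+above):
+
+.. code-block:: python
+
+   # field data comes back as a flat array, not a 3D block
+   print (sphere_ds.data["grid", "density"].shape)
+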
+.. _saving-grid-data-containers:
+
+Grid Data Containers
+--------------------
+
+Data containers that return field data as multidimensional arrays
+can be saved so as to preserve this type of access.  This includes
+covering grids, arbitrary grids, and fixed resolution buffers.
+Saving data from these containers works just as with geometric data
+containers.  Once reloaded, field data can still be accessed through
+geometric data containers.
+
+.. code-block:: python
+
+   cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
+   fn = cg.save_as_dataset(fields=["density", "particle_mass"])
+
+   cg_ds = yt.load(fn)
+   ad = cg_ds.all_data()
+   print (ad["grid", "density"])
+
+Multidimensional indexing of field data is also available through
+the ``data`` attribute.
+
+.. code-block:: python
+
+   print (cg_ds.data["grid", "density"])
+
+Fixed resolution buffers work just the same.
+
+.. code-block:: python
+
+   my_proj = ds.proj("density", "x", weight_field="density")
+   frb = my_proj.to_frb(1.0, (800, 800))
+   fn = frb.save_as_dataset(fields=["density"])
+   frb_ds = yt.load(fn)
+   print (frb_ds.data["density"])
+
+.. _saving-spatial-plots:
+
+Spatial Plots
+-------------
+
+Spatial plots, such as projections, slices, and off-axis slices
+(cutting planes) can also be saved and reloaded.
+
+.. code-block:: python
+
+   proj = ds.proj("density", "x", weight_field="density")
+   proj.save_as_dataset()
+
+Once reloaded, they can be handed to their associated plotting
+functions to make images.
+
+.. code-block:: python
+
+   proj_ds = yt.load("DD0046_proj.h5")
+   p = yt.ProjectionPlot(proj_ds, "x", "density",
+                         weight_field="density")
+   p.save()
+
+.. _saving-profile-data:
+
+Profiles
+--------
+
+Profiles created with :func:`~yt.data_objects.profiles.create_profile`,
+:class:`~yt.visualization.profile_plotter.ProfilePlot`, and
+:class:`~yt.visualization.profile_plotter.PhasePlot` can be saved with
+the :func:`~yt.data_objects.profiles.save_as_dataset` function, which
+works just as above.  Profile datasets are a type of non-spatial grid
+dataset.  Geometric selection is not possible, but data can be
+accessed through the ``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+
+   profile_2d = yt.create_profile(ad, ["density", "temperature"],
+                                  "cell_mass", weight_field=None,
+                                  n_bins=(128, 128))
+   profile_2d.save_as_dataset()
+
+   prof_2d_ds = yt.load("DD0046_Profile2D.h5")
+   print (prof_2d_ds.data["cell_mass"])
+
+The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D
+arrays with "x", "y", and "z".
+
+.. code-block:: python
+
+   print (prof_2d_ds.data["x"])
+
+The bin fields can also be returned with the same shape as the profile
+data by accessing them with their original names.  This allows for
+boolean masking of profile data using the bin fields.
+
+.. code-block:: python
+
+   # density is the x bin field
+   print (prof_2d_ds.data["density"])
+
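+As a sketch of such a mask (the density threshold below is purely
+illustrative), a boolean condition on a bin field selects the
+matching profile values:
+
+.. code-block:: python
+
+   # keep only bins above a density threshold
+   dense = prof_2d_ds.data["density"] > prof_2d_ds.quan(1e-28, "g/cm**3")
+   print (prof_2d_ds.data["cell_mass"][dense])
+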
+For 1, 2, and 3D profile datasets, a fake profile object will be
+constructed by accessing the ".profile" attribute.  This is used
+primarily in the case of 1 and 2D profiles to create figures using
+:class:`~yt.visualization.profile_plotter.ProfilePlot` and
+:class:`~yt.visualization.profile_plotter.PhasePlot`.
+
+.. code-block:: python
+
+   p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature",
+                    "cell_mass", weight_field=None)
+   p.save()
+
+.. _saving-array-data:
+
+Generic Array Data
+------------------
+
+Generic arrays can be saved and reloaded as non-spatial data using
+the :func:`~yt.frontends.ytdata.utilities.save_as_dataset` function,
+also available as ``yt.save_as_dataset``.  As with profiles, geometric
+selection is not possible, but the data can be accessed through the
+``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   region = ds.box([0.25]*3, [0.75]*3)
+   sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+   my_data = {}
+   my_data["region_density"] = region["density"]
+   my_data["sphere_density"] = sphere["density"]
+   yt.save_as_dataset(ds, "test_data.h5", my_data)
+
+   array_ds = yt.load("test_data.h5")
+   print (array_ds.data["region_density"])
+   print (array_ds.data["sphere_density"])
+
+Array data can be saved with or without a dataset loaded.  If no
+dataset has been loaded, a fake dataset can be provided as a
+dictionary.
+
+.. notebook-cell::
+
+   import numpy as np
+   import yt
+
+   my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
+              "temperature": yt.YTArray(np.random.random(10), "K")}
+   fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
+   yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
+
+   new_ds = yt.load("random_data.h5")
+   print (new_ds.data["density"])

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -72,6 +72,7 @@
 .. autosummary::
    :toctree: generated/
 
+   ~yt.data_objects.data_containers.YTDataContainer
    ~yt.data_objects.data_containers.YTSelectionContainer
    ~yt.data_objects.data_containers.YTSelectionContainer0D
    ~yt.data_objects.data_containers.YTSelectionContainer1D
@@ -383,6 +384,28 @@
    ~yt.frontends.stream.io.IOHandlerStreamOctree
    ~yt.frontends.stream.io.StreamParticleIOHandler
 
+ytdata
+^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.ytdata.data_structures.YTDataContainerDataset
+   ~yt.frontends.ytdata.data_structures.YTSpatialPlotDataset
+   ~yt.frontends.ytdata.data_structures.YTGridDataset
+   ~yt.frontends.ytdata.data_structures.YTGridHierarchy
+   ~yt.frontends.ytdata.data_structures.YTGrid
+   ~yt.frontends.ytdata.data_structures.YTNonspatialDataset
+   ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
+   ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
+   ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
+   ~yt.frontends.ytdata.fields.YTGridFieldInfo
+   ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTGridHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTSpatialPlotHDF5
+   ~yt.frontends.ytdata.io.IOHandlerYTNonspatialhdf5
+
 Loading Data
 ------------
 
@@ -739,6 +762,7 @@
    :toctree: generated/
 
    ~yt.convenience.load
+   ~yt.frontends.ytdata.utilities.save_as_dataset
    ~yt.data_objects.static_output.Dataset.all_data
    ~yt.data_objects.static_output.Dataset.box
    ~yt.funcs.deprecate

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1284,6 +1284,81 @@
    bananas_Slice_z_kT.eps
    bananas_Slice_z_density.eps
 
+.. _remaking-plots:
+
+Remaking Figures from Plot Datasets
+-----------------------------------
+
+When working with datasets that are too large to be stored locally,
+making figures just right can be cumbersome as it requires continuously
+moving images somewhere they can be viewed.  However, image creation is
+actually a two step process of first creating the projection, slice,
+or profile object, and then converting that object into an actual image.
+Fortunately, the hard part (creating slices, projections, profiles) can
+be separated from the easy part (generating images).  The intermediate
+slice, projection, and profile objects can be saved as reloadable
+datasets, then handed back to the plotting machinery discussed here.
+
+For slices and projections, the savable object is associated with the
+plot object as ``data_source``.  This can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.  For
+more information, see :ref:`saving_data`.
+
+.. code-block:: python
+
+   p = yt.ProjectionPlot(ds, "x", "density",
+                         weight_field="density")
+   fn = p.data_source.save_as_dataset()
+
+This function will optionally take a ``filename`` keyword that follows
+the same logic as discussed above in :ref:`saving_plots`.  The filename
+to which the dataset was written will be returned.
+
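+For instance (the filename here is purely illustrative):
+
+.. code-block:: python
+
+   fn = p.data_source.save_as_dataset(filename="my_projection.h5")
+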
+Once saved, this file can be reloaded completely independently of the
+original dataset and given back to the plot function with the same
+arguments.  One can now continue to tweak the figure to one's liking.
+
+.. code-block:: python
+
+   new_ds = yt.load(fn)
+   new_p = yt.ProjectionPlot(new_ds, "x", "density",
+                             weight_field="density")
+   new_p.save()
+
+The same functionality is available for profile and phase plots.  In
+each case, a special data container, ``data``, is given to the plotting
+functions.
+
+For ``ProfilePlot``:
+
+.. code-block:: python
+
+   ad = ds.all_data()
+   p1 = yt.ProfilePlot(ad, "density", "temperature",
+                       weight_field="cell_mass")
+
+   # note that ProfilePlots can hold a list of profiles
+   fn = p1.profiles[0].save_as_dataset()
+
+   new_ds = yt.load(fn)
+   p2 = yt.ProfilePlot(new_ds.data, "density", "temperature",
+                       weight_field="cell_mass")
+   p2.save()
+
+For ``PhasePlot``:
+
+.. code-block:: python
+
+   ad = ds.all_data()
+   p1 = yt.PhasePlot(ad, "density", "temperature",
+                     "cell_mass", weight_field=None)
+   fn = p1.profile.save_as_dataset()
+
+   new_ds = yt.load(fn)
+   p2 = yt.PhasePlot(new_ds.data, "density", "temperature",
+                     "cell_mass", weight_field=None)
+   p2.save()
+
 .. _eps-writer:
 
 Publication-ready Figures

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -138,6 +138,9 @@
     load_particles, load_hexahedral_mesh, load_octree, \
     hexahedral_connectivity
 
+from yt.frontends.ytdata.api import \
+    save_as_dataset
+
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset
 GadgetStaticOutput = deprecated_class(GadgetDataset)

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -20,6 +20,7 @@
 
 from .absorption_line import tau_profile
 
+from yt.convenience import load
 from yt.funcs import get_pbar, mylog
 from yt.units.yt_array import YTArray, YTQuantity
 from yt.utilities.physical_constants import \
@@ -121,8 +122,8 @@
         Parameters
         ----------
 
-        input_file : string
-           path to input ray data.
+        input_file : string or dataset
+           path to input ray data or a loaded ray dataset
         output_file : optional, string
            path for output file.  File formats are chosen based on the
            filename extension.  ``.h5`` for hdf5, ``.fits`` for fits,
@@ -156,7 +157,6 @@
 
         input_fields = ['dl', 'redshift', 'temperature']
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
-        field_data = {}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
             input_fields.append('redshift_eff')
@@ -167,10 +167,11 @@
                 input_fields.append(feature['field_name'])
                 field_units[feature["field_name"]] = "cm**-3"
 
-        input = h5py.File(input_file, 'r')
-        for field in input_fields:
-            field_data[field] = YTArray(input[field].value, field_units[field])
-        input.close()
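+        # input_file may be a filename or an already-loaded ray dataset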
+        if isinstance(input_file, str):
+            input_ds = load(input_file)
+        else:
+            input_ds = input_file
+        field_data = input_ds.all_data()
 
         self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
@@ -337,6 +338,8 @@
         """
         Write out list of spectral lines.
         """
+        if filename is None:
+            return
         mylog.info("Writing spectral line list: %s." % filename)
         self.spectrum_line_list.sort(key=lambda obj: obj['wavelength'])
         f = open(filename, 'w')

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -13,19 +13,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
     CosmologySplice
 from yt.convenience import \
     load
-from yt.funcs import \
-    mylog
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.logger import \
+    ytLogger as mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only
@@ -48,7 +49,7 @@
     synthetic QSO lines of sight.
 
     Light rays can also be made from single datasets.
-    
+
     Once the LightRay object is set up, use LightRay.make_light_ray to
     begin making rays.  Different randomizations can be created with a
     single object by providing different random seeds to make_light_ray.
@@ -58,17 +59,17 @@
     parameter_filename : string
         The path to the simulation parameter file or dataset.
     simulation_type : optional, string
-        The simulation type.  If None, the first argument is assumed to 
+        The simulation type.  If None, the first argument is assumed to
         refer to a single dataset.
         Default: None
     near_redshift : optional, float
-        The near (lowest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The near (lowest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
-        The far (highest) redshift for a light ray containing multiple 
-        datasets.  Do not use is making a light ray from a single 
+        The far (highest) redshift for a light ray containing multiple
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -98,11 +99,11 @@
         datasets for time series.
         Default: True.
     find_outputs : optional, bool
-        Whether or not to search for datasets in the current 
+        Whether or not to search for datasets in the current
         directory.
         Default: False.
     load_kwargs : optional, dict
-        Optional dictionary of kwargs to be passed to the "load" 
+        Optional dictionary of kwargs to be passed to the "load"
         function, appropriate for use of certain frontends.  E.g.
         Tipsy using "bounding_box"
         Gadget using "unit_base", etc.
@@ -129,8 +130,9 @@
         self.light_ray_solution = []
         self._data = {}
 
-        # Make a light ray from a single, given dataset.        
+        # Make a light ray from a single, given dataset.
         if simulation_type is None:
+            self.simulation_type = simulation_type
             ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
@@ -156,7 +158,7 @@
                                            time_data=time_data,
                                            redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, 
+    def _calculate_light_ray_solution(self, seed=None,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
@@ -172,9 +174,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.array(start_position)
+            self.light_ray_solution[0]['start'] = np.asarray(start_position)
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.array(end_position)
+                self.light_ray_solution[0]['end'] = np.asarray(end_position)
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -185,12 +187,12 @@
                                 np.sin(phi) * np.sin(theta),
                                 np.cos(theta)])
             self.light_ray_solution[0]['traversal_box_fraction'] = \
-              vector_length(self.light_ray_solution[0]['start'], 
+              vector_length(self.light_ray_solution[0]['start'],
                             self.light_ray_solution[0]['end'])
 
         # the normal way (random start positions and trajectories for each dataset)
         else:
-            
+
             # For box coherence, keep track of effective depth travelled.
             box_fraction_used = 0.0
 
@@ -285,15 +287,15 @@
             Default: None.
         trajectory : optional, list of floats
             Used only if creating a light ray from a single dataset.
-            The (r, theta, phi) direction of the light ray.  Use either 
+            The (r, theta, phi) direction of the light ray.  Use either
             end_position or trajectory, not both.
             Default: None.
         fields : optional, list
             A list of fields for which to get data.
             Default: None.
         setup_function : optional, callable, accepts a ds
-            This function will be called on each dataset that is loaded 
-            to create the light ray.  For, example, this can be used to 
+            This function will be called on each dataset that is loaded
+            to create the light ray.  For example, this can be used to
             add new derived fields.
             Default: None.
         solution_filename : optional, string
@@ -308,13 +310,13 @@
             each point in the ray.
             Default: True.
         redshift : optional, float
-            Used with light rays made from single datasets to specify a 
-            starting redshift for the ray.  If not used, the starting 
-            redshift will be 0 for a non-cosmological dataset and 
+            Used with light rays made from single datasets to specify a
+            starting redshift for the ray.  If not used, the starting
+            redshift will be 0 for a non-cosmological dataset and
             the dataset redshift for a cosmological dataset.
             Default: None.
         njobs : optional, int
-            The number of parallel jobs over which the segments will 
+            The number of parallel jobs over which the segments will
             be split.  Choose -1 for one processor per segment.
             Default: -1.
 
@@ -322,7 +324,7 @@
         --------
 
         Make a light ray from multiple datasets:
-        
+
         >>> import yt
         >>> from yt.analysis_modules.cosmological_observation.light_ray.api import \
         ...     LightRay
@@ -348,12 +350,12 @@
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
         ...                       get_los_velocity=True)
-        
+
         """
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, 
-                                           start_position=start_position, 
+        self._calculate_light_ray_solution(seed=seed,
+                                           start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
@@ -364,6 +366,8 @@
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
+        all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
+        data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
                                'velocity_z', 'velocity_los', 'redshift_eff'])
@@ -399,10 +403,15 @@
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
+                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                    segment_length = \
+                      my_segment["traversal_box_fraction"].in_units("Mpccm / h")
+                else:
+                    segment_length = my_segment["traversal_box_fraction"] * \
+                      ds.domain_width[0].in_units("Mpccm / h")
                 next_redshift = my_segment["redshift"] - \
-                  self._deltaz_forward(my_segment["redshift"], 
-                                       ds.domain_width[0].in_units("Mpccm / h") *
-                                       my_segment["traversal_box_fraction"])
+                  self._deltaz_forward(my_segment["redshift"],
+                                       segment_length)
             elif my_segment.get("next", None) is None:
                 next_redshift = self.near_redshift
             else:
@@ -454,7 +463,7 @@
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'], 
+                (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
@@ -500,12 +509,17 @@
         # Flatten the list into a single dictionary containing fields
         # for the whole ray.
         all_data = _flatten_dict_list(all_data, exceptions=['segment_redshift'])
+        self._data = all_data
 
         if data_filename is not None:
             self._write_light_ray(data_filename, all_data)
+            ray_ds = load(data_filename)
+            return ray_ds
+        else:
+            return None
 
-        self._data = all_data
-        return all_data
+    def __getitem__(self, field):
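+        # dict-like access to the data from the most recently made ray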
+        return self._data[field]
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):
@@ -514,19 +528,24 @@
 
         Write light ray data to hdf5 file.
         """
-
-        mylog.info("Saving light ray data to %s." % filename)
-        output = h5py.File(filename, 'w')
-        for field in data.keys():
-            # if the field is a tuple, only use the second part of the tuple
-            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
-            if isinstance(field, tuple):
-                fieldname = field[1]
-            else:
-                fieldname = field
-            output.create_dataset(fieldname, data=data[field])
-            output[fieldname].attrs["units"] = str(data[field].units)
-        output.close()
+        if self.simulation_type is None:
+            ds = load(self.parameter_filename, **self.load_kwargs)
+        else:
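+            # no single dataset to load; assemble a dict of simulation
+            # attributes for save_as_dataset to treat as a fake dataset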
+            ds = {}
+            ds["dimensionality"] = self.simulation.dimensionality
+            ds["domain_left_edge"] = self.simulation.domain_left_edge
+            ds["domain_right_edge"] = self.simulation.domain_right_edge
+            ds["cosmological_simulation"] = self.simulation.cosmological_simulation
+            ds["periodicity"] = (True, True, True)
+            ds["current_redshift"] = self.near_redshift
+            for attr in ["omega_lambda", "omega_matter", "hubble_constant"]:
+                ds[attr] = getattr(self.cosmology, attr)
+            ds["current_time"] = \
+              self.cosmology.t_from_z(ds["current_redshift"])
+        extra_attrs = {"data_type": "yt_light_ray"}
+        field_types = dict([(field, "grid") for field in data.keys()])
+        save_as_dataset(ds, filename, data, field_types=field_types,
+                        extra_attrs=extra_attrs)
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):
@@ -573,7 +592,7 @@
 def vector_length(start, end):
     """
     vector_length(start, end)
-    
+
     Calculate vector length.
     """
 
@@ -600,15 +619,15 @@
     """
     periodic_ray(start, end, left=None, right=None)
 
-    Break up periodic ray into non-periodic segments. 
+    Break up periodic ray into non-periodic segments.
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.
-    Returns a list of lists of coordinates, where each element of the 
-    top-most list is a 2-list of start coords and end coords of the 
-    non-periodic ray: 
+    Returns a list of lists of coordinates, where each element of the
+    top-most list is a 2-list of start coords and end coords of the
+    non-periodic ray:
 
-    [[[x0start,y0start,z0start], [x0end, y0end, z0end]], 
-     [[x1start,y1start,z1start], [x1end, y1end, z1end]], 
+    [[[x0start,y0start,z0start], [x0end, y0end, z0end]],
+     [[x1start,y1start,z1start], [x1end, y1end, z1end]],
      ...,]
 
     """

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -21,6 +21,9 @@
     periodic_distance
 from yt.data_objects.profiles import \
     create_profile
+from yt.frontends.ytdata.utilities import \
+    _hdf5_yt_array, \
+    _yt_array_hdf5
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \
@@ -584,21 +587,3 @@
     del sphere
     
 add_callback("iterative_center_of_mass", iterative_center_of_mass)
-
-def _yt_array_hdf5(fh, fieldname, data):
-    dataset = fh.create_dataset(fieldname, data=data)
-    units = ""
-    if isinstance(data, YTArray):
-        units = str(data.units)
-    dataset.attrs["units"] = units
-
-def _hdf5_yt_array(fh, fieldname, ds=None):
-    if ds is None:
-        new_arr = YTArray
-    else:
-        new_arr = ds.arr
-    units = ""
-    if "units" in fh[fieldname].attrs:
-        units = fh[fieldname].attrs["units"]
-    if units == "dimensionless": units = ""
-    return new_arr(fh[fieldname].value, units)

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -13,7 +13,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import h5py
 import itertools
+import os
 import types
 import uuid
 from yt.extern.six import string_types
@@ -25,9 +27,12 @@
 import shelve
 from contextlib import contextmanager
 
+from yt.funcs import get_output_filename
 from yt.funcs import *
 
 from yt.data_objects.particle_io import particle_handler_registry
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.units.unit_object import UnitParseError
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
@@ -98,6 +103,8 @@
     _con_args = ()
     _skip_add = False
     _container_fields = ()
+    _tds_attrs = ()
+    _tds_fields = ()
     _field_cache = None
     _index = None
 
@@ -463,6 +470,117 @@
         df = pd.DataFrame(data)
         return df
 
+    def save_as_dataset(self, filename=None, fields=None):
+        r"""Export a data object to a reloadable yt dataset.
+
+        This function will take a data object and output a dataset 
+        containing either the fields presently existing or fields 
+        given in the ``fields`` list.  The resulting dataset can be
+        reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name 
+            will be a combination of the original dataset and the type 
+            of data container.
+        fields : list of strings or tuples, optional
+            If this is supplied, it is the list of fields to be saved to
+            disk.  If not supplied, all the fields that have been queried
+            will be saved.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> sp = ds.sphere(ds.domain_center, (10, "Mpc"))
+        >>> fn = sp.save_as_dataset(fields=["density", "temperature"])
+        >>> sphere_ds = yt.load(fn)
+        >>> # the original data container is available as the data attribute
+        >>> print (sphere_ds.data["density"])
+        [  4.46237613e-32   4.86830178e-32   4.46335118e-32 ...,   6.43956165e-30
+           3.57339907e-30   2.83150720e-30] g/cm**3
+        >>> ad = sphere_ds.all_data()
+        >>> print (ad["temperature"])
+        [  1.00000000e+00   1.00000000e+00   1.00000000e+00 ...,   4.40108359e+04
+           4.54380547e+04   4.72560117e+04] K
+
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self._type_name)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        data = {}
+        if fields is not None:
+            for f in self._determine_fields(fields):
+                data[f] = self[f]
+        else:
+            data.update(self.field_data)
+        # get the extra fields needed to reconstruct the container
+        tds_fields = tuple(self._determine_fields(list(self._tds_fields)))
+        for f in [f for f in self._container_fields + tds_fields \
+                  if f not in data]:
+            data[f] = self[f]
+        data_fields = list(data.keys())
+
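+        # classify each field as grid or particle data, tracking whether
+        # grid or particle positions must be saved alongside the values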
+        need_grid_positions = False
+        need_particle_positions = False
+        ptypes = []
+        ftypes = {}
+        for field in data_fields:
+            if field in self._container_fields:
+                ftypes[field] = "grid"
+                need_grid_positions = True
+            elif self.ds.field_info[field].particle_type:
+                if field[0] not in ptypes:
+                    ptypes.append(field[0])
+                ftypes[field] = field[0]
+                need_particle_positions = True
+            else:
+                ftypes[field] = "grid"
+                need_grid_positions = True
+        # projections and slices use px and py, so don't need positions
+        if self._type_name in ["cutting", "proj", "slice"]:
+            need_grid_positions = False
+
+        if need_particle_positions:
+            for ax in "xyz":
+                for ptype in ptypes:
+                    p_field = (ptype, "particle_position_%s" % ax)
+                    if p_field in self.ds.field_info and p_field not in data:
+                        data_fields.append(p_field)
+                        ftypes[p_field] = p_field[0]
+                        data[p_field] = self[p_field]
+        if need_grid_positions:
+            for ax in "xyz":
+                g_field = ("index", ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+                g_field = ("index", "d" + ax)
+                if g_field in self.ds.field_info and g_field not in data:
+                    data_fields.append(g_field)
+                    ftypes[g_field] = "grid"
+                    data[g_field] = self[g_field]
+
+        extra_attrs = dict([(arg, getattr(self, arg, None))
+                            for arg in self._con_args + self._tds_attrs])
+        extra_attrs["con_args"] = self._con_args
+        extra_attrs["data_type"] = "yt_data_container"
+        extra_attrs["container_type"] = self._type_name
+        extra_attrs["dimensionality"] = self._dimensionality
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
+
+        return filename
+        
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -256,7 +256,8 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", ax] *
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
             vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
@@ -657,7 +658,7 @@
         m = data.ds.quan(0., "g")
         if use_gas:
             e += (data["gas", "kinetic_energy"] *
-                  data["index", "cell_volume"]).sum(dtype=np.float64)
+                  data["gas", "cell_volume"]).sum(dtype=np.float64)
             j += data["gas", "angular_momentum_magnitude"].sum(dtype=np.float64)
             m += data["gas", "cell_mass"].sum(dtype=np.float64)
         if use_particles:

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -16,8 +16,10 @@
 from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
+from yt.funcs import get_output_filename
 from yt.funcs import *
-
 from yt.units.yt_array import uconcatenate, array_like_field
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
@@ -949,6 +951,112 @@
         else:
             return np.linspace(mi, ma, n+1)
 
+    def save_as_dataset(self, filename=None):
+        r"""Export a profile to a reloadable yt dataset.
+
+        This function will take a profile and output a dataset
+        containing all relevant fields.  The resulting dataset
+        can be reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name
+            will be a combination of the original dataset plus
+            the type of object, e.g., Profile1D.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+        >>> ad = ds.all_data()
+        >>> profile = yt.create_profile(ad, ["density", "temperature"],
+        ...                            "cell_mass", weight_field=None,
+        ...                             n_bins=(128, 128))
+        >>> fn = profile.save_as_dataset()
+        >>> prof_ds = yt.load(fn)
+        >>> print (prof_ds.data["cell_mass"])
+        (128, 128)
+        >>> print (prof_ds.data["x"].shape) # x bins as 1D array
+        (128,)
+        >>> print (prof_ds.data["density"]) # x bins as 2D array
+        (128, 128)
+        >>> p = yt.PhasePlot(prof_ds.data, "density", "temperature",
+        ...                  "cell_mass", weight_field=None)
+        >>> p.save()
+
+        """
+
+        keyword = "%s_%s" % (str(self.ds), self.__class__.__name__)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        args = ("field", "log")
+        extra_attrs = {"data_type": "yt_profile",
+                       "profile_dimensions": self.size,
+                       "weight_field": self.weight_field,
+                       "fractional": self.fractional}
+        data = {}
+        data.update(self.field_data)
+        data["weight"] = self.weight
+        data["used"] = self.used.astype("float64")
+
+        dimensionality = 0
+        bin_data = []
+        for ax in "xyz":
+            if hasattr(self, ax):
+                dimensionality += 1
+                data[ax] = getattr(self, ax)
+                bin_data.append(data[ax])
+                bin_field_name = "%s_bins" % ax
+                data[bin_field_name] = getattr(self, bin_field_name)
+                extra_attrs["%s_range" % ax] = self.ds.arr([data[bin_field_name][0],
+                                                            data[bin_field_name][-1]])
+                for arg in args:
+                    key = "%s_%s" % (ax, arg)
+                    extra_attrs[key] = getattr(self, key)
+
+        bin_fields = np.meshgrid(*bin_data)
+        for i, ax in enumerate("xyz"[:dimensionality]):
+            data[getattr(self, "%s_field" % ax)] = bin_fields[i]
+
+        extra_attrs["dimensionality"] = dimensionality
+        ftypes = dict([(field, "data") for field in data])
+        save_as_dataset(self.ds, filename, data, field_types=ftypes,
+                        extra_attrs=extra_attrs)
+
+        return filename
+
+class ProfileNDFromDataset(ProfileND):
+    """
+    An ND profile object loaded from a ytdata dataset.
+    """
+    def __init__(self, ds):
+        ProfileND.__init__(self, ds.data, ds.parameters["weight_field"])
+        self.fractional = ds.parameters["fractional"]
+        exclude_fields = ["used", "weight"]
+        for ax in "xyz"[:ds.dimensionality]:
+            setattr(self, ax, ds.data[ax])
+            setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
+            setattr(self, "%s_field" % ax,
+                    tuple(ds.parameters["%s_field" % ax]))
+            setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
+            exclude_fields.extend([ax, "%s_bins" % ax,
+                                   ds.parameters["%s_field" % ax][1]])
+        self.weight = ds.data["weight"]
+        self.used = ds.data["used"].d.astype(bool)
+        profile_fields = [f for f in ds.field_list
+                          if f[1] not in exclude_fields]
+        for field in profile_fields:
+            self.field_map[field[1]] = field
+            self.field_data[field] = ds.data[field]
+            self.field_units[field] = ds.data[field].units
+
 class Profile1D(ProfileND):
     """An object that represents a 1D profile.
 
@@ -1011,6 +1119,14 @@
     def bounds(self):
         return ((self.x_bins[0], self.x_bins[-1]),)
 
+class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
+    """
+    A 1D profile object loaded from a ytdata dataset.
+    """
+
+    def __init__(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
+
 class Profile2D(ProfileND):
     """An object that represents a 2D profile.
 
@@ -1108,6 +1224,13 @@
         return ((self.x_bins[0], self.x_bins[-1]),
                 (self.y_bins[0], self.y_bins[-1]))
 
+class Profile2DFromDataset(ProfileNDFromDataset, Profile2D):
+    """
+    A 2D profile object loaded from a ytdata dataset.
+    """
+
+    def __init__(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
 
 class ParticleProfile(Profile2D):
     """An object that represents a *deposited* 2D profile. This is like a
@@ -1354,6 +1477,13 @@
         self.z_bins.convert_to_units(new_unit)
         self.z = 0.5*(self.z_bins[1:]+self.z_bins[:-1])
 
+class Profile3DFromDataset(ProfileNDFromDataset, Profile3D):
+    """
+    A 3D profile object loaded from a ytdata dataset.
+    """
+
+    def __init__(self, ds):
+        ProfileNDFromDataset.__init__(self, ds)
 
 def sanitize_field_tuple_keys(input_dict, data_source):
     if input_dict is not None:
@@ -1429,8 +1559,8 @@
     >>> profile = create_profile(ad, [("gas", "density")],
     ...                              [("gas", "temperature"),
     ...                               ("gas", "velocity_x")])
-    >>> print profile.x
-    >>> print profile["gas", "temperature"]
+    >>> print (profile.x)
+    >>> print (profile["gas", "temperature"])
 
     """
     bin_fields = data_source._determine_fields(bin_fields)

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -347,6 +347,8 @@
     _key_fields = YTSelectionContainer2D._key_fields + ['pz','pdz']
     _type_name = "cutting"
     _con_args = ('normal', 'center')
+    _tds_attrs = ("_inv_mat",)
+    _tds_fields = ("x", "y", "z", "dx")
     _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
     def __init__(self, normal, center, north_vector=None,
                  ds=None, field_parameters=None, data_source=None):

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -1,7 +1,7 @@
 """
-Generalized Enzo output objects, both static and time-series.
+Dataset and related data structures.
 
-Presumably at some point EnzoRun will be absorbed into here.
+
 
 
 """
@@ -373,6 +373,7 @@
         self.field_info.setup_fluid_fields()
         for ptype in self.particle_types:
             self.field_info.setup_particle_fields(ptype)
+        self.field_info.setup_fluid_index_fields()
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -63,6 +63,20 @@
     def setup_fluid_fields(self):
         pass
 
+    def setup_fluid_index_fields(self):
+        # Now we get all our index types and set up aliases to them
+        if self.ds is None: return
+        index_fields = set([f for _, f in self if _ == "index"])
+        for ftype in self.ds.fluid_types:
+            if ftype in ("index", "deposit"): continue
+            for f in index_fields:
+                if (ftype, f) in self: continue
+                self.alias((ftype, f), ("index", f))
+                # Different field types have different default units.
+                # We want to make sure the aliased field will have
+                # the same units as the "index" field.
+                self[(ftype, f)].units = self["index", f].units
+
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         skip_output_units = ("code_length",)
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -52,7 +52,7 @@
     create_vector_fields(registry, "velocity", "cm / s", ftype, slice_info)
 
     def _cell_mass(field, data):
-        return data[ftype, "density"] * data["index", "cell_volume"]
+        return data[ftype, "density"] * data[ftype, "cell_volume"]
 
     registry.add_field((ftype, "cell_mass"),
         function=_cell_mass,
@@ -89,11 +89,11 @@
             units = "")
 
     def _courant_time_step(field, data):
-        t1 = data["index", "dx"] / (data[ftype, "sound_speed"]
+        t1 = data[ftype, "dx"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_x"]))
-        t2 = data["index", "dy"] / (data[ftype, "sound_speed"]
+        t2 = data[ftype, "dy"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_y"]))
-        t3 = data["index", "dz"] / (data[ftype, "sound_speed"]
+        t3 = data[ftype, "dz"] / (data[ftype, "sound_speed"]
                         + np.abs(data[ftype, "velocity_z"]))
         tr = np.minimum(np.minimum(t1, t2), t3)
         return tr
@@ -140,7 +140,7 @@
              units="Zsun")
 
     def _metal_mass(field, data):
-        return data[ftype, "metal_density"] * data["index", "cell_volume"]
+        return data[ftype, "metal_density"] * data[ftype, "cell_volume"]
     registry.add_field((ftype, "metal_mass"),
                        function=_metal_mass,
                        units="g")
@@ -188,7 +188,7 @@
         slice_3dl[axi] = sl_left
         slice_3dr[axi] = sl_right
         def func(field, data):
-            ds = div_fac * data["index", "d%s" % ax]
+            ds = div_fac * data[ftype, "d%s" % ax]
             f  = data[grad_field][slice_3dr]/ds[slice_3d]
             f -= data[grad_field][slice_3dl]/ds[slice_3d]
             new_field = data.ds.arr(np.zeros_like(data[grad_field], dtype=np.float64),

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -39,6 +39,7 @@
     'sdf',
     'stream',
     'tipsy',
+    'ytdata',
 ]
 
 class _frontend_container:

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -40,7 +40,7 @@
     mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-    
+
 class EnzoSimulation(SimulationTimeSeries):
     r"""
     Initialize an Enzo Simulation object.
@@ -101,6 +101,8 @@
             self.length_unit = self.quan(self.box_size, "Mpccm / h",
                                          registry=self.unit_registry)
             self.box_size = self.length_unit
+            self.domain_left_edge = self.domain_left_edge * self.length_unit
+            self.domain_right_edge = self.domain_right_edge * self.length_unit
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
@@ -133,21 +135,21 @@
             datasets for time series.
             Default: True.
         initial_time : tuple of type (float, str)
-            The earliest time for outputs to be included.  This should be 
+            The earliest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (5.0, "Gyr").  If None, the initial time of the 
-            simulation is used.  This can be used in combination with 
+            For example, (5.0, "Gyr").  If None, the initial time of the
+            simulation is used.  This can be used in combination with
             either final_time or final_redshift.
             Default: None.
         final_time : tuple of type (float, str)
-            The latest time for outputs to be included.  This should be 
+            The latest time for outputs to be included.  This should be
             given as the value and the string representation of the units.
-            For example, (13.7, "Gyr"). If None, the final time of the 
-            simulation is used.  This can be used in combination with either 
+            For example, (13.7, "Gyr"). If None, the final time of the
+            simulation is used.  This can be used in combination with either
             initial_time or initial_redshift.
             Default: None.
         times : tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
+            A list of times for which outputs will be found and the units
             of those values.  For example, ([0, 1, 2, 3], "s").
             Default: None.
         initial_redshift : float
@@ -195,8 +197,8 @@
 
         >>> import yt
         >>> es = yt.simulation("my_simulation.par", "Enzo")
-        
-        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
+
+        >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"),
                                redshift_data=False)
 
         >>> es.get_time_series(redshifts=[3, 2, 1, 0])
@@ -304,7 +306,7 @@
         for output in my_outputs:
             if os.path.exists(output['filename']):
                 init_outputs.append(output['filename'])
-            
+
         DatasetSeries.__init__(self, outputs=init_outputs, parallel=parallel,
                                 setup_function=setup_function)
         mylog.info("%d outputs loaded into time series.", len(init_outputs))
@@ -586,11 +588,11 @@
         Check a list of files to see if they are valid datasets.
         """
 
-        only_on_root(mylog.info, "Checking %d potential outputs.", 
+        only_on_root(mylog.info, "Checking %d potential outputs.",
                      len(potential_outputs))
 
         my_outputs = {}
-        for my_storage, output in parallel_objects(potential_outputs, 
+        for my_storage, output in parallel_objects(potential_outputs,
                                                    storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
@@ -643,6 +645,6 @@
         self.initial_redshift = initial_redshift
         # time units = 1 / sqrt(4 * pi * G rho_0 * (1 + z_i)**3),
         # rho_0 = (3 * Omega_m * h**2) / (8 * pi * G)
-        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 * 
+        self.time_unit = ((1.5 * self.omega_matter * self.hubble_constant**2 *
                            (1 + self.initial_redshift)**3)**-0.5).in_units("s")
         self.time_unit.units.registry = self.unit_registry

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -29,6 +29,7 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("tipsy")
+    config.add_subpackage("ytdata")
     config.add_subpackage("art/tests")
     config.add_subpackage("artio/tests")
     config.add_subpackage("athena/tests")
@@ -47,4 +48,5 @@
     config.add_subpackage("rockstar/tests")
     config.add_subpackage("stream/tests")
     config.add_subpackage("tipsy/tests")
+    config.add_subpackage("ytdata/tests")
     return config

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/frontends/ytdata/__init__.py
--- /dev/null
+++ b/yt/frontends/ytdata/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for ytData frontend.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 811884bbb9f9ba343af6aaa2e2a178fde02a3453 -r 697ca7baf306df33d900376704a185a48ff08723 yt/frontends/ytdata/api.py
--- /dev/null
+++ b/yt/frontends/ytdata/api.py
@@ -0,0 +1,39 @@
+"""
+API for ytData frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+    YTDataContainerDataset, \
+    YTSpatialPlotDataset, \
+    YTGridDataset, \
+    YTGridHierarchy, \
+    YTGrid, \
+    YTNonspatialDataset, \
+    YTNonspatialHierarchy, \
+    YTNonspatialGrid, \
+    YTProfileDataset
+
+from .io import \
+    IOHandlerYTDataContainerHDF5, \
+    IOHandlerYTGridHDF5, \
+    IOHandlerYTSpatialPlotHDF5, \
+    IOHandlerYTNonspatialhdf5
+
+from .fields import \
+    YTDataContainerFieldInfo, \
+    YTGridFieldInfo
+
+from .utilities import \
+    save_as_dataset

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


