[yt-svn] commit/yt: 15 new changesets

commits-noreply at bitbucket.org
Mon Jun 1 12:26:51 PDT 2015


15 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7ea8cdd500cc/
Changeset:   7ea8cdd500cc
Branch:      yt
User:        jzuhone
Date:        2015-05-09 15:12:10+00:00
Summary:     First stab at loading an FRB as a dataset
Affected #:  1 file

diff -r 3e1887de7362d631d057e8ef56843f4015b008a3 -r 7ea8cdd500cce9eff16c49ff2f38f17bc8d23d40 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -712,6 +712,28 @@
     
     return sds
 
+def load_frb(frb, fields=None, nprocs=1):
+    nx, ny = frb.buff_size
+    data = {}
+    if fields is None:
+        fields = list(frb.keys())
+    for field in fields:
+        arr = frb[field].d
+        data[field] = (arr.reshape(nx,ny,1), str(arr.units))
+    bounds = [b.in_units("code_length").v for b in frb.bounds]
+    bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
+    return load_uniform_grid(frb, [nx,ny,1], 
+                             length_unit=frb.ds.length_unit,
+                             bbox=bbox,
+                             sim_time=frb.ds.current_time.in_units("s").v,
+                             mass_unit=frb.ds.mass_unit,
+                             time_unit=frb.ds.time_unit,
+                             velocity_unit=frb.ds.velocity_unit,
+                             magnetic_unit=frb.ds.magnetic_unit,
+                             periodicity=(False,False,False),
+                             geometry=frb.ds.geometry,
+                             nprocs=nprocs)
+
 def load_amr_grids(grid_data, domain_dimensions,
                    field_units=None, bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,
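
A minimal usage sketch of this first cut (dataset path and field name are
illustrative, borrowed from the docstring added two changesets later; the
bugs in this revision are fixed in the next changeset):

>>> import yt
>>> from yt.frontends.stream.data_structures import load_frb
>>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
>>> slc = ds.slice(2, 0.0)
>>> frb = slc.to_frb((500., "kpc"), 500)
>>> ds2 = load_frb(frb, fields=["density"], nprocs=4)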


https://bitbucket.org/yt_analysis/yt/commits/fa945e4d1816/
Changeset:   fa945e4d1816
Branch:      yt
User:        jzuhone
Date:        2015-05-09 16:25:37+00:00
Summary:     Bug fixes
Affected #:  1 file

diff -r 7ea8cdd500cce9eff16c49ff2f38f17bc8d23d40 -r fa945e4d1816db4a90c621a4cd8ae5433a0800f8 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -718,11 +718,11 @@
     if fields is None:
         fields = list(frb.keys())
     for field in fields:
-        arr = frb[field].d
-        data[field] = (arr.reshape(nx,ny,1), str(arr.units))
+        arr = frb[field]
+        data[field] = (arr.d.reshape(nx,ny,1), str(arr.units))
     bounds = [b.in_units("code_length").v for b in frb.bounds]
     bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
-    return load_uniform_grid(frb, [nx,ny,1], 
+    return load_uniform_grid(data, [nx,ny,1], 
                              length_unit=frb.ds.length_unit,
                              bbox=bbox,
                              sim_time=frb.ds.current_time.in_units("s").v,
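
The two fixes: frb[field] is a unit-aware YTArray, while its .d attribute is
the bare ndarray view with no .units; and load_uniform_grid must receive the
assembled data dict rather than the FRB itself. A small illustration,
assuming a "density" field:

>>> arr = frb["density"]   # YTArray, carries units
>>> arr.d                  # plain ndarray view; has no .units attribute
>>> str(arr.units)         # e.g. 'g/cm**3', only available on the YTArray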


https://bitbucket.org/yt_analysis/yt/commits/2a8cc961b308/
Changeset:   2a8cc961b308
Branch:      yt
User:        jzuhone
Date:        2015-05-09 16:25:50+00:00
Summary:     Adding hooks in the appropriate places
Affected #:  2 files

diff -r fa945e4d1816db4a90c621a4cd8ae5433a0800f8 -r 2a8cc961b30837b6a31721e0d42676c6c9cb1089 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -134,7 +134,7 @@
 from yt.frontends.stream.api import \
     load_uniform_grid, load_amr_grids, \
     load_particles, load_hexahedral_mesh, load_octree, \
-    hexahedral_connectivity
+    hexahedral_connectivity, load_frb
 
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset

diff -r fa945e4d1816db4a90c621a4cd8ae5433a0800f8 -r 2a8cc961b30837b6a31721e0d42676c6c9cb1089 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -24,7 +24,8 @@
       load_hexahedral_mesh, \
       hexahedral_connectivity, \
       load_octree, \
-      refine_amr
+      refine_amr, \
+      load_frb
 
 from .fields import \
       StreamFieldInfo
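
With these hooks in place the function is importable from the top level
(assuming the frb built in the earlier sketch):

>>> import yt
>>> ds2 = yt.load_frb(frb, fields=["density"])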


https://bitbucket.org/yt_analysis/yt/commits/a9e417525bf5/
Changeset:   a9e417525bf5
Branch:      yt
User:        jzuhone
Date:        2015-05-09 16:37:21+00:00
Summary:     Add a docstring. FRB data needs to be transposed first.
Affected #:  1 file

diff -r 2a8cc961b30837b6a31721e0d42676c6c9cb1089 -r a9e417525bf59358ff3bba5313dc859e17546c7e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -709,17 +709,40 @@
             pdata = pdata_ftype
         # This will update the stream handler too
         assign_particle_data(sds, pdata)
-    
+
     return sds
 
 def load_frb(frb, fields=None, nprocs=1):
+    r"""Load a :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
+    instance as a dataset. Unit information and other parameters (e.g., geometry,
+    current_time, etc.) will be taken from the parent dataset. 
+
+    Parameters
+    ----------
+    frb : FixedResolutionBuffer
+        The FRB to be loaded.
+    fields : list of strings, optional
+        The fields to be extracted from the FRB. If "None", the keys of the
+        FRB will be used. 
+    nprocs: integer, optional
+        If greater than 1, will create this number of subarrays out of data
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+    >>> slc = ds.slice(2, 0.0)
+    >>> frb = slc.to_frb((500.,"kpc"), 500)
+    >>> ds2 = yt.load_frb(frb, fields=["density","temperature"], nprocs=32)
+    """
     nx, ny = frb.buff_size
     data = {}
     if fields is None:
         fields = list(frb.keys())
     for field in fields:
         arr = frb[field]
-        data[field] = (arr.d.reshape(nx,ny,1), str(arr.units))
+        data[field] = (arr.d.T.reshape(nx,ny,1), str(arr.units))
     bounds = [b.in_units("code_length").v for b in frb.bounds]
     bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
     return load_uniform_grid(data, [nx,ny,1], 
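
The added transpose suggests the FRB stores its buffers y-axis-first, in
image order, whereas load_uniform_grid expects x-first arrays; a sketch of
the shape bookkeeping under that assumption:

>>> nx, ny = frb.buff_size
>>> frb["density"].shape                         # (ny, nx), image order
>>> frb["density"].d.T.reshape(nx, ny, 1).shape  # (nx, ny, 1) for the loader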


https://bitbucket.org/yt_analysis/yt/commits/37ebf48c27f9/
Changeset:   37ebf48c27f9
Branch:      yt
User:        jzuhone
Date:        2015-05-11 16:23:45+00:00
Summary:     Merge
Affected #:  8 files

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -18,13 +18,9 @@
 from libc.string cimport memcpy
 import data_structures
 
-IF UNAME_SYSNAME == "Windows":
-    cdef extern from "malloc.h":
-        void *alloca(int)
-ELSE:
-    cdef extern from "alloca.h":
-        void *alloca(int)
-
+cdef extern from "platform_dep.h":
+    void *alloca(int)
+    
 cdef extern from "cosmology.h":
     ctypedef struct CosmologyParameters "CosmologyParameters" :
         pass

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -566,7 +566,7 @@
     return contents
 
 def download_file(url, filename):
-    class MyURLopener(urllib.FancyURLopener):
+    class MyURLopener(urllib.request.FancyURLopener):
         def http_error_default(self, url, fp, errcode, errmsg, headers):
             raise RuntimeError("Attempt to download file from %s failed with error %s: %s." % \
               (url, errcode, errmsg))

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -23,13 +23,9 @@
 from fp_utils cimport *
 from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 
-IF UNAME_SYSNAME == "Windows":
-    cdef extern from "malloc.h":
-        void *alloca(int)
-ELSE:
-    cdef extern from "alloca.h":
-        void *alloca(int)
-
+cdef extern from "platform_dep.h":
+    void *alloca(int)
+    
 cdef inline int gind(int i, int j, int k, int dims[3]):
     # The ordering is such that we want i to vary the slowest in this instance,
     # even though in other instances it varies the fastest.  To see this in

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -24,12 +24,8 @@
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport sph_kernel, gind
 
-IF UNAME_SYSNAME == "Windows":
-    cdef extern from "malloc.h":
-        void *alloca(int)
-ELSE:
-    cdef extern from "alloca.h":
-        void *alloca(int)
+cdef extern from "platform_dep.h":
+    void *alloca(int)
 
 cdef struct NeighborList
 cdef struct NeighborList:

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -29,12 +29,8 @@
     void FIX_LONG( unsigned )
     void FIX_FLOAT( float )
 
-IF UNAME_SYSNAME == "Windows":
-    cdef extern from "malloc.h":
-        void *alloca(int)
-ELSE:
-    cdef extern from "alloca.h":
-        void *alloca(int)
+cdef extern from "platform_dep.h":
+    void *alloca(int)
 
 cdef extern from "stdio.h":
     cdef int SEEK_SET

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/utilities/lib/platform_dep.h
--- /dev/null
+++ b/yt/utilities/lib/platform_dep.h
@@ -0,0 +1,5 @@
+#if defined(WIN32) || defined(WIN64)
+#include "malloc.h"
+#else
+#include "alloca.h"
+#endif
\ No newline at end of file

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -213,7 +213,7 @@
 
         """
         if field == 'all':
-            fields = self.plots.keys()
+            fields = list(self.plots.keys())
         else:
             fields = [field]
         for field in self.data_source._determine_fields(fields):
@@ -240,7 +240,7 @@
         """
         log = {}
         if field == 'all':
-            fields = self.plots.keys()
+            fields = list(self.plots.keys())
         else:
             fields = [field]
         for field in self.data_source._determine_fields(fields):
@@ -276,7 +276,7 @@
         """
 
         if field == 'all':
-            fields = self.plots.keys()
+            fields = list(self.plots.keys())
         else:
             fields = [field]
         for field in self.data_source._determine_fields(fields):
@@ -311,7 +311,7 @@
 
         """
         if field is 'all':
-            fields = self.plots.keys()
+            fields = list(self.plots.keys())
         else:
             fields = ensure_list(field)
         for field in self.data_source._determine_fields(fields):
@@ -347,7 +347,7 @@
 
         """
         if field == 'all':
-            fields = self.plots.keys()
+            fields = list(self.plots.keys())
         else:
             fields = [field]
         for field in self.data_source._determine_fields(fields):
@@ -373,7 +373,7 @@
 
         """
         if field == 'all':
-            fields = self.plots.keys()
+            fields = list(self.plots.keys())
         else:
             fields = [field]
         for field in self.data_source._determine_fields(fields):

diff -r a9e417525bf59358ff3bba5313dc859e17546c7e -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -378,8 +378,9 @@
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
-            lines(im.d, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
-                  np.array([color, color]), 1, thickness, flip=1)
+            px = np.array([px0, px0+dx], dtype='int64')
+            py = np.array([py0, py0+dy], dtype='int64')
+            lines(im.d, px, py, np.array([color, color]), 1, thickness, flip=1)
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
@@ -427,11 +428,11 @@
         py1 = int(self.resolution[0]*(dx1/self.width[0]))
         px0 = int(self.resolution[1]*(dy0/self.width[1]))
         px1 = int(self.resolution[1]*(dy1/self.width[1]))
-
+        px = np.array([px0, px1], dtype="int64")
+        py = np.array([py0, py1], dtype="int64")
         # we flipped it in snapshot to get the orientation correct, so
         # flip the lines
-        lines(im.d, np.array([px0, px1]), np.array([py0, py1]),
-              np.array([color,color]), flip=1)
+        lines(im.d, px, py, np.array([color,color]), flip=1)
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.
@@ -515,7 +516,7 @@
        
         # we flipped it in snapshot to get the orientation correct, so
         # flip the lines
-        lines(im.d, px.d, py.d, color.reshape(1,4), 24, flip=1)
+        lines(im.d, px.d.astype("int64"), py.d.astype("int64"), color.reshape(1,4), 24, flip=1)
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.


https://bitbucket.org/yt_analysis/yt/commits/895bf7d40ed4/
Changeset:   895bf7d40ed4
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:10:32+00:00
Summary:     Better idea: make this an FRB method instead.
Affected #:  4 files

diff -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 -r 895bf7d40ed4140cb81a40dd74a6fe4a97817d16 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -134,7 +134,7 @@
 from yt.frontends.stream.api import \
     load_uniform_grid, load_amr_grids, \
     load_particles, load_hexahedral_mesh, load_octree, \
-    hexahedral_connectivity, load_frb
+    hexahedral_connectivity
 
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset

diff -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 -r 895bf7d40ed4140cb81a40dd74a6fe4a97817d16 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -24,8 +24,7 @@
       load_hexahedral_mesh, \
       hexahedral_connectivity, \
       load_octree, \
-      refine_amr, \
-      load_frb
+      refine_amr
 
 from .fields import \
       StreamFieldInfo

diff -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 -r 895bf7d40ed4140cb81a40dd74a6fe4a97817d16 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -712,51 +712,6 @@
 
     return sds
 
-def load_frb(frb, fields=None, nprocs=1):
-    r"""Load a :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer`
-    instance as a dataset. Unit information and other parameters (e.g., geometry,
-    current_time, etc.) will be taken from the parent dataset. 
-
-    Parameters
-    ----------
-    frb : FixedResolutionBuffer
-        The FRB to be loaded.
-    fields : list of strings, optional
-        The fields to be extracted from the FRB. If "None", the keys of the
-        FRB will be used. 
-    nprocs: integer, optional
-        If greater than 1, will create this number of subarrays out of data
-
-    Examples
-    --------
-
-    >>> import yt
-    >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-    >>> slc = ds.slice(2, 0.0)
-    >>> frb = slc.to_frb((500.,"kpc"), 500)
-    >>> ds2 = yt.load_frb(frb, fields=["density","temperature"], nprocs=32)
-    """
-    nx, ny = frb.buff_size
-    data = {}
-    if fields is None:
-        fields = list(frb.keys())
-    for field in fields:
-        arr = frb[field]
-        data[field] = (arr.d.T.reshape(nx,ny,1), str(arr.units))
-    bounds = [b.in_units("code_length").v for b in frb.bounds]
-    bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
-    return load_uniform_grid(data, [nx,ny,1], 
-                             length_unit=frb.ds.length_unit,
-                             bbox=bbox,
-                             sim_time=frb.ds.current_time.in_units("s").v,
-                             mass_unit=frb.ds.mass_unit,
-                             time_unit=frb.ds.time_unit,
-                             velocity_unit=frb.ds.velocity_unit,
-                             magnetic_unit=frb.ds.magnetic_unit,
-                             periodicity=(False,False,False),
-                             geometry=frb.ds.geometry,
-                             nprocs=nprocs)
-
 def load_amr_grids(grid_data, domain_dimensions,
                    field_units=None, bbox=None, sim_time=0.0, length_unit=None,
                    mass_unit=None, time_unit=None, velocity_unit=None,

diff -r 37ebf48c27f94ec56c73ecc5cba27aab750552f7 -r 895bf7d40ed4140cb81a40dd74a6fe4a97817d16 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -20,6 +20,7 @@
 from yt.utilities.lib.pixelization_routines import \
     pixelize_cylinder
 from yt.utilities.lib.api import add_points_to_greyscale_image
+from yt.frontends.stream.api import load_uniform_grid
 
 from . import _MPL
 import numpy as np
@@ -73,13 +74,13 @@
     To make a projection and then several images, you can generate a
     single FRB and then access multiple fields:
 
-    >>> proj = ds.proj(0, "Density")
+    >>> proj = ds.proj(0, "density")
     >>> frb1 = FixedResolutionBuffer(proj, (0.2, 0.3, 0.4, 0.5),
-                    (1024, 1024))
-    >>> print frb1["Density"].max()
-    1.0914e-9
-    >>> print frb1["Temperature"].max()
-    104923.1
+    ...                              (1024, 1024))
+    >>> print frb1["density"].max()
+    1.0914e-9 g/cm**3
+    >>> print frb1["temperature"].max()
+    104923.1 K
     """
     _exclude_fields = ('pz','pdz','dx','x','y','z',
         'r', 'dr', 'phi', 'dphi', 'theta', 'dtheta',
@@ -330,7 +331,50 @@
             for k,v in other_keys.items():
                 fib.update_all_headers(k,v)
         fib.writeto(filename, clobber=clobber)
-        
+
+    def export_dataset(self, fields=None, nprocs=1):
+        r"""Export a set of pixelized fields to an in-memory dataset that can be
+        analyzed as any other in yt. Unit information and other parameters (e.g., 
+        geometry, current_time, etc.) will be taken from the parent dataset. 
+
+        Parameters
+        ----------
+        fields : list of strings, optional
+            The fields to be extracted from the FRB. If "None", the keys of the
+            FRB will be used. 
+        nprocs: integer, optional
+            If greater than 1, will create this number of subarrays out of data
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+        >>> slc = ds.slice(2, 0.0)
+        >>> frb = slc.to_frb((500.,"kpc"), 500)
+        >>> ds2 = yt.load_frb(frb, fields=["density","temperature"], nprocs=32)
+        """
+        nx, ny = self.buff_size
+        data = {}
+        if fields is None:
+            fields = list(self.keys())
+        for field in fields:
+            arr = self[field]
+            data[field] = (arr.d.T.reshape(nx,ny,1), str(arr.units))
+        bounds = [b.in_units("code_length").v for b in self.bounds]
+        bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
+        return load_uniform_grid(data, [nx,ny,1],
+                                 length_unit=self.ds.length_unit,
+                                 bbox=bbox,
+                                 sim_time=self.ds.current_time.in_units("s").v,
+                                 mass_unit=self.ds.mass_unit,
+                                 time_unit=self.ds.time_unit,
+                                 velocity_unit=self.ds.velocity_unit,
+                                 magnetic_unit=self.ds.magnetic_unit,
+                                 periodicity=(False,False,False),
+                                 geometry=self.ds.geometry,
+                                 nprocs=nprocs)
+
     @property
     def limits(self):
         rv = dict(x = None, y = None, z = None)
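
A usage sketch of the new method, mirroring the docs added below (the
Examples block at this revision still reads yt.load_frb; changeset
fdee8b799898 below corrects it to frb.export_dataset):

>>> import yt
>>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
>>> slc = ds.slice(2, 0.0)
>>> frb = slc.to_frb((500., "kpc"), 500)
>>> ds2 = frb.export_dataset(fields=["density", "temperature"], nprocs=32)
>>> sp = ds2.sphere("c", (100., "kpc"))  # analyze like any other dataset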


https://bitbucket.org/yt_analysis/yt/commits/d0c9468c1bad/
Changeset:   d0c9468c1bad
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:17:20+00:00
Summary:     Remove a reference to an option that no longer works
Affected #:  1 file

diff -r 895bf7d40ed4140cb81a40dd74a6fe4a97817d16 -r d0c9468c1badf2f4a6fc3c3b21813570ce795966 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -314,9 +314,7 @@
         other_keys : dictionary, optional
             A set of header keys and values to write into the FITS header.
         units : string, optional
-            the length units that the coordinates are written in, default 'cm'
-            If units are set to "deg" then assume that sky coordinates are
-            requested.
+            the length units that the coordinates are written in, default 'cm'.
         """
 
         from yt.utilities.fits_image import FITSImageBuffer


https://bitbucket.org/yt_analysis/yt/commits/2273632cc3b6/
Changeset:   2273632cc3b6
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:22:02+00:00
Summary:     A few other small changes. If fields is None in general, just use the FRB's keys.
Affected #:  1 file

diff -r d0c9468c1badf2f4a6fc3c3b21813570ce795966 -r 2273632cc3b67088d5b54f8712c2d3d263672eed yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -290,7 +290,7 @@
             These fields will be pixelized and output.
         """
         import h5py
-        if fields is None: fields = self.data.keys()
+        if fields is None: fields = list(self.data.keys())
         output = h5py.File(filename, "a")
         for field in fields:
             output.create_dataset(field,data=self[field])
@@ -308,7 +308,8 @@
         filename : string
             The name of the FITS file to be written.
         fields : list of strings
-            These fields will be pixelized and output.
+            These fields will be pixelized and output. If "None", the keys of the
+            FRB will be used. 
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
         other_keys : dictionary, optional
@@ -319,10 +320,7 @@
 
         from yt.utilities.fits_image import FITSImageBuffer
 
-        extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
-        if fields is None: 
-            fields = [field[-1] for field in self.data_source.field_data
-                      if field not in extra_fields]
+        if fields is None: fields = list(self.data.keys())
 
         fib = FITSImageBuffer(self, fields=fields, units=units)
         if other_keys is not None:
@@ -338,7 +336,7 @@
         Parameters
         ----------
         fields : list of strings, optional
-            The fields to be extracted from the FRB. If "None", the keys of the
+            These fields will be pixelized and output. If "None", the keys of the
             FRB will be used. 
         nprocs: integer, optional
             If greater than 1, will create this number of subarrays out of data
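
With the new default, passing fields=None exports whatever the FRB already
holds; a short sketch (hypothetical filename, fields populated by indexing):

>>> frb["density"]; frb["temperature"]               # populate the buffer
>>> frb.export_fits("my_images.fits", clobber=True)  # uses list(frb.data.keys())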


https://bitbucket.org/yt_analysis/yt/commits/fdee8b799898/
Changeset:   fdee8b799898
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:33:12+00:00
Summary:     Documenting the options for exporting FRBs
Affected #:  2 files

diff -r 2273632cc3b67088d5b54f8712c2d3d263672eed -r fdee8b7998988debe130f92a42a09c58c016f9cd doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,29 @@
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This image may then be used in a hand-constructed Matplotlib image, for instance using
 :func:`~matplotlib.pyplot.imshow`.
 
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+ 
+.. code-block:: python
+
+   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.export_fits("my_images.fits", fields=["density","temperature"],
+                   clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the `units` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set `clobber=True`. 
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+   ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+   sp = ds_frb.sphere("c", (100.,"kpc"))
+
+
 .. _generating-profiles-and-histograms:
 
 Profiles and Histograms

diff -r 2273632cc3b67088d5b54f8712c2d3d263672eed -r fdee8b7998988debe130f92a42a09c58c016f9cd yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -348,7 +348,7 @@
         >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
         >>> slc = ds.slice(2, 0.0)
         >>> frb = slc.to_frb((500.,"kpc"), 500)
-        >>> ds2 = yt.load_frb(frb, fields=["density","temperature"], nprocs=32)
+        >>> ds2 = frb.export_dataset(fields=["density","temperature"], nprocs=32)
         """
         nx, ny = self.buff_size
         data = {}


https://bitbucket.org/yt_analysis/yt/commits/f1a5cbfd1f87/
Changeset:   f1a5cbfd1f87
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:37:29+00:00
Summary:     Forgot these tick marks
Affected #:  1 file

diff -r fdee8b7998988debe130f92a42a09c58c016f9cd -r f1a5cbfd1f87e5ec1ab2e63d62ce56eb18ca2231 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -58,8 +58,8 @@
    frb.export_fits("my_images.fits", fields=["density","temperature"],
                    clobber=True, units="kpc")
 
-In the FITS case, there is an option for setting the `units` of the coordinate system in
-the file. If you want to overwrite a file with the same name, set `clobber=True`. 
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
 
 The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
 as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:


https://bitbucket.org/yt_analysis/yt/commits/12d42c9f1263/
Changeset:   12d42c9f1263
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:42:16+00:00
Summary:     Document nprocs.
Affected #:  1 file

diff -r f1a5cbfd1f87e5ec1ab2e63d62ce56eb18ca2231 -r 12d42c9f12638a29152004a1518c63102d4b915c doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -69,6 +69,7 @@
    ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
    sp = ds_frb.sphere("c", (100.,"kpc"))
 
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` number of grids.
 
 .. _generating-profiles-and-histograms:
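
The decomposition can be checked directly; with nprocs=8 as in the doc
example, the resulting in-memory dataset is split into eight grids (this is
also asserted by the unit test added below):

>>> ds_frb = frb.export_dataset(fields=["density", "temperature"], nprocs=8)
>>> ds_frb.index.num_grids
8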
 


https://bitbucket.org/yt_analysis/yt/commits/0e25b2898af9/
Changeset:   0e25b2898af9
Branch:      yt
User:        jzuhone
Date:        2015-05-20 20:43:26+00:00
Summary:     Remove extra space
Affected #:  1 file

diff -r 12d42c9f12638a29152004a1518c63102d4b915c -r 0e25b2898af9cedbf3463b55980326feb3c6d7b8 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -343,7 +343,6 @@
 
         Examples
         --------
-
         >>> import yt
         >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
         >>> slc = ds.slice(2, 0.0)


https://bitbucket.org/yt_analysis/yt/commits/b32f056c4377/
Changeset:   b32f056c4377
Branch:      yt
User:        jzuhone
Date:        2015-05-20 22:06:45+00:00
Summary:     Unit-test exporting FRB as a dataset
Affected #:  1 file

diff -r 0e25b2898af9cedbf3463b55980326feb3c6d7b8 -r b32f056c437769fee1c137131fa201105a20bfea yt/visualization/tests/test_export_frb.py
--- /dev/null
+++ b/yt/visualization/tests/test_export_frb.py
@@ -0,0 +1,39 @@
+"""
+Tests for exporting an FRB as a dataset
+
+
+
+"""
+from __future__ import absolute_import
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import numpy as np
+from yt.testing import \
+    fake_random_ds, assert_equal, \
+    assert_allclose
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_export_frb():
+    test_ds = fake_random_ds(128)
+    slc = test_ds.slice(0,0.5)
+    frb = slc.to_frb((0.5,"unitary"), 64)
+    frb_ds = frb.export_dataset(fields=["density"], nprocs=8)
+    dd_frb = frb_ds.all_data()
+
+    yield assert_equal, frb_ds.domain_left_edge.v, np.array([0.25,0.25,0.0])
+    yield assert_equal, frb_ds.domain_right_edge.v, np.array([0.75,0.75,1.0])
+    yield assert_equal, frb_ds.domain_width.v, np.array([0.5,0.5,1.0])
+    yield assert_equal, frb_ds.domain_dimensions, np.array([64,64,1], dtype="int64")
+    yield assert_allclose, frb["density"].sum(), dd_frb.quantities.total_quantity("density")
+    yield assert_equal, frb_ds.index.num_grids, 8
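
These are nose-style yield tests: each yielded tuple is an assertion
function followed by its arguments, so they can also be driven without the
nose runner:

>>> from yt.visualization.tests.test_export_frb import test_export_frb
>>> for check in test_export_frb():
...     func, args = check[0], check[1:]
...     func(*args)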


https://bitbucket.org/yt_analysis/yt/commits/a2f92f40d3de/
Changeset:   a2f92f40d3de
Branch:      yt
User:        jzuhone
Date:        2015-05-29 19:47:35+00:00
Summary:     Merge
Affected #:  34 files

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -13,6 +13,7 @@
 yt/frontends/ramses/_ramses_reader.cpp
 yt/geometry/fake_octree.c
 yt/geometry/grid_container.c
+yt/geometry/grid_visitors.c
 yt/geometry/oct_container.c
 yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
@@ -25,6 +26,7 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/bitarray.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
@@ -39,6 +41,7 @@
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
@@ -59,3 +62,4 @@
 doc/source/reference/api/generated/*
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c README
--- a/README
+++ b/README
@@ -20,4 +20,4 @@
 For more information on installation, what to do if you run into problems, or 
 ways to help development, please visit our website.
 
-Enjoy!
+Enjoy!
\ No newline at end of file

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -116,7 +116,7 @@
   the width of the smallest grid element in the simulation from the
   last data snapshot (i.e. the one where time has evolved the
   longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
 * ``total_particles``, if supplied, this is a pre-calculated
   total number of dark matter
   particles present in the simulation. For example, this is useful

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,9 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Currently only implemented for Enzo.  Other simulation types coming 
-   soon.  Until then, rely on the above prescription for creating 
-   ``DatasetSeries`` objects.
+.. note:: Implemented for: Enzo, Gadget, OWLS.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 
@@ -93,8 +91,7 @@
 .. code-block:: python
 
   import yt
-  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo',
-                         find_outputs=False)
+  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
 
 Then, create a ``DatasetSeries`` object with the 
 :meth:`frontends.enzo.simulation_handling.EnzoSimulation.get_time_series` 
@@ -123,10 +120,10 @@
 to select a subset of the total data:
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when 
-  gathering datasets for time series.  Default: True.
+  gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``redshift_data`` (*bool*): Whether or not to include redshift outputs 
-  when gathering datasets for time series.  Default: True.
+  when gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``initial_time`` (*float*): The earliest time for outputs to be included.  
   If None, the initial time of the simulation is used.  This can be used in 
@@ -139,15 +136,12 @@
 * ``times`` (*list*): A list of times for which outputs will be found.
   Default: None.
 
-* ``time_units`` (*str*): The time units used for requesting outputs by time.
-  Default: '1' (code units).
-
 * ``initial_redshift`` (*float*): The earliest redshift for outputs to be 
   included.  If None, the initial redshift of the simulation is used.  This
   can be used in combination with either ``final_time`` or ``final_redshift``.
   Default: None.
 
-* ``final_time`` (*float*): The latest redshift for outputs to be included.  
+* ``final_redshift`` (*float*): The latest redshift for outputs to be included.  
   If None, the final redshift of the simulation is used.  This can be used 
   in combination with either ``initial_time`` or ``initial_redshift``.  
   Default: None.
@@ -157,11 +151,11 @@
 
 * ``initial_cycle`` (*float*): The earliest cycle for outputs to be 
   included.  If None, the initial cycle of the simulation is used.  This can
-  only be used with final_cycle.  Default: None.
+  only be used with final_cycle.  Default: None.  (Enzo only)
 
 * ``final_cycle`` (*float*): The latest cycle for outputs to be included.  
   If None, the final cycle of the simulation is used.  This can only be used 
-  in combination with initial_cycle.  Default: None.
+  in combination with initial_cycle.  Default: None.  (Enzo only)
 
 * ``tolerance`` (*float*):  Used in combination with ``times`` or ``redshifts`` 
   keywords, this is the tolerance within which outputs are accepted given 

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -570,6 +572,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset of some sort, and would like to extract emission in 
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a dataset with these slabs as different fields. In this example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  ds = create_spectral_slabs("intensity_cube.fits",
+                                    slab_centers, slab_width,
+                                    nan_mask=0.0)
+
+All keyword arguments to `create_spectral_slabs` are passed on to `load` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -635,13 +666,14 @@
    import yt
    ds = yt.load("snapshot_061.hdf5")
 
-However, yt cannot detect raw-binary Gadget data, and so you must specify the
-format as being Gadget:
+Gadget data in raw binary format can also be loaded with the ``load`` command. 
+This is only supported for snapshots created with the ``SnapFormat`` parameter 
+set to 1 (the standard for Gadget-2).
 
 .. code-block:: python
 
    import yt
-   ds = yt.GadgetDataset("snapshot_061")
+   ds = yt.load("snapshot_061")
 
 .. _particle-bbox:
 

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -213,10 +213,31 @@
 ++++++++++++++++++++++++++++++++++++++
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system. 
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite`` 
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+  $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+  $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
 
 .. code-block:: bash
 

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -56,7 +56,7 @@
 
    import yt
    ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
 This object, ``surface``, can be queried for values on the surface.  For
@@ -172,7 +172,7 @@
    trans = [1.0, 0.5]
    filename = './surfaces'
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i], color_field='temperature', plot_index = i)
@@ -248,7 +248,7 @@
        return (data['density']*data['density']*np.sqrt(data['temperature']))
    add_field("emissivity", function=_Emissivity, units=r"g*K/cm**6")
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i],

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c optional-requirements.txt
--- /dev/null
+++ b/optional-requirements.txt
@@ -0,0 +1,1 @@
+ipython[notebook]
\ No newline at end of file

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c requirements.txt
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+numpy==1.9.2 
+matplotlib==1.4.3 
+Cython==0.22 
+h5py==2.5.0 
+nose==1.3.6 
+sympy==0.7.6 

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -127,8 +127,8 @@
         field_units = {"dl": "cm", "redshift": "", "temperature": "K"}
         field_data = {}
         if use_peculiar_velocity:
-            input_fields.append('los_velocity')
-            field_units["los_velocity"] = "cm/s"
+            input_fields.append('velocity_los')
+            field_units["velocity_los"] = "cm/s"
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -171,7 +171,7 @@
             if use_peculiar_velocity:
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['los_velocity'] / speed_of_light_cgs
+                    field_data['velocity_los'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -208,7 +208,7 @@
             if use_peculiar_velocity:
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['los_velocity'] / speed_of_light_cgs
+                    field_data['velocity_los'] / speed_of_light_cgs
             thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['temperature']) /
                                             (amu_cgs * line['atomic_mass']))
@@ -260,7 +260,7 @@
                 if line['label_threshold'] is not None and \
                         column_density[lixel] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = km_per_cm * field_data['los_velocity'][lixel]
+                        peculiar_velocity = km_per_cm * field_data['velocity_los'][lixel]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -33,6 +33,13 @@
 
 class LightRay(CosmologySplice):
     """
+    LightRay(parameter_filename, simulation_type=None,
+             near_redshift=None, far_redshift=None,
+             use_minimum_datasets=True, deltaz_min=0.0,
+             minimum_coherent_box_fraction=0.0,
+             time_data=True, redshift_data=True,
+             find_outputs=False, load_kwargs=None):
+
     Create a LightRay object.  A light ray is much like a light cone,
     in that it stacks together multiple datasets in order to extend a
     redshift interval.  Unlike a light cone, which does randomly
@@ -94,6 +101,12 @@
         Whether or not to search for datasets in the current 
         directory.
         Default: False.
+    load_kwargs : optional, dict
+        Optional dictionary of kwargs to be passed to the "load" 
+        function, appropriate for use of certain frontends.  E.g.
+        Tipsy using "bounding_box"
+        Gadget using "unit_base", etc.
+        Default : None
 
     """
     def __init__(self, parameter_filename, simulation_type=None,
@@ -101,7 +114,7 @@
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
-                 find_outputs=False):
+                 find_outputs=False, load_kwargs=None):
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
@@ -109,13 +122,16 @@
         self.deltaz_min = deltaz_min
         self.minimum_coherent_box_fraction = minimum_coherent_box_fraction
         self.parameter_filename = parameter_filename
-
+        if load_kwargs is None:
+            self.load_kwargs = {}
+        else:
+            self.load_kwargs = load_kwargs
         self.light_ray_solution = []
         self._data = {}
 
         # Make a light ray from a single, given dataset.        
         if simulation_type is None:
-            ds = load(parameter_filename)
+            ds = load(parameter_filename, **self.load_kwargs)
             if ds.cosmological_simulation:
                 redshift = ds.current_redshift
                 self.cosmology = Cosmology(
@@ -243,6 +259,12 @@
                        get_los_velocity=True, redshift=None,
                        njobs=-1):
         """
+        make_light_ray(seed=None, start_position=None, end_position=None,
+                       trajectory=None, fields=None, setup_function=None,
+                       solution_filename=None, data_filename=None,
+                       get_los_velocity=True, redshift=None,
+                       njobs=-1)
+
         Create a light ray and get field values for each lixel.  A light
         ray consists of a list of field values for cells intersected by
         the ray and the path length of the ray through those cells.
@@ -343,9 +365,9 @@
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_los_velocity:
-            all_fields.extend(['x-velocity', 'y-velocity',
-                               'z-velocity', 'los_velocity'])
-            data_fields.extend(['x-velocity', 'y-velocity', 'z-velocity'])
+            all_fields.extend(['velocity_x', 'velocity_y',
+                               'velocity_z', 'velocity_los'])
+            data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
@@ -353,7 +375,7 @@
                                                        njobs=njobs):
 
             # Load dataset for segment.
-            ds = load(my_segment['filename'])
+            ds = load(my_segment['filename'], **self.load_kwargs)
 
             my_segment['unique_identifier'] = ds.unique_identifier
             if redshift is not None:
@@ -364,11 +386,15 @@
 
             if setup_function is not None:
                 setup_function(ds)
-            
-            my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                ds.domain_left_edge
-            my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                ds.domain_left_edge
+
+            if start_position is not None:
+                my_segment["start"] = ds.arr(my_segment["start"], "code_length")
+                my_segment["end"] = ds.arr(my_segment["end"], "code_length")
+            else:
+                my_segment["start"] = ds.domain_width * my_segment["start"] + \
+                  ds.domain_left_edge
+                my_segment["end"] = ds.domain_width * my_segment["end"] + \
+                  ds.domain_left_edge
 
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
@@ -412,10 +438,10 @@
                 if get_los_velocity:
                     line_of_sight = sub_segment[1] - sub_segment[0]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
-                    sub_vel = ds.arr([sub_ray['x-velocity'],
-                                      sub_ray['y-velocity'],
-                                      sub_ray['z-velocity']])
-                    sub_data['los_velocity'].extend((np.rollaxis(sub_vel, 1) *
+                    sub_vel = ds.arr([sub_ray['velocity_x'],
+                                      sub_ray['velocity_y'],
+                                      sub_ray['velocity_z']])
+                    sub_data['velocity_los'].extend((np.rollaxis(sub_vel, 1) *
                                                      line_of_sight).sum(axis=1)[asort])
                     del sub_vel
 
@@ -423,7 +449,6 @@
                 del sub_ray, asort
 
             for key in sub_data:
-                if key in "xyz": continue
                 sub_data[key] = ds.arr(sub_data[key]).in_cgs()
 
             # Get redshift for each lixel.  Assume linear relation between l and z.
@@ -461,18 +486,32 @@
 
     @parallel_root_only
     def _write_light_ray(self, filename, data):
-        "Write light ray data to hdf5 file."
+        """
+        _write_light_ray(filename, data)
+
+        Write light ray data to hdf5 file.
+        """
 
         mylog.info("Saving light ray data to %s." % filename)
         output = h5py.File(filename, 'w')
         for field in data.keys():
-            output.create_dataset(field, data=data[field])
-            output[field].attrs["units"] = str(data[field].units)
+            # if the field is a tuple, only use the second part of the tuple
+            # in the hdf5 output (i.e. ('gas', 'density') -> 'density')
+            if isinstance(field, tuple):
+                fieldname = field[1]
+            else:
+                fieldname = field
+            output.create_dataset(fieldname, data=data[field])
+            output[fieldname].attrs["units"] = str(data[field].units)
         output.close()
 
     @parallel_root_only
     def _write_light_ray_solution(self, filename, extra_info=None):
-        "Write light ray solution to a file."
+        """
+        _write_light_ray_solution(filename, extra_info=None)
+
+        Write light ray solution to a file.
+        """
 
         mylog.info("Writing light ray solution to %s." % filename)
         f = open(filename, 'w')
@@ -490,7 +529,11 @@
         f.close()
 
 def _flatten_dict_list(data, exceptions=None):
-    "Flatten the list of dicts into one dict."
+    """
+    _flatten_dict_list(data, exceptions=None)
+
+    Flatten the list of dicts into one dict.
+    """
 
     if exceptions is None: exceptions = []
     new_data = {}
@@ -505,12 +548,20 @@
     return new_data
 
 def vector_length(start, end):
-    "Calculate vector length."
+    """
+    vector_length(start, end)
+    
+    Calculate vector length.
+    """
 
     return np.sqrt(np.power((end - start), 2).sum())
 
 def periodic_distance(coord1, coord2):
-    "Calculate length of shortest vector between to points in periodic domain."
+    """
+    periodic_distance(coord1, coord2)
+
+    Calculate length of shortest vector between to points in periodic domain.
+    """
     dif = coord1 - coord2
 
     dim = np.ones(coord1.shape,dtype=int)
@@ -524,6 +575,8 @@
 
 def periodic_ray(start, end, left=None, right=None):
     """
+    periodic_ray(start, end, left=None, right=None)
+
     Break up periodic ray into non-periodic segments. 
     Accepts start and end points of periodic ray as YTArrays.
     Accepts optional left and right edges of periodic volume as YTArrays.

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1232,9 +1232,8 @@
         fglob = path.join(basedir, 'halos_%d.*.bin' % n)
         files = glob.glob(fglob)
         halos = self._get_halos_binary(files)
-        #Jc = mass_sun_cgs/ ds['mpchcm'] * 1e5
         Jc = 1.0
-        length = 1.0 / ds['mpchcm']
+        length = 1.0 / ds['Mpchcm']
         conv = dict(pos = np.array([length, length, length,
                                     1, 1, 1]), # to unitary
                     r=1.0/ds['kpchcm'], # to unitary

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -729,7 +729,7 @@
 
     >>> import yt
     >>> ds = yt.load("RedshiftOutput0005")
-    >>> sp = ds.sphere("max", (1.0, 'mpc'))
+    >>> sp = ds.sphere("max", (1.0, 'Mpc'))
     >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
     """
     _type_name = "cut_region"

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -406,6 +406,7 @@
         self.basename = os.path.basename(parameter_filename)
         self.directory = os.path.dirname(parameter_filename)
         self.parameters = {}
+        self.key_parameters = []
 
         # Set some parameter defaults.
         self._set_parameter_defaults()
@@ -420,6 +421,21 @@
         
         self.print_key_parameters()
 
+    def _set_parameter_defaults(self):
+        pass
+
+    def _parse_parameter_file(self):
+        pass
+
+    def _set_units(self):
+        pass
+
+    def _calculate_simulation_bounds(self):
+        pass
+
+    def _get_all_outputs(**kwargs):
+        pass
+        
     def __repr__(self):
         return self.parameter_filename
 
@@ -445,23 +461,78 @@
         """
         Print out some key parameters for the simulation.
         """
-        for a in ["domain_dimensions", "domain_left_edge",
-                  "domain_right_edge", "initial_time", "final_time",
-                  "stop_cycle", "cosmological_simulation"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in dataset definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)
-        if hasattr(self, "cosmological_simulation") and \
-           getattr(self, "cosmological_simulation"):
+        if self.simulation_type == "grid":
+            for a in ["domain_dimensions", "domain_left_edge",
+                      "domain_right_edge"]:
+                self._print_attr(a)
+        for a in ["initial_time", "final_time",
+                  "cosmological_simulation"]:
+            self._print_attr(a)
+        if getattr(self, "cosmological_simulation", False):
             for a in ["box_size", "omega_lambda",
                       "omega_matter", "hubble_constant",
                       "initial_redshift", "final_redshift"]:
-                if not hasattr(self, a):
-                    mylog.error("Missing %s in dataset definition!", a)
-                    continue
-                v = getattr(self, a)
-                mylog.info("Parameters: %-25s = %s", a, v)
+                self._print_attr(a)
+        for a in self.key_parameters:
+            self._print_attr(a)
         mylog.info("Total datasets: %d." % len(self.all_outputs))
 
+    def _print_attr(self, a):
+        """
+        Print the attribute or warn about it missing.
+        """
+        if not hasattr(self, a):
+            mylog.error("Missing %s in dataset definition!", a)
+            return
+        v = getattr(self, a)
+        mylog.info("Parameters: %-25s = %s", a, v)
+
+    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
+        r"""
+        Get datasets at or near to given values.
+
+        Parameters
+        ----------
+        key : str
+            The key by which to retrieve outputs, usually 'time' or
+            'redshift'.
+        values : array_like
+            A list of values, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+
+        Examples
+        --------
+        >>> datasets = es._get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
+
+        """
+
+        if not isinstance(values, YTArray):
+            if isinstance(values, tuple) and len(values) == 2:
+                values = self.arr(*values)
+            else:
+                values = self.arr(values)
+        values = values.in_cgs()
+
+        if outputs is None:
+            outputs = self.all_outputs
+        my_outputs = []
+        if not outputs:
+            return my_outputs
+        for value in values:
+            outputs.sort(key=lambda obj:np.abs(value - obj[key]))
+            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
+                    and outputs[0] not in my_outputs:
+                my_outputs.append(outputs[0])
+            else:
+                mylog.error("No dataset added for %s = %f.", key, value)
+
+        outputs.sort(key=lambda obj: obj['time'])
+        return my_outputs
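
The consolidated selector above is reached through the public
``get_time_series`` interface; a minimal sketch, assuming an Enzo
parameter file named "my_simulation.par", of selecting outputs near a
set of times (which may be a plain list, a (values, unit) tuple, or a
YTArray; all three are converted to CGS before comparison):

    >>> import yt
    >>> es = yt.simulation("my_simulation.par", "Enzo")
    >>> es.get_time_series(times=([0.5, 1.0, 1.5], "Gyr"))
    >>> for ds in es:
    ...     print ds.current_time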

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/fields/field_aliases.py
--- a/yt/fields/field_aliases.py
+++ b/yt/fields/field_aliases.py
@@ -141,12 +141,12 @@
     ("CellMassCode",                          "code_mass"),
     ("TotalMassMsun",                         "msun"),
     ("CellVolumeCode",                        "code_length"),
-    ("CellVolumeMpc",                         "mpc**3"),
-    ("ParticleSpecificAngularMomentumXKMSMPC","km/s/mpc"),
-    ("ParticleSpecificAngularMomentumYKMSMPC","km/s/mpc"),
-    ("ParticleSpecificAngularMomentumZKMSMPC","km/s/mpc"),
-    ("RadiusMpc",                             "mpc"),
-    ("ParticleRadiusMpc",                     "mpc"),
+    ("CellVolumeMpc",                         "Mpc**3"),
+    ("ParticleSpecificAngularMomentumXKMSMPC","km/s/Mpc"),
+    ("ParticleSpecificAngularMomentumYKMSMPC","km/s/Mpc"),
+    ("ParticleSpecificAngularMomentumZKMSMPC","km/s/Mpc"),
+    ("RadiusMpc",                             "Mpc"),
+    ("ParticleRadiusMpc",                     "Mpc"),
     ("ParticleRadiuskpc",                     "kpc"),
     ("Radiuskpc",                             "kpc"),
     ("ParticleRadiuskpch",                    "kpc"),

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -206,7 +206,7 @@
         slice_3dl[axi] = sl_left
         slice_3dr[axi] = sl_right
         def func(field, data):
-            ds = div_fac * data["index", "dx"]
+            ds = div_fac * data["index", "d%s" % ax]
             f  = data[grad_field][slice_3dr]/ds[slice_3d]
             f -= data[grad_field][slice_3dl]/ds[slice_3d]
             new_field = data.ds.arr(np.zeros_like(data[grad_field], dtype=np.float64),
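
The one-character fix above matters because the centered difference
must be normalized by the cell spacing along the axis being
differentiated ("dx", "dy", or "dz"), not always "dx". A standalone
NumPy sketch of the same stencil for the x axis, assuming a uniform
spacing:

    >>> import numpy as np
    >>> f = np.random.random((8, 8, 8))
    >>> dx = 0.1      # cell spacing along x; dy or dz for the other axes
    >>> div_fac = 2.0
    >>> # interior values of df/dx via a centered difference
    >>> grad_x = (f[2:, 1:-1, 1:-1] - f[:-2, 1:-1, 1:-1]) / (div_fac * dx)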

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -808,8 +808,10 @@
         # set the periodicity based on the runtime parameters
         periodicity = [True, True, True]
         if not self.parameters['-x'] == "interior": periodicity[0] = False
-        if not self.parameters['-y'] == "interior": periodicity[1] = False
-        if not self.parameters['-z'] == "interior": periodicity[2] = False
+        if self.dimensionality >= 2:
+            if not self.parameters['-y'] == "interior": periodicity[1] = False
+        if self.dimensionality == 3:
+            if not self.parameters['-z'] == "interior": periodicity[2] = False
 
         self.periodicity = ensure_tuple(periodicity)
     

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -185,7 +185,7 @@
                     element, weight = field[2:4], field[4:-1]
                 else:
                     element, weight = field[2:3], field[3:-1]
-                weight = int(weight)
+
                 # Here we can, later, add number density.
 
 

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -890,7 +890,7 @@
         elif self.dimensionality == 2:
             self._setup_2d()
 
-    def set_code_units(self):
+    def _set_code_unit_attributes(self):
         if self.cosmological_simulation:
             k = self.cosmology_get_units()
             # Now some CGS values
@@ -928,17 +928,6 @@
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
         self.magnetic_unit = self.quan(magnetic_unit, "gauss")
 
-        self._override_code_units()
-
-        self.unit_registry.modify("code_magnetic", self.magnetic_unit)
-        self.unit_registry.modify("code_length", self.length_unit)
-        self.unit_registry.modify("code_mass", self.mass_unit)
-        self.unit_registry.modify("code_time", self.time_unit)
-        self.unit_registry.modify("code_velocity", self.velocity_unit)
-        DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
-                               DW.units.dimensions)
-
     def cosmology_get_units(self):
         """
         Return an Enzo-fortran style dictionary of units to feed into custom

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -13,36 +13,34 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.funcs import *
-
 import numpy as np
 import glob
 import os
 
 from yt.convenience import \
-    load
+    load, \
+    only_on_root
 from yt.data_objects.time_series import \
     SimulationTimeSeries, DatasetSeries
 from yt.units import dimensions
 from yt.units.unit_registry import \
-     UnitRegistry
+    UnitRegistry
 from yt.units.yt_array import \
-     YTArray, YTQuantity
+    YTArray, YTQuantity
 from yt.utilities.cosmology import \
     Cosmology
-from yt.utilities.definitions import \
-    sec_conversion
 from yt.utilities.exceptions import \
     InvalidSimulationTimeSeries, \
     MissingParameter, \
     NoStoppingCondition
+from yt.utilities.logger import ytLogger as \
+    mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects
-from yt.utilities.physical_constants import \
-    gravitational_constant_cgs as G
-
+    
 class EnzoSimulation(SimulationTimeSeries):
-    r"""Initialize an Enzo Simulation object.
+    r"""
+    Initialize an Enzo Simulation object.
 
     Upon creation, the parameter file is parsed and the time and redshift
     are calculated and stored in all_outputs.  A time units dictionary is
@@ -63,14 +61,8 @@
 
     Examples
     --------
-    >>> from yt.mods import *
-    >>> es = EnzoSimulation("my_simulation.par")
-    >>> es.get_time_series()
-    >>> for ds in es:
-    ...     print ds.current_time
-
-    >>> from yt.mods import *
-    >>> es = simulation("my_simulation.par", "Enzo")
+    >>> import yt
+    >>> es = yt.simulation("my_simulation.par", "Enzo")
     >>> es.get_time_series()
     >>> for ds in es:
     ...     print ds.current_time
@@ -78,7 +70,8 @@
     """
 
     def __init__(self, parameter_filename, find_outputs=False):
-
+        self.simulation_type = "grid"
+        self.key_parameters = ["stop_cycle"]
         SimulationTimeSeries.__init__(self, parameter_filename,
                                       find_outputs=find_outputs)
 
@@ -87,14 +80,14 @@
         self.unit_registry.lut["code_time"] = (1.0, dimensions.time)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
-            self.enzo_cosmology = \
+            self.cosmology = \
               EnzoCosmology(self.parameters['CosmologyHubbleConstantNow'],
                             self.parameters['CosmologyOmegaMatterNow'],
                             self.parameters['CosmologyOmegaLambdaNow'],
                             0.0, self.parameters['CosmologyInitialRedshift'],
                             unit_registry=self.unit_registry)
 
-            self.time_unit = self.enzo_cosmology.time_unit.in_units("s")
+            self.time_unit = self.cosmology.time_unit.in_units("s")
             self.unit_registry.modify("h", self.hubble_constant)
             # Comoving lengths
             for my_unit in ["m", "pc", "AU", "au"]:
@@ -160,7 +153,7 @@
             used in combination with either final_time or
             final_redshift.
             Default: None.
-        final_time : float
+        final_redshift : float
             The latest redshift for outputs to be included.  If None,
             the final redshift of the simulation is used.  This can be
             used in combination with either initial_time or
@@ -197,8 +190,8 @@
         Examples
         --------
 
-        >>> from yt.mods import *
-        >>> es = simulation("my_simulation.par", "Enzo")
+        >>> import yt
+        >>> es = yt.simulation("my_simulation.par", "Enzo")
         
         >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), 
                                redshift_data=False)
@@ -207,8 +200,6 @@
 
         >>> es.get_time_series(final_cycle=100000)
 
-        >>> es.get_time_series(find_outputs=True)
-
         >>> # after calling get_time_series
         >>> for ds in es.piter():
         ...     p = ProjectionPlot(ds, 'x', "density")
@@ -226,7 +217,9 @@
         if (initial_redshift is not None or \
             final_redshift is not None) and \
             not self.cosmological_simulation:
-            raise InvalidSimulationTimeSeries('An initial or final redshift has been given for a noncosmological simulation.')
+            raise InvalidSimulationTimeSeries(
+                "An initial or final redshift has been given for a " +
+                "noncosmological simulation.")
 
         if time_data and redshift_data:
             my_all_outputs = self.all_outputs
@@ -244,12 +237,14 @@
 
         # Apply selection criteria to the set.
         if times is not None:
-            my_outputs = self._get_outputs_by_time(times, tolerance=tolerance,
-                                                   outputs=my_all_outputs)
+            my_outputs = self._get_outputs_by_key("time", times,
+                                                  tolerance=tolerance,
+                                                  outputs=my_all_outputs)
 
         elif redshifts is not None:
-            my_outputs = self._get_outputs_by_redshift(redshifts, tolerance=tolerance,
-                                                       outputs=my_all_outputs)
+            my_outputs = self._get_outputs_by_key("redshift", redshifts,
+                                                  tolerance=tolerance,
+                                                  outputs=my_all_outputs)
 
         elif initial_cycle is not None or final_cycle is not None:
             if initial_cycle is None:
@@ -272,9 +267,11 @@
                 elif isinstance(initial_time, tuple) and len(initial_time) == 2:
                     initial_time = self.quan(*initial_time)
                 elif not isinstance(initial_time, YTArray):
-                    raise RuntimeError("Error: initial_time must be given as a float or tuple of (value, units).")
+                    raise RuntimeError(
+                        "Error: initial_time must be given as a float or " +
+                        "tuple of (value, units).")
             elif initial_redshift is not None:
-                my_initial_time = self.enzo_cosmology.t_from_z(initial_redshift)
+                my_initial_time = self.cosmology.t_from_z(initial_redshift)
             else:
                 my_initial_time = self.initial_time
 
@@ -284,10 +281,12 @@
                 elif isinstance(final_time, tuple) and len(final_time) == 2:
                     final_time = self.quan(*final_time)
                 elif not isinstance(final_time, YTArray):
-                    raise RuntimeError("Error: final_time must be given as a float or tuple of (value, units).")
+                    raise RuntimeError(
+                        "Error: final_time must be given as a float or " +
+                        "tuple of (value, units).")
                 my_final_time = final_time.in_units("s")
             elif final_redshift is not None:
-                my_final_time = self.enzo_cosmology.t_from_z(final_redshift)
+                my_final_time = self.cosmology.t_from_z(final_redshift)
             else:
                 my_final_time = self.final_time
 
@@ -390,8 +389,9 @@
                     raise MissingParameter(self.parameter_filename, v)
                 setattr(self, a, self.parameters[v])
         else:
+            self.cosmological_simulation = 0
             self.omega_lambda = self.omega_matter = \
-                self.hubble_constant = self.cosmological_simulation = 0.0
+                self.hubble_constant = 0.0
 
         # make list of redshift outputs
         self.all_redshift_outputs = []
@@ -405,16 +405,10 @@
             del output['index']
         self.all_redshift_outputs = redshift_outputs
 
-    def _calculate_redshift_dump_times(self):
-        "Calculates time from redshift of redshift outputs."
-
-        if not self.cosmological_simulation: return
-        for output in self.all_redshift_outputs:
-            output['time'] = self.enzo_cosmology.t_from_z(output['redshift'])
-        self.all_redshift_outputs.sort(key=lambda obj:obj['time'])
-
     def _calculate_time_outputs(self):
-        "Calculate time outputs and their redshifts if cosmological."
+        """
+        Calculate time outputs and their redshifts if cosmological.
+        """
 
         self.all_time_outputs = []
         if self.final_time is None or \
@@ -432,7 +426,7 @@
             output = {'index': index, 'filename': filename, 'time': current_time.copy()}
             output['time'] = min(output['time'], self.final_time)
             if self.cosmological_simulation:
-                output['redshift'] = self.enzo_cosmology.z_from_t(current_time)
+                output['redshift'] = self.cosmology.z_from_t(current_time)
 
             self.all_time_outputs.append(output)
             if np.abs(self.final_time - current_time) / self.final_time < 1e-4: break
@@ -440,7 +434,9 @@
             index += 1
 
     def _calculate_cycle_outputs(self):
-        "Calculate cycle outputs."
+        """
+        Calculate cycle outputs.
+        """
 
         mylog.warn('Calculating cycle outputs.  Dataset times will be unavailable.')
 
@@ -460,7 +456,9 @@
             index += 1
 
     def _get_all_outputs(self, find_outputs=False):
-        "Get all potential datasets and combine into a time-sorted list."
+        """
+        Get all potential datasets and combine into a time-sorted list.
+        """
 
         # Create the set of outputs from which further selection will be done.
         if find_outputs:
@@ -468,8 +466,12 @@
 
         elif self.parameters['dtDataDump'] > 0 and \
           self.parameters['CycleSkipDataDump'] > 0:
-            mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set.", self.parameter_filename )
-            mylog.info("    Unable to calculate datasets.  Attempting to search in the current directory")
+            mylog.info(
+                "Simulation %s has both dtDataDump and CycleSkipDataDump set.",
+                self.parameter_filename )
+            mylog.info(
+                "    Unable to calculate datasets.  " +
+                "Attempting to search in the current directory")
             self._find_outputs()
 
         else:
@@ -480,7 +482,10 @@
                 self._calculate_time_outputs()
 
             # Calculate times for redshift outputs.
-            self._calculate_redshift_dump_times()
+            if self.cosmological_simulation:
+                for output in self.all_redshift_outputs:
+                    output["time"] = self.cosmology.t_from_z(output["redshift"])
+                self.all_redshift_outputs.sort(key=lambda obj:obj["time"])
 
             self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
             if self.parameters['CycleSkipDataDump'] <= 0:
@@ -496,9 +501,9 @@
 
         # Convert initial/final redshifts to times.
         if self.cosmological_simulation:
-            self.initial_time = self.enzo_cosmology.t_from_z(self.initial_redshift)
+            self.initial_time = self.cosmology.t_from_z(self.initial_redshift)
             self.initial_time.units.registry = self.unit_registry
-            self.final_time = self.enzo_cosmology.t_from_z(self.final_redshift)
+            self.final_time = self.cosmology.t_from_z(self.final_redshift)
             self.final_time.units.registry = self.unit_registry
 
         # If not a cosmology simulation, figure out the stopping criteria.
@@ -516,11 +521,15 @@
                     'StopCycle' in self.parameters):
                 raise NoStoppingCondition(self.parameter_filename)
             if self.final_time is None:
-                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.',
-                           self.parameter_filename)
+                mylog.warn(
+                    "Simulation %s has no stop time set, stopping condition " +
+                    "will be based only on cycles.",
+                    self.parameter_filename)
 
     def _set_parameter_defaults(self):
-        "Set some default parameters to avoid problems if they are not in the parameter file."
+        """
+        Set some default parameters to avoid problems if they are not in the parameter file.
+        """
 
         self.parameters['GlobalDir'] = self.directory
         self.parameters['DataDumpName'] = "data"
@@ -570,7 +579,9 @@
                 self.final_redshift = self.all_outputs[-1]['redshift']
 
     def _check_for_outputs(self, potential_outputs):
-        r"""Check a list of files to see if they are valid datasets."""
+        """
+        Check a list of files to see if they are valid datasets.
+        """
 
         only_on_root(mylog.info, "Checking %d potential outputs.", 
                      len(potential_outputs))
@@ -603,112 +614,10 @@
 
         return my_outputs
 
-    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
-        r"""Get datasets at or near to given values.
-
-        Parameters
-        ----------
-        key: str
-            The key by which to retrieve outputs, usually 'time' or
-            'redshift'.
-        values: array_like
-            A list of values, given as floats.
-        tolerance : float
-            If not None, do not return a dataset unless the value is
-            within the tolerance value.  If None, simply return the
-            nearest dataset.
-            Default: None.
-        outputs : list
-            The list of outputs from which to choose.  If None,
-            self.all_outputs is used.
-            Default: None.
-
-        Examples
-        --------
-        >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
-
-        """
-
-        if not isinstance(values, np.ndarray):
-            values = ensure_list(values)
-        if outputs is None:
-            outputs = self.all_outputs
-        my_outputs = []
-        if not outputs:
-            return my_outputs
-        for value in values:
-            outputs.sort(key=lambda obj:np.abs(value - obj[key]))
-            if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \
-                    and outputs[0] not in my_outputs:
-                my_outputs.append(outputs[0])
-            else:
-                mylog.error("No dataset added for %s = %f.", key, value)
-
-        outputs.sort(key=lambda obj: obj['time'])
-        return my_outputs
-
-    def _get_outputs_by_redshift(self, redshifts, tolerance=None, outputs=None):
-        r"""Get datasets at or near to given redshifts.
-
-        Parameters
-        ----------
-        redshifts: array_like
-            A list of redshifts, given as floats.
-        tolerance : float
-            If not None, do not return a dataset unless the value is
-            within the tolerance value.  If None, simply return the
-            nearest dataset.
-            Default: None.
-        outputs : list
-            The list of outputs from which to choose.  If None,
-            self.all_outputs is used.
-            Default: None.
-
-        Examples
-        --------
-        >>> datasets = es.get_outputs_by_redshift([0, 1, 2], tolerance=0.1)
-
-        """
-
-        return self._get_outputs_by_key('redshift', redshifts, tolerance=tolerance,
-                                     outputs=outputs)
-
-    def _get_outputs_by_time(self, times, tolerance=None, outputs=None):
-        r"""Get datasets at or near to given times.
-
-        Parameters
-        ----------
-        times: tuple of type (float array, str)
-            A list of times for which outputs will be found and the units 
-            of those values.  For example, ([0, 1, 2, 3], "s").
-        tolerance : float
-            If not None, do not return a dataset unless the time is
-            within the tolerance value.  If None, simply return the
-            nearest dataset.
-            Default = None.
-        outputs : list
-            The list of outputs from which to choose.  If None,
-            self.all_outputs is used.
-            Default: None.
-
-        Examples
-        --------
-        >>> datasets = es.get_outputs_by_time([600, 500, 400], tolerance=10.)
-
-        """
-
-        if not isinstance(times, YTArray):
-            if isinstance(times, tuple) and len(times) == 2:
-                times = self.arr(*times)
-            else:
-                times = self.arr(times, "code_time")
-        times = times.in_units("s")
-        return self._get_outputs_by_key('time', times, tolerance=tolerance,
-                                        outputs=outputs)
-
     def _write_cosmology_outputs(self, filename, outputs, start_index,
                                  decimals=3):
-        r"""Write cosmology output parameters for a cosmology splice.
+        """
+        Write cosmology output parameters for a cosmology splice.
         """
 
         mylog.info("Writing redshift output list to %s.", filename)

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -165,7 +165,7 @@
                     units = self._determine_image_units(hdu.header, known_units)
                     try:
                         # Grab field name from btype
-                        fname = hdu.header["btype"].lower()
+                        fname = hdu.header["btype"]
                     except KeyError:
                         # Try to guess the name from the units
                         fname = self._guess_name_from_units(units)
@@ -205,18 +205,6 @@
                                   "the same dimensions as the primary and will not be " +
                                   "available as a field.")
 
-        # For line fields, we still read the primary field. Not sure how to extend this
-        # For now, we pick off the first field from the field list.
-        line_db = self.dataset.line_database
-        primary_fname = self.field_list[0][1]
-        for k, v in iteritems(line_db):
-            mylog.info("Adding line field: %s at frequency %g GHz" % (k, v))
-            self.field_list.append((self.dataset_type, k))
-            self._ext_map[k] = self._ext_map[primary_fname]
-            self._axis_map[k] = self._axis_map[primary_fname]
-            self._file_map[k] = self._file_map[primary_fname]
-            self.dataset.field_units[k] = self.dataset.field_units[primary_fname]
-
     def _count_grids(self):
         self.num_grids = self.ds.parameters["nprocs"]
 
@@ -242,19 +230,11 @@
                 bbox = np.array([[le,re] for le, re in zip(ds.domain_left_edge,
                                                            ds.domain_right_edge)])
                 dims = np.array(ds.domain_dimensions)
-                # If we are creating a dataset of lines, only decompose along the position axes
-                if len(ds.line_database) > 0:
-                    dims[ds.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.ds.arr(gle, "code_length")
                 self.grid_right_edge = self.ds.arr(gre, "code_length")
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
-                # If we are creating a dataset of lines, only decompose along the position axes
-                if len(ds.line_database) > 0:
-                    self.grid_left_edge[:,ds.spec_axis] = ds.domain_left_edge[ds.spec_axis]
-                    self.grid_right_edge[:,ds.spec_axis] = ds.domain_right_edge[ds.spec_axis]
-                    self.grid_dimensions[:,ds.spec_axis] = ds.domain_dimensions[ds.spec_axis]
         else:
             self.grid_left_edge[0,:] = ds.domain_left_edge
             self.grid_right_edge[0,:] = ds.domain_right_edge
@@ -322,8 +302,6 @@
                  nan_mask=None,
                  spectral_factor=1.0,
                  z_axis_decomp=False,
-                 line_database=None,
-                 line_width=None,
                  suppress_astropy_warnings=True,
                  parameters=None,
                  units_override=None):
@@ -336,19 +314,6 @@
         self.z_axis_decomp = z_axis_decomp
         self.spectral_factor = spectral_factor
 
-        if line_width is not None:
-            self.line_width = YTQuantity(line_width[0], line_width[1])
-            self.line_units = line_width[1]
-            mylog.info("For line folding, spectral_factor = 1.0")
-            self.spectral_factor = 1.0
-        else:
-            self.line_width = None
-
-        self.line_database = {}
-        if line_database is not None:
-            for k in line_database:
-                self.line_database[k] = YTQuantity(line_database[k], self.line_units)
-
         if suppress_astropy_warnings:
             warnings.filterwarnings('ignore', module="astropy", append=True)
         auxiliary_files = ensure_list(auxiliary_files)
@@ -361,13 +326,13 @@
             self.nan_mask = {"all":nan_mask}
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
-        if isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU):
-            self._handle = FITSFileHandler(self.filenames[0])
-            fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+        self._handle = FITSFileHandler(self.filenames[0])
+        if (isinstance(self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU) or
+            isinstance(self.filenames[0], _astropy.pyfits.HDUList)):
+            fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex
         else:
-            self._handle = FITSFileHandler(self.filenames[0])
             fn = self.filenames[0]
-        self._handle._fits_files = [self._handle]
+        self._handle._fits_files.append(self._handle)
         if self.num_files > 1:
             for fits_file in auxiliary_files:
                 if isinstance(fits_file, _astropy.pyfits.hdu.image._ImageBaseHDU):
@@ -540,20 +505,14 @@
 
         # If nprocs is None, do some automatic decomposition of the domain
         if self.specified_parameters["nprocs"] is None:
-            if len(self.line_database) > 0:
-                dims = 2
-            else:
-                dims = self.dimensionality
             if self.z_axis_decomp:
                 nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
             else:
-                nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+                nprocs = np.around(np.prod(self.domain_dimensions)/32**self.dimensionality).astype("int")
             self.parameters["nprocs"] = max(min(nprocs, 512), 1)
         else:
             self.parameters["nprocs"] = self.specified_parameters["nprocs"]
 
-        self.reversed = False
-
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
         self.spec_cube = False
         x = 0
@@ -618,41 +577,23 @@
             self._z0 = self.wcs.wcs.crval[self.spec_axis]
             self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
-            if self.line_width is not None:
-                if self._dz < 0.0:
-                    self.reversed = True
-                    le = self.dims[self.spec_axis]+0.5
-                else:
-                    le = 0.5
-                self.line_width = self.line_width.in_units(self.spec_unit)
-                self.freq_begin = (le-self._p0)*self._dz + self._z0
-                # We now reset these so that they are consistent
-                # with the new setup
-                self._dz = np.abs(self._dz)
-                self._p0 = 0.0
-                self._z0 = 0.0
-                nz = np.rint(self.line_width.value/self._dz).astype("int")
-                self.line_width = self._dz*nz
-                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
-                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
-                self.domain_dimensions[self.spec_axis] = nz
-            else:
-                if self.spectral_factor == "auto":
-                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
-                                                                             self.lat_axis]]))
-                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
-                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
-                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
-                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
-                                                        self.spectral_factor*Dz
-                self._dz /= self.spectral_factor
-                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+            if self.spectral_factor == "auto":
+                self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                         self.lat_axis]]))
+                self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+            Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+            self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+                                                     self.spectral_factor*Dz
+            self._dz /= self.spectral_factor
+            self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
+            
         else:
 
             self.wcs_2d = self.wcs
             self.spec_axis = 2
             self.spec_name = "z"
-            self.spec_unit = "code length"
+            self.spec_unit = "code_length"
 
     def spec2pixel(self, spec_value):
         sv = self.arr(spec_value).in_units(self.spec_unit)
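
As a worked example of the "auto" branch above: for a 512 x 512 x 64
(lon, lat, spec) cube, the spectral factor comes out to
max(512, 512) / 64 = 8, so the spectral width of the domain is
stretched by a factor of 8 and the cell spacing ``_dz`` shrinks by the
same factor:

    >>> nx, ny, nz = 512, 512, 64
    >>> spectral_factor = float(max(nx, ny)) / nz
    >>> print spectral_factor
    8.0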

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -24,12 +24,6 @@
         super(IOHandlerFITS, self).__init__(ds)
         self.ds = ds
         self._handle = ds._handle
-        if self.ds.line_width is not None:
-            self.line_db = self.ds.line_database
-            self.dz = self.ds.line_width/self.domain_dimensions[self.ds.spec_axis]
-        else:
-            self.line_db = None
-            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -79,32 +73,15 @@
         dx = self.ds.domain_width/self.ds.domain_dimensions
         for field in fields:
             ftype, fname = field
-            tmp_fname = fname
-            if fname in self.ds.line_database:
-                fname = self.ds.field_list[0][1]
             f = self.ds.index._file_map[fname]
             ds = f[self.ds.index._ext_map[fname]]
             bzero, bscale = self.ds.index._scale_map[fname]
-            fname = tmp_fname
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.ds.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
-                    if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.ds.spec_unit).value
-                        my_off = my_off - 0.5*self.ds.line_width
-                        my_off = int((my_off-self.ds.freq_begin)/self.dz)
-                        my_off = max(my_off, 0)
-                        my_off = min(my_off, self.ds.dims[self.ds.spec_axis]-1)
-                        start[self.ds.spec_axis] += my_off
-                        end[self.ds.spec_axis] += my_off
-                        mylog.debug("Reading from " + str(start) + str(end))
                     slices = [slice(start[i],end[i]) for i in range(3)]
-                    if self.ds.reversed:
-                        new_start = self.ds.dims[self.ds.spec_axis]-1-start[self.ds.spec_axis]
-                        new_end = max(self.ds.dims[self.ds.spec_axis]-1-end[self.ds.spec_axis],0)
-                        slices[self.ds.spec_axis] = slice(new_start,new_end,-1)
                     if self.ds.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -115,13 +92,6 @@
                         data = ds.data[idx,slices[2],slices[1],slices[0]].transpose()
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
-                    if self.line_db is not None:
-                        nz1 = data.shape[self.ds.spec_axis]
-                        nz2 = g.ActiveDimensions[self.ds.spec_axis]
-                        if nz1 != nz2:
-                            old_data = data.copy()
-                            data = np.zeros(g.ActiveDimensions)
-                            data[:,:,nz2-nz1:] = old_data
                     if fname in self.ds.nan_mask:
                         data[np.isnan(data)] = self.ds.nan_mask[fname]
                     elif "all" in self.ds.nan_mask:

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -17,6 +17,8 @@
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
+from yt.units.yt_array import YTQuantity, YTArray
+from yt.utilities.fits_image import FITSImageBuffer
 
 import os
 
@@ -68,6 +70,70 @@
                      validators = [ValidateSpatial()],
                      display_name="Counts (%s-%s keV)" % (emin, emax))
 
+def create_spectral_slabs(filename, slab_centers, slab_width,
+                          **kwargs):
+    r"""
+    Given a dictionary of spectral slab centers and a width in
+    spectral units, extract data from a spectral cube at these slab
+    centers and return a `FITSDataset` instance containing the different 
+    slabs as separate yt fields. Useful for extracting individual 
+    lines from a spectral cube and separating them out as different fields. 
+
+    Requires the SpectralCube (http://spectral-cube.readthedocs.org)
+    library.
+
+    All keyword arguments will be passed on to the `FITSDataset` constructor.
+
+    Parameters
+    ----------
+    filename : string
+        The spectral cube FITS file to extract the data from.
+    slab_centers : dict of (float, string) tuples or YTQuantities
+        The centers of the slabs, where the keys are the names
+        of the new fields and the values are (float, string) tuples or
+        YTQuantities, specifying a value for each center and its unit.
+    slab_width : YTQuantity or (float, string) tuple
+        The width of the slab along the spectral axis.
+
+    Examples
+    --------
+    >>> slab_centers = {'13CN': (218.03117, 'GHz'),
+    ...                 'CH3CH2CHO': (218.284256, 'GHz'),
+    ...                 'CH3NH2': (218.40956, 'GHz')}
+    >>> slab_width = (0.05, "GHz")
+    >>> ds = create_spectral_slabs("intensity_cube.fits", 
+    ...                            slab_centers, slab_width,
+    ...                            nan_mask=0.0)
+    """
+    from spectral_cube import SpectralCube
+    from yt.frontends.fits.api import FITSDataset
+    cube = SpectralCube.read(filename)
+    if not isinstance(slab_width, YTQuantity):
+        slab_width = YTQuantity(slab_width[0], slab_width[1])
+    slab_data = {}
+    field_units = cube.header.get("bunit", "dimensionless")
+    for k, v in slab_centers.items():
+        if not isinstance(v, YTQuantity):
+            slab_center = YTQuantity(v[0], v[1])
+        else:
+            slab_center = v
+        mylog.info("Adding slab field %s at %g %s" %
+                   (k, slab_center.v, slab_center.units))
+        slab_lo = (slab_center-0.5*slab_width).to_astropy()
+        slab_hi = (slab_center+0.5*slab_width).to_astropy()
+        subcube = cube.spectral_slab(slab_lo, slab_hi)
+        slab_data[k] = YTArray(subcube.filled_data[:,:,:], field_units)
+    width = subcube.header["naxis3"]*cube.header["cdelt3"]
+    w = subcube.wcs.copy()
+    w.wcs.crpix[-1] = 0.5
+    w.wcs.crval[-1] = -0.5*width
+    fid = FITSImageBuffer(slab_data, wcs=w)
+    for hdu in fid:
+        hdu.header.pop("RESTFREQ", None)
+        hdu.header.pop("RESTFRQ", None)
+    ds = FITSDataset(fid, **kwargs)
+    return ds
+
 def ds9_region(ds, reg, obj=None, field_parameters=None):
     r"""
     Create a data container from a ds9 region file. Requires the pyregion

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/gadget/api.py
--- a/yt/frontends/gadget/api.py
+++ b/yt/frontends/gadget/api.py
@@ -7,7 +7,7 @@
 """
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2014, yt Development Team.
+# Copyright (c) 2014-2015, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
@@ -23,4 +23,7 @@
     IOHandlerGadgetBinary, \
     IOHandlerGadgetHDF5
 
+from .simulation_handling import \
+    GadgetSimulation
+
 from . import tests

diff -r b32f056c437769fee1c137131fa201105a20bfea -r a2f92f40d3de56f43ade93bd821b48fee9a2340c yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -18,6 +18,7 @@
 import h5py
 import numpy as np
 import stat
+import struct
 import os
 import types
 
@@ -242,10 +243,59 @@
         self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
         self.time_unit = self.length_unit / self.velocity_unit
 
+    @staticmethod
+    def _validate_header(filename):
+        '''
+        Detect from the first 4 bytes whether the file is a valid Gadget binary
+        file and whether it is big- or little-endian. Returns a tuple of
+        (valid, endianswap), where valid is a boolean that is True if the file
+        is a Gadget binary file, and endianswap is '>' or '<' (or 1 if invalid).
+        '''
+        try:
+            f = open(filename,'rb')
+        except IOError:
+            try:
+                f = open(filename+".0")
+            except IOError:
+                return False, 1
+        
+        # First int32 is 256 for a Gadget2 binary file with SnapFormat=1,
+        # 8 for a Gadget2 binary file with SnapFormat=2 file, 
+        # or the byte swapped equivalents (65536 and 134217728).
+        # The int32 following the header (first 4+256 bytes) must equal this
+        # number.
+        (rhead,) = struct.unpack('<I',f.read(4))
+        # Use value to check endianess
+        if rhead == 256:
+            endianswap = '<'
+        elif rhead == 65536:
+            endianswap = '>'
+        # Disabled for now (does anyone still use SnapFormat=2?)
+        # If so, alternate read would be needed based on header.
+        # elif rhead == 8:
+        #     return True, '<'
+        # elif rhead == 134217728:
+        #     return True, '>'
+        else:
+            f.close()
+            return False, 1
+        # Read in particle number from header
+        np0 = sum(struct.unpack(endianswap+'IIIIII',f.read(6*4)))
+        # Read in size of position block. It should be 4 bytes per float, 
+        # with 3 coordinates (x,y,z) per particle. (12 bytes per particle)
+        f.seek(4+256+4,0)
+        np1 = struct.unpack(endianswap+'I',f.read(4))[0]/(4*3)
+        f.close()
+        # Compare
+        if np0 == np1:
+            return True, endianswap
+        else:
+            return False, 1
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        # We do not allow load() of these files.
-        return False
+        # First 4 bytes used to check load
+        return GadgetDataset._validate_header(args[0])[0]
 
 class GadgetHDF5Dataset(GadgetDataset):
     _file_class = ParticleFile

This diff is so big that we needed to truncate the remainder.
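
A usage sketch of the new header check ("snapshot_000" is a
hypothetical filename): it returns (True, '<') or (True, '>') for a
valid SnapFormat=1 snapshot and (False, 1) otherwise, which is what
``_is_valid`` now keys off of:

    >>> from yt.frontends.gadget.data_structures import GadgetDataset
    >>> valid, endian = GadgetDataset._validate_header("snapshot_000")
    >>> if valid:
    ...     print "Gadget binary file, endianness '%s'" % endian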

https://bitbucket.org/yt_analysis/yt/commits/ac0abcc06c42/
Changeset:   ac0abcc06c42
Branch:      yt
User:        MatthewTurk
Date:        2015-06-01 19:26:42+00:00
Summary:     Merged in jzuhone/yt-3.x (pull request #1593)

Export FixedResolutionBuffers as datasets
Affected #:  6 files

diff -r 239abe7c09fe6680691383eddb189965fe6522fe -r ac0abcc06c420713e8e4dba44178a0c5e3854267 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,30 @@
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This image may then be displayed in a hand-constructed Matplotlib figure, for instance using
 :func:`~matplotlib.pyplot.imshow`.
 
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+ 
+.. code-block:: python
+
+   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.export_fits("my_images.fits", fields=["density","temperature"],
+                   clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+   ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+   sp = ds_frb.sphere("c", (100.,"kpc"))
+
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` grids.
+
 .. _generating-profiles-and-histograms:
 
 Profiles and Histograms

diff -r 239abe7c09fe6680691383eddb189965fe6522fe -r ac0abcc06c420713e8e4dba44178a0c5e3854267 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -709,7 +709,7 @@
             pdata = pdata_ftype
         # This will update the stream handler too
         assign_particle_data(sds, pdata)
-    
+
     return sds
 
 def load_amr_grids(grid_data, domain_dimensions,

diff -r 239abe7c09fe6680691383eddb189965fe6522fe -r ac0abcc06c420713e8e4dba44178a0c5e3854267 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -20,6 +20,7 @@
 from yt.utilities.lib.pixelization_routines import \
     pixelize_cylinder
 from yt.utilities.lib.api import add_points_to_greyscale_image
+from yt.frontends.stream.api import load_uniform_grid
 
 from . import _MPL
 import numpy as np
@@ -73,13 +74,13 @@
     To make a projection and then several images, you can generate a
     single FRB and then access multiple fields:
 
-    >>> proj = ds.proj(0, "Density")
+    >>> proj = ds.proj(0, "density")
     >>> frb1 = FixedResolutionBuffer(proj, (0.2, 0.3, 0.4, 0.5),
-                    (1024, 1024))
-    >>> print frb1["Density"].max()
-    1.0914e-9
-    >>> print frb1["Temperature"].max()
-    104923.1
+    ...                              (1024, 1024))
+    >>> print frb1["density"].max()
+    1.0914e-9 g/cm**3
+    >>> print frb1["temperature"].max()
+    104923.1 K
     """
     _exclude_fields = ('pz','pdz','dx','x','y','z',
         'r', 'dr', 'phi', 'dphi', 'theta', 'dtheta',
@@ -289,7 +290,7 @@
             These fields will be pixelized and output.
         """
         import h5py
-        if fields is None: fields = self.data.keys()
+        if fields is None: fields = list(self.data.keys())
         output = h5py.File(filename, "a")
         for field in fields:
             output.create_dataset(field,data=self[field])
@@ -307,30 +308,68 @@
         filename : string
             The name of the FITS file to be written.
         fields : list of strings
-            These fields will be pixelized and output.
+            These fields will be pixelized and output. If ``None``, the keys of
+            the FRB will be used.
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
         other_keys : dictionary, optional
             A set of header keys and values to write into the FITS header.
         units : string, optional
-            the length units that the coordinates are written in, default 'cm'
-            If units are set to "deg" then assume that sky coordinates are
-            requested.
+            the length units that the coordinates are written in, default 'cm'.
         """
 
         from yt.utilities.fits_image import FITSImageBuffer
 
-        extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
-        if fields is None: 
-            fields = [field[-1] for field in self.data_source.field_data
-                      if field not in extra_fields]
+        if fields is None: fields = list(self.data.keys())
 
         fib = FITSImageBuffer(self, fields=fields, units=units)
         if other_keys is not None:
             for k,v in other_keys.items():
                 fib.update_all_headers(k,v)
         fib.writeto(filename, clobber=clobber)
-        
+
+    def export_dataset(self, fields=None, nprocs=1):
+        r"""Export a set of pixelized fields to an in-memory dataset that can be
+        analyzed as any other in yt. Unit information and other parameters (e.g., 
+        geometry, current_time, etc.) will be taken from the parent dataset. 
+
+        Parameters
+        ----------
+        fields : list of strings, optional
+            These fields will be pixelized and output. If ``None``, the keys of
+            the FRB will be used.
+        nprocs : integer, optional
+            If greater than 1, the data will be decomposed into this many subarrays (grids).
+
+        Examples
+        --------
+        >>> import yt
+        >>> ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+        >>> slc = ds.slice(2, 0.0)
+        >>> frb = slc.to_frb((500.,"kpc"), 500)
+        >>> ds2 = frb.export_dataset(fields=["density","temperature"], nprocs=32)
+        """
+        nx, ny = self.buff_size
+        data = {}
+        if fields is None:
+            fields = list(self.keys())
+        for field in fields:
+            arr = self[field]
+            data[field] = (arr.d.T.reshape(nx,ny,1), str(arr.units))
+        bounds = [b.in_units("code_length").v for b in self.bounds]
+        bbox = np.array([[bounds[0],bounds[1]],[bounds[2],bounds[3]],[0.,1.]])
+        return load_uniform_grid(data, [nx,ny,1],
+                                 length_unit=self.ds.length_unit,
+                                 bbox=bbox,
+                                 sim_time=self.ds.current_time.in_units("s").v,
+                                 mass_unit=self.ds.mass_unit,
+                                 time_unit=self.ds.time_unit,
+                                 velocity_unit=self.ds.velocity_unit,
+                                 magnetic_unit=self.ds.magnetic_unit,
+                                 periodicity=(False,False,False),
+                                 geometry=self.ds.geometry,
+                                 nprocs=nprocs)
+
     @property
     def limits(self):
         rv = dict(x = None, y = None, z = None)
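
A self-contained round trip of the new ``export_dataset`` method, using
``fake_random_ds`` from ``yt.testing`` (mirroring the new test below)
and then operating on the exported 2D dataset like any other:

    >>> from yt.testing import fake_random_ds
    >>> ds = fake_random_ds(64)
    >>> slc = ds.slice(2, 0.5)
    >>> frb = slc.to_frb((0.5, "unitary"), 128)
    >>> frb_ds = frb.export_dataset(fields=["density"], nprocs=4)
    >>> ad = frb_ds.all_data()
    >>> print ad.quantities.extrema("density")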

diff -r 239abe7c09fe6680691383eddb189965fe6522fe -r ac0abcc06c420713e8e4dba44178a0c5e3854267 yt/visualization/tests/test_export_frb.py
--- /dev/null
+++ b/yt/visualization/tests/test_export_frb.py
@@ -0,0 +1,39 @@
+"""
+Tests for exporting an FRB as a dataset
+
+
+
+"""
+from __future__ import absolute_import
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import numpy as np
+from yt.testing import \
+    fake_random_ds, assert_equal, \
+    assert_allclose
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_export_frb():
+    test_ds = fake_random_ds(128)
+    slc = test_ds.slice(0,0.5)
+    frb = slc.to_frb((0.5,"unitary"), 64)
+    frb_ds = frb.export_dataset(fields=["density"], nprocs=8)
+    dd_frb = frb_ds.all_data()
+
+    yield assert_equal, frb_ds.domain_left_edge.v, np.array([0.25,0.25,0.0])
+    yield assert_equal, frb_ds.domain_right_edge.v, np.array([0.75,0.75,1.0])
+    yield assert_equal, frb_ds.domain_width.v, np.array([0.5,0.5,1.0])
+    yield assert_equal, frb_ds.domain_dimensions, np.array([64,64,1], dtype="int64")
+    yield assert_allclose, frb["density"].sum(), dd_frb.quantities.total_quantity("density")
+    yield assert_equal, frb_ds.index.num_grids, 8

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

