[yt-svn] commit/yt: 18 new changesets

commits-noreply at bitbucket.org
Wed Dec 4 12:52:30 PST 2013


18 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2bb81ae17fea/
Changeset:   2bb81ae17fea
Branch:      yt-3.0
User:        jzuhone
Date:        2013-10-02 18:37:32
Summary:     FITSImageBuffer
Affected #:  1 file

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r 2bb81ae17feae81e328ea21b00372be30b8058c7 yt/utilities/fits_image.py
--- /dev/null
+++ b/yt/utilities/fits_image.py
@@ -0,0 +1,191 @@
+"""
+FITSImageBuffer Class
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import mylog, iterable
+from yt.visualization.fixed_resolution import FixedResolutionBuffer
+try:
+    from astropy.io.fits import HDUList, ImageHDU
+    from astropy import wcs as pywcs
+except ImportError:
+    raise ImportError("You need to have AstroPy installed.")
+
+class FITSImageBuffer(HDUList):
+
+    def __init__(self, data, fields=None, center=None, proj_type=["x","y"],
+                 units="cm", wcs=None, D_A=None):
+        r""" Initialize a FITSImageBuffer object.
+
+        A FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally
+        includes WCS information. It inherits from HDUList, so operations such as
+        `writeto` are enabled. Images can be constructed from ImageArrays, NumPy
+        arrays, dicts of such arrays, or FixedResolutionBuffers. The last of these
+        is the most useful, since WCS information can be built from its coordinate bounds.
+
+        Parameters
+        ----------
+        data : FixedResolutionBuffer, ImageArray, numpy.ndarray, or dict of such arrays
+            The data to be made into a FITS image or images.
+        fields : single string or list of strings, optional
+            The field names for the data. If *fields* is None and *data* has keys,
+            those keys are used as the field names. If *data* is a single array,
+            one field name must be specified.
+        center : array_like, optional
+            The coordinates [xctr,yctr] of the images in units *units*. If *wcs* is set
+            this is ignored.
+        proj_type : list of strings, optional
+            The tangent projection type of the image, used for constructing WCS
+            information. If *wcs* is set this is ignored.
+        units : string, optional
+            The units of the WCS coordinates. If *wcs* is set this is ignored.
+        wcs : astropy.wcs object, optional
+            A WCS object. If set, it overrides the other coordinate-related keyword arguments.
+        D_A : float or (value, unit) pair, optional
+            Angular diameter distance. Only used when constructing WCS information
+            for a FixedResolutionBuffer and *units* == "deg". 
+
+        Examples
+        --------
+
+        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
+        >>> prj = ds.h.proj(2, "TempkeV", weight_field="Density")
+        >>> frb = prj.to_frb((0.5, "mpc"), 800)
+        >>> f_kpc = FITSImageBuffer(frb, fields="TempkeV", units="kpc")
+        >>> D_A = 300.0 # in Mpc
+        >>> f_deg = FITSImageBuffer(frb, fields="TempkeV", units="deg",
+        ...                         D_A=(D_A, "mpc"), center=(30., 45.))
+        >>> f_deg.writeto("temp.fits")
+        """
+        
+        HDUList.__init__(self)
+
+        if isinstance(fields, basestring): fields = [fields]
+        if units == "deg" and proj_type == ["x","y"]:
+            proj_type = ["RA---TAN","DEC--TAN"]
+            
+        exclude_fields = ['x','y','z','px','py','pz',
+                          'pdx','pdy','pdz','weight_field']
+
+        if center is not None: center = np.array(center)
+
+        self.axes = [None,None]
+        
+        if hasattr(data, 'keys'):
+            img_data = data
+        else:
+            img_data = {}
+            if fields is None:
+                mylog.error("Please specify a field name for this array.")
+                raise KeyError
+            img_data[fields[0]] = data
+
+        if fields is None: fields = img_data.keys()
+        if len(fields) == 0:
+            mylog.error("Please specify one or more fields to write.")
+            raise KeyError
+        
+        for key in fields:
+            if key not in exclude_fields:
+                mylog.info("Making a FITS image of field %s" % (key))
+                hdu = ImageHDU(np.array(img_data[key]), name=key)
+                self.append(hdu)
+
+        self.nx, self.ny = self[0].data.shape
+        
+        if isinstance(img_data, FixedResolutionBuffer):
+            # FRBs are a special case where we have coordinate
+            # information, so we take advantage of this and
+            # construct the WCS object
+            dx = (img_data.bounds[1]-img_data.bounds[0])/self.nx
+            dy = (img_data.bounds[3]-img_data.bounds[2])/self.ny
+            if units == "deg":
+                if center is None:
+                    mylog.error("Please specify center=(RA, Dec)")
+                    raise ValueError
+                if D_A is None:
+                    mylog.error("Please specify D_A.")
+                    raise ValueError
+                if iterable(D_A):
+                    D_A = D_A[0]/img_data.pf.units[D_A[1]]
+                dx /= -D_A
+                dy /= D_A
+                xctr = center[0]
+                yctr = center[1]
+            else:
+                dx *= img_data.pf.units[units]
+                dy *= img_data.pf.units[units]
+                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
+                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
+                xctr *= img_data.pf.units[units]
+                yctr *= img_data.pf.units[units]
+            w = pywcs.WCS(header=self[0].header, naxis=2)
+            w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1)]
+            w.wcs.cdelt = [dx,dy]
+            w.wcs.crval = [xctr,yctr]
+            w.wcs.ctype = proj_type
+            w.wcs.cunit = [units]*2
+        else:
+            w = wcs
+
+        if w is not None:
+            self.set_wcs(w)
+            
+    def set_wcs(self, wcs):
+        """
+        Set the WCS coordinate information for all images
+        with a WCS object *wcs*.
+        """
+        self.wcs = wcs
+        xpix, ypix = np.mgrid[1:self.nx:(self.nx)*1j,
+                              1:self.ny:(self.ny)*1j]
+        xworld, yworld = self.wcs.wcs_pix2world(xpix, ypix, 1,
+                                                ra_dec_order=True)
+        self.xworld = xworld.T
+        self.yworld = yworld.T
+        self.axes = self.wcs.wcs.ctype
+        h = self.wcs.to_header()
+        for img in self:
+            for k, v in h.items():
+                img.header.update(k,v)
+
+    def update_all_headers(self, key, value):
+        """
+        Update the FITS headers for all images with the
+        same *key*, *value* pair.
+        """
+        for img in self: img.header.update(key,value)
+            
+    def keys(self):
+        return [f.name for f in self]
+
+    def has_key(self, key):
+        return key in self.keys()
+
+    def values(self):
+        return [self[k] for k in self.keys()]
+
+    def items(self):
+        return [(k, self[k]) for k in self.keys()]
+
+    def __getitem__(self, key):
+        if isinstance(key, basestring) and None not in self.axes:
+            if key.lower() == self.axes[0].lower():
+                return self.xworld
+            if key.lower() == self.axes[1].lower():
+                return self.yworld
+        return super(FITSImageBuffer, self).__getitem__(key)
+        
+    @property
+    def shape(self):
+        return self.nx, self.ny
+
+    

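As a usage note: a minimal sketch of driving FITSImageBuffer from a plain dict
of NumPy arrays rather than a FixedResolutionBuffer (assumes AstroPy is
installed; the field name and file name are illustrative only, and no WCS is
attached on this path unless one is passed via the wcs keyword):

    import numpy as np
    from yt.utilities.fits_image import FITSImageBuffer

    data = {"Density": np.random.random((64, 64))}
    fib = FITSImageBuffer(data, fields=["Density"])
    fib.writeto("density.fits", clobber=True)  # writeto is inherited from HDUList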

https://bitbucket.org/yt_analysis/yt/commits/0a35487ee162/
Changeset:   0a35487ee162
Branch:      yt-3.0
User:        jzuhone
Date:        2013-10-05 20:38:18
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  14 files

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -637,7 +637,7 @@
         """
 
         sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
-        if len(sphere._grids) == 0: return None
+        #if len(sphere._grids) == 0: return None
         new_sphere = False
 
         if self.recenter:
@@ -819,8 +819,8 @@
                 for hp in self.projection_fields:
                     projections.append(self.pf.h.proj(hp['field'], w,
                                                       weight_field=hp['weight_field'],
-                                                      source=region, center=halo['center'],
-                                                      serialize=False))
+                                                      data_source=region,
+                                                      center=halo['center']))
 
                 # Set x and y limits, shift image if it overlaps domain boundary.
                 if need_per:

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -213,7 +213,7 @@
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
     _con_args = ('axis', 'weight_field')
-    _container_fields = ('px', 'py', 'pdx', 'pdy')
+    _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
                  center = None, pf = None, data_source=None, 
                  style = "integrate", field_parameters = None):

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -241,12 +241,40 @@
                 # Note that we're adding *grids*, not individual cells.
                 if level >= min_level:
                     assert(pos.shape[0] == ng)
-                    n = self.oct_handler.add(cpu + 1, level - min_level, pos)
-                    assert(n == ng)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+                                count_boundary = 1)
+                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                     if n > 0: max_level = max(level - min_level, max_level)
         self.max_level = max_level
         self.oct_handler.finalize()
 
+    def _error_check(self, cpu, level, pos, n, ng, nn):
+        # NOTE: We have the second conditional here because internally, it will
+        # not add any octs in that case.
+        if n == ng or cpu + 1 > self.oct_handler.num_domains:
+            return
+        # This is where we now check for issues with creating the new octs, and
+        # we attempt to determine what precisely is going wrong.
+        # These are all print statements.
+        print "We have detected an error with the construction of the Octree."
+        print "  The number of Octs to be added :  %s" % ng
+        print "  The number of Octs added       :  %s" % n
+        print "  Level                          :  %s" % level
+        print "  CPU Number (0-indexed)         :  %s" % cpu
+        for i, ax in enumerate('xyz'):
+            print "  extent [%s]                     :  %s %s" % \
+            (ax, pos[:,i].min(), pos[:,i].max())
+        print "  domain left                    :  %s" % \
+            (self.pf.domain_left_edge,)
+        print "  domain right                   :  %s" % \
+            (self.pf.domain_right_edge,)
+        print "  offset applied                 :  %s %s %s" % \
+            (nn[0], nn[1], nn[2])
+        print "AMR Header:"
+        for key in sorted(self.amr_header):
+            print "   %-30s: %s" % (key, self.amr_header[key])
+        raise RuntimeError
+
     def included(self, selector):
         if getattr(selector, "domain_id", None) is not None:
             return selector.domain_id == self.domain_id

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -22,6 +22,7 @@
       load_amr_grids, \
       load_particles, \
       load_hexahedral_mesh, \
+      load_octree, \
       refine_amr
 
 from .fields import \

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -21,12 +21,24 @@
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
 from yt.config import ytcfg
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+from yt.geometry.oct_geometry_handler import \
+    OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
            UnstructuredGeometryHandler
 from yt.data_objects.static_output import \
@@ -973,3 +985,321 @@
 
     return spf
 
+class StreamOctreeSubset(OctreeSubset):
+    domain_id = 1
+    _domain_offset = 1
+    _num_zones = 2
+
+    def __init__(self, base_region, pf, oct_handler):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = oct_handler
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+
+    def fill(self, content, dest, selector, offset):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        levels[:] = 0
+        dest.update((field, np.empty(cell_count, dtype="float64"))
+                    for field in content)
+        # Make references ...
+        count = oct_handler.fill_level(0, levels, cell_inds, file_inds, 
+                                       dest, content, offset)
+        return count
+
+class StreamOctreeHandler(OctreeGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        self.data_style = data_style
+        super(StreamOctreeHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _initialize_oct_handler(self):
+        header = dict(dims = self.pf.domain_dimensions/2,
+                      left_edge = self.pf.domain_left_edge,
+                      right_edge = self.pf.domain_right_edge,
+                      octree = self.pf.octree_mask)
+        self.oct_handler = OctreeContainer.load_octree(header)
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            base_region = getattr(dobj, "base_region", dobj)
+            subset = [StreamOctreeSubset(base_region, self.parameter_file,
+                                         self.oct_handler)]
+            dobj._chunk_info = subset
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        # We actually do not really use the data files except as input to the
+        # ParticleOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g])
+
+    def _chunk_io(self, dobj, cache = True):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(StreamOctreeHandler, self)._setup_classes(dd)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamOctreeStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamOctreeHandler
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_octree"
+
+def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
+                bbox=None, sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load an octree mask into yt.
+
+    Octrees can be saved out by calling save_octree on an OctreeContainer.
+    This enables them to be loaded back in.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    octree_mask : np.ndarray[uint8_t]
+        This is a depth-first refinement mask for an Octree.
+    domain_dimensions : array_like
+        These are the domain dimensions of the grid
+    data : dict
+        A dictionary of 1D arrays.  Note that these must be of the same size as
+        the number of "False" values in the ``octree_mask``.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({0:data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "OctreeData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamOctreeStaticOutput(handler)
+    spf.octree_mask = octree_mask
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
+def hexahedral_connectivity(xgrid, ygrid, zgrid):
+    nx = len(xgrid)
+    ny = len(ygrid)
+    nz = len(zgrid)
+    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
+                    dtype=np.float64, count = nx*ny*nz*3)
+    coords.shape = (nx*ny*nz, 3)
+    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                    dtype=np.int64, count = 8*3)
+    cis.shape = (8, 3)
+    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
+                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+    off = cis + cycle[:, np.newaxis]
+    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+    return coords, connectivity
+
+class StreamHexahedralMesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
+class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        coords = self.stream_handler.fields.pop('coordinates')
+        connec = self.stream_handler.fields.pop('connectivity')
+        self.meshes = [StreamHexahedralMesh(0,
+          self.hierarchy_filename, connec, coords, self)]
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamHexahedralStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamHexahedralHierarchy
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_hexahedral"
+
+def load_hexahedral_mesh(data, connectivity, coordinates,
+                         sim_unit_to_cm, bbox=None,
+                         sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a hexahedral mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a semistructured grid of data to be loaded directly into
+    yt and analyzed as any other dataset would be.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        Only a single mesh may be loaded at a time.
+    connectivity : array_like
+        This should be of size (N,8) where N is the number of zones.
+    coordinates : array_like
+        This should be of size (M,3) where M is the number of vertices
+        indicated in the connectivity matrix.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "HexahedralMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamHexahedralStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
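
For reference, a minimal sketch of the new loader (hypothetical values: a
single unrefined root oct over the unit cube, with one array entry per
"False"/0 value in the mask, as the docstring requires; per the note above,
fluid field reads are not expected to work yet):

    import numpy as np
    from yt.mods import load_octree

    octree_mask = np.array([0], dtype="uint8")  # depth-first mask: 0 = leaf
    data = {"Density": np.random.random(1)}     # one entry per 0 in the mask
    pf = load_octree(octree_mask, [2, 2, 2], data,
                     sim_unit_to_cm=3.0856e24)  # 1 code unit = 1 Mpc (assumed)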

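Similarly, a hedged sketch of the semistructured path, using the
hexahedral_connectivity helper defined above to build vertex and connectivity
arrays from 1D coordinate grids (the field name and the one-value-per-zone
sizing are assumptions for illustration):

    import numpy as np
    from yt.frontends.stream.data_structures import hexahedral_connectivity
    from yt.mods import load_hexahedral_mesh

    xgrid = ygrid = zgrid = np.linspace(0.0, 1.0, 9)     # 8 zones per axis
    coords, conn = hexahedral_connectivity(xgrid, ygrid, zgrid)
    data = {"Density": np.random.random(conn.shape[0])}  # one value per zone
    pf = load_hexahedral_mesh(data, conn, coords, sim_unit_to_cm=1.0)
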
diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -228,3 +228,23 @@
                         ds = self.fields[g.mesh_id][fname]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
+
+class IOHandlerStreamOctree(BaseIOHandler):
+    _data_style = "stream_octree"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        ind = 0
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        for chunk in chunks:
+            assert(len(chunk.objs) == 1)
+            for subset in chunk.objs:
+                field_vals = self.fields[subset.domain_id -
+                                    subset._domain_offset]
+                subset.fill(field_vals, rv, selector, ind)
+        return rv

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -74,7 +74,8 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data)
+                        OctVisitorData *data,
+                        int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -115,6 +115,60 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    @classmethod
+    def load_octree(cls, header):
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = header['octree']
+        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'], header['right_edge'])
+        # NOTE: We do not allow domain/file indices to be specified.
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        obj.setup_data(&data, -1)
+        obj.allocate_domains([ref_mask.shape[0]])
+        cdef int i, j, k, n
+        data.global_index = -1
+        data.level = 0
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (obj.DRE[i] - obj.DLE[i]) / obj.nn[i]
+        # Pos is the center of the octs
+        cdef OctAllocationContainer *cur = obj.domains[0]
+        cdef Oct *o
+        cdef void *p[4]
+        cdef np.int64_t nfinest = 0
+        p[0] = ref_mask.data
+        p[1] = <void *> cur.my_octs
+        p[2] = <void *> &cur.n_assigned
+        p[3] = <void *> &nfinest
+        data.array = p
+        pos[0] = obj.DLE[0] + dds[0]/2.0
+        for i in range(obj.nn[0]):
+            pos[1] = obj.DLE[1] + dds[1]/2.0
+            for j in range(obj.nn[1]):
+                pos[2] = obj.DLE[2] + dds[2]/2.0
+                for k in range(obj.nn[2]):
+                    if obj.root_mesh[i][j][k] != NULL:
+                        raise RuntimeError
+                    o = &cur.my_octs[cur.n_assigned]
+                    o.domain_ind = o.file_ind = 0
+                    o.domain = 1
+                    obj.root_mesh[i][j][k] = o
+                    cur.n_assigned += 1
+                    data.pos[0] = i
+                    data.pos[1] = j
+                    data.pos[2] = k
+                    selector.recursively_visit_octs(
+                        obj.root_mesh[i][j][k],
+                        pos, dds, 0, oct_visitors.load_octree,
+                        &data, 1)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+        obj.nocts = cur.n_assigned
+        assert(obj.nocts == ref_mask.size)
+        return obj
+
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
         cdef int i
         data.index = 0
@@ -157,9 +211,11 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
-        vc = self.partial_coverage
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
+        if vc == -1:
+            vc = self.partial_coverage
         data.global_index = -1
         data.level = 0
         cdef np.float64_t pos[3], dds[3]
@@ -346,11 +402,11 @@
     def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
              int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_oct_cells(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells), dtype="uint8")
+        coords = np.zeros((num_cells*8), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
@@ -431,6 +487,24 @@
             coords[:,i] += self.DLE[i]
         return coords
 
+    def save_octree(self):
+        # Get the header
+        header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
+                      left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
+                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]))
+        if self.partial_coverage == 1:
+            raise NotImplementedError
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        # domain_id = -1 here, because we want *every* oct
+        cdef OctVisitorData data
+        self.setup_data(&data, -1)
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+        data.array = <void *> ref_mask.data
+        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        header['octree'] = ref_mask
+        return header
+
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -499,8 +573,10 @@
     @cython.cdivision(True)
     def add(self, int curdom, int curlevel,
             np.ndarray[np.float64_t, ndim=2] pos,
-            int skip_boundary = 1):
+            int skip_boundary = 1,
+            int count_boundary = 0):
         cdef int level, no, p, i, j, k, ind[3]
+        cdef int nb = 0
         cdef Oct *cur, *next = NULL
         cdef np.float64_t pp[3], cp[3], dds[3]
         no = pos.shape[0] #number of octs
@@ -520,7 +596,9 @@
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
                 if ind[i] < 0 or ind[i] >= self.nn[i]:
                     in_boundary = 1
-            if skip_boundary == in_boundary == 1: continue
+            if skip_boundary == in_boundary == 1:
+                nb += count_boundary
+                continue
             cur = self.next_root(curdom, ind)
             if cur == NULL: raise RuntimeError
             # Now we find the location we want
@@ -542,7 +620,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             cur.file_ind = p
-        return cont.n_assigned - initial
+        return cont.n_assigned - initial + nb
 
     def allocate_domains(self, domain_counts):
         cdef int count, i
@@ -643,19 +721,21 @@
                    np.ndarray[np.uint8_t, ndim=1] levels,
                    np.ndarray[np.uint8_t, ndim=1] cell_inds,
                    np.ndarray[np.int64_t, ndim=1] file_inds,
-                   dest_fields, source_fields):
+                   dest_fields, source_fields,
+                   np.int64_t offset = 0):
         cdef np.ndarray[np.float64_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n
         cdef int i, di
-        cdef int local_pos, local_filled
+        cdef np.int64_t local_pos, local_filled = 0
         cdef np.float64_t val
         for key in dest_fields:
             dest = dest_fields[key]
             source = source_fields[key]
             for i in range(levels.shape[0]):
                 if levels[i] != level: continue
-                dest[i] = source[file_inds[i], cell_inds[i]]
+                dest[i + offset] = source[file_inds[i], cell_inds[i]]
+                local_filled += 1
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
@@ -698,6 +778,13 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
         self.fill_func = oct_visitors.fill_file_indices_rind
 
+    @classmethod
+    def load_octree(self, header):
+        raise NotImplementedError
+
+    def save_octree(self):
+        raise NotImplementedError
+
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
@@ -734,12 +821,14 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
         cdef np.int64_t key, ukey
         data.global_index = -1
         data.level = 0
-        vc = self.partial_coverage
+        if vc == -1:
+            vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):

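The save/load pair added here is symmetric: save_octree serializes a
non-partial-coverage octree to a small header plus a depth-first uint8
refinement mask, and load_octree reconstructs an equivalent container. A
minimal sketch, assuming `octree` is an existing, finalized container (this
mirrors the round-trip test added below in test_particle_octree.py):

    from yt.geometry.oct_container import OctreeContainer

    header = octree.save_octree()  # dict with dims, edges, and 'octree' mask
    clone = OctreeContainer.load_octree(header)
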
diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -38,7 +38,6 @@
                    # To calculate nzones, 1 << (oref * 3)
     np.int32_t nz
                             
-
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
 
@@ -58,6 +57,8 @@
 cdef oct_visitor_function fill_file_indices_oind
 cdef oct_visitor_function fill_file_indices_rind
 cdef oct_visitor_function count_by_domain
+cdef oct_visitor_function store_octree
+cdef oct_visitor_function load_octree
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -18,6 +18,7 @@
 cimport numpy
 import numpy
 from fp_utils cimport *
+from libc.stdlib cimport malloc, free
 
 # Now some visitor functions
 
@@ -173,3 +174,39 @@
     # NOTE: We do this for every *cell*.
     arr = <np.int64_t *> data.array
     arr[o.domain - 1] += 1
+
+cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef np.uint8_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.uint8_t *> data.array
+        if o.children == NULL:
+            arr[data.index] = 0
+        if o.children != NULL:
+            arr[data.index] = 1
+        data.index += 1
+
+cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef void **p = <void **> data.array
+    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
+    cdef Oct* octs = <Oct*> p[1]
+    cdef np.int64_t *nocts = <np.int64_t*> p[2]
+    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
+    cdef int i
+   
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        if arr[data.index] == 0:
+            o.children = NULL
+            o.file_ind = nfinest[0]
+            o.domain = 1
+            nfinest[0] += 1
+        if arr[data.index] == 1:
+            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                o.children[i] = &octs[nocts[0]]
+                o.children[i].domain_ind = nocts[0]
+                o.children[i].file_ind = -1
+                o.children[i].domain = -1
+                nocts[0] += 1
+        data.index += 1

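The two visitors encode the tree depth-first, in visit order: store_octree
writes 1 for an oct with children (whose eight subtrees follow immediately)
and 0 for a leaf, and load_octree consumes the same stream. A hypothetical
pure-Python walk over such a mask, purely to illustrate the encoding:

    def count_leaves(mask, i=0):
        # Returns (number of leaf octs, index just past this subtree).
        if mask[i] == 0:
            return 1, i + 1
        total, i = 0, i + 1
        for _ in range(8):
            n, i = count_leaves(mask, i)
            total += n
        return total, i
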
diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,12 +1,14 @@
 from yt.testing import *
 import numpy as np
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer, \
     ParticleRegions
 from yt.geometry.oct_container import _ORDER_MAX
 from yt.utilities.lib.geometry_utils import get_morton_indices
 from yt.frontends.stream.api import load_particles
-from yt.geometry.selection_routines import RegionSelector
+from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
 import yt.data_objects.api
 import time, os
 
@@ -42,6 +44,34 @@
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
         yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
+def test_save_load_octree():
+    np.random.seed(int(0x4d3d3d3))
+    pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
+    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
+    octree.n_ref = 32
+    for i in range(3):
+        np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
+    # Convert to integers
+    pos = np.floor((pos - DLE)/dx).astype("uint64")
+    morton = get_morton_indices(pos)
+    morton.sort()
+    octree.add(morton)
+    octree.finalize()
+    saved = octree.save_octree()
+    loaded = OctreeContainer.load_octree(saved)
+    always = AlwaysSelector(None)
+    ir1 = octree.ires(always)
+    ir2 = loaded.ires(always)
+    yield assert_equal, ir1, ir2
+
+    fc1 = octree.fcoords(always)
+    fc2 = loaded.fcoords(always)
+    yield assert_equal, fc1, fc2
+
+    fw1 = octree.fwidth(always)
+    fw2 = loaded.fwidth(always)
+    yield assert_equal, fw1, fw2
+
 def test_particle_octree_counts():
     np.random.seed(int(0x4d3d3d3))
     # Eight times as many!

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh
+    load_particles, load_hexahedral_mesh, load_octree
 
 from yt.frontends.sph.api import \
     OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -416,8 +416,8 @@
             coord = center[axis]
         if obj is None:
             if field_parameters is None: field_parameters = {}
-            obj = self.pf.hierarchy.slice(axis, coord, field,
-                            center=center, **field_parameters)
+            obj = self.pf.hierarchy.slice(axis, coord, center=center,
+                            field_parameters = field_parameters)
         p = self._add_plot(PCSlicePlot(
                          obj, field, use_colorbar=use_colorbar,
                          axes=axes, figure=figure,
@@ -577,8 +577,8 @@
             center = self.c
         if not obj:
             if field_parameters is None: field_parameters = {}
-            cp = self.pf.hierarchy.cutting(normal, center, field,
-                    **field_parameters)
+            cp = self.pf.hierarchy.cutting(normal, center, 
+                    field_parameters = field_parameters)
         else:
             cp = obj
         p = self._add_plot(CuttingPlanePlot(cp, field,
@@ -676,8 +676,8 @@
             center = self.c
         if obj is None:
             obj = self.pf.hierarchy.proj(field, axis, weight_field,
-                                         source = data_source, center=center,
-                                         **field_parameters)
+                                         data_source = data_source, center=center,
+                                         field_parameters = field_parameters)
         p = self._add_plot(PCProjectionPlot(obj, field,
                          use_colorbar=use_colorbar, axes=axes, figure=figure,
                          size=fig_size, periodic=periodic))
@@ -770,8 +770,8 @@
         RE[axis] += thickness/2.0
         region = self.pf.h.region(center, LE, RE)
         obj = self.pf.hierarchy.proj(field, axis, weight_field,
-                                     source = region, center=center,
-                                     **field_parameters)
+                                     data_source = region, center=center,
+                                     field_parameters = field_parameters)
         p = self._add_plot(PCProjectionPlot(obj, field,
                          use_colorbar=use_colorbar, axes=axes, figure=figure,
                          size=fig_size, periodic=periodic))
@@ -1326,8 +1326,8 @@
         axis = fix_axis(axis)
         if field_parameters is None: field_parameters = {}
         if plot_options is None: plot_options = {}
-        data_source = self.pf.h.ortho_ray(axis, coords, field,
-                        **field_parameters)
+        data_source = self.pf.h.ortho_ray(axis, coords, 
+                        field_parameters = field_parameters)
         p = self._add_plot(LineQueryPlot(data_source,
                 [axis_names[axis], field], self._get_new_id(),
                 figure=figure, axes=axes, plot_options=plot_options))
@@ -1386,8 +1386,8 @@
         """
         if field_parameters is None: field_parameters = {}
         if plot_options is None: plot_options = {}
-        data_source = self.pf.h.ray(start_point, end_point, field,
-                                    **field_parameters)
+        data_source = self.pf.h.ray(start_point, end_point, 
+                                    field_parameters = field_parameters)
         p = self._add_plot(LineQueryPlot(data_source,
                 ['t', field], self._get_new_id(),
                 figure=figure, axes=axes, plot_options=plot_options))

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 0a35487ee16220dc553865ab8dd9f5454d9e66f0 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -488,7 +488,7 @@
         if self.datalabel is None:
             field_name = self.axis_names["Z"]
             proj = "Proj" in self._type_name and \
-                   self.data._weight is None
+                   self.data.weight_field is None
             data_label = self.pf.field_info[field_name].get_label(proj)
         else: data_label = self.datalabel
         if self.colorbar != None:
@@ -576,8 +576,8 @@
 
     def _generate_prefix(self, prefix):
         VMPlot._generate_prefix(self, prefix)
-        if self.data._weight is not None:
-            self.prefix += "_%s" % (self.data._weight)
+        if self.data.weight_field is not None:
+            self.prefix += "_%s" % (self.data.weight_field)
 
 class PCProjectionPlotNaturalNeighbor(NNVMPlot, PCProjectionPlot):
     _type_name = "NNProj"


https://bitbucket.org/yt_analysis/yt/commits/2847423511d6/
Changeset:   2847423511d6
Branch:      yt-3.0
User:        jzuhone
Date:        2013-10-05 22:41:51
Summary:     Added an __add__ function. Decided to make the x and y coordinates attributes instead of dict-like items.
Affected #:  1 file

diff -r 2bb81ae17feae81e328ea21b00372be30b8058c7 -r 2847423511d6de46d5eb30dcf8167e25a7ebefe8 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -176,14 +176,26 @@
     def items(self):
         return [(k, self[k]) for k in self.keys()]
 
-    def __getitem__(self, key):
-        if isinstance(key, basestring) and None not in self.axes:
-            if key.lower() == self.axes[0].lower():
-                return self.xworld
-            if key.lower() == self.axes[1].lower():
-                return self.yworld
-        return super(FITSImageBuffer, self).__getitem__(key)
-        
+    def __add__(self, other):
+        if len(set(self.keys()).intersection(set(other.keys()))) > 0:
+            mylog.error("There are duplicate extension names! Don't know which ones you want to keep!")
+            raise KeyError
+        new_buffer = {}
+        for im1 in self:
+            new_buffer[im1.name] = im1.data
+        for im2 in other:
+            new_buffer[im2.name] = im2.data
+        new_wcs = self.wcs
+        return FITSImageBuffer(new_buffer, wcs=new_wcs)
+    
+    @property
+    def xwcs(self):
+        return self.xworld
+
+    @property
+    def ywcs(self):
+        return self.yworld
+            
     @property
     def shape(self):
         return self.nx, self.ny

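A hedged sketch of the new __add__ (hypothetical buffer names; the operands
must not share any extension names, or a KeyError is raised, and the WCS of
the left-hand operand is carried over to the result):

    combined = f_temp + f_dens  # both are FITSImageBuffer instances
    combined.writeto("combined.fits")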

https://bitbucket.org/yt_analysis/yt/commits/225cbe52cac8/
Changeset:   225cbe52cac8
Branch:      yt-3.0
User:        jzuhone
Date:        2013-10-05 22:42:16
Summary:     Merging
Affected #:  1 file

diff -r 2847423511d6de46d5eb30dcf8167e25a7ebefe8 -r 225cbe52cac8080961236212be1ef147079ff99d yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 import h5py
+from yt.utilities.math_utils import prec_accum
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -73,6 +74,7 @@
         for field in fields:
             ftype, fname = field
             dt = f["/%s" % fname].dtype
+            dt = prec_accum[dt]
             if dt == "float32": dt = "float64"
             rv[field] = np.empty(size, dtype=dt)
         ng = sum(len(c.objs) for c in chunks)

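prec_accum maps an on-disk dtype to a (typically wider) accumulation dtype,
so the FLASH reader now allocates, for example, float64 buffers for float32
data. A hedged illustration of the lookup, keyed by a NumPy dtype as in the
diff above:

    import numpy as np
    from yt.utilities.math_utils import prec_accum

    dt = prec_accum[np.dtype("float32")]  # expected to be a wider dtype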

https://bitbucket.org/yt_analysis/yt/commits/bc5e34356211/
Changeset:   bc5e34356211
Branch:      yt-3.0
User:        jzuhone
Date:        2013-10-05 22:42:27
Summary:     Merging
Affected #:  14 files

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -637,7 +637,7 @@
         """
 
         sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
-        if len(sphere._grids) == 0: return None
+        #if len(sphere._grids) == 0: return None
         new_sphere = False
 
         if self.recenter:
@@ -819,8 +819,8 @@
                 for hp in self.projection_fields:
                     projections.append(self.pf.h.proj(hp['field'], w,
                                                       weight_field=hp['weight_field'],
-                                                      source=region, center=halo['center'],
-                                                      serialize=False))
+                                                      data_source=region,
+                                                      center=halo['center']))
 
                 # Set x and y limits, shift image if it overlaps domain boundary.
                 if need_per:

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -213,7 +213,7 @@
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
     _con_args = ('axis', 'weight_field')
-    _container_fields = ('px', 'py', 'pdx', 'pdy')
+    _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
                  center = None, pf = None, data_source=None, 
                  style = "integrate", field_parameters = None):

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -241,12 +241,40 @@
                 # Note that we're adding *grids*, not individual cells.
                 if level >= min_level:
                     assert(pos.shape[0] == ng)
-                    n = self.oct_handler.add(cpu + 1, level - min_level, pos)
-                    assert(n == ng)
+                    n = self.oct_handler.add(cpu + 1, level - min_level, pos,
+                                count_boundary = 1)
+                    self._error_check(cpu, level, pos, n, ng, (nx, ny, nz))
                     if n > 0: max_level = max(level - min_level, max_level)
         self.max_level = max_level
         self.oct_handler.finalize()
 
+    def _error_check(self, cpu, level, pos, n, ng, nn):
+        # NOTE: We have the second conditional here because internally, it will
+        # not add any octs in that case.
+        if n == ng or cpu + 1 > self.oct_handler.num_domains:
+            return
+        # This is where we now check for issues with creating the new octs, and
+        # we attempt to determine what precisely is going wrong.
+        # These are all print statements.
+        print "We have detected an error with the construction of the Octree."
+        print "  The number of Octs to be added :  %s" % ng
+        print "  The number of Octs added       :  %s" % n
+        print "  Level                          :  %s" % level
+        print "  CPU Number (0-indexed)         :  %s" % cpu
+        for i, ax in enumerate('xyz'):
+            print "  extent [%s]                     :  %s %s" % \
+            (ax, pos[:,i].min(), pos[:,i].max())
+        print "  domain left                    :  %s" % \
+            (self.pf.domain_left_edge,)
+        print "  domain right                   :  %s" % \
+            (self.pf.domain_right_edge,)
+        print "  offset applied                 :  %s %s %s" % \
+            (nn[0], nn[1], nn[2])
+        print "AMR Header:"
+        for key in sorted(self.amr_header):
+            print "   %-30s: %s" % (key, self.amr_header[key])
+        raise RuntimeError
+
     def included(self, selector):
         if getattr(selector, "domain_id", None) is not None:
             return selector.domain_id == self.domain_id

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -22,6 +22,7 @@
       load_amr_grids, \
       load_particles, \
       load_hexahedral_mesh, \
+      load_octree, \
       refine_amr
 
 from .fields import \

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -21,12 +21,24 @@
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
 from yt.config import ytcfg
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+from yt.geometry.oct_geometry_handler import \
+    OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
            UnstructuredGeometryHandler
 from yt.data_objects.static_output import \
@@ -973,3 +985,321 @@
 
     return spf
 
+class StreamOctreeSubset(OctreeSubset):
+    domain_id = 1
+    _domain_offset = 1
+    _num_zones = 2
+
+    def __init__(self, base_region, pf, oct_handler):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = oct_handler
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+
+    def fill(self, content, dest, selector, offset):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        levels[:] = 0
+        dest.update((field, np.empty(cell_count, dtype="float64"))
+                    for field in content)
+        # Make references ...
+        count = oct_handler.fill_level(0, levels, cell_inds, file_inds, 
+                                       dest, content, offset)
+        return count
+
+class StreamOctreeHandler(OctreeGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        self.data_style = data_style
+        super(StreamOctreeHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _initialize_oct_handler(self):
+        header = dict(dims = self.pf.domain_dimensions/2,
+                      left_edge = self.pf.domain_left_edge,
+                      right_edge = self.pf.domain_right_edge,
+                      octree = self.pf.octree_mask)
+        self.oct_handler = OctreeContainer.load_octree(header)
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            base_region = getattr(dobj, "base_region", dobj)
+            subset = [StreamOctreeSubset(base_region, self.parameter_file,
+                                         self.oct_handler)]
+            dobj._chunk_info = subset
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        # We actually do not really use the data files except as input to the
+        # StreamOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g])
+
+    def _chunk_io(self, dobj, cache = True):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(StreamOctreeHandler, self)._setup_classes(dd)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamOctreeStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamOctreeHandler
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_octree"
+
+def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
+                bbox=None, sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load an octree mask into yt.
+
+    Octrees can be saved out by calling save_octree on an OctreeContainer.
+    This enables them to be loaded back in.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    octree_mask : np.ndarray[uint8_t]
+        This is a depth-first refinement mask for an Octree.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    data : dict
+        A dictionary of 1D arrays.  Note that each array must have one entry
+        per "False" (zero) value in the ``octree_mask``.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({0:data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "OctreeData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamOctreeStaticOutput(handler)
+    spf.octree_mask = octree_mask
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
+def hexahedral_connectivity(xgrid, ygrid, zgrid):
+    nx = len(xgrid)
+    ny = len(ygrid)
+    nz = len(zgrid)
+    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
+                    dtype=np.float64, count = nx*ny*nz*3)
+    coords.shape = (nx*ny*nz, 3)
+    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                    dtype=np.int64, count = 8*3)
+    cis.shape = (8, 3)
+    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
+                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+    off = cis + cycle[:, np.newaxis]
+    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+    return coords, connectivity
+
+class StreamHexahedralMesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
+class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        coords = self.stream_handler.fields.pop('coordinates')
+        connec = self.stream_handler.fields.pop('connectivity')
+        self.meshes = [StreamHexahedralMesh(0,
+          self.hierarchy_filename, connec, coords, self)]
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamHexahedralStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamHexahedralHierarchy
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_hexahedral"
+
+def load_hexahedral_mesh(data, connectivity, coordinates,
+                         sim_unit_to_cm, bbox=None,
+                         sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a hexahedral mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a semistructured grid of data to be loaded directly into
+    yt and analyzed as any other dataset would be.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        There must only be one.
+    connectivity : array_like
+        This should be of size (N,8) where N is the number of zones.
+    coordinates : array_like
+        This should be of size (M,3) where M is the number of vertices
+        indicated in the connectivity matrix.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "HexahedralMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamHexahedralStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
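
For illustration, a minimal sketch of driving the new load_octree entry point
(the mask layout, field name, and unit value below are assumptions for the
example, not part of the changeset):

    import numpy as np
    from yt.mods import load_octree

    # Depth-first refinement mask: 1 = refined oct (its 8 children follow),
    # 0 = leaf oct.  With domain_dimensions = [2, 2, 2] there is a single
    # root oct; this mask refines it exactly once, giving 8 leaves.
    octree_mask = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0], dtype="uint8")

    # One value per "False" (zero) entry in the mask, per the docstring.
    n_leaves = (octree_mask == 0).sum()
    data = dict(Density=np.random.random(n_leaves))

    spf = load_octree(octree_mask, [2, 2, 2], data, sim_unit_to_cm=1.0)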

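Similarly, a sketch of pairing hexahedral_connectivity with
load_hexahedral_mesh (grid spacing and field name are illustrative):

    import numpy as np
    from yt.mods import load_hexahedral_mesh
    from yt.frontends.stream.data_structures import hexahedral_connectivity

    # A rectilinear 4x4x4 vertex grid yields 64 vertices and
    # 3*3*3 = 27 hexahedral zones.
    xgrid = ygrid = zgrid = np.linspace(0.0, 1.0, 4)
    coords, conn = hexahedral_connectivity(xgrid, ygrid, zgrid)
    assert coords.shape == (64, 3) and conn.shape == (27, 8)

    # One cell-averaged value per zone.
    data = dict(Density=np.random.random(conn.shape[0]))
    spf = load_hexahedral_mesh(data, conn, coords, sim_unit_to_cm=1.0)
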
diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -228,3 +228,23 @@
                         ds = self.fields[g.mesh_id][fname]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
+
+class IOHandlerStreamOctree(BaseIOHandler):
+    _data_style = "stream_octree"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        ind = 0
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        for chunk in chunks:
+            assert(len(chunk.objs) == 1)
+            for subset in chunk.objs:
+                field_vals = self.fields[subset.domain_id -
+                                    subset._domain_offset]
+                subset.fill(field_vals, rv, selector, ind)
+        return rv

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -74,7 +74,8 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data)
+                        OctVisitorData *data,
+                        int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -115,6 +115,60 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    @classmethod
+    def load_octree(cls, header):
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = header['octree']
+        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'], header['right_edge'])
+        # NOTE: We do not allow domain/file indices to be specified.
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        obj.setup_data(&data, -1)
+        obj.allocate_domains([ref_mask.shape[0]])
+        cdef int i, j, k, n
+        data.global_index = -1
+        data.level = 0
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (obj.DRE[i] - obj.DLE[i]) / obj.nn[i]
+        # Pos is the center of the octs
+        cdef OctAllocationContainer *cur = obj.domains[0]
+        cdef Oct *o
+        cdef void *p[4]
+        cdef np.int64_t nfinest = 0
+        p[0] = ref_mask.data
+        p[1] = <void *> cur.my_octs
+        p[2] = <void *> &cur.n_assigned
+        p[3] = <void *> &nfinest
+        data.array = p
+        pos[0] = obj.DLE[0] + dds[0]/2.0
+        for i in range(obj.nn[0]):
+            pos[1] = obj.DLE[1] + dds[1]/2.0
+            for j in range(obj.nn[1]):
+                pos[2] = obj.DLE[2] + dds[2]/2.0
+                for k in range(obj.nn[2]):
+                    if obj.root_mesh[i][j][k] != NULL:
+                        raise RuntimeError
+                    o = &cur.my_octs[cur.n_assigned]
+                    o.domain_ind = o.file_ind = 0
+                    o.domain = 1
+                    obj.root_mesh[i][j][k] = o
+                    cur.n_assigned += 1
+                    data.pos[0] = i
+                    data.pos[1] = j
+                    data.pos[2] = k
+                    selector.recursively_visit_octs(
+                        obj.root_mesh[i][j][k],
+                        pos, dds, 0, oct_visitors.load_octree,
+                        &data, 1)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+        obj.nocts = cur.n_assigned
+        assert(obj.nocts == ref_mask.size)
+        return obj
+
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
         cdef int i
         data.index = 0
@@ -157,9 +211,11 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
-        vc = self.partial_coverage
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
+        if vc == -1:
+            vc = self.partial_coverage
         data.global_index = -1
         data.level = 0
         cdef np.float64_t pos[3], dds[3]
@@ -346,11 +402,11 @@
     def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
              int domain_id = -1):
         if num_cells == -1:
-            num_cells = selector.count_oct_cells(self, domain_id)
+            num_cells = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells), dtype="uint8")
+        coords = np.zeros((num_cells*8), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
@@ -431,6 +487,24 @@
             coords[:,i] += self.DLE[i]
         return coords
 
+    def save_octree(self):
+        # Get the header
+        header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
+                      left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
+                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]))
+        if self.partial_coverage == 1:
+            raise NotImplementedError
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        # domain_id = -1 here, because we want *every* oct
+        cdef OctVisitorData data
+        self.setup_data(&data, -1)
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+        data.array = <void *> ref_mask.data
+        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        header['octree'] = ref_mask
+        return header
+
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -499,8 +573,10 @@
     @cython.cdivision(True)
     def add(self, int curdom, int curlevel,
             np.ndarray[np.float64_t, ndim=2] pos,
-            int skip_boundary = 1):
+            int skip_boundary = 1,
+            int count_boundary = 0):
         cdef int level, no, p, i, j, k, ind[3]
+        cdef int nb = 0
         cdef Oct *cur, *next = NULL
         cdef np.float64_t pp[3], cp[3], dds[3]
         no = pos.shape[0] #number of octs
@@ -520,7 +596,9 @@
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
                 if ind[i] < 0 or ind[i] >= self.nn[i]:
                     in_boundary = 1
-            if skip_boundary == in_boundary == 1: continue
+            if skip_boundary == in_boundary == 1:
+                nb += count_boundary
+                continue
             cur = self.next_root(curdom, ind)
             if cur == NULL: raise RuntimeError
             # Now we find the location we want
@@ -542,7 +620,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             cur.file_ind = p
-        return cont.n_assigned - initial
+        return cont.n_assigned - initial + nb
 
     def allocate_domains(self, domain_counts):
         cdef int count, i
@@ -643,19 +721,21 @@
                    np.ndarray[np.uint8_t, ndim=1] levels,
                    np.ndarray[np.uint8_t, ndim=1] cell_inds,
                    np.ndarray[np.int64_t, ndim=1] file_inds,
-                   dest_fields, source_fields):
+                   dest_fields, source_fields,
+                   np.int64_t offset = 0):
         cdef np.ndarray[np.float64_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n
         cdef int i, di
-        cdef int local_pos, local_filled
+        cdef np.int64_t local_pos, local_filled = 0
         cdef np.float64_t val
         for key in dest_fields:
             dest = dest_fields[key]
             source = source_fields[key]
             for i in range(levels.shape[0]):
                 if levels[i] != level: continue
-                dest[i] = source[file_inds[i], cell_inds[i]]
+                dest[i + offset] = source[file_inds[i], cell_inds[i]]
+                local_filled += 1
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
@@ -698,6 +778,13 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
         self.fill_func = oct_visitors.fill_file_indices_rind
 
+    @classmethod
+    def load_octree(self, header):
+        raise NotImplementedError
+
+    def save_octree(self):
+        raise NotImplementedError
+
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
@@ -734,12 +821,14 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
         cdef np.int64_t key, ukey
         data.global_index = -1
         data.level = 0
-        vc = self.partial_coverage
+        if vc == -1:
+            vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):

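Taken together, the two new methods give a save/load round trip at the
OctreeContainer level; a minimal sketch, assuming an existing container
`octree` with partial_coverage == 0 (save_octree raises NotImplementedError
otherwise):

    # The header carries dims, left/right edges, and the uint8 ref_mask.
    header = octree.save_octree()
    loaded = OctreeContainer.load_octree(header)
    # Every oct, refined or leaf, contributes one mask entry.
    assert loaded.nocts == header['octree'].size
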
diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -38,7 +38,6 @@
                    # To calculate nzones, 1 << (oref * 3)
     np.int32_t nz
                             
-
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
 
@@ -58,6 +57,8 @@
 cdef oct_visitor_function fill_file_indices_oind
 cdef oct_visitor_function fill_file_indices_rind
 cdef oct_visitor_function count_by_domain
+cdef oct_visitor_function store_octree
+cdef oct_visitor_function load_octree
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -18,6 +18,7 @@
 cimport numpy
 import numpy
 from fp_utils cimport *
+from libc.stdlib cimport malloc, free
 
 # Now some visitor functions
 
@@ -173,3 +174,39 @@
     # NOTE: We do this for every *cell*.
     arr = <np.int64_t *> data.array
     arr[o.domain - 1] += 1
+
+cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef np.uint8_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.uint8_t *> data.array
+        if o.children == NULL:
+            arr[data.index] = 0
+        if o.children != NULL:
+            arr[data.index] = 1
+        data.index += 1
+
+cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef void **p = <void **> data.array
+    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
+    cdef Oct* octs = <Oct*> p[1]
+    cdef np.int64_t *nocts = <np.int64_t*> p[2]
+    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
+    cdef int i
+   
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        if arr[data.index] == 0:
+            o.children = NULL
+            o.file_ind = nfinest[0]
+            o.domain = 1
+            nfinest[0] += 1
+        if arr[data.index] == 1:
+            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                o.children[i] = &octs[nocts[0]]
+                o.children[i].domain_ind = nocts[0]
+                o.children[i].file_ind = -1
+                o.children[i].domain = -1
+                nocts[0] += 1
+        data.index += 1

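The two visitors define the mask format: store_octree emits one uint8 per
oct in depth-first order (1 = refined, 0 = leaf), and load_octree consumes
the same stream, allocating 8 children whenever it reads a 1.  A pure-Python
sketch of reading that format (illustrative, not part of the changeset):

    def count_leaves(ref_mask, pos=0):
        # Consume one oct entry; if refined, its 8 subtrees follow
        # immediately in depth-first order.  Returns (leaves, next_pos).
        if ref_mask[pos] == 1:
            leaves, pos = 0, pos + 1
            for _ in range(8):
                sub, pos = count_leaves(ref_mask, pos)
                leaves += sub
            return leaves, pos
        return 1, pos + 1

    # count_leaves([1, 0, 0, 0, 0, 0, 0, 0, 0]) -> (8, 9):
    # eight leaves, nine octs in total.
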
diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,12 +1,14 @@
 from yt.testing import *
 import numpy as np
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer, \
     ParticleRegions
 from yt.geometry.oct_container import _ORDER_MAX
 from yt.utilities.lib.geometry_utils import get_morton_indices
 from yt.frontends.stream.api import load_particles
-from yt.geometry.selection_routines import RegionSelector
+from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
 import yt.data_objects.api
 import time, os
 
@@ -42,6 +44,34 @@
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
         yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
+def test_save_load_octree():
+    np.random.seed(int(0x4d3d3d3))
+    pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
+    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
+    octree.n_ref = 32
+    for i in range(3):
+        np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
+    # Convert to integers
+    pos = np.floor((pos - DLE)/dx).astype("uint64")
+    morton = get_morton_indices(pos)
+    morton.sort()
+    octree.add(morton)
+    octree.finalize()
+    saved = octree.save_octree()
+    loaded = OctreeContainer.load_octree(saved)
+    always = AlwaysSelector(None)
+    ir1 = octree.ires(always)
+    ir2 = loaded.ires(always)
+    yield assert_equal, ir1, ir2
+
+    fc1 = octree.fcoords(always)
+    fc2 = loaded.fcoords(always)
+    yield assert_equal, fc1, fc2
+
+    fw1 = octree.fwidth(always)
+    fw2 = loaded.fwidth(always)
+    yield assert_equal, fw1, fw2
+
 def test_particle_octree_counts():
     np.random.seed(int(0x4d3d3d3))
     # Eight times as many!

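The new test leans on the same integerization used elsewhere in the particle
octree machinery: positions are clipped to the domain, rescaled by the
finest-level cell width, floored to uint64, and interleaved into Morton
keys.  A standalone sketch (DLE, DRE, and the order are local assumptions
standing in for the test module's globals):

    import numpy as np
    from yt.utilities.lib.geometry_utils import get_morton_indices

    DLE = np.zeros(3)
    DRE = np.ones(3)
    ORDER = 20
    dx = (DRE - DLE) / (1 << ORDER)  # finest-level cell width

    pos = np.random.uniform(size=(16, 3)) * (DRE - DLE) + DLE
    ipos = np.floor((pos - DLE) / dx).astype("uint64")
    morton = get_morton_indices(ipos)
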
diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh
+    load_particles, load_hexahedral_mesh, load_octree
 
 from yt.frontends.sph.api import \
     OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \

diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -416,8 +416,8 @@
             coord = center[axis]
         if obj is None:
             if field_parameters is None: field_parameters = {}
-            obj = self.pf.hierarchy.slice(axis, coord, field,
-                            center=center, **field_parameters)
+            obj = self.pf.hierarchy.slice(axis, coord, center=center,
+                            field_parameters = field_parameters)
         p = self._add_plot(PCSlicePlot(
                          obj, field, use_colorbar=use_colorbar,
                          axes=axes, figure=figure,
@@ -577,8 +577,8 @@
             center = self.c
         if not obj:
             if field_parameters is None: field_parameters = {}
-            cp = self.pf.hierarchy.cutting(normal, center, field,
-                    **field_parameters)
+            cp = self.pf.hierarchy.cutting(normal, center, 
+                    field_parameters = field_parameters)
         else:
             cp = obj
         p = self._add_plot(CuttingPlanePlot(cp, field,
@@ -676,8 +676,8 @@
             center = self.c
         if obj is None:
             obj = self.pf.hierarchy.proj(field, axis, weight_field,
-                                         source = data_source, center=center,
-                                         **field_parameters)
+                                         data_source = data_source, center=center,
+                                         field_parameters = field_parameters)
         p = self._add_plot(PCProjectionPlot(obj, field,
                          use_colorbar=use_colorbar, axes=axes, figure=figure,
                          size=fig_size, periodic=periodic))
@@ -770,8 +770,8 @@
         RE[axis] += thickness/2.0
         region = self.pf.h.region(center, LE, RE)
         obj = self.pf.hierarchy.proj(field, axis, weight_field,
-                                     source = region, center=center,
-                                     **field_parameters)
+                                     data_source = region, center=center,
+                                     field_parameters = field_parameters)
         p = self._add_plot(PCProjectionPlot(obj, field,
                          use_colorbar=use_colorbar, axes=axes, figure=figure,
                          size=fig_size, periodic=periodic))
@@ -1326,8 +1326,8 @@
         axis = fix_axis(axis)
         if field_parameters is None: field_parameters = {}
         if plot_options is None: plot_options = {}
-        data_source = self.pf.h.ortho_ray(axis, coords, field,
-                        **field_parameters)
+        data_source = self.pf.h.ortho_ray(axis, coords, 
+                        field_parameters = field_parameters)
         p = self._add_plot(LineQueryPlot(data_source,
                 [axis_names[axis], field], self._get_new_id(),
                 figure=figure, axes=axes, plot_options=plot_options))
@@ -1386,8 +1386,8 @@
         """
         if field_parameters is None: field_parameters = {}
         if plot_options is None: plot_options = {}
-        data_source = self.pf.h.ray(start_point, end_point, field,
-                                    **field_parameters)
+        data_source = self.pf.h.ray(start_point, end_point, 
+                                    field_parameters = field_parameters)
         p = self._add_plot(LineQueryPlot(data_source,
                 ['t', field], self._get_new_id(),
                 figure=figure, axes=axes, plot_options=plot_options))

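The plot_collection changes above all make the same switch: the underlying
data objects no longer take a field argument, and field parameters travel as
a single field_parameters dict rather than being splatted in as **kwargs.
A sketch of the resulting calling convention (names and values illustrative,
assuming an existing PlotCollection `pc`):

    bv = [1.0e5, 0.0, 0.0]
    pc.add_slice("Density", 0, field_parameters={"bulk_velocity": bv})
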
diff -r 225cbe52cac8080961236212be1ef147079ff99d -r bc5e343562115b7354ed0dcd884e2838a89ffb2d yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -488,7 +488,7 @@
         if self.datalabel is None:
             field_name = self.axis_names["Z"]
             proj = "Proj" in self._type_name and \
-                   self.data._weight is None
+                   self.data.weight_field is None
             data_label = self.pf.field_info[field_name].get_label(proj)
         else: data_label = self.datalabel
         if self.colorbar != None:
@@ -576,8 +576,8 @@
 
     def _generate_prefix(self, prefix):
         VMPlot._generate_prefix(self, prefix)
-        if self.data._weight is not None:
-            self.prefix += "_%s" % (self.data._weight)
+        if self.data.weight_field is not None:
+            self.prefix += "_%s" % (self.data.weight_field)
 
 class PCProjectionPlotNaturalNeighbor(NNVMPlot, PCProjectionPlot):
     _type_name = "NNProj"


https://bitbucket.org/yt_analysis/yt/commits/9761f69f2a93/
Changeset:   9761f69f2a93
Branch:      yt-3.0
User:        jzuhone
Date:        2013-10-05 22:45:41
Summary:     Catching a couple other dtype possibilities
Affected #:  1 file

diff -r bc5e343562115b7354ed0dcd884e2838a89ffb2d -r 9761f69f2a935a90401e70b2e9e27da5e803239d yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -49,6 +49,8 @@
     np.dtype('complex'):    np.complex128,
     np.dtype('complex64'):  np.complex128,
     np.dtype('complex128'): np.complex128,
+    np.dtype('>f4'):        np.float64,
+    np.dtype('>f8'):        np.float64,
 }
 
 def periodic_position(pos, pf):

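Big-endian arrays (e.g. data read straight from FITS files) have dtypes like
'>f4' that compare unequal to the native-endian keys already in the table,
so without these entries the lookup would raise a KeyError.  A sketch of the
lookup pattern the table supports (the dict name here is illustrative):

    import numpy as np

    accum_dtype = {
        np.dtype('>f4'): np.float64,
        np.dtype('>f8'): np.float64,
    }
    arr = np.ones(4, dtype='>f4')
    out = np.zeros_like(arr, dtype=accum_dtype[arr.dtype])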

https://bitbucket.org/yt_analysis/yt/commits/6e85246777ef/
Changeset:   6e85246777ef
Branch:      yt-3.0
User:        jzuhone
Date:        2013-11-24 04:21:30
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  202 files

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -15,6 +15,7 @@
 yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
 yt/geometry/particle_oct_container.c
+yt/geometry/particle_smooth.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
@@ -31,6 +32,7 @@
 yt/utilities/lib/geometry_utils.c
 yt/utilities/lib/Interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
 yt/utilities/lib/png_writer.c

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
+include distribute_setup.py README* CREDITS COPYING.txt CITATION
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
+recursive-include yt *.pyx *.pxd *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms.  It's written in 
+python and heavily leverages both NumPy and Matplotlib for fast arrays and 
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -263,8 +263,6 @@
 YT_DEPS+=('python')
 YT_DEPS+=('distribute')
 YT_DEPS+=('libpng')
-YT_DEPS+=('freetype')
-YT_DEPS+=('hdf5')
 YT_DEPS+=('numpy')
 YT_DEPS+=('pygments')
 YT_DEPS+=('jinja2')
@@ -275,6 +273,7 @@
 YT_DEPS+=('h5py')
 YT_DEPS+=('matplotlib')
 YT_DEPS+=('cython')
+YT_DEPS+=('nose')
 
 # Here is our dependency list for yt
 log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
@@ -301,7 +300,6 @@
   export HDF5_DIR=${DEST_DIR}
   log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
   pushd ${YT_DIR}
-  echo $DEST_DIR > hdf5.cfg
   log_cmd python setup.py develop
   popd
   log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate 

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -918,6 +923,8 @@
 do_setup_py $SYMPY
 [ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
@@ -941,9 +948,7 @@
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
 echo "Installing yt"
-echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-[ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
 ( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
     #Empty fit without any lines
     yFit = na.ones(len(fluxData))
 
+    #Force the first and last flux pixel to be 1 to prevent OOB
+    fluxData[0]=1
+    fluxData[-1]=1
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
                     z,fitLim,minError*(b[2]-b[1]),speciesDict)
 
             #Check existence of partner lines if applicable
-            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                    b, minError*(b[2]-b[1]),
-                    x0, xRes, speciesDict)
+            if len(speciesDict['wavelength']) != 1:
+                newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                        b, minError*(b[2]-b[1]),
+                        x0, xRes, speciesDict)
 
             #If flagged as a bad fit, species is lyman alpha,
             #   and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
         #Index of the redshifted wavelength
         indexRedWl = (redWl-x0)/xRes
 
+        #Check to see if even in flux range
+        if indexRedWl >= len(y):
+            return False
+
         #Check if surpasses minimum absorption bound
         if y[int(indexRedWl)]>fluxMin:
             return False

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,21 @@
     TwoPointFunctions, \
     FcnSet
 
+from .sunyaev_zeldovich.api import SZProjection
+
 from .radmc3d_export.api import \
     RadMC3DWriter
+
+from .particle_trajectories.api import \
+    ParticleTrajectories
+
+from .photon_simulator.api import \
+     PhotonList, \
+     EventList, \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel, \
+     PhotonModel, \
+     ThermalPhotonModel

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have lenght 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
 

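With these changes a ray through a single dataset is requested by setting
near_redshift equal to far_redshift and supplying an explicit start point
plus either an end point or an (r, theta, phi) trajectory; the segment's
redshift width then comes from the relativistic Doppler relation
z = sqrt((1 + v/c) / (1 - v/c)) - 1, with v the Hubble-flow velocity across
the ray.  A usage sketch, with the dataset name, field list, and trajectory
values as placeholders and the constructor arguments assumed to be parameter
file, simulation type, and the near/far redshifts:

    import numpy as np
    from yt.analysis_modules.cosmological_observation.api import LightRay

    lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo", 'Enzo', 0.05, 0.05)
    lr.make_light_ray(start_position=[0.1, 0.2, 0.3],
                      trajectory=[0.5, np.pi / 4, np.pi / 2],
                      fields=['Density'],
                      solution_filename='ray_solution.txt',
                      data_filename='ray.h5')
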
diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -233,7 +233,8 @@
             pmass_min, pmass_max = dd.quantities["Extrema"](
                 (ptype, "ParticleMassMsun"), non_zero = True)[0]
             if pmass_min != pmass_max:
-                raise YTRockstarMultiMassNotSupported
+                raise YTRockstarMultiMassNotSupported(pmass_min, pmass_max,
+                    ptype)
             particle_mass = pmass_min
         # NOTE: We want to take our Msun and turn it into Msun/h .  Its value
         # should be such that dividing by little h gives the original value.
@@ -244,6 +245,7 @@
             # Get total_particles in parallel.
             tp = dd.quantities['TotalQuantity']((ptype, "particle_ones"))[0]
             p['total_particles'] = int(tp)
+            mylog.warning("Total Particle Count: %0.3e", int(tp))
         p['left_edge'] = tpf.domain_left_edge
         p['right_edge'] = tpf.domain_right_edge
         p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectories object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    ________
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    ...           "particle_position_z", "particle_velocity_x",
+    ...           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+    >>> for t in trajs:
+    ...     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g., leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a member must
+    end at or before that particle's last timestep. This is a limitation we
+    hope to lift at some point in the future.
+    """
+    def __init__(self, filenames, indices, fields=None):
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                print "Not all requested particle ids contained in this dataset!"
+                raise IndexError
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if key == "particle_time":
+            return self.times
+        if key not in self.field_data:
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if field not in self.field_data:
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if field not in self.field_data:
+            particles = np.empty(0)
+            step = 0
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = [field for field in sorted(self.field_data.keys())]
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset("%s" % field, data=self[field])
+        fid.close()

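The mask/argsort bookkeeping in ParticleTrajectories.__init__ is the subtle part of the file above: np.in1d selects the requested particles in whatever order a given output stores them, and argsort of the masked tags maps that storage order onto the sorted index order, so fields gathered from different outputs line up row for row. A self-contained sketch of the same trick, using made-up tags rather than yt data:

    import numpy as np

    newtags = np.array([7, 1, 5, 3, 2])  # particle_index as stored in one file
    indices = np.array([2, 5, 7])        # requested ids, pre-sorted

    mask = np.in1d(newtags, indices, assume_unique=True)
    sort = np.argsort(newtags[mask])     # storage order -> sorted-id order

    field = np.array([70., 10., 50., 30., 20.])  # stand-in per-particle field
    print(field[mask][sort])             # [ 20.  50.  70.], i.e. ids [2, 5, 7]
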
diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+     PhotonModel, \
+     ThermalPhotonModel
+
+from .photon_simulator import \
+     PhotonList, \
+     EventList
+
+from .spectral_models import \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel

diff -r 9761f69f2a935a90401e70b2e9e27da5e803239d -r 6e85246777efe7681b28277745d001ff673424dc yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based on the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+     mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, data_source, parameters):
+        photons = {}
+        return photons
+
+class ThermalPhotonModel(PhotonModel):
+    r"""
+    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    
+    Parameters
+    ----------
+
+    spectral_model : `SpectralModel`
+        A thermal spectral model instance, either of `XSpecThermalModel`
+        or `TableApecModel`. 
+    X_H : float, optional
+        The hydrogen mass fraction.
+    Zmet : float or string, optional
+        The metallicity. If a float, a constant metallicity is assumed
+        throughout. If a string, it is taken to be the name of the
+        metallicity field.
+    """
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+        self.X_H = X_H
+        self.Zmet = Zmet
+        self.spectral_model = spectral_model
+
+    def __call__(self, data_source, parameters):
+        
+        pf = data_source.pf
+
+        exp_time = parameters["FiducialExposureTime"]
+        area = parameters["FiducialArea"]
+        redshift = parameters["FiducialRedshift"]
+        D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+        dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+                
+        vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+        
+        num_cells = data_source["Temperature"].shape[0]
+        start_c = comm.rank*num_cells/comm.size
+        end_c = (comm.rank+1)*num_cells/comm.size
+        
+        kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+        vol = data_source["CellVolume"][start_c:end_c].copy()
+        dx = data_source["dx"][start_c:end_c].copy()
+        EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+    
+        data_source.clear_data()
+    
+        x = data_source["x"][start_c:end_c].copy()
+        y = data_source["y"][start_c:end_c].copy()
+        z = data_source["z"][start_c:end_c].copy()
+    
+        data_source.clear_data()
+        
+        vx = data_source["x-velocity"][start_c:end_c].copy()
+        vy = data_source["y-velocity"][start_c:end_c].copy()
+        vz = data_source["z-velocity"][start_c:end_c].copy()
+    
+        if isinstance(self.Zmet, basestring):
+            metalZ = data_source[self.Zmet][start_c:end_c].copy()
+        else:
+            metalZ = self.Zmet*np.ones(EM.shape)
+        
+        data_source.clear_data()
+
+        idxs = np.argsort(kT)
+        dshape = idxs.shape
+
+        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+        dkT = kT_bins[1]-kT_bins[0]
+        kT_idxs = np.digitize(kT[idxs], kT_bins)
+        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+        bcounts = np.bincount(kT_idxs).astype("int")
+        bcounts = bcounts[bcounts > 0]
+        n = 0
+        bcell = []
+        ecell = []
+        for bcount in bcounts:
+            bcell.append(n)
+            ecell.append(n+bcount)
+            n += bcount
+        kT_idxs = np.unique(kT_idxs)
+        
+        self.spectral_model.prepare()
+        energy = self.spectral_model.ebins
+    
+        cell_em = EM[idxs]*vol_scale
+        cell_vol = vol[idxs]*vol_scale
+    
+        number_of_photons = np.zeros(dshape, dtype='uint64')
+        energies = []
+    
+        u = np.random.random(cell_em.shape)
+        
+        pbar = get_pbar("Generating Photons", dshape[0])
+
+        for i, ikT in enumerate(kT_idxs):
+
+            ncells = int(bcounts[i])
+            ibegin = bcell[i]
+            iend = ecell[i]
+            kT = kT_bins[ikT] + 0.5*dkT
+        
+            em_sum_c = cell_em[ibegin:iend].sum()
+            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+            
+            cspec, mspec = self.spectral_model.get_spectrum(kT)
+            cspec *= dist_fac*em_sum_c/vol_scale
+            mspec *= dist_fac*em_sum_m/vol_scale
+        
+            cumspec_c = np.cumsum(cspec)
+            counts_c = cumspec_c[:]/cumspec_c[-1]
+            counts_c = np.insert(counts_c, 0, 0.0)
+            tot_ph_c = cumspec_c[-1]*area*exp_time
+
+            cumspec_m = np.cumsum(mspec)
+            counts_m = cumspec_m[:]/cumspec_m[-1]
+            counts_m = np.insert(counts_m, 0, 0.0)
+            tot_ph_m = cumspec_m[-1]*area*exp_time
+        
+            for icell in xrange(ibegin, iend):
+            
+                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+            
+                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+            
+                cell_n = cell_n_c + cell_n_m
+
+                if cell_n > 0:
+                    number_of_photons[icell] = cell_n
+                    randvec_c = np.random.uniform(size=cell_n_c)
+                    randvec_c.sort()
+                    randvec_m = np.random.uniform(size=cell_n_m)
+                    randvec_m.sort()
+                    cell_e_c = np.interp(randvec_c, counts_c, energy)
+                    cell_e_m = np.interp(randvec_m, counts_m, energy)
+                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                
+                pbar.update(icell)
+
+        pbar.finish()
+            
+        active_cells = number_of_photons > 0
+        idxs = idxs[active_cells]
+        
+        photons = {}
+
+        src_ctr = parameters["center"]
+        
+        photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+        photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+        photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+        photons["vx"] = vx[idxs]/cm_per_km
+        photons["vy"] = vy[idxs]/cm_per_km
+        photons["vz"] = vz[idxs]/cm_per_km
+        photons["dx"] = dx[idxs]*pf.units["kpc"]
+        photons["NumberOfPhotons"] = number_of_photons[active_cells]
+        photons["Energy"] = np.concatenate(energies)
+    
+        return photons

This diff is so big that we needed to truncate the remainder.

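The heart of ThermalPhotonModel.__call__ above is inverse-transform sampling: each cell's spectrum is turned into a cumulative distribution, sorted uniform deviates are pushed through it with np.interp, and the fractional part of the expected photon count is promoted to one extra photon with probability equal to that fraction. A standalone sketch of that sampling step, with a made-up exponential spectrum standing in for the XSPEC/APEC models:

    import numpy as np

    ebins = np.linspace(0.1, 10.0, 1001)  # spectral bin edges in keV (made up)
    spec = np.exp(-ebins[:-1])            # fake photon spectrum, one value per bin

    cumspec = np.cumsum(spec)
    counts = np.insert(cumspec/cumspec[-1], 0, 0.0)  # CDF sampled on the bin edges

    cell_norm = 12.7  # expected number of photons from one cell
    cell_n = np.uint64(cell_norm) + \
             np.uint64(np.modf(cell_norm)[0] >= np.random.random())

    randvec = np.random.uniform(size=int(cell_n))
    randvec.sort()
    energies = np.interp(randvec, counts, ebins)  # invert the CDF
    print(energies[:5])
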
https://bitbucket.org/yt_analysis/yt/commits/70f7a5bcf404/
Changeset:   70f7a5bcf404
Branch:      yt-3.0
User:        jzuhone
Date:        2013-11-26 21:27:02
Summary:     First pass at a FITS frontend
Affected #:  4 files

diff -r 6e85246777efe7681b28277745d001ff673424dc -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -12,6 +12,7 @@
     config.add_subpackage("boxlib")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")
+    config.add_subpackage("fits")
     config.add_subpackage("flash")
     config.add_subpackage("gdf")
     config.add_subpackage("moab")

diff -r 6e85246777efe7681b28277745d001ff673424dc -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -116,6 +116,9 @@
     GadgetFieldInfo, add_gadget_field, \
     TipsyStaticOutput, TipsyFieldInfo, add_tipsy_field
 
+from yt.frontends.fits.api import \
+    FITSStaticOutput, FITSFieldInfo, add_fits_field
+
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods
 available_analysis_modules = get_available_modules()

diff -r 6e85246777efe7681b28277745d001ff673424dc -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -13,6 +13,8 @@
 import numpy as np
 from yt.funcs import mylog, iterable
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
+from yt.data_objects.construction_data_containers import YTCoveringGridBase
+
 try:
     from astropy.io.fits import HDUList, ImageHDU
     from astropy import wcs as pywcs
@@ -21,8 +23,7 @@
 
 class FITSImageBuffer(HDUList):
 
-    def __init__(self, data, fields=None, center=None, proj_type=["x","y"],
-                 units="cm", wcs=None, D_A=None):
+    def __init__(self, data, fields=None, center=None, units="cm", wcs=None, D_A=None):
         r""" Initialize a FITSImageBuffer object.
 
         FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
@@ -42,9 +43,6 @@
         center : array_like, optional
             The coordinates [xctr,yctr] of the images in units *units*. If *wcs* is set
             this is ignored.
-        proj_type : list of strings, optional
-            The tangent projection type of the image, used for constructing WCS
-            information. If *wcs* is set this is ignored.
         units : string, optional
             The units of the WCS coordinates. If *wcs* is set this is ignored.
         wcs : astropy.wcs object, optional
@@ -69,15 +67,11 @@
         HDUList.__init__(self)
 
         if isinstance(fields, basestring): fields = [fields]
-        if units == "deg" and proj_type == ["x","y"]:
-            proj_type = ["RA---TAN","DEC--TAN"]
             
         exclude_fields = ['x','y','z','px','py','pz',
                           'pdx','pdy','pdz','weight_field']
 
         if center is not None: center = np.array(center)
-
-        self.axes = [None,None]
         
         if hasattr(data, 'keys'):
             img_data = data
@@ -96,12 +90,21 @@
         for key in fields:
             if key not in exclude_fields:
                 mylog.info("Making a FITS image of field %s" % (key))
-                hdu = ImageHDU(np.array(img_data[key]), name=key)
+                hdu = ImageHDU(np.array(img_data[key]).transpose(), name=key)
                 self.append(hdu)
 
-        self.nx, self.ny = self[0].data.shape
+        self.dimensionality = len(self[0].data.shape)
         
-        if isinstance(img_data, FixedResolutionBuffer):
+        if self.dimensionality == 2:
+            self.nx, self.ny = self[0].data.shape
+        elif self.dimensionality == 3:
+            self.nx, self.ny, self.nz = self[0].data.shape
+
+        self.axes = [None]*self.dimensionality
+
+        if wcs is not None:
+            w = wcs
+        elif isinstance(img_data, FixedResolutionBuffer):
             # FRBs are a special case where we have coordinate
             # information, so we take advantage of this and
             # construct the WCS object
@@ -118,8 +121,11 @@
                     D_A = D_A[0]/img_data.pf.units[D_A[1]]
                 dx /= -D_A
                 dy /= D_A
+                dx = np.rad2deg(dx)
+                dy = np.rad2deg(dy)
                 xctr = center[0]
                 yctr = center[1]
+                proj_type = ["RA---TAN","DEC--TAN"]
             else:
                 dx *= img_data.pf.units[units]
                 dy *= img_data.pf.units[units]
@@ -127,16 +133,32 @@
                 yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
                 xctr *= img_data.pf.units[units]
                 yctr *= img_data.pf.units[units]
+                proj_type = ["linear"]*2
             w = pywcs.WCS(header=self[0].header, naxis=2)
             w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1)]
             w.wcs.cdelt = [dx,dy]
             w.wcs.crval = [xctr,yctr]
             w.wcs.ctype = proj_type
             w.wcs.cunit = [units]*2
+        elif isinstance(img_data, YTCoveringGridBase):
+            dx, dy, dz = img_data.dds
+            dx *= img_data.pf.units[units]
+            dy *= img_data.pf.units[units]
+            dz *= img_data.pf.units[units]
+            center = 0.5*(img_data.left_edge+img_data.right_edge)
+            center *= img_data.pf.units[units]
+            w = pywcs.WCS(header=self[0].header, naxis=3)
+            w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nz+1)]
+            w.wcs.cdelt = [dx,dy,dz]
+            w.wcs.crval = center
+            w.wcs.ctype = ["linear"]*3
+            w.wcs.cunit = [units]*3
         else:
-            w = wcs
+            w = None
 
-        if w is not None:
+        if w is None:
+            self.wcs = None
+        else:
             self.set_wcs(w)
             
     def set_wcs(self, wcs):
@@ -145,13 +167,6 @@
         with a WCS object *wcs*.
         """
         self.wcs = wcs
-        xpix, ypix = np.mgrid[1:self.nx:(self.nx-1)*1j,
-                              1:self.ny:(self.ny-1)*1j]
-        xworld, yworld = self.wcs.wcs_pix2world(xpix, ypix, 1,
-                                                ra_dec_order=True)
-        self.xworld = xworld.T
-        self.yworld = yworld.T
-        self.axes = self.wcs.wcs.ctype
         h = self.wcs.to_header()
         for img in self:
             for k, v in h.items():
@@ -189,15 +204,10 @@
         return FITSImageBuffer(new_buffer, wcs=new_wcs)
     
     @property
-    def xwcs(self):
-        return self.xworld
-
-    @property
-    def ywcs(self):
-        return self.yworld
-            
-    @property
     def shape(self):
-        return self.nx, self.ny
+        if self.dimensionality == 2:
+            return self.nx, self.ny
+        elif self.dimensionality == 3:
+            return self.nx, self.ny, self.nz
 
     

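For readers unfamiliar with the WCS keywords being set above: crpix is the reference pixel, crval the world coordinate at that pixel, and cdelt the per-pixel increment. A minimal standalone construction with astropy, using made-up grid parameters rather than an actual FixedResolutionBuffer:

    from astropy import wcs as pywcs

    nx, ny = 128, 128
    dx = dy = 2.0        # cell size in kpc (made up)
    xctr = yctr = 0.0    # image center in world coordinates

    w = pywcs.WCS(naxis=2)
    w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]  # reference pixel at image center
    w.wcs.cdelt = [dx, dy]
    w.wcs.crval = [xctr, yctr]
    w.wcs.ctype = ["linear"]*2
    w.wcs.cunit = ["kpc"]*2

    # FITS pixel indices are 1-based, hence origin=1 here:
    print(w.wcs_pix2world([[1.0, 1.0]], 1))
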
diff -r 6e85246777efe7681b28277745d001ff673424dc -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -49,8 +49,6 @@
     np.dtype('complex'):    np.complex128,
     np.dtype('complex64'):  np.complex128,
     np.dtype('complex128'): np.complex128,
-    np.dtype('>f4'):        np.float64
-    np.dtype('>f8'):        np.float64
 }
 
 def periodic_position(pos, pf):


https://bitbucket.org/yt_analysis/yt/commits/a662b5a623b6/
Changeset:   a662b5a623b6
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-03 05:44:50
Summary:     Merged yt_analysis/yt-3.0 into yt-3.0
Affected #:  22 files

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 from .contour_finder import \
-    coalesce_join_tree, \
     identify_contours
 
 from .clump_handling import \

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
             print "Wiping out existing children clumps."
         self.children = []
         if max_val is None: max_val = self.max_val
-        contour_info = identify_contours(self.data, self.field, min_val, max_val,
-                                         self.cached_fields)
-        for cid in contour_info:
-            new_clump = self.data.extract_region(contour_info[cid])
+        nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+        for cid in range(nj):
+            new_clump = self.data.cut_region(
+                    ["obj['Contours'] == %s" % (cid + 1)],
+                    {'contour_slices': cids})
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
 import yt.utilities.data_point_utilities as data_point_utilities
 import yt.utilities.lib as amr_utils
 
-def coalesce_join_tree(jtree1):
-    joins = defaultdict(set)
-    nj = jtree1.shape[0]
-    for i1 in range(nj):
-        current_new = jtree1[i1, 0]
-        current_old = jtree1[i1, 1]
-        for i2 in range(nj):
-            if jtree1[i2, 1] == current_new:
-                current_new = max(current_new, jtree1[i2, 0])
-        jtree1[i1, 0] = current_new
-    for i1 in range(nj):
-        joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
-    updated = -1
-    while updated != 0:
-        keys = list(reversed(sorted(joins.keys())))
-        updated = 0
-        for k1 in keys + keys[::-1]:
-            if k1 not in joins: continue
-            s1 = joins[k1]
-            for k2 in keys + keys[::-1]:
-                if k2 >= k1: continue
-                if k2 not in joins: continue
-                s2 = joins[k2]
-                if k2 in s1:
-                    s1.update(joins.pop(k2))
-                    updated += 1
-                elif not s1.isdisjoint(s2):
-                    s1.update(joins.pop(k2))
-                    s1.update([k2])
-                    updated += 1
-    tr = []
-    for k in joins.keys():
-        v = joins.pop(k)
-        tr.append((k, np.array(list(v), dtype="int64")))
-    return tr
-
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
-    pbar = get_pbar("First pass", len(data_source._grids))
-    grids = sorted(data_source._grids, key=lambda g: -g.Level)
+    tree = amr_utils.ContourTree()
+    gct = amr_utils.TileContourTree(min_val, max_val)
     total_contours = 0
-    tree = []
-    for gi,grid in enumerate(grids):
-        pbar.update(gi+1)
-        cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
-        old_field_parameters = grid.field_parameters
-        grid.field_parameters = data_source.field_parameters
-        local_ind = np.where( (grid[field] > min_val)
-                            & (grid[field] < max_val) & cm )
-        grid.field_parameters = old_field_parameters
-        if local_ind[0].size == 0: continue
-        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
-        grid["tempContours"][local_ind] = kk[:]
-        cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
-        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
-        fd_orig = grid["tempContours"].copy()
-        xi = xi_u[cor_order]
-        yi = yi_u[cor_order]
-        zi = zi_u[cor_order]
-        while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
-            pass
-        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
-        tree += zip(new_contours, new_contours)
-    tree = set(tree)
+    contours = {}
+    empty_mask = np.ones((1,1,1), dtype="uint8")
+    node_ids = []
+    for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+        node.node_ind = len(node_ids)
+        nid = node.node_id
+        node_ids.append(nid)
+        values = g[field][sl].astype("float64")
+        contour_ids = np.zeros(dims, "int64") - 1
+        gct.identify_contours(values, contour_ids, total_contours)
+        new_contours = tree.cull_candidates(contour_ids)
+        total_contours += new_contours.shape[0]
+        tree.add_contours(new_contours)
+        # Now we can create a partitioned grid with the contours.
+        pg = amr_utils.PartitionedGrid(g.id,
+            [contour_ids.view("float64")],
+            empty_mask, g.dds * gi, g.dds * (gi + dims),
+            dims.astype("int64"))
+        contours[nid] = (g.Level, node.node_ind, pg, sl)
+    node_ids = np.array(node_ids)
+    trunk = data_source.tiles.tree.trunk
+    mylog.info("Linking node (%s) contours.", len(contours))
+    amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+    mylog.info("Linked.")
+    #joins = tree.cull_joins(bt)
+    #tree.add_joins(joins)
+    joins = tree.export()
+    contour_ids = defaultdict(list)
+    pbar = get_pbar("Updating joins ... ", len(contours))
+    final_joins = np.unique(joins[:,1])
+    for i, nid in enumerate(sorted(contours)):
+        level, node_ind, pg, sl = contours[nid]
+        ff = pg.my_data[0].view("int64")
+        amr_utils.update_joins(joins, ff, final_joins)
+        contour_ids[pg.parent_grid_id].append((sl, ff))
+        pbar.update(i)
     pbar.finish()
-    pbar = get_pbar("Calculating joins ", len(data_source._grids))
-    grid_set = set()
-    for gi,grid in enumerate(grids):
-        pbar.update(gi)
-        cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
-        grid_set.update(set(cg._grids))
-        fd = cg["tempContours"].astype('int64')
-        boundary_tree = amr_utils.construct_boundary_relationships(fd)
-        tree.update(((a, b) for a, b in boundary_tree))
-    pbar.finish()
-    sort_new = np.array(list(tree), dtype='int64')
-    mylog.info("Coalescing %s joins", sort_new.shape[0])
-    joins = coalesce_join_tree(sort_new)
-    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
-    pbar = get_pbar("Joining ", len(joins))
-    # This process could and should be done faster
-    print "Joining..."
-    t1 = time.time()
-    ff = data_source["tempContours"].astype("int64")
-    amr_utils.update_joins(joins, ff)
-    data_source["tempContours"] = ff.astype("float64")
-    #for i, new in enumerate(sorted(joins.keys())):
-    #    pbar.update(i)
-    #    old_set = joins[new]
-    #    for old in old_set:
-    #        if old == new: continue
-    #        i1 = (data_source["tempContours"] == old)
-    #        data_source["tempContours"][i1] = new
-    t2 = time.time()
-    print "Finished joining in %0.2e seconds" % (t2-t1)
-    pbar.finish()
-    data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
-    del data_source.field_data["tempContours"] # Force a reload from the grids
-    data_source.get_data("tempContours")
-    contour_ind = {}
-    i = 0
-    for contour_id in np.unique(data_source["tempContours"]):
-        if contour_id == -1: continue
-        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
-        mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
-        i += 1
-    mylog.info("Identified %s contours between %0.5e and %0.5e",
-               len(contour_ind.keys()),min_val,max_val)
-    for grid in chain(grid_set):
-        grid.field_data.pop("tempContours", None)
-    del data_source.field_data["tempContours"]
-    return contour_ind
+    rv = dict()
+    rv.update(contour_ids)
+    # NOTE: Because joins can appear in both a "final join" and a subsequent
+    # "join", we can't know for sure how many unique joins there are without
+    # checking if no cells match or doing an expensive operation checking for
+    # the unique set of final join values.
+    return final_joins.size, rv

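The linking stage replaces provisional per-tile contour ids with their final merged ids; judging from tree.export() and the use of joins[:,1] above, each row of joins pairs an old id with its final id. A pure-NumPy toy of what update_joins accomplishes (the Cython routine itself is not shown in this diff, so the row layout here is an assumption):

    import numpy as np

    ff = np.array([3, 3, 5, 7, 5, -1])          # provisional ids on one tile
    joins = np.array([[3, 1], [5, 1], [7, 2]])  # assumed (old id, final id) rows

    for old, new in joins:
        ff[ff == old] = new
    print(ff)  # [ 1  1  1  2  1 -1]: two merged contours survive
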
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
 
     Parameters
     ----------
-    axis : int
-        The axis along which to slice.  Can be 0, 1, or 2 for x, y, z.
     field : string
         This is the field which will be "projected" along the axis.  If
         multiple are specified (in a list) they will all be projected in
         the first pass.
+    axis : int
+        The axis along which to slice.  Can be 0, 1, or 2 for x, y, z.
     weight_field : string
         If supplied, the field being projected will be multiplied by this
         weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
         for chunk in self.data_source.chunks([], "io"):
             self._initialize_chunk(chunk, tree)
         # This needs to be parallel_objects-ified
-        for chunk in parallel_objects(self.data_source.chunks(
-                chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
-                get_memory_usage()/1024.)
-            self._handle_chunk(chunk, fields, tree)
+        with self.data_source._field_parameter_state(self.field_parameters):
+            for chunk in parallel_objects(self.data_source.chunks(
+                                         chunk_fields, "io")):
+                mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+                    get_memory_usage()/1024.)
+                self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
             merge_style = -1
@@ -308,6 +309,7 @@
             nvals *= convs[None,:]
         # We now convert to half-widths and center-points
         data = {}
+        #non_nan = ~np.any(np.isnan(nvals), axis=-1)
         data['px'] = px
         data['py'] = py
         data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             mylog.debug("Setting field %s", field)
-            self[field] = field_data[fi].ravel()
-        for i in data.keys(): self[i] = data.pop(i)
+            self[field] = field_data[fi].ravel()#[non_nan]
+        for i in data.keys():
+            self[i] = data.pop(i)#[non_nan]
         mylog.info("Projection completed")
 
     def _initialize_chunk(self, chunk, tree):

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
     ParallelAnalysisInterface
 from yt.utilities.parameter_file_storage import \
     ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+    AMRKDTree
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
     NeedsGridType, ValidateSpatial
@@ -365,6 +367,13 @@
         return s
 
     @contextmanager
+    def _field_parameter_state(self, field_parameters):
+        old_field_parameters = self.field_parameters
+        self.field_parameters = field_parameters
+        yield
+        self.field_parameters = old_field_parameters
+
+    @contextmanager
     def _field_type_state(self, ftype, finfo, obj = None):
         if obj is None: obj = self
         old_particle_type = obj._current_particle_type
@@ -407,6 +416,14 @@
             explicit_fields.append((ftype, fname))
         return explicit_fields
 
+    _tree = None
+
+    @property
+    def tiles(self):
+        if self._tree is not None: return self._tree
+        self._tree = AMRKDTree(self.pf, data_source=self)
+        return self._tree
+
     @property
     def blocks(self):
         for io_chunk in self.chunks([], "io"):
@@ -751,11 +768,13 @@
         self._grids = None
         self.quantities = DerivedQuantityCollection(self)
 
-    def cut_region(self, field_cuts):
+    def cut_region(self, field_cuts, field_parameters = None):
         """
-        Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
-        conditions to the fields in your data object.
+        Return an InLineExtractedRegion, where the object cells are cut on the
+        fly with a set of field_cuts.  It is very useful for applying
+        conditions to the fields in your data object.  Note that in previous
+        versions of yt, this accepted 'grid' as a variable, but presently it
+        requires 'obj'.
         
         Examples
         --------
@@ -763,19 +782,12 @@
 
         >>> pf = load("RedshiftOutput0005")
         >>> ad = pf.h.all_data()
-        >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+        >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
         >>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
         """
-        return YTValueCutExtractionBase(self, field_cuts)
-
-    def extract_region(self, indices):
-        """
-        Return an ExtractedRegion where the points contained in it are defined
-        as the points in `this` data object with the given *indices*.
-        """
-        fp = self.field_parameters.copy()
-        return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+        cr = self.pf.h.cut_region(self, field_cuts,
+                                  field_parameters = field_parameters)
+        return cr
 
     def extract_isocontours(self, field, value, filename = None,
                             rescale = False, sample_values = None):
@@ -966,12 +978,15 @@
                     ff, mask, grid.LeftEdge, grid.dds)
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
-                                log_space=True, cumulative=True, cache=False):
+                               log_space=True, cumulative=True):
         """
         This function will create a set of contour objects, defined
         by having connected cell structures, which can then be
         studied and used to 'paint' their source grids, thus enabling
         them to be plotted.
+
+        Note that this function *can* return a connected set object that has no
+        member values.
         """
         if log_space:
             cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +994,6 @@
         else:
             cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
-        if cache: cached_fields = defaultdict(lambda: dict())
-        else: cached_fields = None
         for level in range(num_levels):
             contours[level] = {}
             if cumulative:
@@ -988,10 +1001,11 @@
             else:
                 mv = cons[level+1]
             from yt.analysis_modules.level_sets.api import identify_contours
-            cids = identify_contours(self, field, cons[level], mv,
-                                     cached_fields)
-            for cid, cid_ind in cids.items():
-                contours[level][cid] = self.extract_region(cid_ind)
+            nj, cids = identify_contours(self, field, cons[level], mv)
+            for cid in range(nj):
+                contours[level][cid] = self.cut_region(
+                    ["obj['Contours'] == %s" % (cid + 1)],
+                    {'contour_slices': cids})
         return cons, contours
 
     def paint_grids(self, field, value, default_value=None):

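Two small patterns in the hunk above are worth spelling out: tiles lazily builds and caches an AMRKDTree on first access, and _field_parameter_state temporarily swaps a data object's field parameters around a block of work. A generic sketch of the swap pattern (not yt code; note the try/finally, which restores the state even if the body raises, something the version above omits):

    from contextlib import contextmanager

    class Thing(object):
        def __init__(self):
            self.field_parameters = {"center": (0.0, 0.0, 0.0)}

        @contextmanager
        def _field_parameter_state(self, field_parameters):
            old = self.field_parameters
            self.field_parameters = field_parameters
            try:
                yield
            finally:
                self.field_parameters = old  # restored on any exit path

    t = Thing()
    with t._field_parameter_state({"center": (1.0, 1.0, 1.0)}):
        print(t.field_parameters["center"])  # the temporary value
    print(t.field_parameters["center"])      # back to the original
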
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
         self.set_field_parameter('e0', e0)
         self.set_field_parameter('e1', e1)
         self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+    """
+    This is a data object designed to allow individuals to apply logical
+    operations to fields or particles and filter as a result of those cuts.
+
+    Parameters
+    ----------
+    base_object : YTSelectionContainer3D
+        The object to which cuts will be applied.
+    conditionals : list of strings
+        A list of conditionals that will be evaluated.  In the namespace
+        available, these conditionals will have access to 'obj' which is a data
+        object of unknown shape, and they must generate a boolean array.  For
+        instance, conditionals = ["obj['temperature'] < 1e3"]
+
+    Examples
+    --------
+
+    >>> pf = load("DD0010/moving7_0010")
+    >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+    >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+    """
+    _type_name = "cut_region"
+    _con_args = ("base_object", "conditionals")
+    def __init__(self, base_object, conditionals, pf = None,
+                 field_parameters = None):
+        super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+        self.conditionals = ensure_list(conditionals)
+        self.base_object = base_object
+        self._selector = None
+        # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+        # ires and get_data
+
+    @property
+    def selector(self):
+        raise NotImplementedError
+
+    def chunks(self, fields, chunking_style, **kwargs):
+        # We actually want to chunk the sub-chunk, not ourselves.  We have no
+        # chunks to speak of, as we do no data IO.
+        for chunk in self.hierarchy._chunk(self.base_object,
+                                           chunking_style,
+                                           **kwargs):
+            with self.base_object._chunked_read(chunk):
+                self.get_data(fields)
+                yield self
+
+    def get_data(self, fields = None):
+        fields = ensure_list(fields)
+        self.base_object.get_data(fields)
+        ind = self._cond_ind
+        for field in fields:
+            self.field_data[field] = self.base_object[field][ind]
+
+    @property
+    def _cond_ind(self):
+        ind = None
+        obj = self.base_object
+        with obj._field_parameter_state(self.field_parameters):
+            for cond in self.conditionals:
+                res = eval(cond)
+                if ind is None: ind = res
+                np.logical_and(res, ind, ind)
+        return ind
+
+    @property
+    def icoords(self):
+        return self.base_object.icoords[self._cond_ind,:]
+
+    @property
+    def fcoords(self):
+        return self.base_object.fcoords[self._cond_ind,:]
+
+    @property
+    def ires(self):
+        return self.base_object.ires[self._cond_ind]
+
+    @property
+    def fwidth(self):
+        return self.base_object.fwidth[self._cond_ind,:]
+

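YTCutRegionBase._cond_ind above works by eval'ing each conditional string with the base object bound to the name obj, then ANDing the resulting boolean arrays together in place. A minimal reproduction with a plain dict of NumPy arrays standing in for the data object (restructured slightly with an explicit else):

    import numpy as np

    obj = {"Temperature": np.array([0.2, 0.8, 0.6, 0.9]),
           "Density":     np.array([0.5, 0.9, 0.3, 0.1])}
    conditionals = ["obj['Temperature'] > 0.5", "obj['Density'] < 0.75"]

    ind = None
    for cond in conditionals:
        res = eval(cond)
        if ind is None:
            ind = res
        else:
            np.logical_and(res, ind, ind)  # in-place AND into ind
    print(ind)  # [False False  True  True]
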
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
 
 def test_cut_region():
     # We decompose in different ways
-    return #TESTDISABLED
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs,
             fields = ("Density", "Temperature", "x-velocity"))
         # We'll test two objects
         dd = pf.h.all_data()
-        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
-                             "grid['Density'] < 0.75",
-                             "grid['x-velocity'] > 0.25" ])
+        r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+                             "obj['Density'] < 0.75",
+                             "obj['x-velocity'] > 0.25" ])
         t = ( (dd["Temperature"] > 0.5 ) 
             & (dd["Density"] < 0.75 )
             & (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
         yield assert_equal, np.all(r["x-velocity"] > 0.25), True
         yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
         yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
         t2 = (r["Temperature"] < 0.75)
         yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
         yield assert_equal, np.all(r2["Temperature"] < 0.75), True
 
-def test_extract_region():
-    # We decompose in different ways
-    return #TESTDISABLED
-    for nprocs in [1, 2, 4, 8]:
-        pf = fake_random_pf(64, nprocs = nprocs,
-            fields = ("Density", "Temperature", "x-velocity"))
-        # We'll test two objects
+        # Now we can test some projections
         dd = pf.h.all_data()
-        t = ( (dd["Temperature"] > 0.5 ) 
-            & (dd["Density"] < 0.75 )
-            & (dd["x-velocity"] > 0.25 ) )
-        r = dd.extract_region(t)
-        yield assert_equal, np.all(r["Temperature"] > 0.5), True
-        yield assert_equal, np.all(r["Density"] < 0.75), True
-        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
-        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
-        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        t2 = (r["Temperature"] < 0.75)
-        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
-        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
-        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-        t3 = (r["Temperature"] < 0.75)
-        r3 = r.extract_region( t3 )
-        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
-        yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+        cr = dd.cut_region(["obj['Ones'] > 0"])
+        for weight in [None, "Density"]:
+            p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+            p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+            for f in p1.field_data:
+                yield assert_almost_equal, p1[f], p2[f]
+        cr = dd.cut_region(["obj['Density'] > 0.25"])
+        p2 = pf.h.proj("Density", 2, data_source=cr)
+        yield assert_equal, p2["Density"].max() > 0.25, True
+        p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+        yield assert_equal, p2["Density"].max() > 0.25, True

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/extern/setup.py
--- /dev/null
+++ b/yt/extern/setup.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('extern', parent_package, top_path)
+    config.add_subpackage("progressbar")
+    config.make_config_py()
+    return config

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
-          display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
-          validators=[ValidateSpatial(0), ValidateGridType()],
-          take_log=False, display_field=False)
+    fd = data.get_field_parameter("contour_slices")
+    vals = data["Ones"] * -1
+    if fd is None or fd == 0.0:
+        return vals
+    for sl, v in fd.get(data.id, []):
+        vals[sl] = v
+    return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+          take_log=False,
+          display_field=False,
+          projection_conversion="1",
+          function=_Contours)
 
 def obtain_velocities(data):
     return obtain_rv_vec(data)

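The reworked _Contours field starts every cell at -1 and then paints in the contour ids that identify_contours stashed in the 'contour_slices' field parameter, one (slice, values) pair per grid. A toy version of that painting step with made-up slices and ids:

    import numpy as np

    vals = -np.ones((4, 4), dtype="int64")  # every cell starts outside a contour
    contour_slices = {42: [(np.s_[1:3, 1:3], np.array([[1, 1], [2, -1]]))]}

    grid_id = 42
    for sl, v in contour_slices.get(grid_id, []):
        vals[sl] = v
    print(vals)
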
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -503,7 +503,19 @@
             self.periodicity = (True, True, True)
         else:
             self.periodicity = (False, False, False)
-
+        tot = sum(self.parameters[ptype] for ptype
+                  in ('nsph', 'ndark', 'nstar'))
+        if tot != self.parameters['nbodies']:
+            print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
+            print "%s != (%s == %s + %s + %s)" % (
+                self.parameters['nbodies'],
+                tot,
+                self.parameters['nsph'],
+                self.parameters['ndark'],
+                self.parameters['nstar'])
+            print "Often this can be fixed by changing the 'endian' parameter."
+            print "This defaults to '>' but may in fact be '<'."
+            raise RuntimeError
         if self.parameters.get('bComove', True):
             self.cosmological_simulation = 1
             cosm = self._cosmology_parameters or {}

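The new Tipsy check catches byte-order mistakes: the header is raw binary, so a mismatched 'endian' parameter silently produces absurd particle counts, and nsph + ndark + nstar no longer sums to nbodies. A tiny struct-based illustration of the failure mode (a single made-up header word, not the actual Tipsy layout):

    import struct

    packed = struct.pack(">i", 1024)       # a count written big-endian
    print(struct.unpack(">i", packed)[0])  # 1024 with the matching byte order
    print(struct.unpack("<i", packed)[0])  # 262144 with the wrong byte order
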
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -861,145 +861,6 @@
 
     return spf
 
-def hexahedral_connectivity(xgrid, ygrid, zgrid):
-    nx = len(xgrid)
-    ny = len(ygrid)
-    nz = len(zgrid)
-    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
-                    dtype=np.float64, count = nx*ny*nz*3)
-    coords.shape = (nx*ny*nz, 3)
-    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
-                    dtype=np.int64, count = 8*3)
-    cis.shape = (8, 3)
-    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
-                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
-    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
-    off = cis + cycle[:, np.newaxis]
-    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
-    return coords, connectivity
-
-class StreamHexahedralMesh(SemiStructuredMesh):
-    _connectivity_length = 8
-    _index_offset = 0
-
-class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
-
-    def __init__(self, pf, data_style = None):
-        self.stream_handler = pf.stream_handler
-        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
-
-    def _initialize_mesh(self):
-        coords = self.stream_handler.fields.pop('coordinates')
-        connec = self.stream_handler.fields.pop('connectivity')
-        self.meshes = [StreamHexahedralMesh(0,
-          self.hierarchy_filename, connec, coords, self)]
-
-    def _setup_data_io(self):
-        if self.stream_handler.io is not None:
-            self.io = self.stream_handler.io
-        else:
-            self.io = io_registry[self.data_style](self.pf)
-
-    def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
-
-class StreamHexahedralStaticOutput(StreamStaticOutput):
-    _hierarchy_class = StreamHexahedralHierarchy
-    _fieldinfo_fallback = StreamFieldInfo
-    _fieldinfo_known = KnownStreamFields
-    _data_style = "stream_hexahedral"
-
-def load_hexahedral_mesh(data, connectivity, coordinates,
-                         sim_unit_to_cm, bbox=None,
-                         sim_time=0.0, periodicity=(True, True, True)):
-    r"""Load a hexahedral mesh of data into yt as a
-    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
-
-    This should allow a semistructured grid of data to be loaded directly into
-    yt and analyzed as would any others.  This comes with several caveats:
-        * Units will be incorrect unless the data has already been converted to
-          cgs.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
-        * Particles may be difficult to integrate.
-
-    Particle fields are detected as one-dimensional fields. The number of particles
-    is set by the "number_of_particles" key in data.
-    
-    Parameters
-    ----------
-    data : dict
-        This is a dict of numpy arrays, where the keys are the field names.
-        There must only be one.
-    connectivity : array_like
-        This should be of size (N,8) where N is the number of zones.
-    coordinates : array_like
-        This should be of size (M,3) where M is the number of vertices
-        indicated in the connectivity matrix.
-    sim_unit_to_cm : float
-        Conversion factor from simulation units to centimeters
-    bbox : array_like (xdim:zdim, LE:RE), optional
-        Size of computational domain in units sim_unit_to_cm
-    sim_time : float, optional
-        The simulation time in seconds
-    periodicity : tuple of booleans
-        Determines whether the data will be treated as periodic along
-        each axis
-
-    """
-
-    domain_dimensions = np.ones(3, "int32") * 2
-    nprocs = 1
-    if bbox is None:
-        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
-    domain_left_edge = np.array(bbox[:, 0], 'float64')
-    domain_right_edge = np.array(bbox[:, 1], 'float64')
-    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
-
-    sfh = StreamDictFieldHandler()
-    
-    particle_types = set_particle_types(data)
-    
-    sfh.update({'connectivity': connectivity,
-                'coordinates': coordinates,
-                0: data})
-    grid_left_edges = domain_left_edge
-    grid_right_edges = domain_right_edge
-    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
-
-    # I'm not sure we need any of this.
-    handler = StreamHandler(
-        grid_left_edges,
-        grid_right_edges,
-        grid_dimensions,
-        grid_levels,
-        -np.ones(nprocs, dtype='int64'),
-        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
-        np.zeros(nprocs).reshape((nprocs,1)),
-        sfh,
-        particle_types=particle_types,
-        periodicity=periodicity
-    )
-
-    handler.name = "HexahedralMeshData"
-    handler.domain_left_edge = domain_left_edge
-    handler.domain_right_edge = domain_right_edge
-    handler.refine_by = 2
-    handler.dimensionality = 3
-    handler.domain_dimensions = domain_dimensions
-    handler.simulation_time = sim_time
-    handler.cosmology_simulation = 0
-
-    spf = StreamHexahedralStaticOutput(handler)
-    spf.units["cm"] = sim_unit_to_cm
-    spf.units['1'] = 1.0
-    spf.units["unitary"] = 1.0
-    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
-    for unit in mpc_conversion.keys():
-        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
-
-    return spf
-
 class StreamOctreeSubset(OctreeSubset):
     domain_id = 1
     _domain_offset = 1
@@ -1190,20 +1051,22 @@
 
     return spf
 
+_cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                dtype=np.int64, count = 8*3)
+_cis.shape = (8, 3)
+
 def hexahedral_connectivity(xgrid, ygrid, zgrid):
     nx = len(xgrid)
     ny = len(ygrid)
     nz = len(zgrid)
-    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
-                    dtype=np.float64, count = nx*ny*nz*3)
-    coords.shape = (nx*ny*nz, 3)
-    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
-                    dtype=np.int64, count = 8*3)
-    cis.shape = (8, 3)
-    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
-                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    coords = np.zeros((nx, ny, nz, 3), dtype="float64", order="C")
+    coords[:,:,:,0] = xgrid[:,None,None]
+    coords[:,:,:,1] = ygrid[None,:,None]
+    coords[:,:,:,2] = zgrid[None,None,:]
+    coords.shape = (nx * ny * nz, 3)
+    cycle = np.rollaxis(np.indices((nx-1,ny-1,nz-1)), 0, 4)
     cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
-    off = cis + cycle[:, np.newaxis]
+    off = _cis + cycle[:, np.newaxis]
     connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
     return coords, connectivity
 

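The rewritten hexahedral_connectivity builds the vertex array by broadcasting the three 1D grids into an (nx, ny, nz, 3) block and derives per-zone corner indices from a precomputed offset table, replacing the earlier itertools-based loops. A minimal standalone sketch of the same construction (illustrative names, NumPy only):

    import numpy as np

    # The 8 corner offsets of a hexahedral zone, matching the module-level _cis
    _corner_offsets = np.array([[i, j, k] for i in (0, 1)
                                for j in (0, 1) for k in (0, 1)], dtype=np.int64)

    def hex_connectivity(xgrid, ygrid, zgrid):
        nx, ny, nz = len(xgrid), len(ygrid), len(zgrid)
        # Vertex coordinates via broadcasting instead of itertools.product
        coords = np.zeros((nx, ny, nz, 3), dtype="float64")
        coords[..., 0] = np.asarray(xgrid)[:, None, None]
        coords[..., 1] = np.asarray(ygrid)[None, :, None]
        coords[..., 2] = np.asarray(zgrid)[None, None, :]
        coords.shape = (nx * ny * nz, 3)
        # One (i, j, k) triple per zone, in C order
        cycle = np.rollaxis(np.indices((nx - 1, ny - 1, nz - 1)), 0, 4)
        cycle.shape = ((nx - 1) * (ny - 1) * (nz - 1), 3)
        off = _corner_offsets + cycle[:, np.newaxis]   # (n_zones, 8, 3)
        # Flatten each (i, j, k) corner index to a row of coords
        return coords, ((off[:, :, 0] * ny) + off[:, :, 1]) * nz + off[:, :, 2]

    coords, conn = hex_connectivity(np.linspace(0., 1., 3),
                                    np.linspace(0., 1., 3),
                                    np.linspace(0., 1., 3))
    assert coords.shape == (27, 3) and conn.shape == (8, 8)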
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -688,7 +688,7 @@
             if len(self.queue) == 0: raise StopIteration
             chunk = YTDataChunk(None, "cache", self.queue, cache=False)
             self.cache = self.geometry_handler.io._read_chunk_data(
-                chunk, self.preload_fields)
+                chunk, self.preload_fields) or {}
         g = self.queue.pop(0)
         g._initialize_cache(self.cache.pop(g.id, {}))
         return g

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -273,7 +273,9 @@
             giter = sorted(gobjs, key = lambda g: -g.Level)
         elif sort is None:
             giter = gobjs
-        if self._preload_implemented and preload_fields is not None and ngz == 0:
+        if preload_fields is None: preload_fields = []
+        preload_fields, _ = self._split_fields(preload_fields)
+        if self._preload_implemented and len(preload_fields) > 0 and ngz == 0:
             giter = ChunkDataCache(list(giter), preload_fields, self)
         for i, og in enumerate(giter):
             if ngz > 0:

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -184,6 +184,24 @@
         for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
             yield self.get_brick_data(node)
 
+    def slice_traverse(self, viewpoint = None):
+        if not hasattr(self.pf.h, "grid"):
+            raise NotImplementedError
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            grid = self.pf.h.grids[node.grid - self._id_offset]
+            dds = grid.dds
+            gle = grid.LeftEdge
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
+            dims = (ri - li).astype('int32')
+            sl = (slice(li[0], ri[0]),
+                  slice(li[1], ri[1]),
+                  slice(li[2], ri[2]))
+            gi = grid.get_global_startindex() + li
+            yield grid, node, (sl, dims, gi)
+
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
         depth = 1

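slice_traverse maps each kd-tree leaf back onto its host grid by converting the node's physical edges into integer cell indices; np.rint absorbs floating-point error at cell boundaries. The conversion step in isolation (a sketch with illustrative names):

    import numpy as np

    def node_slices(grid_left_edge, dds, node_left_edge, node_right_edge):
        # Physical edges -> integer cell indices on the host grid
        li = np.rint((node_left_edge - grid_left_edge) / dds).astype("int32")
        ri = np.rint((node_right_edge - grid_left_edge) / dds).astype("int32")
        sl = (slice(li[0], ri[0]), slice(li[1], ri[1]), slice(li[2], ri[2]))
        return sl, ri - li   # the slice tuple and the node's dimensions

    dds = np.array([0.1, 0.1, 0.1])
    sl, dims = node_slices(np.zeros(3), dds,
                           np.array([0.2, 0.0, 0.3]),
                           np.array([0.5, 0.1, 0.6]))
    # sl == (slice(2, 5), slice(0, 1), slice(3, 6)); dims == [3, 1, 3]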
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -444,19 +444,28 @@
         if new_result is None:
             return
         assert(len(new_result) == len(old_result))
+        nind, oind = None, None
         for k in new_result:
             assert (k in old_result)
+            if oind is None:
+                oind = np.array(np.isnan(old_result[k]))
+            np.logical_or(oind, np.isnan(old_result[k]), oind)
+            if nind is None:
+                nind = np.array(np.isnan(new_result[k]))
+            np.logical_or(nind, np.isnan(new_result[k]), nind)
+        oind = ~oind
+        nind = ~nind
         for k in new_result:
             err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
               (k, self.field, self.weight_field, self.axis)
             if k == 'weight_field' and self.weight_field is None:
                 continue
+            nres, ores = new_result[k][nind], old_result[k][oind]
             if self.decimals is None:
-                assert_equal(new_result[k], old_result[k],
-                             err_msg=err_msg)
+                assert_equal(nres, ores, err_msg=err_msg)
             else:
-                assert_allclose(new_result[k], old_result[k],
-                                 10.**-(self.decimals), err_msg=err_msg)
+                assert_allclose(nres, ores, 10.**-(self.decimals),
+                                err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"

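The answer-testing change compares projections only where neither result set contains NaNs: one combined mask is accumulated across all fields per result set, then applied before the assertion. The pattern in isolation (a sketch, not the framework itself):

    import numpy as np
    from numpy.testing import assert_allclose

    def compare_ignoring_nans(new_result, old_result, decimals=10):
        nind = oind = None
        for k in new_result:
            # OR together the NaN locations of every field
            if nind is None:
                nind = np.isnan(new_result[k])
                oind = np.isnan(old_result[k])
            else:
                nind |= np.isnan(new_result[k])
                oind |= np.isnan(old_result[k])
        nind, oind = ~nind, ~oind   # keep only all-finite entries
        for k in new_result:
            assert_allclose(new_result[k][nind], old_result[k][oind],
                            10.0 ** -decimals)

    new = {"px": np.array([1.0, np.nan, 3.0])}
    old = {"px": np.array([1.0, np.nan, 3.0 + 1.0e-12])}
    compare_ignoring_nans(new, old)   # passes; the NaN entry is masked out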
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,9 +18,17 @@
 cimport cython
 from libc.stdlib cimport malloc, free
 
+from amr_kdtools cimport _find_node, Node
+from grid_traversal cimport VolumeContainer, PartitionedGrid, \
+    vc_index, vc_pos_index
+
 cdef extern from "math.h":
     double fabs(double x)
 
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
 cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
     if i0 > i1: return i0
     return i1
@@ -29,87 +37,407 @@
     if i0 < i1: return i0
     return i1
 
-cdef extern from "union_find.h":
-    ctypedef struct forest_node:
-        void *value
-        forest_node *parent
-        int rank
+cdef struct ContourID
 
-    forest_node* MakeSet(void* value)
-    void Union(forest_node* node1, forest_node* node2)
-    forest_node* Find(forest_node* node)
+cdef struct ContourID:
+    np.int64_t contour_id
+    ContourID *parent
+    ContourID *next
+    ContourID *prev
 
-ctypedef struct CellIdentifier:
-    np.int64_t hindex
-    int level
+cdef ContourID *contour_create(np.int64_t contour_id,
+                               ContourID *prev = NULL):
+    node = <ContourID *> malloc(sizeof(ContourID))
+    #print "Creating contour with id", contour_id
+    node.contour_id = contour_id
+    node.next = node.parent = NULL
+    node.prev = prev
+    if prev != NULL: prev.next = node
+    return node
 
-cdef class GridContourContainer:
-    cdef np.int64_t dims[3]
-    cdef np.int64_t start_indices[3]
-    cdef forest_node **join_tree
-    cdef np.int64_t ncells
+cdef void contour_delete(ContourID *node):
+    if node.prev != NULL: node.prev.next = node.next
+    if node.next != NULL: node.next.prev = node.prev
+    free(node)
 
-    def __init__(self, dimensions, indices):
-        cdef int i
-        self.ncells = 1
-        for i in range(3):
-            self.ncells *= dimensions[i]
-            self.dims[i] = dimensions[i]
-            self.start_indices[i] = indices[i]
-        self.join_tree = <forest_node **> malloc(sizeof(forest_node) 
-                                                 * self.ncells)
-        for i in range(self.ncells): self.join_tree[i] = NULL
+cdef ContourID *contour_find(ContourID *node):
+    cdef ContourID *temp, *root
+    root = node
+    while root.parent != NULL and root.parent != root:
+        root = root.parent
+    if root == root.parent: root.parent = NULL
+    while node.parent != NULL:
+        temp = node.parent
+        node.parent = root
+        node = temp
+    return root
 
+cdef void contour_union(ContourID *node1, ContourID *node2):
+    if node1.contour_id < node2.contour_id:
+        node2.parent = node1
+    elif node2.contour_id < node1.contour_id:
+        node1.parent = node2
+
+cdef struct CandidateContour
+
+cdef struct CandidateContour:
+    np.int64_t contour_id
+    np.int64_t join_id
+    CandidateContour *next
+
+cdef int candidate_contains(CandidateContour *first,
+                            np.int64_t contour_id,
+                            np.int64_t join_id = -1):
+    while first != NULL:
+        if first.contour_id == contour_id \
+            and first.join_id == join_id: return 1
+        first = first.next
+    return 0
+
+cdef CandidateContour *candidate_add(CandidateContour *first,
+                                     np.int64_t contour_id,
+                                     np.int64_t join_id = -1):
+    cdef CandidateContour *node
+    node = <CandidateContour *> malloc(sizeof(CandidateContour))
+    node.contour_id = contour_id
+    node.join_id = join_id
+    node.next = first
+    return node
+
+cdef class ContourTree:
+    # This class is essentially a Union-Find algorithm.  What we want to do is
+    # to, given a connection between two objects, identify the unique ID for
+    # those two objects.  So what we have is a collection of contours, and they
+    # eventually all get joined and contain lots of individual IDs.  But it's
+    # easy to find the *first* contour, i.e., the primary ID, for each of the
+    # subsequent IDs.
+    #
+    # This means that we can connect id 202483 to id 2472, and if id 2472 is
+    # connected to id 143, the connection will *actually* be from 202483 to
+    # 143.  In this way we can speed up joining things and knowing their
+    # "canonical" id.
+    #
+    # This is a multi-step process, since we first want to connect all of the
+    # contours, then we end up wanting to coalesce them, and ultimately we join
+    # them at the end.  The join produces a table that maps the initial to the
+    # final, and we can go through and just update all of those.
+    cdef ContourID *first
+    cdef ContourID *last
+
+    def clear(self):
+        # Here, we wipe out ALL of our contours, but not the pointers to them
+        cdef ContourID *cur, *next
+        cur = self.first
+        while cur != NULL:
+            next = cur.next
+            free(cur)
+            cur = next
+        self.first = self.last = NULL
+
+    def __init__(self):
+        self.first = self.last = NULL
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+        # This adds new contours, from the given contour IDs, to the tree.
+        # Each one can be connected to a parent, as well as to next/prev in the
+        # set of contours belonging to this tree.
+        cdef int i, n
+        n = contour_ids.shape[0]
+        cdef ContourID *cur = self.last
+        for i in range(n):
+            #print i, contour_ids[i]
+            cur = contour_create(contour_ids[i], cur)
+            if self.first == NULL: self.first = cur
+        self.last = cur
+
+    def add_contour(self, np.int64_t contour_id):
+        self.last = contour_create(contour_id, self.last)
+
+    def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
+        # This function looks at each preliminary contour ID belonging to a
+        # given collection of values, and then if need be it creates a new
+        # contour for it.
+        cdef int i, j, k, ni, nj, nk, nc
+        cdef CandidateContour *first = NULL
+        cdef CandidateContour *temp
+        cdef np.int64_t cid
+        nc = 0
+        ni = candidates.shape[0]
+        nj = candidates.shape[1]
+        nk = candidates.shape[2]
+        for i in range(ni):
+            for j in range(nj):
+                for k in range(nk):
+                    cid = candidates[i,j,k]
+                    if cid == -1: continue
+                    if candidate_contains(first, cid) == 0:
+                        nc += 1
+                        first = candidate_add(first, cid)
+        cdef np.ndarray[np.int64_t, ndim=1] contours
+        contours = np.empty(nc, dtype="int64")
+        i = 0
+        # This removes all the temporary contours for this set of contours and
+        # instead constructs a final list of them.
+        while first != NULL:
+            contours[i] = first.contour_id
+            i += 1
+            temp = first.next
+            free(first)
+            first = temp
+        return contours
+
+    def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+        # This coalesces contour IDs, so that we have only the final name
+        # resolutions -- the .join_id from a candidate.  So many items will map
+        # to a single join_id.
+        cdef int i, j, k, ni, nj, nk, nc
+        cdef CandidateContour *first = NULL
+        cdef CandidateContour *temp
+        cdef np.int64_t cid1, cid2
+        nc = 0
+        ni = cjoins.shape[0]
+        for i in range(ni):
+            cid1 = cjoins[i,0]
+            cid2 = cjoins[i,1]
+            if cid1 == -1: continue
+            if cid2 == -1: continue
+            if candidate_contains(first, cid1, cid2) == 0:
+                nc += 1
+                first = candidate_add(first, cid1, cid2)
+        cdef np.ndarray[np.int64_t, ndim=2] contours
+        contours = np.empty((nc,2), dtype="int64")
+        i = 0
+        while first != NULL:
+            contours[i,0] = first.contour_id
+            contours[i,1] = first.join_id
+            i += 1
+            temp = first.next
+            free(first)
+            first = temp
+        return contours
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
+        cdef int i, n, ins
+        cdef np.int64_t cid1, cid2
+        # Okay, this requires lots of iteration, unfortunately
+        cdef ContourID *cur, *root
+        n = join_tree.shape[0]
+        #print "Counting"
+        #print "Checking", self.count()
+        for i in range(n):
+            ins = 0
+            cid1 = join_tree[i, 0]
+            cid2 = join_tree[i, 1]
+            c1 = c2 = NULL
+            cur = self.first
+            #print "Looking for ", cid1, cid2
+            while c1 == NULL or c2 == NULL:
+                if cur.contour_id == cid1:
+                    c1 = contour_find(cur)
+                if cur.contour_id == cid2:
+                    c2 = contour_find(cur)
+                ins += 1
+                cur = cur.next
+                if cur == NULL: break
+            if c1 == NULL or c2 == NULL:
+                if c1 == NULL: print "  Couldn't find ", cid1
+                if c2 == NULL: print "  Couldn't find ", cid2
+                print "  Inspected ", ins
+                raise RuntimeError
+            else:
+                contour_union(c1, c2)
+
+    def count(self):
+        cdef int n = 0
+        cdef ContourID *cur = self.first
+        while cur != NULL:
+            cur = cur.next
+            n += 1
+        return n
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def export(self):
+        cdef int n = self.count()
+        cdef ContourID *cur, *root
+        cur = self.first
+        cdef np.ndarray[np.int64_t, ndim=2] joins 
+        joins = np.empty((n, 2), dtype="int64")
+        n = 0
+        while cur != NULL:
+            root = contour_find(cur)
+            joins[n, 0] = cur.contour_id
+            joins[n, 1] = root.contour_id
+            cur = cur.next
+            n += 1
+        return joins
+    
     def __dealloc__(self):
-        cdef int i
-        for i in range(self.ncells):
-            if self.join_tree[i] != NULL: free(self.join_tree[i])
-        free(self.join_tree)
+        self.clear()
 
-    #def construct_join_tree(self,
-    #            np.ndarray[np.float64_t, ndim=3] field,
-    #            np.ndarray[np.bool_t, ndim=3] mask):
-    #    # This only looks at the components of the grid that are actually
-    #    # inside this grid -- boundary conditions are handled later.
-    #    pass
+cdef class TileContourTree:
+    cdef np.float64_t min_val
+    cdef np.float64_t max_val
 
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-def construct_boundary_relationships(
-        np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
-    # We only look at the boundary and one cell in
-    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj
+    def __init__(self, np.float64_t min_val, np.float64_t max_val):
+        self.min_val = min_val
+        self.max_val = max_val
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
+                                np.ndarray[np.int64_t, ndim=3] contour_ids,
+                                np.int64_t start):
+        # This looks at neighboring values and identifies which zones are
+        # connected to each other within a given brick.
+        cdef int i, j, k, ni, nj, nk, offset
+        cdef int off_i, off_j, off_k, oi, ok, oj
+        cdef ContourID *cur = NULL
+        cdef ContourID *c1, *c2
+        cdef np.float64_t v
+        cdef np.int64_t nc
+        ni = values.shape[0]
+        nj = values.shape[1]
+        nk = values.shape[2]
+        nc = 0
+        cdef ContourID **container = <ContourID**> malloc(
+                sizeof(ContourID*)*ni*nj*nk)
+        for i in range(ni*nj*nk): container[i] = NULL
+        for i in range(ni):
+            for j in range(nj):
+                for k in range(nk):
+                    v = values[i,j,k]
+                    if v < self.min_val or v > self.max_val: continue
+                    nc += 1
+                    c1 = contour_create(nc + start)
+                    cur = container[i*nj*nk + j*nk + k] = c1
+                    for oi in range(3):
+                        off_i = oi - 1 + i
+                        if not (0 <= off_i < ni): continue
+                        for oj in range(3):
+                            off_j = oj - 1 + j
+                            if not (0 <= off_j < nj): continue
+                            for ok in range(3):
+                                if oi == oj == ok == 1: continue
+                                off_k = ok - 1 + k
+                                if off_k > k and off_j > j and off_i > i:
+                                    continue
+                                if not (0 <= off_k < nk): continue
+                                offset = off_i*nj*nk + off_j*nk + off_k
+                                c2 = container[offset]
+                                if c2 == NULL: continue
+                                c2 = contour_find(c2)
+                                contour_union(cur, c2)
+                                cur = contour_find(cur)
+        for i in range(ni):
+            for j in range(nj):
+                for k in range(nk):
+                    c1 = container[i*nj*nk + j*nk + k]
+                    if c1 == NULL: continue
+                    cur = c1
+                    c1 = contour_find(c1)
+                    contour_ids[i,j,k] = c1.contour_id
+        
+        for i in range(ni*nj*nk): 
+            if container[i] != NULL: free(container[i])
+        free(container)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def link_node_contours(Node trunk, contours, ContourTree tree,
+        np.ndarray[np.int64_t, ndim=1] node_ids):
+    cdef int n_nodes = node_ids.shape[0]
+    cdef np.int64_t node_ind
+    cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
+        sizeof(VolumeContainer*) * n_nodes)
+    cdef int i
+    cdef PartitionedGrid pg
+    for i in range(n_nodes):
+        pg = contours[node_ids[i]][2]
+        vcs[i] = pg.container
+    cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
+    for nid, cinfo in sorted(contours.items(), key = lambda a: -a[1][0]):
+        level, node_ind, pg, sl = cinfo
+        construct_boundary_relationships(trunk, tree, node_ind,
+            examined, vcs, node_ids)
+        examined[node_ind] = 1
+
+cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
+                          int axis, np.float64_t *spos):
+    spos[0] = vc.left_edge[0] + i * vc.dds[0]
+    spos[1] = vc.left_edge[1] + j * vc.dds[1]
+    spos[2] = vc.left_edge[2] + k * vc.dds[2]
+    spos[axis] += 0.5 * vc.dds[axis]
+
+cdef inline int spos_contained(VolumeContainer *vc, np.float64_t *spos):
+    cdef int i
+    for i in range(3):
+        if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
+    return 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void construct_boundary_relationships(Node trunk, ContourTree tree, 
+                np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
+                VolumeContainer **vcs,
+                np.ndarray[np.int64_t, ndim=1] node_ids):
+    # We only look at the boundary and find the nodes next to it.
+    # Contours is a dict, keyed by the node.id.
+    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
     cdef np.int64_t c1, c2
-    nx = contour_ids.shape[0]
-    ny = contour_ids.shape[1]
-    nz = contour_ids.shape[2]
+    cdef Node adj_node
+    cdef VolumeContainer *vc1, *vc0 = vcs[nid]
+    nx = vc0.dims[0]
+    ny = vc0.dims[1]
+    nz = vc0.dims[2]
+    cdef int s = (ny*nx + nx*nz + ny*nz) * 18
     # We allocate an array of fixed (maximum) size
-    cdef int s = (ny*nx + nx*nz + ny*nz - 2) * 18
-    cdef np.ndarray[np.int64_t, ndim=2] tree = np.zeros((s, 2), dtype="int64")
+    cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
     cdef int ti = 0
-    # First x-pass
+    cdef int index
+    cdef np.float64_t spos[3]
+
+    # First the x-pass
     for i in range(ny):
         for j in range(nz):
             for offset_i in range(3):
                 oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == ny - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == nz - 1 and oj == 1: continue
-                    c1 = contour_ids[0, i, j]
-                    c2 = contour_ids[1, i + oi, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-                    c1 = contour_ids[nx-1, i, j]
-                    c2 = contour_ids[nx-2, i + oi, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
+                    # Adjust by -1 in x, then oi and oj in y and z
+                    get_spos(vc0, -1, i + oi, j + oj, 0, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, as 0 is a boundary layer
+                        index = vc_index(vc0, 0, i, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+                    # This is outside our vc
+                    get_spos(vc0, nx, i + oi, j + oj, 0, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, one cell past the right edge in x
+                        index = vc_index(vc0, nx - 1, i, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
     # Now y-pass
     for i in range(nx):
         for j in range(nz):
@@ -119,43 +447,75 @@
                 if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == nz - 1 and oj == 1: continue
-                    c1 = contour_ids[i, 0, j]
-                    c2 = contour_ids[i + oi, 1, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-                    c1 = contour_ids[i, ny-1, j]
-                    c2 = contour_ids[i + oi, ny-2, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
+                    get_spos(vc0, i + oi, -1, j + oj, 1, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, as 0 is a boundary layer
+                        index = vc_index(vc0, i, 0, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+
+                    get_spos(vc0, i + oi, ny, j + oj, 1, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, one cell past the right edge in y
+                        index = vc_index(vc0, i, ny - 1, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+
+    # Now z-pass
     for i in range(nx):
         for j in range(ny):
             for offset_i in range(3):
                 oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == ny - 1 and oj == 1: continue
-                    c1 = contour_ids[i, j, 0]
-                    c2 = contour_ids[i + oi, j + oj, 1]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-                    c1 = contour_ids[i, j, nz-1]
-                    c2 = contour_ids[i + oi, j + oj, nz-2]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-    return tree[:ti,:]
+                    get_spos(vc0, i + oi, j + oj, -1, 2, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, as 0 is a boundary layer
+                        index = vc_index(vc0, i, j, 0)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+
+                    get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, one cell past the right edge in z
+                        index = vc_index(vc0, i, j, nz - 1)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+    if ti == 0: return
+    new_joins = tree.cull_joins(joins[:ti,:])
+    tree.add_joins(new_joins)
 
 cdef inline int are_neighbors(
             np.float64_t x1, np.float64_t y1, np.float64_t z1,
@@ -228,16 +588,23 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def update_joins(joins, np.ndarray[np.int64_t, ndim=1] contour_ids):
-    cdef np.int64_t new, old, i, oi
-    cdef int n, on
-    cdef np.ndarray[np.int64_t, ndim=1] old_set
-    #print contour_ids.shape[0]
-    n = contour_ids.shape[0]
-    for new, old_set in joins:
-        #print new
-        on = old_set.shape[0]
-        for i in range(n):
-            for oi in range(on):
-                old = old_set[oi]
-                if contour_ids[i] == old: contour_ids[i] = new
+def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
+                 np.ndarray[np.int64_t, ndim=3] contour_ids,
+                 np.ndarray[np.int64_t, ndim=1] final_joins):
+    cdef np.int64_t new, old
+    cdef int i, j, nj, nf
+    cdef int ci, cj, ck
+    nj = joins.shape[0]
+    nf = final_joins.shape[0]
+    for ci in range(contour_ids.shape[0]):
+        for cj in range(contour_ids.shape[1]):
+            for ck in range(contour_ids.shape[2]):
+                if contour_ids[ci,cj,ck] == -1: continue
+                for j in range(nj):
+                    if contour_ids[ci,cj,ck] == joins[j,0]:
+                        contour_ids[ci,cj,ck] = joins[j,1]
+                        break
+                for j in range(nf):
+                    if contour_ids[ci,cj,ck] == final_joins[j]:
+                        contour_ids[ci,cj,ck] = j + 1
+                        break

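ContourTree is a disjoint-set (union-find) structure: contour_find does the root walk with path compression, and contour_union makes the smaller contour ID the canonical one. The same logic in plain Python, for reference (a sketch; the Cython version manages its own linked list and memory):

    def find(parent, cid):
        # Root walk, then path compression: repoint visited nodes at the root
        root = cid
        while parent.get(root) is not None:
            root = parent[root]
        while parent.get(cid) is not None:
            parent[cid], cid = root, parent[cid]
        return root

    def union(parent, c1, c2):
        r1, r2 = find(parent, c1), find(parent, c2)
        if r1 == r2:
            return
        # The smaller contour ID wins and becomes the canonical ID
        if r1 < r2:
            parent[r2] = r1
        else:
            parent[r1] = r2

    parent = {}
    union(parent, 202483, 2472)
    union(parent, 2472, 143)
    assert find(parent, 202483) == 143   # joins collapse to the primary ID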
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/amr_kdtools.pxd
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -0,0 +1,39 @@
+"""
+AMR kD-Tree Cython Tools
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+cdef class Node
+
+cdef class Node:
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public np.int64_t node_id
+    cdef public np.int64_t node_ind
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public data
+    cdef Split * split
+    cdef int level
+
+cdef int point_in_node(Node node, np.ndarray[np.float64_t, ndim=1] point)
+cdef Node _find_node(Node node, np.float64_t *point)
+cdef int _kd_is_leaf(Node node)

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -25,25 +25,11 @@
 
 DEF Nch = 4
 
-cdef struct Split:
-    int dim
-    np.float64_t pos
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef class Node:
 
-    cdef public Node left
-    cdef public Node right
-    cdef public Node parent
-    cdef public int grid
-    cdef public np.int64_t node_id
-    cdef np.float64_t left_edge[3]
-    cdef np.float64_t right_edge[3]
-    cdef public data
-    cdef Split * split
-
     def __cinit__(self, 
                   Node parent, 
                   Node left, 
@@ -152,11 +138,11 @@
 def kd_traverse(Node trunk, viewpoint=None):
     if viewpoint is None:
         for node in depth_traverse(trunk):
-            if kd_is_leaf(node) and node.grid != -1:
+            if _kd_is_leaf(node) == 1 and node.grid != -1:
                 yield node
     else:
         for node in viewpoint_traverse(trunk, viewpoint):
-            if kd_is_leaf(node) and node.grid != -1:
+            if _kd_is_leaf(node) == 1 and node.grid != -1:
                 yield node
 
 @cython.boundscheck(False)
@@ -172,7 +158,7 @@
     if not should_i_build(node, rank, size):
         return
 
-    if kd_is_leaf(node):
+    if _kd_is_leaf(node) == 1:
         insert_grid(node, gle, gre, gid, rank, size)
     else:
         less_id = gle[node.split.dim] < node.split.pos
@@ -295,7 +281,7 @@
     if not should_i_build(node, rank, size):
         return
 
-    if kd_is_leaf(node):
+    if _kd_is_leaf(node) == 1:
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
@@ -766,11 +752,16 @@
     assert has_l_child == has_r_child
     return has_l_child
 
+cdef int _kd_is_leaf(Node node):
+    if node.left is None or node.right is None:
+        return 1
+    return 0
+
 def step_depth(Node current, Node previous):
     '''
     Takes a single step in the depth-first traversal
     '''
-    if kd_is_leaf(current): # At a leaf, move back up
+    if _kd_is_leaf(current) == 1: # At a leaf, move back up
         previous = current
         current = current.parent
 
@@ -862,7 +853,7 @@
     Takes a single step in the viewpoint based traversal.  Always
     goes to the node furthest away from viewpoint first.
     '''
-    if kd_is_leaf(current): # At a leaf, move back up
+    if _kd_is_leaf(current) == 1: # At a leaf, move back up
         previous = current
         current = current.parent
     elif current.split.dim is None: # This is a dead node
@@ -913,6 +904,13 @@
         inside *= node.right_edge[i] > point[i]
     return inside
 
+cdef Node _find_node(Node node, np.float64_t *point):
+    while _kd_is_leaf(node) == 0:
+        if point[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
 
 def find_node(Node node,
               np.ndarray[np.float64_t, ndim=1] point):
@@ -920,12 +918,5 @@
     Find the AMRKDTree node enclosing a position
     """
     assert(point_in_node(node, point))
-    while not kd_is_leaf(node):
-        if point[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
+    return _find_node(node, <np.float64_t *> point.data)
 
-
-

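The new _find_node is the standard kd-tree point-location walk: descend left or right of each split until reaching a leaf. In plain Python (a sketch; Node here is an illustrative stand-in for the Cython class):

    class Node(object):
        def __init__(self, split=None, left=None, right=None, grid=-1):
            self.split = split   # (dim, pos), or None at a leaf
            self.left = left
            self.right = right
            self.grid = grid

    def find_node(node, point):
        # A node is a leaf when either child is missing
        while node.left is not None and node.right is not None:
            dim, pos = node.split
            node = node.left if point[dim] < pos else node.right
        return node

    root = Node(split=(0, 0.5), left=Node(grid=1), right=Node(grid=2))
    assert find_node(root, (0.25, 0.0, 0.0)).grid == 1
    assert find_node(root, (0.75, 0.0, 0.0)).grid == 2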
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,6 +17,7 @@
 import numpy as np
 cimport numpy as np
 cimport cython
+cimport kdtree_utils
 
 cdef struct VolumeContainer:
     int n_fields
@@ -29,6 +30,20 @@
     np.float64_t idds[3]
     int dims[3]
 
+cdef class PartitionedGrid:
+    cdef public object my_data
+    cdef public object source_mask 
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef public int parent_grid_id
+    cdef VolumeContainer *container
+    cdef kdtree_utils.kdtree *star_list
+    cdef np.float64_t star_er
+    cdef np.float64_t star_sigma_num
+    cdef np.float64_t star_coeff
+    cdef void get_vector_field(self, np.float64_t pos[3],
+                               np.float64_t *vel, np.float64_t *vel_mag)
+
 ctypedef void sample_function(
                 VolumeContainer *vc,
                 np.float64_t v_pos[3],
@@ -45,3 +60,12 @@
                      void *data,
                      np.float64_t *return_t = *,
                      np.float64_t enter_t = *) nogil
+
+cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
+    return (i*vc.dims[1]+j)*vc.dims[2]+k
+
+cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
+    cdef int i, index[3]
+    for i in range(3):
+        index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
+    return vc_index(vc, index[0], index[1], index[2])

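vc_index and vc_pos_index flatten an (i, j, k) cell index, or a physical position, into the row-major offset used by a VolumeContainer's field arrays; idds is the precomputed 1/dds. The arithmetic in Python (illustrative names):

    def vc_index(dims, i, j, k):
        # C-order flattening: k varies fastest
        return (i * dims[1] + j) * dims[2] + k

    def vc_pos_index(dims, left_edge, idds, spos):
        # Truncation maps a position to the cell that contains it
        i, j, k = (int((spos[d] - left_edge[d]) * idds[d]) for d in range(3))
        return vc_index(dims, i, j, k)

    dims, le, idds = (4, 4, 4), (0.0, 0.0, 0.0), (4.0, 4.0, 4.0)
    assert vc_index(dims, 1, 2, 3) == 27
    assert vc_pos_index(dims, le, idds, (0.3, 0.6, 0.9)) == 27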
diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -16,7 +16,6 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-cimport kdtree_utils
 #cimport healpix_interface
 from libc.stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -58,16 +57,6 @@
                 void *data) nogil
 
 cdef class PartitionedGrid:
-    cdef public object my_data
-    cdef public object source_mask 
-    cdef public object LeftEdge
-    cdef public object RightEdge
-    cdef public int parent_grid_id
-    cdef VolumeContainer *container
-    cdef kdtree_utils.kdtree *star_list
-    cdef np.float64_t star_er
-    cdef np.float64_t star_sigma_num
-    cdef np.float64_t star_coeff
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/mesh_utilities.pyx
--- a/yt/utilities/lib/mesh_utilities.pyx
+++ b/yt/utilities/lib/mesh_utilities.pyx
@@ -76,7 +76,7 @@
 def smallest_fwidth(np.ndarray[np.float64_t, ndim=2] coords,
                     np.ndarray[np.int64_t, ndim=2] indices,
                     int offset = 0):
-    cdef np.float64_t fwidth = 1e60
+    cdef np.float64_t fwidth = 1e60, pos
     cdef int nc = indices.shape[0]
     cdef int nv = indices.shape[1]
     if nv != 8:

diff -r 70f7a5bcf40402958fb956f9d19f59f5e1d4293c -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -65,7 +65,9 @@
                 ["yt/utilities/lib/ContourFinding.pyx",
                  "yt/utilities/lib/union_find.c"],
                 include_dirs=["yt/utilities/lib/"],
-                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/utilities/lib/amr_kdtools.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt/commits/5478735e553f/
Changeset:   5478735e553f
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-03 05:56:16
Summary:     Tidying up fits_image and making sure the S-Z module uses it.
Affected #:  2 files

diff -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae -r 5478735e553f939c5a1c496f855432050f2b6a4a yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.utilities.fits_image import FITSImageBuffer
 from yt.data_objects.image_array import ImageArray
 from yt.data_objects.field_info_container import add_field
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
 from yt.utilities.definitions import inv_axis_names
-from yt.visualization.image_writer import write_fits, write_projection
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -272,32 +272,52 @@
         self.data["TeSZ"] = ImageArray(Te)
 
     @parallel_root_only
-    def write_fits(self, filename, clobber=True):
+    def write_fits(self, filename, sky_center=None, sky_scale=None, clobber=True):
         r""" Export images to a FITS file. Writes the SZ distortion in all
         specified frequencies as well as the mass-weighted temperature and the
-        optical depth. Distance units are in kpc.
+        optical depth. Distance units are in kpc, unless *sky_center*
+        and *sky_scale* are specified.
 
         Parameters
         ----------
         filename : string
             The name of the FITS file to be written. 
+        sky_center : tuple of floats, optional
+            The center of the observation in (RA, Dec) in degrees. Only used if
+            converting to sky coordinates.          
+        sky_scale : float, optional
+            The conversion factor from kpc to degrees (degrees per
+            kpc). Only used if converting to sky coordinates.
         clobber : boolean, optional
             If the file already exists, do we overwrite?
 
         Examples
         --------
+        >>> # This example just writes out a FITS file with kpc coords
         >>> szprj.write_fits("SZbullet.fits", clobber=False)
+        >>> # This example uses sky coords
+        >>> sky_scale = 1./3600. # One arcsec per kpc
+        >>> sky_center = (30., 45.) # In degrees
+        >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        coords = {}
-        coords["dx"] = self.dx*self.pf.units["kpc"]
-        coords["dy"] = self.dy*self.pf.units["kpc"]
-        coords["xctr"] = 0.0
-        coords["yctr"] = 0.0
-        coords["units"] = "kpc"
-        other_keys = {"Time" : self.pf.current_time}
-        write_fits(self.data, filename, clobber=clobber, coords=coords,
-                   other_keys=other_keys)
 
+        deltas = np.array([self.dx*self.pf.units["kpc"],
+                           self.dy*self.pf.units["kpc"]])
+
+        if sky_center is None:
+            center = [0.0]*2
+            units = "kpc"
+        else:
+            center = sky_center
+            units = "deg"
+            deltas *= sky_scale
+            
+        fib = FITSImageBuffer(self.data, fields=self.data.keys(),
+                              center=center, units=units,
+                              scale=deltas)
+        fib.update_all_headers("Time", self.pf.current_time)
+        fib.writeto(filename, clobber=clobber)
+        
     @parallel_root_only
     def write_png(self, filename_prefix, cmap_name="algae",
                   log_fields=None):

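The sky-coordinate path in write_fits only rescales the pixel deltas from kpc to degrees before handing them to FITSImageBuffer; e.g., at the one-arcsec-per-kpc scale used in the docstring example:

    dx_kpc = 2.0                  # pixel size in kpc
    sky_scale = 1.0 / 3600.0      # degrees per kpc (one arcsec per kpc)
    dx_deg = dx_kpc * sky_scale   # ~5.6e-4 deg per pixel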
diff -r a662b5a623b6867921ec3dc7774b86a6e9dc89ae -r 5478735e553f939c5a1c496f855432050f2b6a4a yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -19,37 +19,38 @@
     from astropy.io.fits import HDUList, ImageHDU, PrimaryHDU
     from astropy import wcs as pywcs
 except ImportError:
-    ImportError("You need to have AstroPy installed.")
+    pass
 
 class FITSImageBuffer(HDUList):
 
-    def __init__(self, data, fields=None, center=None, units="cm", wcs=None, D_A=None):
+    def __init__(self, data, fields=None, units="cm",
+                 center=None, scale=None):
         r""" Initialize a FITSImageBuffer object.
 
         FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
         WCS information. It inherits from HDUList, so operations such as `writeto` are
         enabled. Images can be constructed from ImageArrays, NumPy arrays, dicts of such
-        arrays, or FixedResolutionBuffers. The latter is the most powerful because WCS
-        coordinate information can be constructed from the buffer's coordinates. 
+        arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter
+        two are the most powerful because WCS information can be constructed from their coordinates.
 
         Parameters
         ----------
-        data : FixedResolutionBuffer, or ImageArray, numpy.ndarray, or dict of such arrays
+        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
+            numpy.ndarray, or dict of such arrays
             The data to be made into a FITS image or images.
         fields : single string or list of strings, optional
             The field names for the data. If *fields* is None and *data* has keys,
             those are used as the fields. If *data* is a single array, one field
             name must be specified.
+        units : string
+            The units of the WCS coordinates, default "cm". 
         center : array_like, optional
-            The coordinates [xctr,yctr] of the images in units *units*. If *wcs* is set
-            this is ignored.
-        units : string, optional
-            The units of the WCS coordinates. If *wcs* is set this is ignored.
-        wcs : astropy.wcs object, optional
-            A WCS object. If set, it overrides the other coordinate-related keyword arguments.
-        D_A : float or (value, unit) pair, optional
-            Angular diameter distance. Only used when constructing WCS information
-            for a FixedResolutionBuffer and *units* == "deg". 
+            The coordinates [xctr,yctr] of the images in units
+            *units*. If not specified, defaults to the origin.
+        scale : tuple of floats, optional
+            Pixel scale in units of *units*. Will be ignored if *data* is
+            a FixedResolutionBuffer or a YTCoveringGrid. Must be
+            specified otherwise, or if *units* is "deg".
 
         Examples
         --------
@@ -57,21 +58,21 @@
         >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
         >>> prj = ds.h.proj(2, "TempkeV", weight_field="Density")
         >>> frb = prj.to_frb((0.5, "mpc"), 800)
+        >>> # This example just uses the FRB and puts the coords in kpc.
         >>> f_kpc = FITSImageBuffer(frb, fields="TempkeV", units="kpc")
-        >>> D_A = 300.0 # in Mpc
+        >>> # This example specifies sky coordinates.
+        >>> scale = [1./3600.]*2 # One arcsec per pixel
         >>> f_deg = FITSImageBuffer(frb, fields="TempkeV", units="deg",
-                                    D_A=(D_A, "mpc"), center=(30., 45.))
+                                    scale=scale, center=(30., 45.))
         >>> f_deg.writeto("temp.fits")
         """
         
-        HDUList.__init__(self)
+        super(FITSImageBuffer, self).__init__()
 
         if isinstance(fields, basestring): fields = [fields]
             
         exclude_fields = ['x','y','z','px','py','pz',
                           'pdx','pdy','pdz','weight_field']
-
-        if center is not None: center = np.array(center)
         
         if hasattr(data, 'keys'):
             img_data = data
@@ -86,11 +87,17 @@
         if len(fields) == 0:
             mylog.error("Please specify one or more fields to write.")
             raise KeyError
-        
+
+        first = True
+    
         for key in fields:
             if key not in exclude_fields:
                 mylog.info("Making a FITS image of field %s" % (key))
-                hdu = ImageHDU(np.array(img_data[key]).transpose(), name=key)
+                if first:
+                    hdu = PrimaryHDU(np.array(img_data[key]))
+                    hdu.name = key
+                    first = False
+                else:
+                    hdu = ImageHDU(np.array(img_data[key]), name=key)
                 self.append(hdu)
 
         self.dimensionality = len(self[0].data.shape)
@@ -100,46 +107,40 @@
         elif self.dimensionality == 3:
             self.nx, self.ny, self.nz = self[0].data.shape
 
-        self.axes = [None]*self.dimensionality
+        has_coords = (isinstance(img_data, FixedResolutionBuffer) or
+                      isinstance(img_data, YTCoveringGridBase))
+        
+        if center is None:
+            if units == "deg":
+                mylog.error("Please specify center=(RA, Dec) in degrees.")
+                raise ValueError
+            elif not has_coords:
+                mylog.warning("Setting center to the origin.")
+                center = [0.0]*self.dimensionality
 
-        if wcs is not None:
-            w = wcs
-        elif isinstance(img_data, FixedResolutionBuffer):
+        if scale is None:
+            if units == "deg" or not has_coords:
+                mylog.error("Please specify scale=(dx,dy[,dz]) in %s." % (units))
+                raise ValueError
+
+        w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
+        w.wcs.crpix = 0.5*(np.array(self.shape)+1)
+
+        proj_type = ["linear"]*self.dimensionality
+
+        if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
             # FRBs are a special case where we have coordinate
             # information, so we take advantage of this and
             # construct the WCS object
             dx = (img_data.bounds[1]-img_data.bounds[0])/self.nx
             dy = (img_data.bounds[3]-img_data.bounds[2])/self.ny
-            if units == "deg":
-                if center == None:
-                    mylog.error("Please specify center=(RA, Dec)")
-                    raise ValueError
-                if D_A == None:
-                    mylog.error("Please specify D_A.")
-                    raise ValueError
-                if iterable(D_A):
-                    D_A = D_A[0]/img_data.pf.units[D_A[1]]
-                dx /= -D_A
-                dy /= D_A
-                dx = np.rad2deg(dx)
-                dy = np.rad2deg(dy)
-                xctr = center[0]
-                yctr = center[1]
-                proj_type = ["RA---TAN","DEC--TAN"]
-            else:
-                dx *= img_data.pf.units[units]
-                dy *= img_data.pf.units[units]
-                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
-                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
-                xctr *= img_data.pf.units[units]
-                yctr *= img_data.pf.units[units]
-                proj_type = ["linear"]*2
-            w = pywcs.WCS(header=self[0].header, naxis=2)
-            w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1)]
-            w.wcs.cdelt = [dx,dy]
-            w.wcs.crval = [xctr,yctr]
-            w.wcs.ctype = proj_type
-            w.wcs.cunit = [units]*2
+            dx *= img_data.pf.units[units]
+            dy *= img_data.pf.units[units]
+            xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
+            yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
+            xctr *= img_data.pf.units[units]
+            yctr *= img_data.pf.units[units]
+            center = [xctr, yctr]
         elif isinstance(img_data, YTCoveringGridBase):
             dx, dy, dz = img_data.dds
             dx *= img_data.pf.units[units]
@@ -147,21 +148,27 @@
             dz *= img_data.pf.units[units]
             center = 0.5*(img_data.left_edge+img_data.right_edge)
             center *= img_data.pf.units[units]
-            w = pywcs.WCS(header=self[0].header, naxis=3)
-            w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.ny+1)]
+        elif units == "deg" and self.dimensionality == 2:
+            dx = -scale[0]
+            dy = scale[1]
+            proj_type = ["RA---TAN","DEC--TAN"]
+        else:
+            dx = scale[0]
+            dy = scale[1]
+            if self.dimensionality == 3: dz = scale[2]
+            
+        w.wcs.crval = center
+        w.wcs.cunit = [units]*self.dimensionality
+        w.wcs.ctype = proj_type
+        
+        if self.dimensionality == 2:
+            w.wcs.cdelt = [dx,dy]
+        elif self.dimensionality == 3:
             w.wcs.cdelt = [dx,dy,dz]
-            w.wcs.crval = center
-            w.wcs.ctype = ["linear"]*3
-            w.wcs.cunit = [units]*3
-        else:
-            w = None
 
-        if w is None:
-            self.wcs = None
-        else:
-            self.set_wcs(w)
+        self._set_wcs(w)
             
-    def set_wcs(self, wcs):
+    def _set_wcs(self, wcs):
         """
         Set the WCS coordinate information for all images
         with a WCS object *wcs*.
@@ -202,7 +209,10 @@
             new_buffer[im2.name] = im2.data
         new_wcs = self.wcs
         return FITSImageBuffer(new_buffer, wcs=new_wcs)
-    
+
+    def writeto(self, fileobj, **kwargs):
+        HDUList(self).writeto(fileobj, **kwargs)
+        
     @property
     def shape(self):
         if self.dimensionality == 2:
@@ -210,4 +220,23 @@
         elif self.dimensionality == 3:
             return self.nx, self.ny, self.nz
 
+    def to_glue(self, label="yt"):
+        from glue.core import DataCollection, Data, Component
+        from glue.core.coordinates import coordinates_from_header
+        from glue.qt.glue_application import GlueApplication
+
+        field_dict = dict((key,self[key].data) for key in self.keys())
+        
+        image = Data(label=label)
+        image.coords = coordinates_from_header(self.wcs.to_header())
+        for component_name in field_dict:
+            comp = Component(field_dict[component_name])
+            image.add_component(comp, component_name)
+        dc = DataCollection([image])
+
+        app = GlueApplication(dc)
+        app.start()
+
+        
+
     


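The refactor above funnels every input type into one crpix/cdelt/crval/ctype
recipe. As a point of reference, here is a minimal standalone sketch of that
recipe with astropy (image size, pixel scale, and center are made-up numbers,
not values from the changeset):

    import numpy as np
    from astropy import wcs as pywcs

    nx, ny = 800, 800        # image dimensions (hypothetical)
    dx, dy = 0.625, 0.625    # pixel scale in kpc (hypothetical)
    xctr, yctr = 0.0, 0.0    # world coordinates of the image center

    w = pywcs.WCS(naxis=2)
    # FITS pixels are 1-indexed, so the central pixel is 0.5*(n+1).
    w.wcs.crpix = [0.5*(nx+1), 0.5*(ny+1)]
    w.wcs.cdelt = [dx, dy]         # coordinate increment per pixel
    w.wcs.crval = [xctr, yctr]     # world coordinate at the reference pixel
    w.wcs.ctype = ["linear", "linear"]
    w.wcs.cunit = ["kpc", "kpc"]

    header = w.to_header()         # keywords to copy into each ImageHDU
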
https://bitbucket.org/yt_analysis/yt/commits/0600dcadd54a/
Changeset:   0600dcadd54a
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-03 06:00:41
Summary:     Fixing up cmap names to prevent conflicts
Affected #:  1 file

diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -6,7 +6,7 @@
 
 ### RdBu ###
 
-color_map_luts['RdBu'] = \
+color_map_luts['RdBu_alt'] = \
    (
 array([ 0.40392157,  0.4154556 ,  0.42698963,  0.43852365,  0.45005768,
         0.4615917 ,  0.47312573,  0.48465976,  0.49619378,  0.50772781,
@@ -403,7 +403,7 @@
 
 ### gist_stern ###
 
-color_map_luts['gist_stern'] = \
+color_map_luts['gist_stern_alt'] = \
    (
 array([ 0.        ,  0.0716923 ,  0.14338459,  0.21507689,  0.28676919,
         0.35846148,  0.43015378,  0.50184608,  0.57353837,  0.64523067,
@@ -582,7 +582,7 @@
 
 ### hot ###
 
-color_map_luts['hot'] = \
+color_map_luts['hot_alt'] = \
    (
 array([ 0.0416    ,  0.05189484,  0.06218969,  0.07248453,  0.08277938,
         0.09307422,  0.10336906,  0.11366391,  0.12395875,  0.1342536 ,
@@ -761,7 +761,7 @@
 
 ### jet ###
 
-color_map_luts['jet'] = \
+color_map_luts['jet_alt'] = \
    (
 array([ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
         0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
@@ -7757,34 +7757,34 @@
          1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
 
-color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
+color_map_luts['B-W_LINEAR'] = color_map_luts['idl00']
 color_map_luts['BLUE'] = color_map_luts['idl01']
 color_map_luts['GRN-RED-BLU-WHT'] = color_map_luts['idl02']
-color_map_luts['RED TEMPERATURE'] = color_map_luts['idl03']
+color_map_luts['RED_TEMPERATURE'] = color_map_luts['idl03']
 color_map_luts['BLUE'] = color_map_luts['idl04']
-color_map_luts['STD GAMMA-II'] = color_map_luts['idl05']
+color_map_luts['STD_GAMMA-II'] = color_map_luts['idl05']
 color_map_luts['PRISM'] = color_map_luts['idl06']
 color_map_luts['RED-PURPLE'] = color_map_luts['idl07']
 color_map_luts['GREEN'] = color_map_luts['idl08']
 color_map_luts['GRN'] = color_map_luts['idl09']
 color_map_luts['GREEN-PINK'] = color_map_luts['idl10']
 color_map_luts['BLUE-RED'] = color_map_luts['idl11']
-color_map_luts['16 LEVEL'] = color_map_luts['idl12']
+color_map_luts['16_LEVEL'] = color_map_luts['idl12']
 color_map_luts['RAINBOW'] = color_map_luts['idl13']
 color_map_luts['STEPS'] = color_map_luts['idl14']
-color_map_luts['STERN SPECIAL'] = color_map_luts['idl15']
+color_map_luts['STERN_SPECIAL'] = color_map_luts['idl15']
 color_map_luts['Haze'] = color_map_luts['idl16']
-color_map_luts['Blue - Pastel - Red'] = color_map_luts['idl17']
+color_map_luts['Blue-Pastel-Red'] = color_map_luts['idl17']
 color_map_luts['Pastels'] = color_map_luts['idl18']
-color_map_luts['Hue Sat Lightness 1'] = color_map_luts['idl19']
-color_map_luts['Hue Sat Lightness 2'] = color_map_luts['idl20']
-color_map_luts['Hue Sat Value 1'] = color_map_luts['idl21']
-color_map_luts['Hue Sat Value 2'] = color_map_luts['idl22']
-color_map_luts['Purple-Red + Stripes'] = color_map_luts['idl23']
+color_map_luts['Hue_Sat_Lightness_1'] = color_map_luts['idl19']
+color_map_luts['Hue_Sat_Lightness_2'] = color_map_luts['idl20']
+color_map_luts['Hue_Sat_Value_1'] = color_map_luts['idl21']
+color_map_luts['Hue_Sat_Value_2'] = color_map_luts['idl22']
+color_map_luts['Purple-Red+Stripes'] = color_map_luts['idl23']
 color_map_luts['Beach'] = color_map_luts['idl24']
-color_map_luts['Mac Style'] = color_map_luts['idl25']
-color_map_luts['Eos A'] = color_map_luts['idl26']
-color_map_luts['Eos B'] = color_map_luts['idl27']
+color_map_luts['Mac_Style'] = color_map_luts['idl25']
+color_map_luts['Eos_A'] = color_map_luts['idl26']
+color_map_luts['Eos_B'] = color_map_luts['idl27']
 color_map_luts['Hardcandy'] = color_map_luts['idl28']
 color_map_luts['Nature'] = color_map_luts['idl29']
 color_map_luts['Ocean'] = color_map_luts['idl30']
@@ -7792,12 +7792,12 @@
 color_map_luts['Plasma'] = color_map_luts['idl32']
 color_map_luts['Blue-Red'] = color_map_luts['idl33']
 color_map_luts['Rainbow'] = color_map_luts['idl34']
-color_map_luts['Blue Waves'] = color_map_luts['idl35']
+color_map_luts['Blue_Waves'] = color_map_luts['idl35']
 color_map_luts['Volcano'] = color_map_luts['idl36']
 color_map_luts['Waves'] = color_map_luts['idl37']
 color_map_luts['Rainbow18'] = color_map_luts['idl38']
-color_map_luts['Rainbow + white'] = color_map_luts['idl39']
-color_map_luts['Rainbow + black'] = color_map_luts['idl40']
+color_map_luts['Rainbow+white'] = color_map_luts['idl39']
+color_map_luts['Rainbow+black'] = color_map_luts['idl40']
 
 # Create a reversed LUT for each of the above defined LUTs
 # and append a "_r" (for "reversed", consistent with the MPL convention).


https://bitbucket.org/yt_analysis/yt/commits/844bffed131e/
Changeset:   844bffed131e
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-03 06:00:54
Summary:     Merging
Affected #:  24 files

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 from .contour_finder import \
-    coalesce_join_tree, \
     identify_contours
 
 from .clump_handling import \

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
             print "Wiping out existing children clumps."
         self.children = []
         if max_val is None: max_val = self.max_val
-        contour_info = identify_contours(self.data, self.field, min_val, max_val,
-                                         self.cached_fields)
-        for cid in contour_info:
-            new_clump = self.data.extract_region(contour_info[cid])
+        nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+        for cid in range(nj):
+            new_clump = self.data.cut_region(
+                    ["obj['Contours'] == %s" % (cid + 1)],
+                    {'contour_slices': cids})
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
 import yt.utilities.data_point_utilities as data_point_utilities
 import yt.utilities.lib as amr_utils
 
-def coalesce_join_tree(jtree1):
-    joins = defaultdict(set)
-    nj = jtree1.shape[0]
-    for i1 in range(nj):
-        current_new = jtree1[i1, 0]
-        current_old = jtree1[i1, 1]
-        for i2 in range(nj):
-            if jtree1[i2, 1] == current_new:
-                current_new = max(current_new, jtree1[i2, 0])
-        jtree1[i1, 0] = current_new
-    for i1 in range(nj):
-        joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
-    updated = -1
-    while updated != 0:
-        keys = list(reversed(sorted(joins.keys())))
-        updated = 0
-        for k1 in keys + keys[::-1]:
-            if k1 not in joins: continue
-            s1 = joins[k1]
-            for k2 in keys + keys[::-1]:
-                if k2 >= k1: continue
-                if k2 not in joins: continue
-                s2 = joins[k2]
-                if k2 in s1:
-                    s1.update(joins.pop(k2))
-                    updated += 1
-                elif not s1.isdisjoint(s2):
-                    s1.update(joins.pop(k2))
-                    s1.update([k2])
-                    updated += 1
-    tr = []
-    for k in joins.keys():
-        v = joins.pop(k)
-        tr.append((k, np.array(list(v), dtype="int64")))
-    return tr
-
 def identify_contours(data_source, field, min_val, max_val,
                           cached_fields=None):
-    cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
-    pbar = get_pbar("First pass", len(data_source._grids))
-    grids = sorted(data_source._grids, key=lambda g: -g.Level)
+    tree = amr_utils.ContourTree()
+    gct = amr_utils.TileContourTree(min_val, max_val)
     total_contours = 0
-    tree = []
-    for gi,grid in enumerate(grids):
-        pbar.update(gi+1)
-        cm = data_source._get_cut_mask(grid)
-        if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
-        old_field_parameters = grid.field_parameters
-        grid.field_parameters = data_source.field_parameters
-        local_ind = np.where( (grid[field] > min_val)
-                            & (grid[field] < max_val) & cm )
-        grid.field_parameters = old_field_parameters
-        if local_ind[0].size == 0: continue
-        kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
-        grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
-        grid["tempContours"][local_ind] = kk[:]
-        cur_max_id -= local_ind[0].size
-        xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
-        cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
-        fd_orig = grid["tempContours"].copy()
-        xi = xi_u[cor_order]
-        yi = yi_u[cor_order]
-        zi = zi_u[cor_order]
-        while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
-            pass
-        total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
-        new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
-        tree += zip(new_contours, new_contours)
-    tree = set(tree)
+    contours = {}
+    empty_mask = np.ones((1,1,1), dtype="uint8")
+    node_ids = []
+    for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+        node.node_ind = len(node_ids)
+        nid = node.node_id
+        node_ids.append(nid)
+        values = g[field][sl].astype("float64")
+        contour_ids = np.zeros(dims, "int64") - 1
+        gct.identify_contours(values, contour_ids, total_contours)
+        new_contours = tree.cull_candidates(contour_ids)
+        total_contours += new_contours.shape[0]
+        tree.add_contours(new_contours)
+        # Now we can create a partitioned grid with the contours.
+        pg = amr_utils.PartitionedGrid(g.id,
+            [contour_ids.view("float64")],
+            empty_mask, g.dds * gi, g.dds * (gi + dims),
+            dims.astype("int64"))
+        contours[nid] = (g.Level, node.node_ind, pg, sl)
+    node_ids = np.array(node_ids)
+    trunk = data_source.tiles.tree.trunk
+    mylog.info("Linking node (%s) contours.", len(contours))
+    amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+    mylog.info("Linked.")
+    #joins = tree.cull_joins(bt)
+    #tree.add_joins(joins)
+    joins = tree.export()
+    contour_ids = defaultdict(list)
+    pbar = get_pbar("Updating joins ... ", len(contours))
+    final_joins = np.unique(joins[:,1])
+    for i, nid in enumerate(sorted(contours)):
+        level, node_ind, pg, sl = contours[nid]
+        ff = pg.my_data[0].view("int64")
+        amr_utils.update_joins(joins, ff, final_joins)
+        contour_ids[pg.parent_grid_id].append((sl, ff))
+        pbar.update(i)
     pbar.finish()
-    pbar = get_pbar("Calculating joins ", len(data_source._grids))
-    grid_set = set()
-    for gi,grid in enumerate(grids):
-        pbar.update(gi)
-        cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
-        grid_set.update(set(cg._grids))
-        fd = cg["tempContours"].astype('int64')
-        boundary_tree = amr_utils.construct_boundary_relationships(fd)
-        tree.update(((a, b) for a, b in boundary_tree))
-    pbar.finish()
-    sort_new = np.array(list(tree), dtype='int64')
-    mylog.info("Coalescing %s joins", sort_new.shape[0])
-    joins = coalesce_join_tree(sort_new)
-    #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
-    pbar = get_pbar("Joining ", len(joins))
-    # This process could and should be done faster
-    print "Joining..."
-    t1 = time.time()
-    ff = data_source["tempContours"].astype("int64")
-    amr_utils.update_joins(joins, ff)
-    data_source["tempContours"] = ff.astype("float64")
-    #for i, new in enumerate(sorted(joins.keys())):
-    #    pbar.update(i)
-    #    old_set = joins[new]
-    #    for old in old_set:
-    #        if old == new: continue
-    #        i1 = (data_source["tempContours"] == old)
-    #        data_source["tempContours"][i1] = new
-    t2 = time.time()
-    print "Finished joining in %0.2e seconds" % (t2-t1)
-    pbar.finish()
-    data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
-    del data_source.field_data["tempContours"] # Force a reload from the grids
-    data_source.get_data("tempContours")
-    contour_ind = {}
-    i = 0
-    for contour_id in np.unique(data_source["tempContours"]):
-        if contour_id == -1: continue
-        contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
-        mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
-        i += 1
-    mylog.info("Identified %s contours between %0.5e and %0.5e",
-               len(contour_ind.keys()),min_val,max_val)
-    for grid in chain(grid_set):
-        grid.field_data.pop("tempContours", None)
-    del data_source.field_data["tempContours"]
-    return contour_ind
+    rv = dict()
+    rv.update(contour_ids)
+    # NOTE: Because joins can appear in both a "final join" and a subsequent
+    # "join", we can't know for sure how many unique joins there are without
+    # checking if no cells match or doing an expensive operation checking for
+    # the unique set of final join values.
+    return final_joins.size, rv
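
The new identify_contours works in two phases: each kd-tree tile is labeled
independently (TileContourTree), and ContourTree then merges labels that meet
at tile boundaries, which is a union-find problem. A minimal pure-Python
sketch of that merge step (the labels and joins are invented):

    def find(parent, i):
        # Walk up to the root label, compressing the path as we go.
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    def resolve_joins(labels, joins):
        # labels: preliminary contour IDs; joins: (a, b) pairs found
        # where tiles touch.  Returns a map from ID to canonical ID.
        parent = dict((i, i) for i in labels)
        for a, b in joins:
            ra, rb = find(parent, a), find(parent, b)
            # The smaller ID wins, as in ContourTree's contour_union.
            if ra < rb:
                parent[rb] = ra
            elif rb < ra:
                parent[ra] = rb
        return dict((i, find(parent, i)) for i in labels)

    # Contours 5 and 7 meet at one boundary, 7 and 2 at another:
    # resolve_joins([2, 5, 7], [(5, 7), (7, 2)]) -> {2: 2, 5: 2, 7: 2}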

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.utilities.fits_image import FITSImageBuffer
 from yt.data_objects.image_array import ImageArray
 from yt.data_objects.field_info_container import add_field
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
 from yt.utilities.definitions import inv_axis_names
-from yt.visualization.image_writer import write_fits, write_projection
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -272,32 +272,52 @@
         self.data["TeSZ"] = ImageArray(Te)
 
     @parallel_root_only
-    def write_fits(self, filename, clobber=True):
+    def write_fits(self, filename, sky_center=None, sky_scale=None, clobber=True):
         r""" Export images to a FITS file. Writes the SZ distortion in all
         specified frequencies as well as the mass-weighted temperature and the
-        optical depth. Distance units are in kpc.
+        optical depth. Distance units are in kpc, unless *sky_center*
+        and *sky_scale* are specified.
 
         Parameters
         ----------
         filename : string
             The name of the FITS file to be written. 
+        sky_center : tuple of floats, optional
+            The center of the observation in (RA, Dec) in degrees. Only used if
+            converting to sky coordinates.          
+        sky_scale : float, optional
+            The scale in degrees per kpc. Only used if
+            converting to sky coordinates.
         clobber : boolean, optional
             If the file already exists, do we overwrite?
 
         Examples
         --------
+        >>> # This example just writes out a FITS file with kpc coords
         >>> szprj.write_fits("SZbullet.fits", clobber=False)
+        >>> # This example uses sky coords
+        >>> sky_scale = 1./3600. # One arcsec per kpc
+        >>> sky_center = (30., 45.) # In degrees
+        >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        coords = {}
-        coords["dx"] = self.dx*self.pf.units["kpc"]
-        coords["dy"] = self.dy*self.pf.units["kpc"]
-        coords["xctr"] = 0.0
-        coords["yctr"] = 0.0
-        coords["units"] = "kpc"
-        other_keys = {"Time" : self.pf.current_time}
-        write_fits(self.data, filename, clobber=clobber, coords=coords,
-                   other_keys=other_keys)
 
+        deltas = np.array([self.dx*self.pf.units["kpc"],
+                           self.dy*self.pf.units["kpc"]])
+
+        if sky_center is None:
+            center = [0.0]*2
+            units = "kpc"
+        else:
+            center = sky_center
+            units = "deg"
+            deltas *= sky_scale
+            
+        fib = FITSImageBuffer(self.data, fields=self.data.keys(),
+                              center=center, units=units,
+                              scale=deltas)
+        fib.update_all_headers("Time", self.pf.current_time)
+        fib.writeto(filename, clobber=clobber)
+        
     @parallel_root_only
     def write_png(self, filename_prefix, cmap_name="algae",
                   log_fields=None):
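
The kpc-to-degree conversion above is a single multiplication by *sky_scale*.
A quick worked example using the docstring's numbers:

    dx_kpc = 2.0                  # pixel scale from the projection, in kpc
    sky_scale = 1.0/3600.0        # degrees per kpc, i.e. one arcsec per kpc
    dx_deg = dx_kpc * sky_scale   # 2.0/3600 deg = 2 arcsec per pixel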

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
 
     Parameters
     ----------
-    axis : int
-        The axis along which to slice.  Can be 0, 1, or 2 for x, y, z.
     field : string
         This is the field which will be "projected" along the axis.  If
         multiple are specified (in a list) they will all be projected in
         the first pass.
+    axis : int
+        The axis along which to slice.  Can be 0, 1, or 2 for x, y, z.
     weight_field : string
         If supplied, the field being projected will be multiplied by this
         weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
         for chunk in self.data_source.chunks([], "io"):
             self._initialize_chunk(chunk, tree)
         # This needs to be parallel_objects-ified
-        for chunk in parallel_objects(self.data_source.chunks(
-                chunk_fields, "io")): 
-            mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
-                get_memory_usage()/1024.)
-            self._handle_chunk(chunk, fields, tree)
+        with self.data_source._field_parameter_state(self.field_parameters):
+            for chunk in parallel_objects(self.data_source.chunks(
+                                         chunk_fields, "io")):
+                mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+                    get_memory_usage()/1024.)
+                self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
             merge_style = -1
@@ -308,6 +309,7 @@
             nvals *= convs[None,:]
         # We now convert to half-widths and center-points
         data = {}
+        #non_nan = ~np.any(np.isnan(nvals), axis=-1)
         data['px'] = px
         data['py'] = py
         data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             mylog.debug("Setting field %s", field)
-            self[field] = field_data[fi].ravel()
-        for i in data.keys(): self[i] = data.pop(i)
+            self[field] = field_data[fi].ravel()#[non_nan]
+        for i in data.keys():
+            self[i] = data.pop(i)#[non_nan]
         mylog.info("Projection completed")
 
     def _initialize_chunk(self, chunk, tree):

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
     ParallelAnalysisInterface
 from yt.utilities.parameter_file_storage import \
     ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+    AMRKDTree
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
     NeedsGridType, ValidateSpatial
@@ -365,6 +367,13 @@
         return s
 
     @contextmanager
+    def _field_parameter_state(self, field_parameters):
+        old_field_parameters = self.field_parameters
+        self.field_parameters = field_parameters
+        yield
+        self.field_parameters = old_field_parameters
+
+    @contextmanager
     def _field_type_state(self, ftype, finfo, obj = None):
         if obj is None: obj = self
         old_particle_type = obj._current_particle_type
@@ -407,6 +416,14 @@
             explicit_fields.append((ftype, fname))
         return explicit_fields
 
+    _tree = None
+
+    @property
+    def tiles(self):
+        if self._tree is not None: return self._tree
+        self._tree = AMRKDTree(self.pf, data_source=self)
+        return self._tree
+
     @property
     def blocks(self):
         for io_chunk in self.chunks([], "io"):
@@ -751,11 +768,13 @@
         self._grids = None
         self.quantities = DerivedQuantityCollection(self)
 
-    def cut_region(self, field_cuts):
+    def cut_region(self, field_cuts, field_parameters = None):
         """
-        Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
-        conditions to the fields in your data object.
+        Return a YTCutRegionBase, where the object cells are cut on the
+        fly with a set of field_cuts.  It is very useful for applying
+        conditions to the fields in your data object.  Note that in previous
+        versions of yt, this accepted 'grid' as a variable, but presently it
+        requires 'obj'.
         
         Examples
         --------
@@ -763,19 +782,12 @@
 
         >>> pf = load("RedshiftOutput0005")
         >>> ad = pf.h.all_data()
-        >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+        >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
         >>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
         """
-        return YTValueCutExtractionBase(self, field_cuts)
-
-    def extract_region(self, indices):
-        """
-        Return an ExtractedRegion where the points contained in it are defined
-        as the points in `this` data object with the given *indices*.
-        """
-        fp = self.field_parameters.copy()
-        return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+        cr = self.pf.h.cut_region(self, field_cuts,
+                                  field_parameters = field_parameters)
+        return cr
 
     def extract_isocontours(self, field, value, filename = None,
                             rescale = False, sample_values = None):
@@ -966,12 +978,15 @@
                     ff, mask, grid.LeftEdge, grid.dds)
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
-                                log_space=True, cumulative=True, cache=False):
+                               log_space=True, cumulative=True):
         """
         This function will create a set of contour objects, defined
         by having connected cell structures, which can then be
         studied and used to 'paint' their source grids, thus enabling
         them to be plotted.
+
+        Note that this function *can* return a connected set object that has no
+        member values.
         """
         if log_space:
             cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +994,6 @@
         else:
             cons = np.linspace(min_val, max_val, num_levels+1)
         contours = {}
-        if cache: cached_fields = defaultdict(lambda: dict())
-        else: cached_fields = None
         for level in range(num_levels):
             contours[level] = {}
             if cumulative:
@@ -988,10 +1001,11 @@
             else:
                 mv = cons[level+1]
             from yt.analysis_modules.level_sets.api import identify_contours
-            cids = identify_contours(self, field, cons[level], mv,
-                                     cached_fields)
-            for cid, cid_ind in cids.items():
-                contours[level][cid] = self.extract_region(cid_ind)
+            nj, cids = identify_contours(self, field, cons[level], mv)
+            for cid in range(nj):
+                contours[level][cid] = self.cut_region(
+                    ["obj['Contours'] == %s" % (cid + 1)],
+                    {'contour_slices': cids})
         return cons, contours
 
     def paint_grids(self, field, value, default_value=None):
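
The _field_parameter_state context manager added above is a small reusable
pattern: swap an attribute in, yield, and restore it afterwards. A standalone
sketch of the same pattern (the DataObject class is hypothetical; note that
the committed version omits the try/finally, so an exception inside the with
block would leave the temporary parameters in place):

    from contextlib import contextmanager

    class DataObject(object):
        def __init__(self):
            self.field_parameters = {}

        @contextmanager
        def _field_parameter_state(self, field_parameters):
            # Swap in the caller's parameters; restore the old ones on exit.
            old = self.field_parameters
            self.field_parameters = field_parameters
            try:
                yield
            finally:
                self.field_parameters = old

    obj = DataObject()
    with obj._field_parameter_state({"contour_slices": {}}):
        pass  # reads of obj.field_parameters see the temporary dict here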

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
         self.set_field_parameter('e0', e0)
         self.set_field_parameter('e1', e1)
         self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+    """
+    This is a data object designed to allow individuals to apply logical
+    operations to fields or particles and filter as a result of those cuts.
+
+    Parameters
+    ----------
+    base_object : YTSelectionContainer3D
+        The object to which cuts will be applied.
+    conditionals : list of strings
+        A list of conditionals that will be evaluated.  In the namespace
+        available, these conditionals will have access to 'obj' which is a data
+        object of unknown shape, and they must generate a boolean array.  For
+        instance, conditionals = ["obj['temperature'] < 1e3"]
+
+    Examples
+    --------
+
+    >>> pf = load("DD0010/moving7_0010")
+    >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+    >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+    """
+    _type_name = "cut_region"
+    _con_args = ("base_object", "conditionals")
+    def __init__(self, base_object, conditionals, pf = None,
+                 field_parameters = None):
+        super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+        self.conditionals = ensure_list(conditionals)
+        self.base_object = base_object
+        self._selector = None
+        # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+        # ires and get_data
+
+    @property
+    def selector(self):
+        raise NotImplementedError
+
+    def chunks(self, fields, chunking_style, **kwargs):
+        # We actually want to chunk the sub-chunk, not ourselves.  We have no
+        # chunks to speak of, as we do no data IO.
+        for chunk in self.hierarchy._chunk(self.base_object,
+                                           chunking_style,
+                                           **kwargs):
+            with self.base_object._chunked_read(chunk):
+                self.get_data(fields)
+                yield self
+
+    def get_data(self, fields = None):
+        fields = ensure_list(fields)
+        self.base_object.get_data(fields)
+        ind = self._cond_ind
+        for field in fields:
+            self.field_data[field] = self.base_object[field][ind]
+
+    @property
+    def _cond_ind(self):
+        ind = None
+        obj = self.base_object
+        with obj._field_parameter_state(self.field_parameters):
+            for cond in self.conditionals:
+                res = eval(cond)
+                if ind is None: ind = res
+                np.logical_and(res, ind, ind)
+        return ind
+
+    @property
+    def icoords(self):
+        return self.base_object.icoords[self._cond_ind,:]
+
+    @property
+    def fcoords(self):
+        return self.base_object.fcoords[self._cond_ind,:]
+
+    @property
+    def ires(self):
+        return self.base_object.ires[self._cond_ind]
+
+    @property
+    def fwidth(self):
+        return self.base_object.fwidth[self._cond_ind,:]
+
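
The heart of YTCutRegionBase is _cond_ind: each conditional string is
evaluated with "obj" bound to the base object, and the resulting boolean
arrays are AND-ed together. The same logic in isolation, with a plain dict
standing in for the data object:

    import numpy as np

    def conditional_indices(obj, conditionals):
        # Evaluate each conditional with "obj" in scope; AND the results.
        ind = None
        for cond in conditionals:
            res = eval(cond)  # e.g. "obj['Temperature'] > 1e3"
            ind = res if ind is None else np.logical_and(ind, res)
        return ind

    obj = {"Temperature": np.array([5.0e2, 2.0e3, 8.0e3]),
           "Density": np.array([0.1, 0.9, 0.4])}
    mask = conditional_indices(obj, ["obj['Temperature'] > 1e3",
                                     "obj['Density'] < 0.5"])
    # mask -> array([False, False,  True])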

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
 
 def test_cut_region():
     # We decompose in different ways
-    return #TESTDISABLED
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs,
             fields = ("Density", "Temperature", "x-velocity"))
         # We'll test two objects
         dd = pf.h.all_data()
-        r = dd.cut_region( [ "grid['Temperature'] > 0.5",
-                             "grid['Density'] < 0.75",
-                             "grid['x-velocity'] > 0.25" ])
+        r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+                             "obj['Density'] < 0.75",
+                             "obj['x-velocity'] > 0.25" ])
         t = ( (dd["Temperature"] > 0.5 ) 
             & (dd["Density"] < 0.75 )
             & (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
         yield assert_equal, np.all(r["x-velocity"] > 0.25), True
         yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
         yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+        r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
         t2 = (r["Temperature"] < 0.75)
         yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
         yield assert_equal, np.all(r2["Temperature"] < 0.75), True
 
-def test_extract_region():
-    # We decompose in different ways
-    return #TESTDISABLED
-    for nprocs in [1, 2, 4, 8]:
-        pf = fake_random_pf(64, nprocs = nprocs,
-            fields = ("Density", "Temperature", "x-velocity"))
-        # We'll test two objects
+        # Now we can test some projections
         dd = pf.h.all_data()
-        t = ( (dd["Temperature"] > 0.5 ) 
-            & (dd["Density"] < 0.75 )
-            & (dd["x-velocity"] > 0.25 ) )
-        r = dd.extract_region(t)
-        yield assert_equal, np.all(r["Temperature"] > 0.5), True
-        yield assert_equal, np.all(r["Density"] < 0.75), True
-        yield assert_equal, np.all(r["x-velocity"] > 0.25), True
-        yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
-        yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        t2 = (r["Temperature"] < 0.75)
-        r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
-        yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
-        yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-        t3 = (r["Temperature"] < 0.75)
-        r3 = r.extract_region( t3 )
-        yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
-        yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+        cr = dd.cut_region(["obj['Ones'] > 0"])
+        for weight in [None, "Density"]:
+            p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+            p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+            for f in p1.field_data:
+                yield assert_almost_equal, p1[f], p2[f]
+        cr = dd.cut_region(["obj['Density'] > 0.25"])
+        p2 = pf.h.proj("Density", 2, data_source=cr)
+        yield assert_equal, p2["Density"].max() > 0.25, True
+        p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+        yield assert_equal, p2["Density"].max() > 0.25, True

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
-          display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
-          validators=[ValidateSpatial(0), ValidateGridType()],
-          take_log=False, display_field=False)
+    fd = data.get_field_parameter("contour_slices")
+    vals = data["Ones"] * -1
+    if fd is None or fd == 0.0:
+        return vals
+    for sl, v in fd.get(data.id, []):
+        vals[sl] = v
+    return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+          take_log=False,
+          display_field=False,
+          projection_conversion="1",
+          function=_Contours)
 
 def obtain_velocities(data):
     return obtain_rv_vec(data)
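
The reworked _Contours field paints contour IDs into an array of -1s using
the per-grid (slice, value) pairs passed through the "contour_slices" field
parameter. The fill step in isolation (shapes and IDs are invented):

    import numpy as np

    def paint_contours(shape, slices_and_values):
        # Start from -1 ("no contour") and stamp each slice with its ID.
        vals = -np.ones(shape, dtype="float64")
        for sl, v in slices_and_values:
            vals[sl] = v
        return vals

    grid = paint_contours(
        (4, 4, 1),
        [((slice(0, 2), slice(0, 2), slice(None)), 1),
         ((slice(2, 4), slice(2, 4), slice(None)), 2)])
    # Two 2x2x1 blocks now carry contour IDs 1 and 2; the rest stays -1.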

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 import h5py
+from yt.utilities.math_utils import prec_accum
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -71,6 +72,7 @@
         for field in fields:
             ftype, fname = field
             dt = f["/%s" % fname].dtype
+            dt = prec_accum[dt]
             if dt == "float32": dt = "float64"
             rv[field] = np.empty(size, dtype=dt)
         ng = sum(len(c.objs) for c in chunks)
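
prec_accum maps a storage dtype to a wider accumulation dtype, so reductions
over float32 disk data are carried out in float64. A sketch of the idea (this
table is illustrative; the real one lives in yt.utilities.math_utils):

    import numpy as np

    # Accumulate in at least 64 bits, whatever is stored on disk.
    prec_accum = {
        np.dtype("float32"): np.dtype("float64"),
        np.dtype("int32"):   np.dtype("int64"),
    }

    dt = np.dtype("float32")        # dtype as stored in the file
    dt = prec_accum.get(dt, dt)     # dtype used for the read buffer
    rv = np.empty(100, dtype=dt)    # buffer now accumulates in float64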

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -12,6 +12,7 @@
     config.add_subpackage("boxlib")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")
+    config.add_subpackage("fits")
     config.add_subpackage("flash")
     config.add_subpackage("gdf")
     config.add_subpackage("moab")

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -688,7 +688,7 @@
             if len(self.queue) == 0: raise StopIteration
             chunk = YTDataChunk(None, "cache", self.queue, cache=False)
             self.cache = self.geometry_handler.io._read_chunk_data(
-                chunk, self.preload_fields)
+                chunk, self.preload_fields) or {}
         g = self.queue.pop(0)
         g._initialize_cache(self.cache.pop(g.id, {}))
         return g

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -273,7 +273,9 @@
             giter = sorted(gobjs, key = lambda g: -g.Level)
         elif sort is None:
             giter = gobjs
-        if self._preload_implemented and preload_fields is not None and ngz == 0:
+        if preload_fields is None: preload_fields = []
+        preload_fields, _ = self._split_fields(preload_fields)
+        if self._preload_implemented and len(preload_fields) > 0 and ngz == 0:
             giter = ChunkDataCache(list(giter), preload_fields, self)
         for i, og in enumerate(giter):
             if ngz > 0:

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -116,6 +116,9 @@
     GadgetFieldInfo, add_gadget_field, \
     TipsyStaticOutput, TipsyFieldInfo, add_tipsy_field
 
+from yt.frontends.fits.api import \
+    FITSStaticOutput, FITSFieldInfo, add_fits_field
+
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods
 available_analysis_modules = get_available_modules()

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -184,6 +184,24 @@
         for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
             yield self.get_brick_data(node)
 
+    def slice_traverse(self, viewpoint = None):
+        if not hasattr(self.pf.h, "grid"):
+            raise NotImplementedError
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            grid = self.pf.h.grids[node.grid - self._id_offset]
+            dds = grid.dds
+            gle = grid.LeftEdge
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
+            dims = (ri - li).astype('int32')
+            sl = (slice(li[0], ri[0]),
+                  slice(li[1], ri[1]),
+                  slice(li[2], ri[2]))
+            gi = grid.get_global_startindex() + li
+            yield grid, node, (sl, dims, gi)
+
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
         depth = 1
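
slice_traverse converts each kd-tree node's physical bounds into integer
index ranges within its host grid; rint((edge - grid_left)/dds) is the key
step. With made-up numbers:

    import numpy as np

    dds = np.array([0.25, 0.25, 0.25])   # grid cell widths
    gle = np.array([0.0, 0.0, 0.0])      # grid left edge
    nle = np.array([0.5, 0.25, 0.0])     # node left edge
    nre = np.array([1.0, 0.75, 0.5])     # node right edge

    li = np.rint((nle - gle) / dds).astype("int32")   # -> [2, 1, 0]
    ri = np.rint((nre - gle) / dds).astype("int32")   # -> [4, 3, 2]
    sl = tuple(slice(l, r) for l, r in zip(li, ri))   # cell selection
    dims = ri - li                                    # node shape in cells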

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -444,19 +444,28 @@
         if new_result is None:
             return
         assert(len(new_result) == len(old_result))
+        nind, oind = None, None
         for k in new_result:
             assert (k in old_result)
+            if oind is None:
+                oind = np.array(np.isnan(old_result[k]))
+            np.logical_or(oind, np.isnan(old_result[k]), oind)
+            if nind is None:
+                nind = np.array(np.isnan(new_result[k]))
+            np.logical_or(nind, np.isnan(new_result[k]), nind)
+        oind = ~oind
+        nind = ~nind
         for k in new_result:
             err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
               (k, self.field, self.weight_field, self.axis)
             if k == 'weight_field' and self.weight_field is None:
                 continue
+            nres, ores = new_result[k][nind], old_result[k][oind]
             if self.decimals is None:
-                assert_equal(new_result[k], old_result[k],
-                             err_msg=err_msg)
+                assert_equal(nres, ores, err_msg=err_msg)
             else:
-                assert_allclose(new_result[k], old_result[k],
-                                 10.**-(self.decimals), err_msg=err_msg)
+                assert_allclose(nres, ores, 10.**-(self.decimals),
+                                err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
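
The answer-testing change compares projections only where neither the old nor
the new result is NaN, building one combined mask per result set. The same
pattern in isolation:

    import numpy as np

    def combined_valid_mask(result):
        # OR together the NaN masks of every field, then invert:
        # True where all fields are finite.
        mask = None
        for k in result:
            nan = np.isnan(result[k])
            mask = nan if mask is None else np.logical_or(mask, nan)
        return ~mask

    old = {"Density": np.array([1.0, np.nan, 3.0])}
    new = {"Density": np.array([1.1, np.nan, 3.0])}
    oind, nind = combined_valid_mask(old), combined_valid_mask(new)
    # Compare old["Density"][oind] against new["Density"][nind].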

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/fits_image.py
--- /dev/null
+++ b/yt/utilities/fits_image.py
@@ -0,0 +1,242 @@
+"""
+FITSImageBuffer Class
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import mylog, iterable
+from yt.visualization.fixed_resolution import FixedResolutionBuffer
+from yt.data_objects.construction_data_containers import YTCoveringGridBase
+
+try:
+    from astropy.io.fits import HDUList, ImageHDU, PrimaryHDU
+    from astropy import wcs as pywcs
+except ImportError:
+    pass
+
+class FITSImageBuffer(HDUList):
+
+    def __init__(self, data, fields=None, units="cm",
+                 center=None, scale=None):
+        r""" Initialize a FITSImageBuffer object.
+
+        FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
+        WCS information. It inherits from HDUList, so operations such as `writeto` are
+        enabled. Images can be constructed from ImageArrays, NumPy arrays, dicts of such
+        arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter
+        two are the most powerful because WCS information can be constructed from their coordinates.
+
+        Parameters
+        ----------
+        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
+            numpy.ndarray, or dict of such arrays
+            The data to be made into a FITS image or images.
+        fields : single string or list of strings, optional
+            The field names for the data. If *fields* is None and *data* has keys,
+            those keys will be used for the fields. If *data* is a single array,
+            one field name must be specified.
+        units : string
+            The units of the WCS coordinates, default "cm".
+        center : array_like, optional
+            The coordinates [xctr,yctr] of the images, in units of *units*.
+            If not specified, defaults to the origin (or to the data's own
+            center for FixedResolutionBuffers and YTCoveringGrids); required
+            if *units* is "deg".
+        scale : tuple of floats, optional
+            Pixel scale in units of *units*. Ignored if *data* is a
+            FixedResolutionBuffer or a YTCoveringGrid, since these carry
+            their own coordinate information; must be specified otherwise,
+            or if *units* is "deg".
+
+        Examples
+        --------
+
+        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
+        >>> prj = ds.h.proj(2, "TempkeV", weight_field="Density")
+        >>> frb = prj.to_frb((0.5, "mpc"), 800)
+        >>> # This example just uses the FRB and puts the coords in kpc.
+        >>> f_kpc = FITSImageBuffer(frb, fields="TempkeV", units="kpc")
+        >>> # This example specifies sky coordinates.
+        >>> scale = [1./3600.]*2 # One arcsec per pixel
+        >>> f_deg = FITSImageBuffer(frb, fields="TempkeV", units="deg",
+                                    scale=scale, center=(30., 45.))
+        >>> f_deg.writeto("temp.fits")
+        """
+        
+        super(FITSImageBuffer, self).__init__()
+
+        if isinstance(fields, basestring): fields = [fields]
+            
+        exclude_fields = ['x','y','z','px','py','pz',
+                          'pdx','pdy','pdz','weight_field']
+        
+        if hasattr(data, 'keys'):
+            img_data = data
+        else:
+            img_data = {}
+            if fields is None:
+                mylog.error("Please specify a field name for this array.")
+                raise KeyError
+            img_data[fields[0]] = data
+
+        if fields is None: fields = img_data.keys()
+        if len(fields) == 0:
+            mylog.error("Please specify one or more fields to write.")
+            raise KeyError
+
+        first = True
+    
+        for key in fields:
+            if key not in exclude_fields:
+                mylog.info("Making a FITS image of field %s" % (key))
+                if first:
+                    hdu = PrimaryHDU(np.array(img_data[key]))
+                    hdu.name = key
+                    first = False
+                else:
+                    hdu = ImageHDU(np.array(img_data[key]), name=key)
+                self.append(hdu)
+
+        self.dimensionality = len(self[0].data.shape)
+        
+        if self.dimensionality == 2:
+            self.nx, self.ny = self[0].data.shape
+        elif self.dimensionality == 3:
+            self.nx, self.ny, self.nz = self[0].data.shape
+
+        has_coords = (isinstance(img_data, FixedResolutionBuffer) or
+                      isinstance(img_data, YTCoveringGridBase))
+        
+        if center is None:
+            if units == "deg":
+                mylog.error("Please specify center=(RA, Dec) in degrees.")
+                raise ValueError
+            elif not has_coords:
+                mylog.warning("Setting center to the origin.")
+                center = [0.0]*self.dimensionality
+
+        if scale is None:
+            if units == "deg" or not has_coords:
+                mylog.error("Please specify scale=(dx,dy[,dz]) in %s." % (units))
+                raise ValueError
+
+        w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
+        w.wcs.crpix = 0.5*(np.array(self.shape)+1)
+
+        proj_type = ["linear"]*self.dimensionality
+
+        if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
+            # FRBs are a special case where we have coordinate
+            # information, so we take advantage of this and
+            # construct the WCS object
+            dx = (img_data.bounds[1]-img_data.bounds[0])/self.nx
+            dy = (img_data.bounds[3]-img_data.bounds[2])/self.ny
+            dx *= img_data.pf.units[units]
+            dy *= img_data.pf.units[units]
+            xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
+            yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
+            xctr *= img_data.pf.units[units]
+            yctr *= img_data.pf.units[units]
+            center = [xctr, yctr]
+        elif isinstance(img_data, YTCoveringGridBase):
+            dx, dy, dz = img_data.dds
+            dx *= img_data.pf.units[units]
+            dy *= img_data.pf.units[units]
+            dz *= img_data.pf.units[units]
+            center = 0.5*(img_data.left_edge+img_data.right_edge)
+            center *= img_data.pf.units[units]
+        elif units == "deg" and self.dimensionality == 2:
+            dx = -scale[0]
+            dy = scale[1]
+            proj_type = ["RA---TAN","DEC--TAN"]
+        else:
+            dx = scale[0]
+            dy = scale[1]
+            if self.dimensionality == 3: dz = scale[2]
+            
+        w.wcs.crval = center
+        w.wcs.cunit = [units]*self.dimensionality
+        w.wcs.ctype = proj_type
+        
+        if self.dimensionality == 2:
+            w.wcs.cdelt = [dx,dy]
+        elif self.dimensionality == 3:
+            w.wcs.cdelt = [dx,dy,dz]
+
+        self._set_wcs(w)
+            
+    def _set_wcs(self, wcs):
+        """
+        Set the WCS coordinate information for all images
+        with a WCS object *wcs*.
+        """
+        self.wcs = wcs
+        h = self.wcs.to_header()
+        for img in self:
+            for k, v in h.items():
+                img.header.update(k,v)
+
+    def update_all_headers(self, key, value):
+        """
+        Update the FITS headers for all images with the
+        same *key*, *value* pair.
+        """
+        for img in self: img.header.update(key,value)
+            
+    def keys(self):
+        return [f.name for f in self]
+
+    def has_key(self, key):
+        return key in self.keys()
+
+    def values(self):
+        return [self[k] for k in self.keys()]
+
+    def items(self):
+        return [(k, self[k]) for k in self.keys()]
+
+    def __add__(self, other):
+        if len(set(self.keys()).intersection(set(other.keys()))) > 0:
+            mylog.error("There are duplicate extension names! Don't know which ones you want to keep!")
+            raise KeyError
+        new_buffer = {}
+        for im1 in self:
+            new_buffer[im1.name] = im1.data
+        for im2 in other:
+            new_buffer[im2.name] = im2.data
+        new_wcs = self.wcs
+        return FITSImageBuffer(new_buffer, wcs=new_wcs)
+
+    def writeto(self, fileobj, **kwargs):
+        HDUList(self).writeto(fileobj, **kwargs)
+        
+    @property
+    def shape(self):
+        if self.dimensionality == 2:
+            return self.nx, self.ny
+        elif self.dimensionality == 3:
+            return self.nx, self.ny, self.nz
+
+    def to_glue(self, label="yt"):
+        from glue.core import DataCollection, Data, Component
+        from glue.core.coordinates import coordinates_from_header
+        from glue.qt.glue_application import GlueApplication
+
+        field_dict = dict((key,self[key].data) for key in self.keys())
+        
+        image = Data(label=label)
+        image.coords = coordinates_from_header(self.wcs.to_header())
+        for component_name in field_dict:
+            comp = Component(field_dict[component_name])
+            image.add_component(comp, component_name)
+        dc = DataCollection([image])
+
+        app = GlueApplication(dc)
+        app.start()
+
+        
+
+    
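
Beyond the FRB examples in the docstring, the buffer also accepts a plain
dict of arrays, provided a pixel scale is supplied. A small sketch (field
name and numbers invented):

    import numpy as np
    from yt.utilities.fits_image import FITSImageBuffer

    # A bare 2D array has no coordinate information, so a field name
    # and an explicit pixel scale (here 0.5 kpc per pixel) are required.
    data = {"Density": np.random.random((128, 128))}
    fib = FITSImageBuffer(data, fields=["Density"], units="kpc",
                          scale=(0.5, 0.5))
    fib.update_all_headers("Time", 0.0)
    fib.writeto("density.fits", clobber=True)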

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,9 +18,17 @@
 cimport cython
 from libc.stdlib cimport malloc, free
 
+from amr_kdtools cimport _find_node, Node
+from grid_traversal cimport VolumeContainer, PartitionedGrid, \
+    vc_index, vc_pos_index
+
 cdef extern from "math.h":
     double fabs(double x)
 
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
 cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
     if i0 > i1: return i0
     return i1
@@ -29,87 +37,407 @@
     if i0 < i1: return i0
     return i1
 
-cdef extern from "union_find.h":
-    ctypedef struct forest_node:
-        void *value
-        forest_node *parent
-        int rank
+cdef struct ContourID
 
-    forest_node* MakeSet(void* value)
-    void Union(forest_node* node1, forest_node* node2)
-    forest_node* Find(forest_node* node)
+cdef struct ContourID:
+    np.int64_t contour_id
+    ContourID *parent
+    ContourID *next
+    ContourID *prev
 
-ctypedef struct CellIdentifier:
-    np.int64_t hindex
-    int level
+cdef ContourID *contour_create(np.int64_t contour_id,
+                               ContourID *prev = NULL):
+    node = <ContourID *> malloc(sizeof(ContourID))
+    #print "Creating contour with id", contour_id
+    node.contour_id = contour_id
+    node.next = node.parent = NULL
+    node.prev = prev
+    if prev != NULL: prev.next = node
+    return node
 
-cdef class GridContourContainer:
-    cdef np.int64_t dims[3]
-    cdef np.int64_t start_indices[3]
-    cdef forest_node **join_tree
-    cdef np.int64_t ncells
+cdef void contour_delete(ContourID *node):
+    if node.prev != NULL: node.prev.next = node.next
+    if node.next != NULL: node.next.prev = node.prev
+    free(node)
 
-    def __init__(self, dimensions, indices):
-        cdef int i
-        self.ncells = 1
-        for i in range(3):
-            self.ncells *= dimensions[i]
-            self.dims[i] = dimensions[i]
-            self.start_indices[i] = indices[i]
-        self.join_tree = <forest_node **> malloc(sizeof(forest_node) 
-                                                 * self.ncells)
-        for i in range(self.ncells): self.join_tree[i] = NULL
+cdef ContourID *contour_find(ContourID *node):
+    cdef ContourID *temp, *root
+    root = node
+    while root.parent != NULL and root.parent != root:
+        root = root.parent
+    if root == root.parent: root.parent = NULL
+    while node.parent != NULL:
+        temp = node.parent
+        node.parent = root
+        node = temp
+    return root
 
+cdef void contour_union(ContourID *node1, ContourID *node2):
+    if node1.contour_id < node2.contour_id:
+        node2.parent = node1
+    elif node2.contour_id < node1.contour_id:
+        node1.parent = node2
+
+cdef struct CandidateContour
+
+cdef struct CandidateContour:
+    np.int64_t contour_id
+    np.int64_t join_id
+    CandidateContour *next
+
+cdef int candidate_contains(CandidateContour *first,
+                            np.int64_t contour_id,
+                            np.int64_t join_id = -1):
+    while first != NULL:
+        if first.contour_id == contour_id \
+            and first.join_id == join_id: return 1
+        first = first.next
+    return 0
+
+cdef CandidateContour *candidate_add(CandidateContour *first,
+                                     np.int64_t contour_id,
+                                     np.int64_t join_id = -1):
+    cdef CandidateContour *node
+    node = <CandidateContour *> malloc(sizeof(CandidateContour))
+    node.contour_id = contour_id
+    node.join_id = join_id
+    node.next = first
+    return node
+
+cdef class ContourTree:
+    # This class is essentially a Union-Find algorithm.  What we want to do is
+    # to, given a connection between two objects, identify the unique ID for
+    # those two objects.  So what we have is a collection of contours, and they
+    # eventually all get joined and contain lots of individual IDs.  But it's
+    # easy to find the *first* contour, i.e., the primary ID, for each of the
+    # subsequent IDs.
+    #
+    # This means that we can connect id 202483 to id 2472, and if id 2472 is
+    # connected to id 143, the connection will *actually* be from 202483 to
+    # 143.  In this way we can speed up joining things and knowing their
+    # "canonical" id.
+    #
+    # This is a multi-step process, since we first want to connect all of the
+    # contours, then we end up wanting to coalesce them, and ultimately we join
+    # them at the end.  The join produces a table that maps the initial to the
+    # final, and we can go through and just update all of those.
+    cdef ContourID *first
+    cdef ContourID *last
+
+    def clear(self):
+        # Here, we wipe out ALL of our contours, but not the pointers to them
+        cdef ContourID *cur, *next
+        cur = self.first
+        while cur != NULL:
+            next = cur.next
+            free(cur)
+            cur = next
+        self.first = self.last = NULL
+
+    def __init__(self):
+        self.first = self.last = NULL
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def add_contours(self, np.ndarray[np.int64_t, ndim=1] contour_ids):
+        # This adds new contours, from the given contour IDs, to the tree.
+        # Each one can be connected to a parent, as well as to next/prev in the
+        # set of contours belonging to this tree.
+        cdef int i, n
+        n = contour_ids.shape[0]
+        cdef ContourID *cur = self.last
+        for i in range(n):
+            #print i, contour_ids[i]
+            cur = contour_create(contour_ids[i], cur)
+            if self.first == NULL: self.first = cur
+        self.last = cur
+
+    def add_contour(self, np.int64_t contour_id):
+        self.last = contour_create(contour_id, self.last)
+
+    def cull_candidates(self, np.ndarray[np.int64_t, ndim=3] candidates):
+        # This function looks at each preliminary contour ID belonging to a
+        # given collection of values, and then if need be it creates a new
+        # contour for it.
+        cdef int i, j, k, ni, nj, nk, nc
+        cdef CandidateContour *first = NULL
+        cdef CandidateContour *temp
+        cdef np.int64_t cid
+        nc = 0
+        ni = candidates.shape[0]
+        nj = candidates.shape[1]
+        nk = candidates.shape[2]
+        for i in range(ni):
+            for j in range(nj):
+                for k in range(nk):
+                    cid = candidates[i,j,k]
+                    if cid == -1: continue
+                    if candidate_contains(first, cid) == 0:
+                        nc += 1
+                        first = candidate_add(first, cid)
+        cdef np.ndarray[np.int64_t, ndim=1] contours
+        contours = np.empty(nc, dtype="int64")
+        i = 0
+        # This frees the temporary candidate list and builds the final
+        # array of unique contour IDs.
+        while first != NULL:
+            contours[i] = first.contour_id
+            i += 1
+            temp = first.next
+            free(first)
+            first = temp
+        return contours
+
+    def cull_joins(self, np.ndarray[np.int64_t, ndim=2] cjoins):
+        # This coalesces contour ID pairs so that we keep only the unique
+        # name resolutions -- the .join_id from a candidate.  Many items may
+        # map to a single join_id.
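+        # For example (illustrative values, not from this changeset),
+        # cull_joins(np.array([[5, 2], [5, 2], [7, 2], [-1, 3]], dtype="int64"))
+        # returns the unique pairs [[7, 2], [5, 2]]; the order reflects the
+        # internal linked list, and rows containing a -1 are skipped.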
+        cdef int i, j, k, ni, nj, nk, nc
+        cdef CandidateContour *first = NULL
+        cdef CandidateContour *temp
+        cdef np.int64_t cid1, cid2
+        nc = 0
+        ni = cjoins.shape[0]
+        for i in range(ni):
+            cid1 = cjoins[i,0]
+            cid2 = cjoins[i,1]
+            if cid1 == -1: continue
+            if cid2 == -1: continue
+            if candidate_contains(first, cid1, cid2) == 0:
+                nc += 1
+                first = candidate_add(first, cid1, cid2)
+        cdef np.ndarray[np.int64_t, ndim=2] contours
+        contours = np.empty((nc,2), dtype="int64")
+        i = 0
+        while first != NULL:
+            contours[i,0] = first.contour_id
+            contours[i,1] = first.join_id
+            i += 1
+            temp = first.next
+            free(first)
+            first = temp
+        return contours
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def add_joins(self, np.ndarray[np.int64_t, ndim=2] join_tree):
+        cdef int i, n, ins
+        cdef np.int64_t cid1, cid2
+        # Okay, this requires lots of iteration, unfortunately
+        cdef ContourID *cur, *c1, *c2
+        n = join_tree.shape[0]
+        #print "Counting"
+        #print "Checking", self.count()
+        for i in range(n):
+            ins = 0
+            cid1 = join_tree[i, 0]
+            cid2 = join_tree[i, 1]
+            c1 = c2 = NULL
+            cur = self.first
+            #print "Looking for ", cid1, cid2
+            while c1 == NULL or c2 == NULL:
+                if cur.contour_id == cid1:
+                    c1 = contour_find(cur)
+                if cur.contour_id == cid2:
+                    c2 = contour_find(cur)
+                ins += 1
+                cur = cur.next
+                if cur == NULL: break
+            if c1 == NULL or c2 == NULL:
+                if c1 == NULL: print "  Couldn't find ", cid1
+                if c2 == NULL: print "  Couldn't find ", cid2
+                print "  Inspected ", ins
+                raise RuntimeError
+            else:
+                contour_union(c1, c2)
+
+    def count(self):
+        cdef int n = 0
+        cdef ContourID *cur = self.first
+        while cur != NULL:
+            cur = cur.next
+            n += 1
+        return n
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def export(self):
+        cdef int n = self.count()
+        cdef ContourID *cur, *root
+        cur = self.first
+        cdef np.ndarray[np.int64_t, ndim=2] joins 
+        joins = np.empty((n, 2), dtype="int64")
+        n = 0
+        while cur != NULL:
+            root = contour_find(cur)
+            joins[n, 0] = cur.contour_id
+            joins[n, 1] = root.contour_id
+            cur = cur.next
+            n += 1
+        return joins
+    
     def __dealloc__(self):
-        cdef int i
-        for i in range(self.ncells):
-            if self.join_tree[i] != NULL: free(self.join_tree[i])
-        free(self.join_tree)
+        self.clear()
 
-    #def construct_join_tree(self,
-    #            np.ndarray[np.float64_t, ndim=3] field,
-    #            np.ndarray[np.bool_t, ndim=3] mask):
-    #    # This only looks at the components of the grid that are actually
-    #    # inside this grid -- boundary conditions are handled later.
-    #    pass
+cdef class TileContourTree:
+    cdef np.float64_t min_val
+    cdef np.float64_t max_val
 
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-def construct_boundary_relationships(
-        np.ndarray[dtype=np.int64_t, ndim=3] contour_ids):
-    # We only look at the boundary and one cell in
-    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj
+    def __init__(self, np.float64_t min_val, np.float64_t max_val):
+        self.min_val = min_val
+        self.max_val = max_val
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def identify_contours(self, np.ndarray[np.float64_t, ndim=3] values,
+                                np.ndarray[np.int64_t, ndim=3] contour_ids,
+                                np.int64_t start):
+        # This looks at neighboring zone values and joins zones within a
+        # given brick that touch (by face, edge, or corner) into contours.
+        cdef int i, j, k, ni, nj, nk, offset
+        cdef int off_i, off_j, off_k, oi, ok, oj
+        cdef ContourID *cur = NULL
+        cdef ContourID *c1, *c2
+        cdef np.float64_t v
+        cdef np.int64_t nc
+        ni = values.shape[0]
+        nj = values.shape[1]
+        nk = values.shape[2]
+        nc = 0
+        cdef ContourID **container = <ContourID**> malloc(
+                sizeof(ContourID*)*ni*nj*nk)
+        for i in range(ni*nj*nk): container[i] = NULL
+        for i in range(ni):
+            for j in range(nj):
+                for k in range(nk):
+                    v = values[i,j,k]
+                    if v < self.min_val or v > self.max_val: continue
+                    nc += 1
+                    c1 = contour_create(nc + start)
+                    cur = container[i*nj*nk + j*nk + k] = c1
+                    for oi in range(3):
+                        off_i = oi - 1 + i
+                        if not (0 <= off_i < ni): continue
+                        for oj in range(3):
+                            off_j = oj - 1 + j
+                            if not (0 <= off_j < nj): continue
+                            for ok in range(3):
+                                if oi == oj == ok == 1: continue
+                                off_k = ok - 1 + k
+                                if not (0 <= off_k < nk): continue
+                                # Strictly-forward neighbors have not been
+                                # visited yet; their container entries are
+                                # still NULL, so skip them early.
+                                if off_k > k and off_j > j and off_i > i:
+                                    continue
+                                offset = off_i*nj*nk + off_j*nk + off_k
+                                c2 = container[offset]
+                                if c2 == NULL: continue
+                                c2 = contour_find(c2)
+                                contour_union(cur, c2)
+                                cur = contour_find(cur)
+        for i in range(ni):
+            for j in range(nj):
+                for k in range(nk):
+                    c1 = container[i*nj*nk + j*nk + k]
+                    if c1 == NULL: continue
+                    cur = c1
+                    c1 = contour_find(c1)
+                    contour_ids[i,j,k] = c1.contour_id
+        
+        for i in range(ni*nj*nk): 
+            if container[i] != NULL: free(container[i])
+        free(container)
+
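+# A hypothetical usage sketch (illustrative names, not part of this
+# changeset): given a 3-D float64 array "values", label every zone whose
+# value lies within [min_val, max_val], pre-filling unlabeled zones as -1:
+#
+#     tct = TileContourTree(1.0, 10.0)
+#     contour_ids = -np.ones(values.shape, dtype="int64")
+#     tct.identify_contours(values, contour_ids, 0)
+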
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def link_node_contours(Node trunk, contours, ContourTree tree,
+        np.ndarray[np.int64_t, ndim=1] node_ids):
+    cdef int n_nodes = node_ids.shape[0]
+    cdef np.int64_t node_ind
+    cdef VolumeContainer **vcs = <VolumeContainer **> malloc(
+        sizeof(VolumeContainer*) * n_nodes)
+    cdef int i
+    cdef PartitionedGrid pg
+    for i in range(n_nodes):
+        pg = contours[node_ids[i]][2]
+        vcs[i] = pg.container
+    cdef np.ndarray[np.uint8_t] examined = np.zeros(n_nodes, "uint8")
+    for nid, cinfo in sorted(contours.items(), key = lambda a: -a[1][0]):
+        level, node_ind, pg, sl = cinfo
+        construct_boundary_relationships(trunk, tree, node_ind,
+            examined, vcs, node_ids)
+        examined[node_ind] = 1
+
+cdef inline void get_spos(VolumeContainer *vc, int i, int j, int k,
+                          int axis, np.float64_t *spos):
+    spos[0] = vc.left_edge[0] + i * vc.dds[0]
+    spos[1] = vc.left_edge[1] + j * vc.dds[1]
+    spos[2] = vc.left_edge[2] + k * vc.dds[2]
+    spos[axis] += 0.5 * vc.dds[axis]
+
+cdef inline int spos_contained(VolumeContainer *vc, np.float64_t *spos):
+    cdef int i
+    for i in range(3):
+        if spos[i] < vc.left_edge[i] or spos[i] > vc.right_edge[i]: return 0
+    return 1
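+
+# For example, with vc.left_edge == (0, 0, 0) and vc.dds == (1, 1, 1),
+# get_spos(vc, -1, 2, 3, 0) yields (-0.5, 2.0, 3.0): half a cell outside
+# the low-x face, which spos_contained() can then test against a
+# neighboring container's edges.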
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void construct_boundary_relationships(Node trunk, ContourTree tree, 
+                np.int64_t nid, np.ndarray[np.uint8_t, ndim=1] examined,
+                VolumeContainer **vcs,
+                np.ndarray[np.int64_t, ndim=1] node_ids):
+    # We only look at the boundary and find the nodes next to it.
+    # Contours is a dict, keyed by the node.id.
+    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
     cdef np.int64_t c1, c2
-    nx = contour_ids.shape[0]
-    ny = contour_ids.shape[1]
-    nz = contour_ids.shape[2]
+    cdef Node adj_node
+    cdef VolumeContainer *vc1, *vc0 = vcs[nid]
+    nx = vc0.dims[0]
+    ny = vc0.dims[1]
+    nz = vc0.dims[2]
+    cdef int s = (ny*nx + nx*nz + ny*nz) * 18
     # We allocate an array of fixed (maximum) size
-    cdef int s = (ny*nx + nx*nz + ny*nz - 2) * 18
-    cdef np.ndarray[np.int64_t, ndim=2] tree = np.zeros((s, 2), dtype="int64")
+    cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
     cdef int ti = 0
-    # First x-pass
+    cdef int index
+    cdef np.float64_t spos[3]
+
+    # First the x-pass
     for i in range(ny):
         for j in range(nz):
             for offset_i in range(3):
                 oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == ny - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == nz - 1 and oj == 1: continue
-                    c1 = contour_ids[0, i, j]
-                    c2 = contour_ids[1, i + oi, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-                    c1 = contour_ids[nx-1, i, j]
-                    c2 = contour_ids[nx-2, i + oi, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
+                    # Adjust by -1 in x, then oi and oj in y and z
+                    get_spos(vc0, -1, i + oi, j + oj, 0, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, as 0 is a boundary layer
+                        index = vc_index(vc0, 0, i, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+                    # This is outside our vc
+                    get_spos(vc0, nx, i + oi, j + oj, 0, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # Here nx - 1 is our boundary layer in x
+                        index = vc_index(vc0, nx - 1, i, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
     # Now y-pass
     for i in range(nx):
         for j in range(nz):
@@ -119,43 +447,75 @@
                 if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == nz - 1 and oj == 1: continue
-                    c1 = contour_ids[i, 0, j]
-                    c2 = contour_ids[i + oi, 1, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-                    c1 = contour_ids[i, ny-1, j]
-                    c2 = contour_ids[i + oi, ny-2, j + oj]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
+                    get_spos(vc0, i + oi, -1, j + oj, 1, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, as 0 is a boundary layer
+                        index = vc_index(vc0, i, 0, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+
+                    get_spos(vc0, i + oi, ny, j + oj, 1, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # Here ny - 1 is our boundary layer in y
+                        index = vc_index(vc0, i, ny - 1, j)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+
+    # Now z-pass
     for i in range(nx):
         for j in range(ny):
             for offset_i in range(3):
                 oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == ny - 1 and oj == 1: continue
-                    c1 = contour_ids[i, j, 0]
-                    c2 = contour_ids[i + oi, j + oj, 1]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-                    c1 = contour_ids[i, j, nz-1]
-                    c2 = contour_ids[i + oi, j + oj, nz-2]
-                    if c1 > -1 and c2 > -1:
-                        tree[ti,0] = i64max(c1,c2)
-                        tree[ti,1] = i64min(c1,c2)
-                        ti += 1
-    return tree[:ti,:]
+                    get_spos(vc0, i + oi,  j + oj, -1, 2, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # This is outside our VC, as 0 is a boundary layer
+                        index = vc_index(vc0, i, j, 0)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+
+                    get_spos(vc0, i + oi, j + oj, nz, 2, spos)
+                    adj_node = _find_node(trunk, spos)
+                    vc1 = vcs[adj_node.node_ind]
+                    if examined[adj_node.node_ind] == 0 and \
+                       spos_contained(vc1, spos):
+                        # Here nz - 1 is our boundary layer in z
+                        index = vc_index(vc0, i, j, nz - 1)
+                        c1 = (<np.int64_t*>vc0.data[0])[index]
+                        index = vc_pos_index(vc1, spos)
+                        c2 = (<np.int64_t*>vc1.data[0])[index]
+                        if c1 > -1 and c2 > -1:
+                            joins[ti,0] = i64max(c1,c2)
+                            joins[ti,1] = i64min(c1,c2)
+                            ti += 1
+    if ti == 0: return
+    new_joins = tree.cull_joins(joins[:ti,:])
+    tree.add_joins(new_joins)
 
 cdef inline int are_neighbors(
             np.float64_t x1, np.float64_t y1, np.float64_t z1,
@@ -228,16 +588,23 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def update_joins(joins, np.ndarray[np.int64_t, ndim=1] contour_ids):
-    cdef np.int64_t new, old, i, oi
-    cdef int n, on
-    cdef np.ndarray[np.int64_t, ndim=1] old_set
-    #print contour_ids.shape[0]
-    n = contour_ids.shape[0]
-    for new, old_set in joins:
-        #print new
-        on = old_set.shape[0]
-        for i in range(n):
-            for oi in range(on):
-                old = old_set[oi]
-                if contour_ids[i] == old: contour_ids[i] = new
+def update_joins(np.ndarray[np.int64_t, ndim=2] joins,
+                 np.ndarray[np.int64_t, ndim=3] contour_ids,
+                 np.ndarray[np.int64_t, ndim=1] final_joins):
+    cdef int j, nj, nf
+    cdef int ci, cj, ck
+    nj = joins.shape[0]
+    nf = final_joins.shape[0]
+    for ci in range(contour_ids.shape[0]):
+        for cj in range(contour_ids.shape[1]):
+            for ck in range(contour_ids.shape[2]):
+                if contour_ids[ci,cj,ck] == -1: continue
+                for j in range(nj):
+                    if contour_ids[ci,cj,ck] == joins[j,0]:
+                        contour_ids[ci,cj,ck] = joins[j,1]
+                        break
+                for j in range(nf):
+                    if contour_ids[ci,cj,ck] == final_joins[j]:
+                        contour_ids[ci,cj,ck] = j + 1
+                        break
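
For reference, a minimal NumPy sketch of the relabeling that the new
update_joins performs (the data here is illustrative, and assumes the joins
table has no chained ids, as produced by ContourTree.export()):

    import numpy as np

    joins = np.array([[5, 2], [7, 2]], dtype="int64")  # old id -> canonical id
    final_joins = np.array([2, 3], dtype="int64")      # canonical ids to keep
    contour_ids = np.array([[[5, 7], [3, -1]]], dtype="int64")

    for old, new in joins:
        contour_ids[contour_ids == old] = new          # 5 -> 2, 7 -> 2
    for n, cid in enumerate(final_joins):
        contour_ids[contour_ids == cid] = n + 1        # 2 -> 1, 3 -> 2
    # contour_ids is now [[[1, 1], [2, -1]]]; -1 zones stay unassigned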

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/lib/amr_kdtools.pxd
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -0,0 +1,39 @@
+"""
+AMR kD-Tree Cython Tools
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+cdef class Node
+
+cdef class Node:
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public np.int64_t node_id
+    cdef public np.int64_t node_ind
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public data
+    cdef Split * split
+    cdef int level
+
+cdef int point_in_node(Node node, np.ndarray[np.float64_t, ndim=1] point)
+cdef Node _find_node(Node node, np.float64_t *point)
+cdef int _kd_is_leaf(Node node)

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -25,25 +25,11 @@
 
 DEF Nch = 4
 
-cdef struct Split:
-    int dim
-    np.float64_t pos
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef class Node:
 
-    cdef public Node left
-    cdef public Node right
-    cdef public Node parent
-    cdef public int grid
-    cdef public np.int64_t node_id
-    cdef np.float64_t left_edge[3]
-    cdef np.float64_t right_edge[3]
-    cdef public data
-    cdef Split * split
-
     def __cinit__(self, 
                   Node parent, 
                   Node left, 
@@ -152,11 +138,11 @@
 def kd_traverse(Node trunk, viewpoint=None):
     if viewpoint is None:
         for node in depth_traverse(trunk):
-            if kd_is_leaf(node) and node.grid != -1:
+            if _kd_is_leaf(node) == 1 and node.grid != -1:
                 yield node
     else:
         for node in viewpoint_traverse(trunk, viewpoint):
-            if kd_is_leaf(node) and node.grid != -1:
+            if _kd_is_leaf(node) == 1 and node.grid != -1:
                 yield node
 
 @cython.boundscheck(False)
@@ -172,7 +158,7 @@
     if not should_i_build(node, rank, size):
         return
 
-    if kd_is_leaf(node):
+    if _kd_is_leaf(node) == 1:
         insert_grid(node, gle, gre, gid, rank, size)
     else:
         less_id = gle[node.split.dim] < node.split.pos
@@ -295,7 +281,7 @@
     if not should_i_build(node, rank, size):
         return
 
-    if kd_is_leaf(node):
+    if _kd_is_leaf(node) == 1:
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
@@ -766,11 +752,16 @@
     assert has_l_child == has_r_child
     return has_l_child
 
+cdef int _kd_is_leaf(Node node):
+    if node.left is None or node.right is None:
+        return 1
+    return 0
+
 def step_depth(Node current, Node previous):
     '''
     Takes a single step in the depth-first traversal
     '''
-    if kd_is_leaf(current): # At a leaf, move back up
+    if _kd_is_leaf(current) == 1: # At a leaf, move back up
         previous = current
         current = current.parent
 
@@ -862,7 +853,7 @@
     Takes a single step in the viewpoint based traversal.  Always
     goes to the node furthest away from viewpoint first.
     '''
-    if kd_is_leaf(current): # At a leaf, move back up
+    if _kd_is_leaf(current) == 1: # At a leaf, move back up
         previous = current
         current = current.parent
     elif current.split.dim is None: # This is a dead node
@@ -913,6 +904,13 @@
         inside *= node.right_edge[i] > point[i]
     return inside
 
+cdef Node _find_node(Node node, np.float64_t *point):
+    while _kd_is_leaf(node) == 0:
+        if point[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
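+
+# For example, with a root whose split is at x = 0.5, _find_node descends
+# into node.left for a point with point[0] == 0.3 and into node.right for
+# point[0] == 0.7, repeating until it reaches a leaf.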
 
 def find_node(Node node,
               np.ndarray[np.float64_t, ndim=1] point):
@@ -920,12 +918,5 @@
     Find the AMRKDTree node enclosing a position
     """
     assert(point_in_node(node, point))
-    while not kd_is_leaf(node):
-        if point[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
+    return _find_node(node, <np.float64_t *> point.data)
 
-
-

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -17,6 +17,7 @@
 import numpy as np
 cimport numpy as np
 cimport cython
+cimport kdtree_utils
 
 cdef struct VolumeContainer:
     int n_fields
@@ -29,6 +30,20 @@
     np.float64_t idds[3]
     int dims[3]
 
+cdef class PartitionedGrid:
+    cdef public object my_data
+    cdef public object source_mask 
+    cdef public object LeftEdge
+    cdef public object RightEdge
+    cdef public int parent_grid_id
+    cdef VolumeContainer *container
+    cdef kdtree_utils.kdtree *star_list
+    cdef np.float64_t star_er
+    cdef np.float64_t star_sigma_num
+    cdef np.float64_t star_coeff
+    cdef void get_vector_field(self, np.float64_t pos[3],
+                               np.float64_t *vel, np.float64_t *vel_mag)
+
 ctypedef void sample_function(
                 VolumeContainer *vc,
                 np.float64_t v_pos[3],
@@ -45,3 +60,12 @@
                      void *data,
                      np.float64_t *return_t = *,
                      np.float64_t enter_t = *) nogil
+
+cdef inline int vc_index(VolumeContainer *vc, int i, int j, int k):
+    return (i*vc.dims[1]+j)*vc.dims[2]+k
+
+cdef inline int vc_pos_index(VolumeContainer *vc, np.float64_t *spos):
+    cdef int i, index[3]
+    for i in range(3):
+        index[i] = <int> ((spos[i] - vc.left_edge[i]) * vc.idds[i])
+    return vc_index(vc, index[0], index[1], index[2])
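
As a worked example of the two inline helpers above (values chosen for
illustration): for a container with left_edge = (0, 0, 0), dims = (4, 4, 4)
and dds = 0.25 along each axis (so idds = 4.0), the sample position
spos = (0.30, 0.90, 0.10) maps to zone index (1, 3, 0), which vc_index then
flattens to (1*4 + 3)*4 + 0 == 28.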

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -16,7 +16,6 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-cimport kdtree_utils
 #cimport healpix_interface
 from libc.stdlib cimport malloc, free, abs
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -58,16 +57,6 @@
                 void *data) nogil
 
 cdef class PartitionedGrid:
-    cdef public object my_data
-    cdef public object source_mask 
-    cdef public object LeftEdge
-    cdef public object RightEdge
-    cdef public int parent_grid_id
-    cdef VolumeContainer *container
-    cdef kdtree_utils.kdtree *star_list
-    cdef np.float64_t star_er
-    cdef np.float64_t star_sigma_num
-    cdef np.float64_t star_coeff
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 0600dcadd54ac1b7c98553b38b000e0702cb1275 -r 844bffed131e7c6d167ef676212377b906f1e042 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -65,7 +65,9 @@
                 ["yt/utilities/lib/ContourFinding.pyx",
                  "yt/utilities/lib/union_find.c"],
                 include_dirs=["yt/utilities/lib/"],
-                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/utilities/lib/amr_kdtools.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt/commits/ce455881f9c7/
Changeset:   ce455881f9c7
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-03 06:03:07
Summary:     REALLY add in the FITS frontend
Affected #:  7 files

diff -r 5478735e553f939c5a1c496f855432050f2b6a4a -r ce455881f9c7d02e52e8c991140348dbd22ea91c yt/frontends/fits/__init__.py
--- /dev/null
+++ b/yt/frontends/fits/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.fits
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 5478735e553f939c5a1c496f855432050f2b6a4a -r ce455881f9c7d02e52e8c991140348dbd22ea91c yt/frontends/fits/api.py
--- /dev/null
+++ b/yt/frontends/fits/api.py
@@ -0,0 +1,23 @@
+"""
+API for yt.frontends.fits
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      FITSGrid, \
+      FITSHierarchy, \
+      FITSStaticOutput
+
+from .fields import \
+      FITSFieldInfo, \
+      add_fits_field
+
+from .io import \
+      IOHandlerFITS

diff -r 5478735e553f939c5a1c496f855432050f2b6a4a -r ce455881f9c7d02e52e8c991140348dbd22ea91c yt/frontends/fits/data_structures.py
--- /dev/null
+++ b/yt/frontends/fits/data_structures.py
@@ -0,0 +1,298 @@
+"""
+FITS-specific data structures
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+try:
+    import astropy.io.fits as pyfits
+    import astropy.wcs as pywcs
+except ImportError:
+    pass
+
+import stat
+import numpy as np
+import weakref
+
+from yt.config import ytcfg
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridGeometryHandler
+from yt.geometry.geometry_handler import \
+    YTDataChunk
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import FITSFieldInfo, add_fits_field, KnownFITSFields
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
+     ValidateDataField, TranslationFunc
+
+class FITSGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = 0
+
+    def __repr__(self):
+        return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+    
+class FITSHierarchy(GridGeometryHandler):
+
+    grid = FITSGrid
+    
+    def __init__(self,pf,data_style='fits'):
+        self.data_style = data_style
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self._handle = pf._handle
+        self.float_type = np.float64
+        GridGeometryHandler.__init__(self,pf,data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        self.field_list = []
+        for h in self._handle[self.parameter_file.first_image:]:
+            if h.is_image:
+                self.field_list.append(h.name.lower())
+                        
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 1
+                
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+        pf = self.parameter_file # shortcut
+        
+        # Initialize to the domain left / domain right
+        self.grid_left_edge[0,:] = pf.domain_left_edge
+        self.grid_right_edge[0,:] = pf.domain_right_edge
+        self.grid_dimensions[0] = pf.domain_dimensions
+        
+        # This will become redundant, as _prepare_grid will reset it to its
+        # current value.  Note that FLASH uses 1-based indexing for refinement
+        # levels, but we do not, so we reduce the level by 1.
+        self.grid_levels.flat[:] = 0
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in xrange(self.num_grids):
+            self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
+        
+    def _populate_grid_objects(self):
+        self.grids[0]._prepare_grid()
+        self.grids[0]._setup_dx()
+        self.max_level = 0 
+
+    def _setup_derived_fields(self):
+        super(FITSHierarchy, self)._setup_derived_fields()
+        [self.parameter_file.conversion_factors[field] 
+         for field in self.field_list]
+        for field in self.field_list:
+            if field not in self.derived_field_list:
+                self.derived_field_list.append(field)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
+                
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class FITSStaticOutput(StaticOutput):
+    _hierarchy_class = FITSHierarchy
+    _fieldinfo_fallback = FITSFieldInfo
+    _fieldinfo_known = KnownFITSFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='fits',
+                 primary_header = None,
+                 sky_conversion = None,
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if isinstance(filename, pyfits.HDUList):
+            self._handle = filename
+            fname = filename.filename()
+        else:
+            self._handle = pyfits.open(filename)
+            fname = filename
+        for i, h in enumerate(self._handle):
+            if h.is_image and h.data is not None:
+                self.first_image = i
+                break
+            
+        if primary_header is None:
+            self.primary_header = self._handle[self.first_image].header
+        else:
+            self.primary_header = primary_header
+        self.shape = self._handle[self.first_image].shape
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        self.wcs = pywcs.WCS(self.primary_header)
+
+        if self.wcs.wcs.cunit[0].name in ["deg","arcsec","arcmin","mas"]:
+            self.sky_wcs = self.wcs.deepcopy()
+            if sky_conversion is None:
+                self._set_minimalist_wcs()
+            else:
+                dims = np.array(self.shape)
+                ndims = len(self.shape)
+                new_unit = sky_conversion[1]
+                new_deltx = np.abs(self.wcs.wcs.cdelt[0])*sky_conversion[0]
+                new_delty = np.abs(self.wcs.wcs.cdelt[1])*sky_conversion[0]
+                self.wcs.wcs.cdelt = [new_deltx, new_delty]
+                self.wcs.wcs.crpix = 0.5*(dims+1)
+                self.wcs.wcs.crval = [0.0]*2
+                self.wcs.wcs.cunit = [new_unit]*2
+                self.wcs.wcs.ctype = ["LINEAR"]*2
+
+        if not all(key in self.primary_header for key in
+                   ["CRPIX1","CRVAL1","CDELT1","CUNIT1"]):
+            self._set_minimalist_wcs()
+
+        StaticOutput.__init__(self, fname, data_style)
+        self.storage_filename = storage_filename
+            
+        self.refine_by = 2
+        self._set_units()
+
+    def _set_minimalist_wcs(self):
+        mylog.warning("Could not determine WCS information. Using pixel units.")
+        dims = np.array(self.shape)
+        ndims = len(dims)
+        self.wcs.wcs.crpix = 0.5*(dims+1)
+        self.wcs.wcs.cdelt = [1.,1.]
+        self.wcs.wcs.crval = 0.5*(dims+1)
+        self.wcs.wcs.cunit = ["pixel"]*ndims
+        self.wcs.wcs.ctype = ["LINEAR"]*ndims
+
+    def _set_units(self):
+        """
+        Generates the conversions to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        file_unit = self.wcs.wcs.cunit[0].name.lower()
+        if file_unit in mpc_conversion:
+            self._setup_getunits_units()
+        else:
+            self._setup_nounits_units()
+        self.parameters["Time"] = self.conversion_factors["Time"]
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.conversion_factors["Time"] / sec_conversion[unit]
+        for p, v in self._conversion_override.items():
+            self.conversion_factors[p] = v
+
+    def _setup_comoving_units(self):
+        pass
+
+    def _setup_getunits_units(self):
+        file_unit = self.wcs.wcs.cunit[0].name.lower()
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit]/mpc_conversion[file_unit]
+        self.conversion_factors["Time"] = 1.0
+                                            
+    def _setup_nounits_units(self):
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+        self.conversion_factors["Time"] = 1.0
+
+    def _parse_parameter_file(self):
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        for k, v in self.primary_header.items():
+            self.parameters[k] = v
+
+        # Determine dimensionality
+
+        self.dimensionality = self.primary_header["naxis"]
+        self.geometry = "cartesian"
+
+        self.domain_dimensions = np.array(self._handle[self.first_image].shape)
+        if self.dimensionality == 2:
+            self.domain_dimensions = np.append(self.domain_dimensions,
+                                               [int(1)])
+        ND = self.dimensionality
+        
+        le = [0.5]*ND
+        re = [float(dim)+0.5 for dim in self.domain_dimensions]
+        if ND == 2:
+            xe, ye = self.wcs.wcs_pix2world([le[0],re[0]],
+                                            [le[1],re[1]], 1)
+            self.domain_left_edge = np.array([xe[0], ye[0], 0.0])
+            self.domain_right_edge = np.array([xe[1], ye[1], 1.0]) 
+        elif ND == 3:
+            xe, ye, ze = world_edges = self.wcs.wcs_pix2world([le[0],re[0]],
+                                                              [le[1],re[1]],
+                                                              [le[2],re[2]], 1)
+            self.domain_left_edge = np.array([xe[0], ye[0], ze[0]])
+            self.domain_right_edge = np.array([xe[1], ye[1], ze[1]])
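+        # For example, a 2D 8x8 image with the minimalist (pixel-unit) WCS
+        # maps the pixel edges (0.5, 8.5) to themselves, so the domain spans
+        # [0.5, 8.5] in x and y and [0.0, 1.0] along the dummy z axis.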
+
+        # Get the simulation time
+        try:
+            self.current_time = self.parameters["time"]
+        except KeyError:
+            mylog.warning("Cannot find time")
+            self.current_time = 0.0
+        
+        # For now we'll ignore these
+        self.periodicity = (False,)*3
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def __del__(self):
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if isinstance(args[0], pyfits.HDUList):
+                for h in args[0]:
+                    if h.is_image and h.data is not None:
+                        return True
+        except:
+            pass
+        try:
+            fileh = pyfits.open(args[0])
+            for h in fileh:
+                if h.is_image and h.data is not None:
+                    fileh.close()
+                    return True
+            fileh.close()
+        except:
+            pass
+        return False
+
+
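
A hypothetical loading sketch for the new frontend (the file name and the
conversion factor are illustrative): when the image's WCS is in angular
units, sky_conversion maps it to a linear scale via a
(length per angular unit, unit name) pair:

    from yt.frontends.fits.api import FITSStaticOutput

    # assume 1 degree on the sky corresponds to 3600 kpc at the source
    pf = FITSStaticOutput("my_image.fits", sky_conversion=(3600.0, "kpc"))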

diff -r 5478735e553f939c5a1c496f855432050f2b6a4a -r ce455881f9c7d02e52e8c991140348dbd22ea91c yt/frontends/fits/fields.py
--- /dev/null
+++ b/yt/frontends/fits/fields.py
@@ -0,0 +1,31 @@
+"""
+FITS-specific fields
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.utilities.exceptions import *
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.fields.universal_fields
+KnownFITSFields = FieldInfoContainer()
+add_fits_field = KnownFITSFields.add_field
+
+FITSFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = FITSFieldInfo.add_field
+

diff -r 5478735e553f939c5a1c496f855432050f2b6a4a -r ce455881f9c7d02e52e8c991140348dbd22ea91c yt/frontends/fits/io.py
--- /dev/null
+++ b/yt/frontends/fits/io.py
@@ -0,0 +1,82 @@
+"""
+FITS-specific IO functions
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+try:
+    import astropy.io.fits as pyfits
+except ImportError:
+    pass
+
+from yt.utilities.math_utils import prec_accum
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+
+class IOHandlerFITS(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "fits"
+
+    def __init__(self, pf):
+        super(IOHandlerFITS, self).__init__(pf)
+        self.pf = pf
+        self._handle = pf._handle
+        
+    def _read_particles(self, fields_to_read, type, args, grid_list,
+            count_list, conv_factors):
+        pass
+
+    def _read_data_set(self, grid, field):
+        f = self._handle
+        if self.pf.dimensionality == 2:
+            nx,ny = f[field].data.transpose().shape
+            tr = f[field].data.transpose().reshape(nx,ny,1)
+        elif self.pf.dimensionality == 3:
+            tr = f[field].data.transpose()
+        return tr.astype("float64")
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        f = self._handle
+        if self.pf.dimensionality == 2:
+            nx,ny = f[field].data.transpose().shape
+            tr = f[field].data.transpose().reshape(nx,ny,1)[sl]
+        elif self.pf.dimensionality == 3:
+            tr = f[field].data.transpose()[sl]
+        return tr.astype("float64")
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        dt = "float64"
+        for field in fields:
+            rv[field] = np.empty(size, dtype=dt)
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            ds = f[fname].data.astype("float64")
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    if self.pf.dimensionality == 2:
+                        nx,ny = ds.transpose().shape
+                        data = ds.transpose().reshape(nx,ny,1)
+                    elif self.pf.dimensionality == 3:
+                        data = ds.transpose()
+                    ind += g.select(selector, data, rv[field], ind) # caches
+        return rv
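
The readers above transpose each HDU because FITS stores images with the
axis order reversed relative to yt's (x, y, z) indexing, and 2D images gain
a dummy z axis of length 1. A minimal sketch of the reshaping (shapes are
illustrative):

    import numpy as np

    hdu_data = np.zeros((600, 800))                  # FITS order: (ny, nx)
    nx, ny = hdu_data.transpose().shape              # -> (800, 600)
    data = hdu_data.transpose().reshape(nx, ny, 1)   # yt order: (nx, ny, 1)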


https://bitbucket.org/yt_analysis/yt/commits/69ebd406f8c0/
Changeset:   69ebd406f8c0
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-03 06:03:40
Summary:     Merging
Affected #:  7 files

diff -r 844bffed131e7c6d167ef676212377b906f1e042 -r 69ebd406f8c051e3f14b19eb1deef3946718028a yt/frontends/fits/__init__.py
--- /dev/null
+++ b/yt/frontends/fits/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.fits
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 844bffed131e7c6d167ef676212377b906f1e042 -r 69ebd406f8c051e3f14b19eb1deef3946718028a yt/frontends/fits/api.py
--- /dev/null
+++ b/yt/frontends/fits/api.py
@@ -0,0 +1,23 @@
+"""
+API for yt.frontends.fits
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      FITSGrid, \
+      FITSHierarchy, \
+      FITSStaticOutput
+
+from .fields import \
+      FITSFieldInfo, \
+      add_fits_field
+
+from .io import \
+      IOHandlerFITS

diff -r 844bffed131e7c6d167ef676212377b906f1e042 -r 69ebd406f8c051e3f14b19eb1deef3946718028a yt/frontends/fits/data_structures.py
--- /dev/null
+++ b/yt/frontends/fits/data_structures.py
@@ -0,0 +1,298 @@
+"""
+FITS-specific data structures
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+try:
+    import astropy.io.fits as pyfits
+    import astropy.wcs as pywcs
+except ImportError:
+    pass
+
+import stat
+import numpy as np
+import weakref
+
+from yt.config import ytcfg
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridGeometryHandler
+from yt.geometry.geometry_handler import \
+    YTDataChunk
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import FITSFieldInfo, add_fits_field, KnownFITSFields
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
+     ValidateDataField, TranslationFunc
+
+class FITSGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = 0
+
+    def __repr__(self):
+        return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+    
+class FITSHierarchy(GridGeometryHandler):
+
+    grid = FITSGrid
+    
+    def __init__(self,pf,data_style='fits'):
+        self.data_style = data_style
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self._handle = pf._handle
+        self.float_type = np.float64
+        GridGeometryHandler.__init__(self,pf,data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        self.field_list = []
+        for h in self._handle[self.parameter_file.first_image:]:
+            if h.is_image:
+                self.field_list.append(h.name.lower())
+                        
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 1
+                
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+        pf = self.parameter_file # shortcut
+        
+        # Initialize to the domain left / domain right
+        self.grid_left_edge[0,:] = pf.domain_left_edge
+        self.grid_right_edge[0,:] = pf.domain_right_edge
+        self.grid_dimensions[0] = pf.domain_dimensions
+        
+        # This will become redundant, as _prepare_grid will reset it to its
+        # current value.  Note that FLASH uses 1-based indexing for refinement
+        # levels, but we do not, so we reduce the level by 1.
+        self.grid_levels.flat[:] = 0
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in xrange(self.num_grids):
+            self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
+        
+    def _populate_grid_objects(self):
+        self.grids[0]._prepare_grid()
+        self.grids[0]._setup_dx()
+        self.max_level = 0 
+
+    def _setup_derived_fields(self):
+        super(FITSHierarchy, self)._setup_derived_fields()
+        [self.parameter_file.conversion_factors[field] 
+         for field in self.field_list]
+        for field in self.field_list:
+            if field not in self.derived_field_list:
+                self.derived_field_list.append(field)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
+                
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class FITSStaticOutput(StaticOutput):
+    _hierarchy_class = FITSHierarchy
+    _fieldinfo_fallback = FITSFieldInfo
+    _fieldinfo_known = KnownFITSFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='fits',
+                 primary_header = None,
+                 sky_conversion = None,
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if isinstance(filename, pyfits.HDUList):
+            self._handle = filename
+            fname = filename.filename()
+        else:
+            self._handle = pyfits.open(filename)
+            fname = filename
+        for i, h in enumerate(self._handle):
+            if h.is_image and h.data is not None:
+                self.first_image = i
+                break
+            
+        if primary_header is None:
+            self.primary_header = self._handle[self.first_image].header
+        else:
+            self.primary_header = primary_header
+        self.shape = self._handle[self.first_image].shape
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        self.wcs = pywcs.WCS(self.primary_header)
+
+        if self.wcs.wcs.cunit[0].name in ["deg","arcsec","arcmin","mas"]:
+            self.sky_wcs = self.wcs.deepcopy()
+            if sky_conversion is None:
+                self._set_minimalist_wcs()
+            else:
+                dims = np.array(self.shape)
+                ndims = len(self.shape)
+                new_unit = sky_conversion[1]
+                new_deltx = np.abs(self.wcs.wcs.cdelt[0])*sky_conversion[0]
+                new_delty = np.abs(self.wcs.wcs.cdelt[1])*sky_conversion[0]
+                self.wcs.wcs.cdelt = [new_deltx, new_delty]
+                self.wcs.wcs.crpix = 0.5*(dims+1)
+                self.wcs.wcs.crval = [0.0]*2
+                self.wcs.wcs.cunit = [new_unit]*2
+                self.wcs.wcs.ctype = ["LINEAR"]*2
+
+        if not all(key in self.primary_header for key in
+                   ["CRPIX1","CRVAL1","CDELT1","CUNIT1"]):
+            self._set_minimalist_wcs()
+
+        StaticOutput.__init__(self, fname, data_style)
+        self.storage_filename = storage_filename
+            
+        self.refine_by = 2
+        self._set_units()
+
+    def _set_minimalist_wcs(self):
+        mylog.warning("Could not determine WCS information. Using pixel units.")
+        dims = np.array(self.shape)
+        ndims = len(dims)
+        self.wcs.wcs.crpix = 0.5*(dims+1)
+        self.wcs.wcs.cdelt = [1.,1.]
+        self.wcs.wcs.crval = 0.5*(dims+1)
+        self.wcs.wcs.cunit = ["pixel"]*ndims
+        self.wcs.wcs.ctype = ["LINEAR"]*ndims
+
+    def _set_units(self):
+        """
+        Generates the conversions to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        file_unit = self.wcs.wcs.cunit[0].name.lower()
+        if file_unit in mpc_conversion:
+            self._setup_getunits_units()
+        else:
+            self._setup_nounits_units()
+        self.parameters["Time"] = self.conversion_factors["Time"]
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.conversion_factors["Time"] / sec_conversion[unit]
+        for p, v in self._conversion_override.items():
+            self.conversion_factors[p] = v
+
+    def _setup_comoving_units(self):
+        pass
+
+    def _setup_getunits_units(self):
+        file_unit = self.wcs.wcs.cunit[0].name.lower()
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit]/mpc_conversion[file_unit]
+        self.conversion_factors["Time"] = 1.0
+                                            
+    def _setup_nounits_units(self):
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+        self.conversion_factors["Time"] = 1.0
+
+    def _parse_parameter_file(self):
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        for k, v in self.primary_header.items():
+            self.parameters[k] = v
+
+        # Determine dimensionality
+
+        self.dimensionality = self.primary_header["naxis"]
+        self.geometry = "cartesian"
+
+        self.domain_dimensions = np.array(self._handle[self.first_image].shape)
+        if self.dimensionality == 2:
+            self.domain_dimensions = np.append(self.domain_dimensions,
+                                               [int(1)])
+        ND = self.dimensionality
+        
+        le = [0.5]*ND
+        re = [float(dim)+0.5 for dim in self.domain_dimensions]
+        if ND == 2:
+            xe, ye = self.wcs.wcs_pix2world([le[0],re[0]],
+                                            [le[1],re[1]], 1)
+            self.domain_left_edge = np.array([xe[0], ye[0], 0.0])
+            self.domain_right_edge = np.array([xe[1], ye[1], 1.0]) 
+        elif ND == 3:
+            xe, ye, ze = world_edges = self.wcs.wcs_pix2world([le[0],re[0]],
+                                                              [le[1],re[1]],
+                                                              [le[2],re[2]], 1)
+            self.domain_left_edge = np.array([xe[0], ye[0], ze[0]])
+            self.domain_right_edge = np.array([xe[1], ye[1], ze[1]])
+
+        # Get the simulation time
+        try:
+            self.current_time = self.parameters["time"]
+        except KeyError:
+            mylog.warning("Cannot find time")
+            self.current_time = 0.0
+        
+        # For now we'll ignore these
+        self.periodicity = (False,)*3
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def __del__(self):
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if isinstance(args[0], pyfits.HDUList):
+                for h in args[0]:
+                    if h.is_image and h.data is not None:
+                        return True
+        except:
+            pass
+        try:
+            fileh = pyfits.open(args[0])
+            for h in fileh:
+                if h.is_image and h.data is not None:
+                    fileh.close()
+                    return True
+            fileh.close()
+        except:
+            pass
+        return False
+
+
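
A minimal sketch of driving the new frontend end-to-end (the file name and
field are hypothetical; AstroPy is required, and field names are the
lowercased HDU names per _detect_fields):

    from yt.frontends.fits.api import FITSStaticOutput
    pf = FITSStaticOutput("m33_hi.fits")
    dd = pf.h.all_data()
    print dd["counts"].max()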

diff -r 844bffed131e7c6d167ef676212377b906f1e042 -r 69ebd406f8c051e3f14b19eb1deef3946718028a yt/frontends/fits/fields.py
--- /dev/null
+++ b/yt/frontends/fits/fields.py
@@ -0,0 +1,31 @@
+"""
+FITS-specific fields
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.utilities.exceptions import *
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.fields.universal_fields
+KnownFITSFields = FieldInfoContainer()
+add_fits_field = KnownFITSFields.add_field
+
+FITSFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = FITSFieldInfo.add_field
+
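
These two containers are what field registration goes through; a sketch of
adding a known field (the field name and units here are hypothetical):

    add_fits_field("counts", function=NullFunc, take_log=False,
                   units=r"\rm{counts}")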

diff -r 844bffed131e7c6d167ef676212377b906f1e042 -r 69ebd406f8c051e3f14b19eb1deef3946718028a yt/frontends/fits/io.py
--- /dev/null
+++ b/yt/frontends/fits/io.py
@@ -0,0 +1,82 @@
+"""
+FITS-specific IO functions
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+try:
+    import astropy.io.fits as pyfits
+except ImportError:
+    pass
+
+from yt.utilities.math_utils import prec_accum
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+
+class IOHandlerFITS(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "fits"
+
+    def __init__(self, pf):
+        super(IOHandlerFITS, self).__init__(pf)
+        self.pf = pf
+        self._handle = pf._handle
+        
+    def _read_particles(self, fields_to_read, type, args, grid_list,
+            count_list, conv_factors):
+        pass
+
+    def _read_data_set(self, grid, field):
+        f = self._handle
+        if self.pf.dimensionality == 2:
+            nx,ny = f[field].data.transpose().shape
+            tr = f[field].data.transpose().reshape(nx,ny,1)
+        elif self.pf.dimensionality == 3:
+            tr = f[field].data.transpose()
+        return tr.astype("float64")
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        f = self._handle
+        if self.pf.dimensionality == 2:
+            nx,ny = f[field].data.transpose().shape
+            tr = f[field].data.transpose().reshape(nx,ny,1)[sl]
+        elif self.pf.dimensionality == 3:
+            tr = f[field].data.transpose()[sl]
+        return tr.astype("float64")
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        dt = "float64"
+        for field in fields:
+            rv[field] = np.empty(size, dtype=dt)
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            ds = f[fname].data.astype("float64")
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    if self.pf.dimensionality == 2:
+                        nx,ny = ds.transpose().shape
+                        data = ds.transpose().reshape(nx,ny,1)
+                    elif self.pf.dimensionality == 3:
+                        data = ds.transpose()
+                    ind += g.select(selector, data, rv[field], ind) # caches
+        return rv
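
The 2D branches above pad each image out to a single-cell third axis so it
can flow through yt's 3D grid machinery; a standalone NumPy illustration of
that reshape:

    import numpy as np
    img = np.arange(12.0).reshape(3, 4)             # 2D image as stored in the HDU
    tr = img.transpose()                            # swap to yt's (nx, ny) ordering
    cube = tr.reshape(tr.shape[0], tr.shape[1], 1)  # (nx, ny, 1), i.e. (4, 3, 1)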


https://bitbucket.org/yt_analysis/yt/commits/996cece1f58a/
Changeset:   996cece1f58a
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-04 01:50:02
Summary:     Reverting this back since Matt had a better fix
Affected #:  1 file

diff -r 69ebd406f8c051e3f14b19eb1deef3946718028a -r 996cece1f58acc92b0e05fcdce8cd185da641309 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -6,7 +6,7 @@
 
 ### RdBu ###
 
-color_map_luts['RdBu_alt'] = \
+color_map_luts['RdBu'] = \
    (
 array([ 0.40392157,  0.4154556 ,  0.42698963,  0.43852365,  0.45005768,
         0.4615917 ,  0.47312573,  0.48465976,  0.49619378,  0.50772781,
@@ -403,7 +403,7 @@
 
 ### gist_stern ###
 
-color_map_luts['gist_stern_alt'] = \
+color_map_luts['gist_stern'] = \
    (
 array([ 0.        ,  0.0716923 ,  0.14338459,  0.21507689,  0.28676919,
         0.35846148,  0.43015378,  0.50184608,  0.57353837,  0.64523067,
@@ -582,7 +582,7 @@
 
 ### hot ###
 
-color_map_luts['hot_alt'] = \
+color_map_luts['hot'] = \
    (
 array([ 0.0416    ,  0.05189484,  0.06218969,  0.07248453,  0.08277938,
         0.09307422,  0.10336906,  0.11366391,  0.12395875,  0.1342536 ,
@@ -761,7 +761,7 @@
 
 ### jet ###
 
-color_map_luts['jet_alt'] = \
+color_map_luts['jet'] = \
    (
 array([ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
         0.        ,  0.        ,  0.        ,  0.        ,  0.        ,
@@ -7757,34 +7757,34 @@
          1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
 
-color_map_luts['B-W_LINEAR'] = color_map_luts['idl00']
+color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
 color_map_luts['BLUE'] = color_map_luts['idl01']
 color_map_luts['GRN-RED-BLU-WHT'] = color_map_luts['idl02']
-color_map_luts['RED_TEMPERATURE'] = color_map_luts['idl03']
+color_map_luts['RED TEMPERATURE'] = color_map_luts['idl03']
 color_map_luts['BLUE'] = color_map_luts['idl04']
-color_map_luts['STD_GAMMA-II'] = color_map_luts['idl05']
+color_map_luts['STD GAMMA-II'] = color_map_luts['idl05']
 color_map_luts['PRISM'] = color_map_luts['idl06']
 color_map_luts['RED-PURPLE'] = color_map_luts['idl07']
 color_map_luts['GREEN'] = color_map_luts['idl08']
 color_map_luts['GRN'] = color_map_luts['idl09']
 color_map_luts['GREEN-PINK'] = color_map_luts['idl10']
 color_map_luts['BLUE-RED'] = color_map_luts['idl11']
-color_map_luts['16_LEVEL'] = color_map_luts['idl12']
+color_map_luts['16 LEVEL'] = color_map_luts['idl12']
 color_map_luts['RAINBOW'] = color_map_luts['idl13']
 color_map_luts['STEPS'] = color_map_luts['idl14']
-color_map_luts['STERN_SPECIAL'] = color_map_luts['idl15']
+color_map_luts['STERN SPECIAL'] = color_map_luts['idl15']
 color_map_luts['Haze'] = color_map_luts['idl16']
-color_map_luts['Blue-Pastel-Red'] = color_map_luts['idl17']
+color_map_luts['Blue - Pastel - Red'] = color_map_luts['idl17']
 color_map_luts['Pastels'] = color_map_luts['idl18']
-color_map_luts['Hue_Sat_Lightness_1'] = color_map_luts['idl19']
-color_map_luts['Hue_Sat_Lightness_2'] = color_map_luts['idl20']
-color_map_luts['Hue_Sat_Value_1'] = color_map_luts['idl21']
-color_map_luts['Hue_Sat_Value_2'] = color_map_luts['idl22']
-color_map_luts['Purple-Red+Stripes'] = color_map_luts['idl23']
+color_map_luts['Hue Sat Lightness 1'] = color_map_luts['idl19']
+color_map_luts['Hue Sat Lightness 2'] = color_map_luts['idl20']
+color_map_luts['Hue Sat Value 1'] = color_map_luts['idl21']
+color_map_luts['Hue Sat Value 2'] = color_map_luts['idl22']
+color_map_luts['Purple-Red + Stripes'] = color_map_luts['idl23']
 color_map_luts['Beach'] = color_map_luts['idl24']
-color_map_luts['Mac_Style'] = color_map_luts['idl25']
-color_map_luts['Eos_A'] = color_map_luts['idl26']
-color_map_luts['Eos_B'] = color_map_luts['idl27']
+color_map_luts['Mac Style'] = color_map_luts['idl25']
+color_map_luts['Eos A'] = color_map_luts['idl26']
+color_map_luts['Eos B'] = color_map_luts['idl27']
 color_map_luts['Hardcandy'] = color_map_luts['idl28']
 color_map_luts['Nature'] = color_map_luts['idl29']
 color_map_luts['Ocean'] = color_map_luts['idl30']
@@ -7792,12 +7792,12 @@
 color_map_luts['Plasma'] = color_map_luts['idl32']
 color_map_luts['Blue-Red'] = color_map_luts['idl33']
 color_map_luts['Rainbow'] = color_map_luts['idl34']
-color_map_luts['Blue_Waves'] = color_map_luts['idl35']
+color_map_luts['Blue Waves'] = color_map_luts['idl35']
 color_map_luts['Volcano'] = color_map_luts['idl36']
 color_map_luts['Waves'] = color_map_luts['idl37']
 color_map_luts['Rainbow18'] = color_map_luts['idl38']
-color_map_luts['Rainbow+white'] = color_map_luts['idl39']
-color_map_luts['Rainbow+black'] = color_map_luts['idl40']
+color_map_luts['Rainbow + white'] = color_map_luts['idl39']
+color_map_luts['Rainbow + black'] = color_map_luts['idl40']
 
 # Create a reversed LUT for each of the above defined LUTs
 # and append a "_r" (for reversal, consistent with the MPL convention).


https://bitbucket.org/yt_analysis/yt/commits/82b562735470/
Changeset:   82b562735470
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-04 06:19:31
Summary:     Updating FITS writing throughout yt to use FITSImageBuffer. Yanking write_fits and open_in_ds9.
Affected #:  8 files

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -250,6 +250,7 @@
             hubble = getattr(pf, "hubble_constant", None)
             omega_m = getattr(pf, "omega_matter", None)
             omega_l = getattr(pf, "omega_lambda", None)
+            if hubble == 0: hubble = None
             if hubble is not None and \
                omega_m is not None and \
                omega_l is not None:
@@ -948,9 +949,9 @@
         col1 = pyfits.Column(name='ENERGY', format='E',
                              array=self["eobs"])
         col2 = pyfits.Column(name='DEC', format='D',
+                             array=self["ysky"])
+        col3 = pyfits.Column(name='RA', format='D',
                              array=self["xsky"])
-        col3 = pyfits.Column(name='RA', format='D',
-                             array=self["ysky"])
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
 

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -353,6 +353,24 @@
         else:
             self.hierarchy.save_object(self, name)
 
+    def to_glue(self, label="yt", fields=None):
+        from glue.core import DataCollection, Data, Component
+        from glue.core.coordinates import coordinates_from_header
+        from glue.qt.glue_application import GlueApplication
+
+        if fields is None: fields=sorted(self.field_data.keys())
+
+        print fields
+        
+        gdata = Data(label=label)
+        for component_name in fields:
+            comp = Component(self[component_name])
+            gdata.add_component(comp, component_name)
+        dc = DataCollection([gdata])
+
+        app = GlueApplication(dc)
+        app.start()
+
     def __reduce__(self):
         args = tuple([self.pf._hash(), self._type_name] +
                      [getattr(self, n) for n in self._con_args] +
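
A minimal usage sketch for the new to_glue hook (the sphere and field names
are hypothetical; Glue must be installed):

    sp = pf.h.sphere("c", (100.0, "kpc"))
    sp.to_glue(fields=["Density", "Temperature"])  # opens a Glue session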

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -135,7 +135,7 @@
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, \
-    apply_colormap, scale_image, write_projection, write_fits, \
+    apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -34,8 +34,7 @@
     splat_points, \
     apply_colormap, \
     scale_image, \
-    write_projection, \
-    write_fits
+    write_projection
 
 from plot_modifications import \
     PlotCallback, \

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -120,7 +120,7 @@
 # Add colormaps in _colormap_data.py that weren't defined here
 _vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
-    if k not in yt_colormaps:
+    if k not in yt_colormaps and k not in mcm.cmap_d:
         cdict = { 'red': zip(_vs,v[0],v[0]),
                   'green': zip(_vs,v[1],v[1]),
                   'blue': zip(_vs,v[2],v[2]) }
@@ -140,7 +140,7 @@
     Displays the colormaps available to yt.  Note, most functions can use
     both the matplotlib and the native yt colormaps; however, there are 
     some special functions existing within image_writer.py (e.g. write_image()
-    write_fits(), write_bitmap(), etc.), which cannot access the matplotlib
+    write_bitmap(), etc.), which cannot access the matplotlib
     colormaps.
 
     In addition to the colormaps listed, one can access the reverse of each 

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -19,7 +19,6 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
-from image_writer import write_fits
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
@@ -271,7 +270,7 @@
         output.close()
 
     def export_fits(self, filename, fields=None, clobber=False,
-                    other_keys=None, units="cm", sky_center=(0.0,0.0), D_A=None):
+                    other_keys=None, units="cm"):
         r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
@@ -291,13 +290,6 @@
             the length units that the coordinates are written in, default 'cm'
             If units are set to "deg" then assume that sky coordinates are
             requested.
-        sky_center : array_like, optional
-            Center of the image in (ra,dec) in degrees if sky coordinates
-            (units="deg") are requested.
-        D_A : float or tuple, optional
-            Angular diameter distance, given in code units as a float or
-            a tuple containing the value and the length unit. Required if
-            using sky coordinates.
         """
 
         try:
@@ -305,77 +297,19 @@
         except:
             mylog.error("You don't have AstroPy installed!")
             raise ImportError
-        
-        if units == "deg" and D_A is None:
-            mylog.error("Sky coordinates require an angular diameter distance. Please specify D_A.")    
-            raise ValueError
-    
-        if iterable(D_A):
-            dist = D_A[0]/self.pf.units[D_A[1]]
-        else:
-            dist = D_A
-
-        if other_keys is None:
-            hdu_keys = {}
-        else:
-            hdu_keys = other_keys
+        from yt.utilities.fits_image import FITSImageBuffer
 
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
 
-        coords = {}
-        nx, ny = self.buff_size
-        dx = (self.bounds[1]-self.bounds[0])/nx
-        dy = (self.bounds[3]-self.bounds[2])/ny
-        if units == "deg":  
-            coords["dx"] = -np.rad2deg(dx/dist)
-            coords["dy"] = np.rad2deg(dy/dist)
-            coords["xctr"] = sky_center[0]
-            coords["yctr"] = sky_center[1]
-            hdu_keys["MTYPE1"] = "EQPOS"
-            hdu_keys["MFORM1"] = "RA,DEC"
-            hdu_keys["CTYPE1"] = "RA---TAN"
-            hdu_keys["CTYPE2"] = "DEC--TAN"
-        else:
-            coords["dx"] = dx*self.pf.units[units]
-            coords["dy"] = dy*self.pf.units[units]
-            coords["xctr"] = 0.5*(self.bounds[0]+self.bounds[1])*self.pf.units[units]
-            coords["yctr"] = 0.5*(self.bounds[2]+self.bounds[3])*self.pf.units[units]
-        coords["units"] = units
+        fib = FITSImageBuffer(self, fields=fields, units=units)
+        if other_keys is not None:
+            for k,v in other_keys.items():
+                fib.update_all_headers(k,v)
+        fib.writeto(filename, clobber=clobber)
         
-        hdu_keys["Time"] = self.pf.current_time
-
-        data = dict([(field,self[field]) for field in fields])
-        write_fits(data, filename, clobber=clobber, coords=coords,
-                   other_keys=hdu_keys)
-
-    def open_in_ds9(self, field, take_log=True):
-        """
-        This will open a given field in the DS9 viewer.
-
-        Displaying fields can often be much easier in an interactive viewer,
-        particularly one as versatile as DS9.  This function will pixelize a
-        field and export it to an interactive DS9 package.  This requires the
-        *numdisplay* package, which is a simple download from STSci.
-        Furthermore, it presupposed that it can connect to DS9 -- that is, that
-        DS9 is already open.
-
-        Parameters
-        ----------
-        field : strings
-            This field will be pixelized and displayed.
-        take_log : boolean
-            DS9 seems to have issues with logging fields in-memory.  This will
-            pre-log the field before sending it to DS9.
-        """
-        import numdisplay
-        numdisplay.open()
-        if take_log: data=np.log10(self[field])
-        else: data=self[field]
-        numdisplay.display(data)    
-
     @property
     def limits(self):
         rv = dict(x = None, y = None, z = None)
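
For reference, a usage sketch of the slimmed-down exporter (the dataset,
field name, and header key are hypothetical):

    prj = pf.h.proj(2, "Density")
    frb = prj.to_frb((0.5, "mpc"), 512)
    frb.export_fits("density.fits", fields=["Density"], units="kpc",
                    other_keys={"OBSERVER": "yt"}, clobber=True)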

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -386,76 +386,6 @@
     canvas.print_figure(filename, dpi=dpi)
     return filename
 
-
-def write_fits(image, filename, clobber=True, coords=None,
-               other_keys=None):
-    r"""Write out floating point arrays directly to a FITS file, optionally
-    adding coordinates and header keywords.
-        
-    Parameters
-    ----------
-    image : array_like, or dict of array_like objects
-        This is either an (unscaled) array of floating point values, or a dict of
-        such arrays, shape (N,N,) to save in a FITS file. 
-    filename : string
-        This name of the FITS file to be written.
-    clobber : boolean
-        If the file exists, this governs whether we will overwrite.
-    coords : dictionary, optional
-        A set of header keys and values to write to the FITS header to set up
-        a coordinate system, which is assumed to be linear unless specified otherwise
-        in *other_keys*
-        "units": the length units
-        "xctr","yctr": the center of the image
-        "dx","dy": the pixel width in each direction                                                
-    other_keys : dictionary, optional
-        A set of header keys and values to write into the FITS header.    
-    """
-
-    try:
-        import astropy.io.fits as pyfits
-    except:
-        mylog.error("You don't have AstroPy installed!")
-        raise ImportError
-    
-    try:
-        image.keys()
-        image_dict = image
-    except:
-        image_dict = dict(yt_data=image)
-
-    hdulist = [pyfits.PrimaryHDU()]
-
-    for key in image_dict.keys():
-
-        mylog.info("Writing image block \"%s\"" % (key))
-        hdu = pyfits.ImageHDU(image_dict[key])
-        hdu.update_ext_name(key)
-        
-        if coords is not None:
-            nx, ny = image_dict[key].shape
-            hdu.header.update('CUNIT1', coords["units"])
-            hdu.header.update('CUNIT2', coords["units"])
-            hdu.header.update('CRPIX1', 0.5*(nx+1))
-            hdu.header.update('CRPIX2', 0.5*(ny+1))
-            hdu.header.update('CRVAL1', coords["xctr"])
-            hdu.header.update('CRVAL2', coords["yctr"])
-            hdu.header.update('CDELT1', coords["dx"])
-            hdu.header.update('CDELT2', coords["dy"])
-            # These are the defaults, but will get overwritten if
-            # the caller has specified them
-            hdu.header.update('CTYPE1', "LINEAR")
-            hdu.header.update('CTYPE2', "LINEAR")
-                                    
-        if other_keys is not None:
-            for k,v in other_keys.items():
-                hdu.header.update(k,v)
-
-        hdulist.append(hdu)
-
-    hdulist = pyfits.HDUList(hdulist)
-    hdulist.writeto(filename, clobber=clobber)                    
-
 def display_in_notebook(image, max_val=None):
     """
     A helper function to display images in an IPython notebook

diff -r 996cece1f58acc92b0e05fcdce8cd185da641309 -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -23,6 +23,8 @@
     and saves to *fn*.  If *h5* is True, then it will save in hdf5 format.  If
     *fits* is True, it will save in fits format.
     """
+    if (not h5 and not fits) or (h5 and fits):
+        raise ValueError("Choose either HDF5 or FITS format!")
     if h5:
         f = h5py.File('%s.h5'%fn, "w")
         f.create_dataset("R", data=image[:,:,0])
@@ -31,17 +33,17 @@
         f.create_dataset("A", data=image[:,:,3])
         f.close()
     if fits:
-        try:
-            import pyfits
-        except ImportError:
-            mylog.error('You do not have pyfits, install before attempting to use fits exporter')
-            raise
-        hdur = pyfits.PrimaryHDU(image[:,:,0])
-        hdug = pyfits.ImageHDU(image[:,:,1])
-        hdub = pyfits.ImageHDU(image[:,:,2])
-        hdua = pyfits.ImageHDU(image[:,:,3])
-        hdulist = pyfits.HDUList([hdur,hdug,hdub,hdua])
-        hdulist.writeto('%s.fits'%fn,clobber=True)
+        from yt.utilities.fits_image import FITSImageBuffer
+        data = {}
+        data["r"] = image[:,:,0]
+        data["g"] = image[:,:,1]
+        data["b"] = image[:,:,2]
+        data["a"] = image[:,:,3]
+        nx, ny = data["r"].shape
+        fib = FITSImageBuffer(data, units="pixel",
+                              center=[0.5*(nx+1), 0.5*(ny+1)],
+                              scale=[1.]*2)
+        fib.writeto('%s.fits'%fn,clobber=True)
 
 def import_rgba(name, h5=True):
     """


https://bitbucket.org/yt_analysis/yt/commits/c851d95ab986/
Changeset:   c851d95ab986
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-04 21:44:08
Summary:     Updating the PR based on Matt and Chris's suggestions
Affected #:  4 files

diff -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d -r c851d95ab9862cf24c761044953f657840b1c4fb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -353,19 +353,19 @@
         else:
             self.hierarchy.save_object(self, name)
 
-    def to_glue(self, label="yt", fields=None):
-        from glue.core import DataCollection, Data, Component
+    def to_glue(self, fields, label="yt"):
+        """
+        Takes specific *fields* in the container and exports them to
+        Glue (http://www.glueviz.org) for interactive
+        analysis. Optionally add a *label*.  
+        """
+        from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
         from glue.qt.glue_application import GlueApplication
-
-        if fields is None: fields=sorted(self.field_data.keys())
-
-        print fields
         
         gdata = Data(label=label)
         for component_name in fields:
-            comp = Component(self[component_name])
-            gdata.add_component(comp, component_name)
+            gdata.add_component(self[component_name], component_name)
         dc = DataCollection([gdata])
 
         app = GlueApplication(dc)

diff -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d -r c851d95ab9862cf24c761044953f657840b1c4fb yt/frontends/fits/__init__.py
--- a/yt/frontends/fits/__init__.py
+++ b/yt/frontends/fits/__init__.py
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.flash
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d -r c851d95ab9862cf24c761044953f657840b1c4fb yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -116,8 +116,8 @@
     GadgetFieldInfo, add_gadget_field, \
     TipsyStaticOutput, TipsyFieldInfo, add_tipsy_field
 
-from yt.frontends.fits.api import \
-    FITSStaticOutput, FITSFieldInfo, add_fits_field
+#from yt.frontends.fits.api import \
+#    FITSStaticOutput, FITSFieldInfo, add_fits_field
 
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods

diff -r 82b5627354701e86f86fbfe31b1c0ea2ce5e922d -r c851d95ab9862cf24c761044953f657840b1c4fb yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -221,7 +221,12 @@
             return self.nx, self.ny, self.nz
 
     def to_glue(self, label="yt"):
-        from glue.core import DataCollection, Data, Component
+        """
+        Takes the data in the FITSImageBuffer and exports it to
+        Glue (http://www.glueviz.org) for interactive
+        analysis. Optionally add a *label*. 
+        """
+        from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
         from glue.qt.glue_application import GlueApplication
 
@@ -229,9 +234,8 @@
         
         image = Data(label=label)
         image.coords = coordinates_from_header(self.wcs.to_header())
-        for component_name in field_dict:
-            comp = Component(field_dict[component_name])
-            image.add_component(comp, component_name)
+        for k,v in field_dict.items():
+            image.add_component(v, k)
         dc = DataCollection([image])
 
         app = GlueApplication(dc)
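
And the buffer-level analogue, a sketch assuming an existing FRB (requires
Glue; the label is arbitrary):

    fib = FITSImageBuffer(frb, fields="TempkeV", units="kpc")
    fib.to_glue(label="cluster")  # ships the images plus WCS coords to Glue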


https://bitbucket.org/yt_analysis/yt/commits/69c14f7397ba/
Changeset:   69c14f7397ba
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-12-04 21:52:23
Summary:     Merged in jzuhone/yt-3.x/yt-3.0 (pull request #668)

Improved support for reading/writing FITS files and some support for Glue.
Affected #:  21 files

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -250,6 +250,7 @@
             hubble = getattr(pf, "hubble_constant", None)
             omega_m = getattr(pf, "omega_matter", None)
             omega_l = getattr(pf, "omega_lambda", None)
+            if hubble == 0: hubble = None
             if hubble is not None and \
                omega_m is not None and \
                omega_l is not None:
@@ -948,9 +949,9 @@
         col1 = pyfits.Column(name='ENERGY', format='E',
                              array=self["eobs"])
         col2 = pyfits.Column(name='DEC', format='D',
+                             array=self["ysky"])
+        col3 = pyfits.Column(name='RA', format='D',
                              array=self["xsky"])
-        col3 = pyfits.Column(name='RA', format='D',
-                             array=self["ysky"])
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
 

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.utilities.fits_image import FITSImageBuffer
 from yt.data_objects.image_array import ImageArray
 from yt.data_objects.field_info_container import add_field
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
 from yt.utilities.definitions import inv_axis_names
-from yt.visualization.image_writer import write_fits, write_projection
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -272,32 +272,52 @@
         self.data["TeSZ"] = ImageArray(Te)
 
     @parallel_root_only
-    def write_fits(self, filename, clobber=True):
+    def write_fits(self, filename, sky_center=None, sky_scale=None, clobber=True):
         r""" Export images to a FITS file. Writes the SZ distortion in all
         specified frequencies as well as the mass-weighted temperature and the
-        optical depth. Distance units are in kpc.
+        optical depth. Distance units are in kpc, unless *sky_center*
+        and *sky_scale* are specified.
 
         Parameters
         ----------
         filename : string
             The name of the FITS file to be written. 
+        sky_center : tuple of floats, optional
+            The center of the observation in (RA, Dec) in degrees. Only used if
+            converting to sky coordinates.          
+        sky_scale : float, optional
+            Scale between degrees and kpc. Only used if
+            converting to sky coordinates.
         clobber : boolean, optional
             If the file already exists, do we overwrite?
 
         Examples
         --------
+        >>> # This example just writes out a FITS file with kpc coords
         >>> szprj.write_fits("SZbullet.fits", clobber=False)
+        >>> # This example uses sky coords
+        >>> sky_scale = 1./3600. # One arcsec per kpc
+        >>> sky_center = (30., 45.) # In degrees
+        >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
         """
-        coords = {}
-        coords["dx"] = self.dx*self.pf.units["kpc"]
-        coords["dy"] = self.dy*self.pf.units["kpc"]
-        coords["xctr"] = 0.0
-        coords["yctr"] = 0.0
-        coords["units"] = "kpc"
-        other_keys = {"Time" : self.pf.current_time}
-        write_fits(self.data, filename, clobber=clobber, coords=coords,
-                   other_keys=other_keys)
 
+        deltas = np.array([self.dx*self.pf.units["kpc"],
+                           self.dy*self.pf.units["kpc"]])
+
+        if sky_center is None:
+            center = [0.0]*2
+            units = "kpc"
+        else:
+            center = sky_center
+            units = "deg"
+            deltas *= sky_scale
+            
+        fib = FITSImageBuffer(self.data, fields=self.data.keys(),
+                              center=center, units=units,
+                              scale=deltas)
+        fib.update_all_headers("Time", self.pf.current_time)
+        fib.writeto(filename, clobber=clobber)
+        
     @parallel_root_only
     def write_png(self, filename_prefix, cmap_name="algae",
                   log_fields=None):

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -353,6 +353,24 @@
         else:
             self.hierarchy.save_object(self, name)
 
+    def to_glue(self, fields, label="yt"):
+        """
+        Takes specific *fields* in the container and exports them to
+        Glue (http://www.glueviz.org) for interactive
+        analysis. Optionally add a *label*.  
+        """
+        from glue.core import DataCollection, Data
+        from glue.core.coordinates import coordinates_from_header
+        from glue.qt.glue_application import GlueApplication
+        
+        gdata = Data(label=label)
+        for component_name in fields:
+            gdata.add_component(self[component_name], component_name)
+        dc = DataCollection([gdata])
+
+        app = GlueApplication(dc)
+        app.start()
+
     def __reduce__(self):
         args = tuple([self.pf._hash(), self._type_name] +
                      [getattr(self, n) for n in self._con_args] +

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/frontends/fits/api.py
--- /dev/null
+++ b/yt/frontends/fits/api.py
@@ -0,0 +1,23 @@
+"""
+API for yt.frontends.fits
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      FITSGrid, \
+      FITSHierarchy, \
+      FITSStaticOutput
+
+from .fields import \
+      FITSFieldInfo, \
+      add_fits_field
+
+from .io import \
+      IOHandlerFITS

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/frontends/fits/data_structures.py
--- /dev/null
+++ b/yt/frontends/fits/data_structures.py
@@ -0,0 +1,298 @@
+"""
+FITS-specific data structures
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+try:
+    import astropy.io.fits as pyfits
+    import astropy.wcs as pywcs
+except ImportError:
+    pass
+
+import stat
+import numpy as np
+import weakref
+
+from yt.config import ytcfg
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridGeometryHandler
+from yt.geometry.geometry_handler import \
+    YTDataChunk
+from yt.data_objects.static_output import \
+    StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.physical_constants import cm_per_mpc
+from .fields import FITSFieldInfo, add_fits_field, KnownFITSFields
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc, \
+     ValidateDataField, TranslationFunc
+
+class FITSGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = 0
+
+    def __repr__(self):
+        return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+    
+class FITSHierarchy(GridGeometryHandler):
+
+    grid = FITSGrid
+    
+    def __init__(self,pf,data_style='fits'):
+        self.data_style = data_style
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self._handle = pf._handle
+        self.float_type = np.float64
+        GridGeometryHandler.__init__(self,pf,data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        self.field_list = []
+        for h in self._handle[self.parameter_file.first_image:]:
+            if h.is_image:
+                self.field_list.append(h.name.lower())
+                        
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 1
+                
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+        pf = self.parameter_file # shortcut
+        
+        # Initialize to the domain left / domain right
+        self.grid_left_edge[0,:] = pf.domain_left_edge
+        self.grid_right_edge[0,:] = pf.domain_right_edge
+        self.grid_dimensions[0] = pf.domain_dimensions
+        
+        # This will become redundant, as _prepare_grid will reset it to its
+        # current value.  A FITS file always maps to a single grid at level 0.
+        self.grid_levels.flat[:] = 0
+        self.grids = np.empty(self.num_grids, dtype='object')
+        for i in xrange(self.num_grids):
+            self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
+        
+    def _populate_grid_objects(self):
+        self.grids[0]._prepare_grid()
+        self.grids[0]._setup_dx()
+        self.max_level = 0 
+
+    def _setup_derived_fields(self):
+        super(FITSHierarchy, self)._setup_derived_fields()
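+        # Indexing the defaultdict below seeds a conversion factor of 1.0
+        # for every on-disk field.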
+        [self.parameter_file.conversion_factors[field] 
+         for field in self.field_list]
+        for field in self.field_list:
+            if field not in self.derived_field_list:
+                self.derived_field_list.append(field)
+
+        for field in self.derived_field_list:
+            f = self.parameter_file.field_info[field]
+            if f._function.func_name == "_TranslationFunc":
+                # Translating an already-converted field
+                self.parameter_file.conversion_factors[field] = 1.0 
+                
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class FITSStaticOutput(StaticOutput):
+    _hierarchy_class = FITSHierarchy
+    _fieldinfo_fallback = FITSFieldInfo
+    _fieldinfo_known = KnownFITSFields
+    _handle = None
+    
+    def __init__(self, filename, data_style='fits',
+                 primary_header = None,
+                 sky_conversion = None,
+                 storage_filename = None,
+                 conversion_override = None):
+
+        if isinstance(filename, pyfits.HDUList):
+            self._handle = filename
+            fname = filename.filename()
+        else:
+            self._handle = pyfits.open(filename)
+            fname = filename
+        for i, h in enumerate(self._handle):
+            if h.is_image and h.data is not None:
+                self.first_image = i
+                break
+            
+        if primary_header is None:
+            self.primary_header = self._handle[self.first_image].header
+        else:
+            self.primary_header = primary_header
+        self.shape = self._handle[self.first_image].shape
+        if conversion_override is None: conversion_override = {}
+        self._conversion_override = conversion_override
+
+        self.wcs = pywcs.WCS(self.primary_header)
+
+        if self.wcs.wcs.cunit[0].name in ["deg","arcsec","arcmin","mas"]:
+            self.sky_wcs = self.wcs.deepcopy()
+            if sky_conversion is None:
+                self._set_minimalist_wcs()
+            else:
+                dims = np.array(self.shape)
+                ndims = len(self.shape)
+                new_unit = sky_conversion[1]
+                new_deltx = np.abs(self.wcs.wcs.cdelt[0])*sky_conversion[0]
+                new_delty = np.abs(self.wcs.wcs.cdelt[1])*sky_conversion[0]
+                self.wcs.wcs.cdelt = [new_deltx, new_delty]
+                self.wcs.wcs.crpix = 0.5*(dims+1)
+                self.wcs.wcs.crval = [0.0]*2
+                self.wcs.wcs.cunit = [new_unit]*2
+                self.wcs.wcs.ctype = ["LINEAR"]*2
+
+        if not all(key in self.primary_header for key in
+                   ["CRPIX1","CRVAL1","CDELT1","CUNIT1"]):
+            self._set_minimalist_wcs()
+
+        StaticOutput.__init__(self, fname, data_style)
+        self.storage_filename = storage_filename
+            
+        self.refine_by = 2
+        self._set_units()
+
+    def _set_minimalist_wcs(self):
+        mylog.warning("Could not determine WCS information. Using pixel units.")
+        dims = np.array(self.shape)
+        ndims = len(dims)
+        self.wcs.wcs.crpix = 0.5*(dims+1)
+        self.wcs.wcs.cdelt = [1.0]*ndims
+        self.wcs.wcs.crval = 0.5*(dims+1)
+        self.wcs.wcs.cunit = ["pixel"]*ndims
+        self.wcs.wcs.ctype = ["LINEAR"]*ndims
+
+    def _set_units(self):
+        """
+        Generates the conversion factors to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        file_unit = self.wcs.wcs.cunit[0].name.lower()
+        if file_unit in mpc_conversion:
+            self._setup_getunits_units()
+        else:
+            self._setup_nounits_units()
+        self.parameters["Time"] = self.conversion_factors["Time"]
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.conversion_factors["Time"] / sec_conversion[unit]
+        for p, v in self._conversion_override.items():
+            self.conversion_factors[p] = v
+
+    def _setup_comoving_units(self):
+        pass
+
+    def _setup_getunits_units(self):
+        file_unit = self.wcs.wcs.cunit[0].name.lower()
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit]/mpc_conversion[file_unit]
+        self.conversion_factors["Time"] = 1.0
+                                            
+    def _setup_nounits_units(self):
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+        self.conversion_factors["Time"] = 1.0
+
+    def _parse_parameter_file(self):
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        for k, v in self.primary_header.items():
+            self.parameters[k] = v
+
+        # Determine dimensionality
+
+        self.dimensionality = self.primary_header["naxis"]
+        self.geometry = "cartesian"
+
+        self.domain_dimensions = np.array(self._handle[self.first_image].shape)
+        if self.dimensionality == 2:
+            self.domain_dimensions = np.append(self.domain_dimensions,
+                                               [int(1)])
+        ND = self.dimensionality
+        
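+        # Edges in 1-based FITS pixel coordinates: the first cell spans
+        # [0.5, 1.5], so the domain's left edge sits at pixel 0.5.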
+        le = [0.5]*ND
+        re = [float(dim)+0.5 for dim in self.domain_dimensions]
+        if ND == 2:
+            xe, ye = self.wcs.wcs_pix2world([le[0],re[0]],
+                                            [le[1],re[1]], 1)
+            self.domain_left_edge = np.array([xe[0], ye[0], 0.0])
+            self.domain_right_edge = np.array([xe[1], ye[1], 1.0]) 
+        elif ND == 3:
+            xe, ye, ze = self.wcs.wcs_pix2world([le[0],re[0]],
+                                                [le[1],re[1]],
+                                                [le[2],re[2]], 1)
+            self.domain_left_edge = np.array([xe[0], ye[0], ze[0]])
+            self.domain_right_edge = np.array([xe[1], ye[1], ze[1]])
+
+        # Get the simulation time
+        try:
+            self.current_time = self.parameters["time"]
+        except KeyError:
+            mylog.warning("Cannot find time")
+            self.current_time = 0.0
+        
+        # For now we'll ignore these
+        self.periodicity = (False,)*3
+        self.current_redshift = self.omega_lambda = self.omega_matter = \
+            self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def __del__(self):
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            if isinstance(args[0], pyfits.HDUList):
+                for h in args[0]:
+                    if h.is_image and h.data is not None:
+                        return True
+        except:
+            pass
+        try:
+            fileh = pyfits.open(args[0])
+            for h in fileh:
+                if h.is_image and h.data is not None:
+                    fileh.close()
+                    return True
+            fileh.close()
+        except:
+            pass
+        return False
+
+

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/frontends/fits/fields.py
--- /dev/null
+++ b/yt/frontends/fits/fields.py
@@ -0,0 +1,31 @@
+"""
+FITS-specific fields
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.utilities.exceptions import *
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.fields.universal_fields
+KnownFITSFields = FieldInfoContainer()
+add_fits_field = KnownFITSFields.add_field
+
+FITSFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = FITSFieldInfo.add_field
+

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/frontends/fits/io.py
--- /dev/null
+++ b/yt/frontends/fits/io.py
@@ -0,0 +1,82 @@
+"""
+FITS-specific IO functions
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+try:
+    import astropy.io.fits as pyfits
+except ImportError:
+    pass
+
+from yt.utilities.math_utils import prec_accum
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
+
+class IOHandlerFITS(BaseIOHandler):
+    _particle_reader = False
+    _data_style = "fits"
+
+    def __init__(self, pf):
+        super(IOHandlerFITS, self).__init__(pf)
+        self.pf = pf
+        self._handle = pf._handle
+        
+    def _read_particles(self, fields_to_read, type, args, grid_list,
+            count_list, conv_factors):
+        pass
+
+    def _read_data_set(self, grid, field):
+        f = self._handle
+        if self.pf.dimensionality == 2:
+            nx,ny = f[field].data.transpose().shape
+            tr = f[field].data.transpose().reshape(nx,ny,1)
+        elif self.pf.dimensionality == 3:
+            tr = f[field].data.transpose()
+        return tr.astype("float64")
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        f = self._handle
+        if self.pf.dimensionality == 2:
+            nx,ny = f[field].data.transpose().shape
+            tr = f[field].data.transpose().reshape(nx,ny,1)[sl]
+        elif self.pf.dimensionality == 3:
+            tr = f[field].data.transpose()[sl]
+        return tr.astype("float64")
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        f = self._handle
+        rv = {}
+        dt = "float64"
+        for field in fields:
+            rv[field] = np.empty(size, dtype=dt)
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [f2 for f1, f2 in fields], ng)
+        for field in fields:
+            ftype, fname = field
+            ds = f[fname].data.astype("float64")
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    if self.pf.dimensionality == 2:
+                        nx,ny = ds.transpose().shape
+                        data = ds.transpose().reshape(nx,ny,1)
+                    elif self.pf.dimensionality == 3:
+                        data = ds.transpose()
+                    ind += g.select(selector, data, rv[field], ind) # caches
+        return rv

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 import h5py
+from yt.utilities.math_utils import prec_accum
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -71,6 +72,7 @@
         for field in fields:
             ftype, fname = field
             dt = f["/%s" % fname].dtype
+            dt = prec_accum[dt]
             if dt == "float32": dt = "float64"
             rv[field] = np.empty(size, dtype=dt)
         ng = sum(len(c.objs) for c in chunks)
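
For context, prec_accum maps an on-disk dtype to the wider dtype used for
read buffers, so narrow fields are accumulated at higher precision; a sketch
of the lookup introduced above (assuming float32 is among the mapped dtypes):

    import numpy as np
    from yt.utilities.math_utils import prec_accum
    dt = prec_accum[np.dtype("float32")]  # a wider accumulator dtype
    buf = np.empty(1024, dtype=dt)        # read buffer, as in the diff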

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -12,6 +12,7 @@
     config.add_subpackage("boxlib")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")
+    config.add_subpackage("fits")
     config.add_subpackage("flash")
     config.add_subpackage("gdf")
     config.add_subpackage("moab")

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -116,6 +116,9 @@
     GadgetFieldInfo, add_gadget_field, \
     TipsyStaticOutput, TipsyFieldInfo, add_tipsy_field
 
+#from yt.frontends.fits.api import \
+#    FITSStaticOutput, FITSFieldInfo, add_fits_field
+
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods
 available_analysis_modules = get_available_modules()
@@ -132,7 +135,7 @@
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, \
-    apply_colormap, scale_image, write_projection, write_fits, \
+    apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
     show_colormaps

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/utilities/fits_image.py
--- /dev/null
+++ b/yt/utilities/fits_image.py
@@ -0,0 +1,246 @@
+"""
+FITSImageBuffer Class
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import mylog, iterable
+from yt.visualization.fixed_resolution import FixedResolutionBuffer
+from yt.data_objects.construction_data_containers import YTCoveringGridBase
+
+try:
+    from astropy.io.fits import HDUList, ImageHDU, PrimaryHDU
+    from astropy import wcs as pywcs
+except ImportError:
+    pass
+
+class FITSImageBuffer(HDUList):
+
+    def __init__(self, data, fields=None, units="cm",
+                 center=None, scale=None):
+        r""" Initialize a FITSImageBuffer object.
+
+        FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
+        WCS information. It inherits from HDUList, so operations such as `writeto` are
+        enabled. Images can be constructed from ImageArrays, NumPy arrays, dicts of such
+        arrays, FixedResolutionBuffers, and YTCoveringGrids. The latter
+        two are the most powerful because WCS information can be constructed from their coordinates.
+
+        Parameters
+        ----------
+        data : FixedResolutionBuffer, YTCoveringGrid, ImageArray,
+            numpy.ndarray, or dict of such arrays
+            The data to be made into a FITS image or images.
+        fields : single string or list of strings, optional
+            The field names for the data. If *fields* is None and *data* has keys,
+            those keys will be used as the fields. If *data* is just a single
+            array, one field name must be specified.
+        units : string
+            The units of the WCS coordinates, default "cm". 
+        center : array_like, optional
+            The coordinates [xctr,yctr] of the images in units
+            *units*. If *center* is not specified, it defaults to the origin.
+        scale : tuple of floats, optional
+            Pixel scale in units of *units*. Ignored if *data* is
+            a FixedResolutionBuffer or a YTCoveringGrid; must be
+            specified otherwise, or if *units* is "deg".
+
+        Examples
+        --------
+
+        >>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
+        >>> prj = ds.h.proj(2, "TempkeV", weight_field="Density")
+        >>> frb = prj.to_frb((0.5, "mpc"), 800)
+        >>> # This example just uses the FRB and puts the coords in kpc.
+        >>> f_kpc = FITSImageBuffer(frb, fields="TempkeV", units="kpc")
+        >>> # This example specifies sky coordinates.
+        >>> scale = [1./3600.]*2 # One arcsec per pixel
+        >>> f_deg = FITSImageBuffer(frb, fields="TempkeV", units="deg",
+                                    scale=scale, center=(30., 45.))
+        >>> f_deg.writeto("temp.fits")
+        """
+        
+        super(FITSImageBuffer, self).__init__()
+
+        if isinstance(fields, basestring): fields = [fields]
+            
+        exclude_fields = ['x','y','z','px','py','pz',
+                          'pdx','pdy','pdz','weight_field']
+        
+        if hasattr(data, 'keys'):
+            img_data = data
+        else:
+            img_data = {}
+            if fields is None:
+                mylog.error("Please specify a field name for this array.")
+                raise KeyError
+            img_data[fields[0]] = data
+
+        if fields is None: fields = img_data.keys()
+        if len(fields) == 0:
+            mylog.error("Please specify one or more fields to write.")
+            raise KeyError
+
+        first = True
+    
+        for key in fields:
+            if key not in exclude_fields:
+                mylog.info("Making a FITS image of field %s" % (key))
+                if first:
+                    hdu = PrimaryHDU(np.array(img_data[key]))
+                    hdu.name = key
+                    first = False
+                else:
+                    hdu = ImageHDU(np.array(img_data[key]), name=key)
+                self.append(hdu)
+
+        self.dimensionality = len(self[0].data.shape)
+        
+        if self.dimensionality == 2:
+            self.nx, self.ny = self[0].data.shape
+        elif self.dimensionality == 3:
+            self.nx, self.ny, self.nz = self[0].data.shape
+
+        has_coords = isinstance(img_data, (FixedResolutionBuffer,
+                                           YTCoveringGridBase))
+        
+        if center is None:
+            if units == "deg":
+                mylog.error("Please specify center=(RA, Dec) in degrees.")
+                raise ValueError
+            elif not has_coords:
+                mylog.warning("Setting center to the origin.")
+                center = [0.0]*self.dimensionality
+
+        if scale is None:
+            if units == "deg" or not has_coords:
+                mylog.error("Please specify scale=(dx,dy[,dz]) in %s." % (units))
+                raise ValueError
+
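+        # Place the reference pixel at the image center; FITS pixel
+        # indices are 1-based, hence the 0.5*(n+1)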
+        w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
+        w.wcs.crpix = 0.5*(np.array(self.shape)+1)
+
+        proj_type = ["linear"]*self.dimensionality
+
+        if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
+            # FRBs are a special case where we have coordinate
+            # information, so we take advantage of this and
+            # construct the WCS object
+            dx = (img_data.bounds[1]-img_data.bounds[0])/self.nx
+            dy = (img_data.bounds[3]-img_data.bounds[2])/self.ny
+            dx *= img_data.pf.units[units]
+            dy *= img_data.pf.units[units]
+            xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
+            yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
+            xctr *= img_data.pf.units[units]
+            yctr *= img_data.pf.units[units]
+            center = [xctr, yctr]
+        elif isinstance(img_data, YTCoveringGridBase):
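+            # Covering grids also carry coordinate information: take the
+            # pixel scale from the cell widths and center on the grid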
+            dx, dy, dz = img_data.dds
+            dx *= img_data.pf.units[units]
+            dy *= img_data.pf.units[units]
+            dz *= img_data.pf.units[units]
+            center = 0.5*(img_data.left_edge+img_data.right_edge)
+            center *= img_data.pf.units[units]
+        elif units == "deg" and self.dimensionality == 2:
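+            # Sky coordinates: tangent-plane (TAN) projection, with the
+            # RA scale negated per the FITS convention; center and scale
+            # were validated above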
+            dx = -scale[0]
+            dy = scale[1]
+            proj_type = ["RA---TAN","DEC--TAN"]
+        else:
+            dx = scale[0]
+            dy = scale[1]
+            if self.dimensionality == 3: dz = scale[2]
+            
+        w.wcs.crval = center
+        w.wcs.cunit = [units]*self.dimensionality
+        w.wcs.ctype = proj_type
+        
+        if self.dimensionality == 2:
+            w.wcs.cdelt = [dx,dy]
+        elif self.dimensionality == 3:
+            w.wcs.cdelt = [dx,dy,dz]
+
+        self._set_wcs(w)
+            
+    def _set_wcs(self, wcs):
+        """
+        Set the WCS coordinate information for all images
+        with a WCS object *wcs*.
+        """
+        self.wcs = wcs
+        h = self.wcs.to_header()
+        for img in self:
+            for k, v in h.items():
+                img.header.update(k,v)
+
+    def update_all_headers(self, key, value):
+        """
+        Update the FITS headers for all images with the
+        same *key*, *value* pair.
+        """
+        for img in self: img.header.update(key,value)
+            
+    def keys(self):
+        return [f.name for f in self]
+
+    def has_key(self, key):
+        return key in self.keys()
+
+    def values(self):
+        return [self[k] for k in self.keys()]
+
+    def items(self):
+        return [(k, self[k]) for k in self.keys()]
+
+    def __add__(self, other):
+        if len(set(self.keys()).intersection(set(other.keys()))) > 0:
+            mylog.error("There are duplicate extension names! Don't know "
+                        "which ones you want to keep!")
+            raise KeyError
+        new_buffer = {}
+        for im1 in self:
+            new_buffer[im1.name] = im1.data
+        for im2 in other:
+            new_buffer[im2.name] = im2.data
+        # Construct the combined buffer with placeholder coordinates, then
+        # overwrite them with this buffer's WCS (the constructor no longer
+        # takes a *wcs* keyword).
+        new_fib = FITSImageBuffer(new_buffer,
+                                  center=list(self.wcs.wcs.crval),
+                                  scale=list(self.wcs.wcs.cdelt))
+        new_fib._set_wcs(self.wcs)
+        return new_fib
+
+    def writeto(self, fileobj, **kwargs):
+        HDUList(self).writeto(fileobj, **kwargs)
+        
+    @property
+    def shape(self):
+        if self.dimensionality == 2:
+            return self.nx, self.ny
+        elif self.dimensionality == 3:
+            return self.nx, self.ny, self.nz
+
+    def to_glue(self, label="yt"):
+        """
+        Takes the data in the FITSImageBuffer and exports it to
+        Glue (http://www.glueviz.org) for interactive
+        analysis. Optionally add a *label*. 
+        """
+        from glue.core import DataCollection, Data
+        from glue.core.coordinates import coordinates_from_header
+        from glue.qt.glue_application import GlueApplication
+
+        field_dict = dict((key,self[key].data) for key in self.keys())
+        
+        image = Data(label=label)
+        image.coords = coordinates_from_header(self.wcs.to_header())
+        for k,v in field_dict.items():
+            image.add_component(v, k)
+        dc = DataCollection([image])
+
+        app = GlueApplication(dc)
+        app.start()
+
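
A quick sketch of the dict-like interface above (the field names and
array values are hypothetical; the import path is the one used later in
this changeset):

>>> import numpy as np
>>> from yt.utilities.fits_image import FITSImageBuffer
>>> # A plain dict of arrays carries no coordinates, so center and
>>> # scale must be given explicitly.
>>> data = {"density": np.random.random((64, 64)),
...         "temperature": np.random.random((64, 64))}
>>> fib = FITSImageBuffer(data, units="kpc",
...                       center=[0.0, 0.0], scale=[1.0, 1.0])
>>> fib.keys()  # one extension name per field
>>> fib.writeto("fields.fits", clobber=True)

The same buffer can be handed to Glue for interactive inspection with
fib.to_glue(), provided Glue is installed.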

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -34,8 +34,7 @@
     splat_points, \
     apply_colormap, \
     scale_image, \
-    write_projection, \
-    write_fits
+    write_projection
 
 from plot_modifications import \
     PlotCallback, \

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -120,7 +120,7 @@
 # Add colormaps in _colormap_data.py that weren't defined here
 _vs = np.linspace(0,1,255)
 for k,v in _cm.color_map_luts.iteritems():
-    if k not in yt_colormaps:
+    if k not in yt_colormaps and k not in mcm.cmap_d:
         cdict = { 'red': zip(_vs,v[0],v[0]),
                   'green': zip(_vs,v[1],v[1]),
                   'blue': zip(_vs,v[2],v[2]) }
@@ -140,7 +140,7 @@
     Displays the colormaps available to yt.  Note, most functions can use
     both the matplotlib and the native yt colormaps; however, there are 
     some special functions existing within image_writer.py (e.g. write_image(),
-    write_fits(), write_bitmap(), etc.), which cannot access the matplotlib
+    write_bitmap(), etc.), which cannot access the matplotlib
     colormaps.
 
     In addition to the colormaps listed, one can access the reverse of each 

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -19,7 +19,6 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
-from image_writer import write_fits
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
     pixelize_cylinder
@@ -271,7 +270,7 @@
         output.close()
 
     def export_fits(self, filename, fields=None, clobber=False,
-                    other_keys=None, units="cm", sky_center=(0.0,0.0), D_A=None):
+                    other_keys=None, units="cm"):
         r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
@@ -291,13 +290,6 @@
             the length units that the coordinates are written in, default 'cm'
             If units are set to "deg" then assume that sky coordinates are
             requested.
-        sky_center : array_like, optional
-            Center of the image in (ra,dec) in degrees if sky coordinates
-            (units="deg") are requested.
-        D_A : float or tuple, optional
-            Angular diameter distance, given in code units as a float or
-            a tuple containing the value and the length unit. Required if
-            using sky coordinates.
         """
 
         try:
@@ -305,77 +297,19 @@
         except:
             mylog.error("You don't have AstroPy installed!")
             raise ImportError
-        
-        if units == "deg" and D_A is None:
-            mylog.error("Sky coordinates require an angular diameter distance. Please specify D_A.")    
-            raise ValueError
-    
-        if iterable(D_A):
-            dist = D_A[0]/self.pf.units[D_A[1]]
-        else:
-            dist = D_A
-
-        if other_keys is None:
-            hdu_keys = {}
-        else:
-            hdu_keys = other_keys
+        from yt.utilities.fits_image import FITSImageBuffer
 
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
 
-        coords = {}
-        nx, ny = self.buff_size
-        dx = (self.bounds[1]-self.bounds[0])/nx
-        dy = (self.bounds[3]-self.bounds[2])/ny
-        if units == "deg":  
-            coords["dx"] = -np.rad2deg(dx/dist)
-            coords["dy"] = np.rad2deg(dy/dist)
-            coords["xctr"] = sky_center[0]
-            coords["yctr"] = sky_center[1]
-            hdu_keys["MTYPE1"] = "EQPOS"
-            hdu_keys["MFORM1"] = "RA,DEC"
-            hdu_keys["CTYPE1"] = "RA---TAN"
-            hdu_keys["CTYPE2"] = "DEC--TAN"
-        else:
-            coords["dx"] = dx*self.pf.units[units]
-            coords["dy"] = dy*self.pf.units[units]
-            coords["xctr"] = 0.5*(self.bounds[0]+self.bounds[1])*self.pf.units[units]
-            coords["yctr"] = 0.5*(self.bounds[2]+self.bounds[3])*self.pf.units[units]
-        coords["units"] = units
+        fib = FITSImageBuffer(self, fields=fields, units=units)
+        if other_keys is not None:
+            for k,v in other_keys.items():
+                fib.update_all_headers(k,v)
+        fib.writeto(filename, clobber=clobber)
         
-        hdu_keys["Time"] = self.pf.current_time
-
-        data = dict([(field,self[field]) for field in fields])
-        write_fits(data, filename, clobber=clobber, coords=coords,
-                   other_keys=hdu_keys)
-
-    def open_in_ds9(self, field, take_log=True):
-        """
-        This will open a given field in the DS9 viewer.
-
-        Displaying fields can often be much easier in an interactive viewer,
-        particularly one as versatile as DS9.  This function will pixelize a
-        field and export it to an interactive DS9 package.  This requires the
-        *numdisplay* package, which is a simple download from STSci.
-        Furthermore, it presupposed that it can connect to DS9 -- that is, that
-        DS9 is already open.
-
-        Parameters
-        ----------
-        field : strings
-            This field will be pixelized and displayed.
-        take_log : boolean
-            DS9 seems to have issues with logging fields in-memory.  This will
-            pre-log the field before sending it to DS9.
-        """
-        import numdisplay
-        numdisplay.open()
-        if take_log: data=np.log10(self[field])
-        else: data=self[field]
-        numdisplay.display(data)    
-
     @property
     def limits(self):
         rv = dict(x = None, y = None, z = None)
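
To illustrate the slimmed-down export_fits above, a minimal sketch
(the dataset and field follow the docstring examples earlier in this
changeset; the output filename and header keyword are assumptions):

>>> ds = load("sloshing_nomag2_hdf5_plt_cnt_0150")
>>> prj = ds.h.proj(2, "TempkeV", weight_field="Density")
>>> frb = prj.to_frb((0.5, "mpc"), 800)
>>> # Delegates to FITSImageBuffer; extra header keywords go in
>>> # other_keys instead of the removed sky-coordinate arguments.
>>> frb.export_fits("projection.fits", fields=["TempkeV"],
...                 clobber=True, units="kpc",
...                 other_keys={"OBSERVER": "yt"})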

diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -386,76 +386,6 @@
     canvas.print_figure(filename, dpi=dpi)
     return filename
 
-
-def write_fits(image, filename, clobber=True, coords=None,
-               other_keys=None):
-    r"""Write out floating point arrays directly to a FITS file, optionally
-    adding coordinates and header keywords.
-        
-    Parameters
-    ----------
-    image : array_like, or dict of array_like objects
-        This is either an (unscaled) array of floating point values, or a dict of
-        such arrays, shape (N,N,) to save in a FITS file. 
-    filename : string
-        This name of the FITS file to be written.
-    clobber : boolean
-        If the file exists, this governs whether we will overwrite.
-    coords : dictionary, optional
-        A set of header keys and values to write to the FITS header to set up
-        a coordinate system, which is assumed to be linear unless specified otherwise
-        in *other_keys*
-        "units": the length units
-        "xctr","yctr": the center of the image
-        "dx","dy": the pixel width in each direction                                                
-    other_keys : dictionary, optional
-        A set of header keys and values to write into the FITS header.    
-    """
-
-    try:
-        import astropy.io.fits as pyfits
-    except:
-        mylog.error("You don't have AstroPy installed!")
-        raise ImportError
-    
-    try:
-        image.keys()
-        image_dict = image
-    except:
-        image_dict = dict(yt_data=image)
-
-    hdulist = [pyfits.PrimaryHDU()]
-
-    for key in image_dict.keys():
-
-        mylog.info("Writing image block \"%s\"" % (key))
-        hdu = pyfits.ImageHDU(image_dict[key])
-        hdu.update_ext_name(key)
-        
-        if coords is not None:
-            nx, ny = image_dict[key].shape
-            hdu.header.update('CUNIT1', coords["units"])
-            hdu.header.update('CUNIT2', coords["units"])
-            hdu.header.update('CRPIX1', 0.5*(nx+1))
-            hdu.header.update('CRPIX2', 0.5*(ny+1))
-            hdu.header.update('CRVAL1', coords["xctr"])
-            hdu.header.update('CRVAL2', coords["yctr"])
-            hdu.header.update('CDELT1', coords["dx"])
-            hdu.header.update('CDELT2', coords["dy"])
-            # These are the defaults, but will get overwritten if
-            # the caller has specified them
-            hdu.header.update('CTYPE1', "LINEAR")
-            hdu.header.update('CTYPE2', "LINEAR")
-                                    
-        if other_keys is not None:
-            for k,v in other_keys.items():
-                hdu.header.update(k,v)
-
-        hdulist.append(hdu)
-
-    hdulist = pyfits.HDUList(hdulist)
-    hdulist.writeto(filename, clobber=clobber)                    
-
 def display_in_notebook(image, max_val=None):
     """
     A helper function to display images in an IPython notebook

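Former callers of the removed write_fits can reproduce its behavior
with FITSImageBuffer; a hedged sketch (the field name, pixel scale, and
header keyword are illustrative only):

>>> import numpy as np
>>> from yt.utilities.fits_image import FITSImageBuffer
>>> image = {"yt_data": np.random.random((128, 128))}
>>> # center/scale replace the old coords dict entries
>>> # ("xctr", "yctr", "dx", "dy"); units replaces coords["units"].
>>> fib = FITSImageBuffer(image, units="cm",
...                       center=[0.0, 0.0], scale=[0.1, 0.1])
>>> fib.update_all_headers("BUNIT", "erg/s")  # replaces other_keys
>>> fib.writeto("data.fits", clobber=True)
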
diff -r 9537359f38e2c9f5245e7342bf0de2185e522f37 -r 69c14f7397bab48edf255be9b9ef07e2a7b143f3 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -23,6 +23,8 @@
     and saves to *fn*.  If *h5* is True, then it will save in hdf5 format.  If
     *fits* is True, it will save in fits format.
     """
+    if (not h5 and not fits) or (h5 and fits):
+        raise ValueError("Choose either HDF5 or FITS format!")
     if h5:
         f = h5py.File('%s.h5'%fn, "w")
         f.create_dataset("R", data=image[:,:,0])
@@ -31,17 +33,17 @@
         f.create_dataset("A", data=image[:,:,3])
         f.close()
     if fits:
-        try:
-            import pyfits
-        except ImportError:
-            mylog.error('You do not have pyfits, install before attempting to use fits exporter')
-            raise
-        hdur = pyfits.PrimaryHDU(image[:,:,0])
-        hdug = pyfits.ImageHDU(image[:,:,1])
-        hdub = pyfits.ImageHDU(image[:,:,2])
-        hdua = pyfits.ImageHDU(image[:,:,3])
-        hdulist = pyfits.HDUList([hdur,hdug,hdub,hdua])
-        hdulist.writeto('%s.fits'%fn,clobber=True)
+        from yt.utilities.fits_image import FITSImageBuffer
+        data = {}
+        data["r"] = image[:,:,0]
+        data["g"] = image[:,:,1]
+        data["b"] = image[:,:,2]
+        data["a"] = image[:,:,3]
+        nx, ny = data["r"].shape
+        fib = FITSImageBuffer(data, units="pixel",
+                              center=[0.5*(nx+1), 0.5*(ny+1)],
+                              scale=[1.]*2)
+        fib.writeto('%s.fits'%fn,clobber=True)
 
 def import_rgba(name, h5=True):
     """

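A short sketch of the new FITS branch of export_rgba (the signature
export_rgba(image, fn, h5=True, fits=False) is assumed from the
surrounding code; the random image is illustrative only):

>>> import numpy as np
>>> from yt.visualization.volume_rendering.image_handling import export_rgba
>>> image = np.random.random((256, 256, 4))  # an RGBA rendering
>>> # Exactly one of h5/fits may be chosen after this change.
>>> export_rgba(image, "rendering", h5=False, fits=True)
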
Repository URL: https://bitbucket.org/yt_analysis/yt/
