[yt-svn] commit/yt: 133 new changesets

commits-noreply at bitbucket.org
Thu Jul 17 06:16:45 PDT 2014


133 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/a81c6d6f7808/
Changeset:   a81c6d6f7808
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-02 23:17:53
Summary:     Quiet things down and put in a maximum number of chunks to concatenate, to avoid giant reads for now. This needs to be parameterized.
Affected #:  1 file

diff -r b51ac6e82937bfe567dfd1989ef40838d64def07 -r a81c6d6f7808a9efd197450ab56ef413dc07538e yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -418,11 +418,11 @@
         """
         mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
 
-        print 'Getting data from ileft to iright:',  ileft, iright
+        #print 'Getting data from ileft to iright:',  ileft, iright
 
-        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
                            ileft[1]:iright[1]+1,
-                           ileft[2]:iright[2]+1]
+                           ileft[0]:iright[0]+1]
 
         X = X.ravel()
         Y = Y.ravel()
@@ -438,7 +438,8 @@
         print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
         indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
-        indices = indices[indices < self.indexdata['index'].shape[0]]
+        # Here we sort the indices to batch consecutive reads together.
+        indices = np.sort(indices[indices < self.indexdata['index'].shape[0]])
         return indices
 
     def get_bbox(self, left, right):
@@ -472,7 +473,7 @@
             while nexti < len(inds):
                 nextind = inds[nexti]
                 #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
-                if base + length == self.indexdata['base'][nextind]:
+                if combined < 1024 and base + length == self.indexdata['base'][nextind]:
                     length += self.indexdata['len'][nextind]
                     i += 1
                     nexti += 1
@@ -507,6 +508,16 @@
         right_key = min(right_key, self.indexdata['index'][-1])
         length = self.indexdata['base'][right_key] + \
             self.indexdata['len'][right_key] - base
+        if length > 0:
+            print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        return self.get_data(slice(base, base + length), fields)
+
+    def get_key_data(self, key, fields):
+        max_key = self.indexdata['index'][-1]
+        if left_key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
+        base = self.indexdata['base'][left_key]
+        length = self.indexdata['len'][left_key] - base
         print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
         return self.get_data(slice(base, base + length), fields)
 
@@ -562,3 +573,5 @@
         cell_iarr = np.array(cell_iarr)
         lk, rk =self.get_key_bounds(level, cell_iarr)
         return self.get_contiguous_chunk(lk, rk, fields)
+    def get_cell_width(self, level):
+        return self.domain_width / 2**level
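
The batching in iter_data concatenates index cells whose on-disk extents are adjacent, and the new "combined < 1024" guard caps how many cells get merged into a single read. A minimal standalone sketch of the same idea, with hypothetical base/length arrays standing in for indexdata['base'] and indexdata['len']:

import numpy as np

def batch_reads(inds, base, length, max_cat=1024):
    """Group sorted cell indices into contiguous (start, size) reads."""
    reads = []
    i = 0
    while i < len(inds):
        start = int(base[inds[i]])
        size = int(length[inds[i]])
        combined = 0
        # Concatenate following cells whose data begins exactly where the
        # current read ends, but stop after max_cat concatenations so a
        # single read cannot grow without bound.
        while (i + 1 < len(inds) and combined < max_cat
               and start + size == base[inds[i + 1]]):
            size += int(length[inds[i + 1]])
            combined += 1
            i += 1
        if size > 0:          # skip empty cells, as the commit does
            reads.append((start, size))
        i += 1
    return reads

# Cells 0 and 1 are laid out back to back, cell 3 is not.
base = np.array([0, 10, 30, 60])
length = np.array([10, 20, 5, 4])
print(batch_reads([0, 1, 3], base, length))   # [(0, 30), (60, 4)]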


https://bitbucket.org/yt_analysis/yt/commits/331f992753b6/
Changeset:   331f992753b6
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-06 20:46:16
Summary:     Faster, vectorized bit interleaving. Also handle the ibbox more correctly with an mgrid using a complex step.
Affected #:  2 files

diff -r a81c6d6f7808a9efd197450ab56ef413dc07538e -r 331f992753b64a345e08c21c75001adb5498e608 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -122,8 +122,7 @@
                 self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
-        else:
-            return self._sindex
+        return self._sindex
 
     def _set_code_unit_attributes(self):
         self.length_unit = self.quan(1.0, "kpc")

diff -r a81c6d6f7808a9efd197450ab56ef413dc07538e -r 331f992753b64a345e08c21c75001adb5498e608 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -363,10 +363,40 @@
         self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
         print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
 
+    def spread_bits(self, ival, level=None):
+        if level is None:
+            level = self.level
+        res = 0
+        for i in range(level):
+            res |= ((ival>>i)&1)<<(i*3);
+        return res
+
     def get_key(self, iarr, level=None):
         if level is None:
             level = self.level
         i1, i2, i3 = iarr
+        return self.spread_bits(i1, level) | self.spread_bits(i2, level) << 1 | self.spread_bits(i3, level) << 2
+
+    def spread_bitsv(self, ival, level=None):
+        if level is None:
+            level = self.level
+        res = np.zeros_like(ival, dtype='int64')
+        for i in range(level):
+            res |= np.bitwise_and((ival>>i), 1)<<(i*3);
+        return res
+
+    def get_keyv(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
+        return np.bitwise_or(
+            np.bitwise_or(self.spread_bits(i1, level) , self.spread_bits(i2, level) << 1 ),
+            self.spread_bits(i3, level) << 2)
+
+    def get_key_slow(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
         rep1 = np.binary_repr(i1, width=self.level)
         rep2 = np.binary_repr(i2, width=self.level)
         rep3 = np.binary_repr(i3, width=self.level)
@@ -420,13 +450,16 @@
 
         #print 'Getting data from ileft to iright:',  ileft, iright
 
-        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
-                           ileft[1]:iright[1]+1,
-                           ileft[0]:iright[0]+1]
+        ix, iy, iz = (iright-ileft)*1j
+        print 'IBBOX:', ileft, iright, ix, iy, iz
 
-        X = X.ravel()
-        Y = Y.ravel()
-        Z = Z.ravel()
+        Z, Y, X = np.mgrid[ileft[2]:iright[2]:ix,
+                           ileft[1]:iright[1]:iy,
+                           ileft[0]:iright[0]:iz]
+
+        X = X.astype('int64').ravel()
+        Y = Y.astype('int64').ravel()
+        Z = Z.astype('int64').ravel()
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
         X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
@@ -437,9 +470,11 @@
 
         print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
-        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        indices = self.get_keyv([X, Y, Z])
+        #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
         # Here we sort the indices to batch consecutive reads together.
         indices = np.sort(indices[indices < self.indexdata['index'].shape[0]])
+        indices = indices[self.indexdata['len'][indices] > 0]
         return indices
 
     def get_bbox(self, left, right):
@@ -470,7 +505,7 @@
             # Concatenate aligned reads
             nexti = i+1
             combined = 0
-            while nexti < len(inds):
+            while nexti < num_inds:
                 nextind = inds[nexti]
                 #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
                 if combined < 1024 and base + length == self.indexdata['base'][nextind]:
@@ -484,9 +519,10 @@
             chunk = slice(base, base+length)
             print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
             num_reads += 1
-            data = self.get_data(chunk, fields)
-            yield data
-            del data
+            if length > 0:
+                data = self.get_data(chunk, fields)
+                yield data
+                del data
             i += 1
         print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
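
spread_bitsv and get_keyv vectorize the Morton-key construction: bit i of each axis index is spread out to bit 3*i of the key, with the y bits shifted up by one position and z by two. (As committed, get_keyv appears to still call the scalar spread_bits rather than spread_bitsv; the sketch below uses the vectorized form, which looks like the intent.) A rough standalone sketch, assuming integer index arrays and a fixed level:

import numpy as np

def spread_bitsv(ival, level):
    """Spread the low `level` bits of each value so bit i lands at bit 3*i."""
    res = np.zeros_like(ival, dtype='int64')
    for i in range(level):
        res |= np.bitwise_and(ival >> i, 1) << (i * 3)
    return res

def get_keyv(i1, i2, i3, level):
    """Interleave three index arrays into Morton keys (x lowest, then y, then z)."""
    return (spread_bitsv(i1, level)
            | (spread_bitsv(i2, level) << 1)
            | (spread_bitsv(i3, level) << 2))

i1 = np.array([1, 2, 3])
i2 = np.array([0, 1, 2])
i3 = np.array([0, 0, 1])
print(get_keyv(i1, i2, i3, level=3))   # [ 1 10 29]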
 


https://bitbucket.org/yt_analysis/yt/commits/bc062a85a993/
Changeset:   bc062a85a993
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-06 20:48:30
Summary:     Fixes for sindex files that have empty cells. The start/end keys have to be squeezed together to get the correct contiguous read. Also implement get_padded_bbox_data to fetch a main cell at a particular level plus a padding region.
Affected #:  1 file

diff -r 331f992753b64a345e08c21c75001adb5498e608 -r bc062a85a993062ed82d99fdb545f238bbb23eae yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -487,6 +487,19 @@
 
         return self.get_ibbox(ileft, iright)
 
+    def get_nparticles_bbox(self, left, right):
+        """
+        Given left and right edges, return total
+        number of particles present.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+        indices = self.get_ibbox(ileft, iright)
+        npart = 0
+        for ind in indices:
+            npart += self.indexdata['len'][ind]
+        return npart
+
     def get_data(self, chunk, fields):
         data = {}
         for field in fields:
@@ -540,13 +553,28 @@
         max_key = self.indexdata['index'][-1]
         if left_key > max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        base = self.indexdata['base'][left_key]
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        while left_key < max_key:
+            lbase = self.indexdata['base'][left_key]
+            llen = self.indexdata['len'][left_key]
+            if lbase == 0 and llen == 0:
+                left_key += 1
+            else:
+                break
         right_key = min(right_key, self.indexdata['index'][-1])
-        length = self.indexdata['base'][right_key] + \
-            self.indexdata['len'][right_key] - base
+        while right_key > left_key:
+            rbase = self.indexdata['base'][right_key]
+            rlen = self.indexdata['len'][right_key]
+            if rbase == 0 and rlen == 0:
+                right_key -= 1
+            else:
+                break
+        print "Left, right keys:", left_key, right_key
+        length = rbase + rlen - lbase
         if length > 0:
-            print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
-        return self.get_data(slice(base, base + length), fields)
+            print 'Getting contiguous chunk of size %i starting at %i' % (length, lbase)
+        return self.get_data(slice(lbase, lbase + length), fields)
 
     def get_key_data(self, key, fields):
         max_key = self.indexdata['index'][-1]
@@ -609,5 +637,76 @@
         cell_iarr = np.array(cell_iarr)
         lk, rk =self.get_key_bounds(level, cell_iarr)
         return self.get_contiguous_chunk(lk, rk, fields)
+
+    def get_cell_bbox(self, level, cell_iarr):
+        """Get floating point bounding box for a given sindex cell
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        cell_iarr = np.array(cell_iarr)
+        cell_width = self.get_cell_width(level)
+        le = self.rmin + cell_iarr*cell_width
+        re = le+cell_width
+        bbox = np.array([le, re]).T
+        assert bbox.shape == (3, 2)
+        return bbox
+
+    def get_padded_bbox_data(self, level, cell_iarr, pad, fields):
+        """Get floating point bounding box for a given sindex cell
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        bbox = self.get_cell_bbox(level, cell_iarr)
+        data = []
+        data.append(self.get_cell_data(level, cell_iarr, fields))
+        #for dd in self.iter_bbox_data(bbox[:,0], bbox[:,1], fields):
+        #    data.append(dd)
+        #assert data[0]['x'].shape[0] > 0
+
+        # Bottom & Top
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] += pad[1]
+        pbox[2, 0] -= pad[2]
+        pbox[2, 1] = pbox[2, 0] + pad[2]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+        pbox[2, 0] = bbox[2, 1]
+        pbox[2, 1] = pbox[2, 0] + pad[2]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+
+        # Front & Back 
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] = pbox[1, 0] + pad[1]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+        pbox[1, 0] = bbox[1, 1]
+        pbox[1, 1] = pbox[1, 0] + pad[1]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+
+        # Left & Right 
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] = pbox[0, 0] + pad[0]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+        pbox[0, 0] = bbox[0, 1]
+        pbox[0, 1] = pbox[0, 0] + pad[0]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+
+        return data
+
     def get_cell_width(self, level):
         return self.domain_width / 2**level
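
The new while loops squeeze empty index cells (base == 0 and len == 0) off both ends of the key range so the contiguous read only spans populated cells. A small standalone sketch of the same squeeze, with a hypothetical indexdata dict standing in for the real index arrays:

import numpy as np

def squeeze_keys(left_key, right_key, indexdata):
    """Shrink [left_key, right_key] so both ends point at non-empty cells."""
    base, length = indexdata['base'], indexdata['len']
    while left_key < right_key and base[left_key] == 0 and length[left_key] == 0:
        left_key += 1
    while right_key > left_key and base[right_key] == 0 and length[right_key] == 0:
        right_key -= 1
    lbase = int(base[left_key])
    total = int(base[right_key] + length[right_key]) - lbase
    return left_key, right_key, slice(lbase, lbase + total)

# Cells 0 and 4 are empty; the contiguous read should span cells 1..3.
indexdata = {'base': np.array([0, 0, 5, 12, 0]),
             'len':  np.array([0, 5, 7, 3, 0])}
print(squeeze_keys(0, 4, indexdata))   # (1, 3, slice(0, 15))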


https://bitbucket.org/yt_analysis/yt/commits/279fb799b7a7/
Changeset:   279fb799b7a7
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-06 23:24:56
Summary:     Few fixes.
Affected #:  1 file

diff -r bc062a85a993062ed82d99fdb545f238bbb23eae -r 279fb799b7a7aa228289ee5d46b8ecaf2c96268a yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -415,6 +415,13 @@
         expanded[self.dim_slices[dim]] = slb
         return int(expanded.tostring(), 2)
 
+    def get_ind_from_key(self, key, dim='r'):
+        ind = [0,0,0]
+        br = np.binary_repr(key, width=self.level*3)
+        for dim in range(3):
+            ind[dim] = int(br[self.dim_slices[dim]],2)
+        return ind
+
     def get_slice_chunks(self, slice_dim, slice_index):
         sl_key = self.get_slice_key(slice_index, dim=slice_dim)
         mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
@@ -550,6 +557,13 @@
         return self.iter_data(inds, fields)
 
     def get_contiguous_chunk(self, left_key, right_key, fields):
+        print 'Getting contiguous chunk.'
+        liarr = self.get_ind_from_key(left_key)
+        riarr = self.get_ind_from_key(right_key)
+        print "From left to right:", liarr, riarr 
+
+        lbase=0
+        llen = 0
         max_key = self.indexdata['index'][-1]
         if left_key > max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
@@ -563,6 +577,8 @@
             else:
                 break
         right_key = min(right_key, self.indexdata['index'][-1])
+        rbase = 0
+        rlen = 0
         while right_key > left_key:
             rbase = self.indexdata['base'][right_key]
             rlen = self.indexdata['len'][right_key]
@@ -578,10 +594,10 @@
 
     def get_key_data(self, key, fields):
         max_key = self.indexdata['index'][-1]
-        if left_key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        base = self.indexdata['base'][left_key]
-        length = self.indexdata['len'][left_key] - base
+        if key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, max_key))
+        base = self.indexdata['base'][key]
+        length = self.indexdata['len'][key] - base
         print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
         return self.get_data(slice(base, base + length), fields)
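
get_ind_from_key inverts the key construction by slicing the binary representation of the key per dimension. A minimal sketch of the same inversion written with bit arithmetic instead of string slicing (ind_from_key and compact_bits are hypothetical helpers, assuming the interleaving layout used in the get_keyv sketch above):

def compact_bits(key, level, offset):
    """Collect every third bit of `key`, starting at `offset`, back into one integer."""
    res = 0
    for i in range(level):
        res |= ((key >> (i * 3 + offset)) & 1) << i
    return res

def ind_from_key(key, level):
    """Invert the interleaving: return the (i1, i2, i3) cell indices for one key."""
    return [compact_bits(key, level, d) for d in range(3)]

print(ind_from_key(29, level=3))   # [3, 2, 1], the inverse of key 29 above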
 


https://bitbucket.org/yt_analysis/yt/commits/23af16777be1/
Changeset:   23af16777be1
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-07 00:45:28
Summary:     Additional fixes. The mgrid was covering one extra cell per axis, so it read too much.
Affected #:  1 file

diff -r 279fb799b7a7aa228289ee5d46b8ecaf2c96268a -r 23af16777be1e6fd4b0f5a092a4d323602ab07df yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -460,13 +460,14 @@
         ix, iy, iz = (iright-ileft)*1j
         print 'IBBOX:', ileft, iright, ix, iy, iz
 
-        Z, Y, X = np.mgrid[ileft[2]:iright[2]:ix,
-                           ileft[1]:iright[1]:iy,
-                           ileft[0]:iright[0]:iz]
+        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
+                           ileft[1]:iright[1]+1,
+                           ileft[0]:iright[0]+1]
 
-        X = X.astype('int64').ravel()
-        Y = Y.astype('int64').ravel()
-        Z = Z.astype('int64').ravel()
+        mask = slice(0, -1, None)
+        X = X[mask, mask, mask].astype('int64').ravel()
+        Y = Y[mask, mask, mask].astype('int64').ravel()
+        Z = Z[mask, mask, mask].astype('int64').ravel()
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
         X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
@@ -478,10 +479,11 @@
         print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
         indices = self.get_keyv([X, Y, Z])
+        indices = indices[indices < self.indexdata['index'][-1]]
+        indices = indices[self.indexdata['len'][indices] > 0]
         #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
         # Here we sort the indices to batch consecutive reads together.
-        indices = np.sort(indices[indices < self.indexdata['index'].shape[0]])
-        indices = indices[self.indexdata['len'][indices] > 0]
+        indices = np.sort(indices)
         return indices
 
     def get_bbox(self, left, right):
@@ -513,6 +515,34 @@
             data[field] = self.sdfdata[field][chunk]
         return data
 
+    def get_next_nonzero_chunk(self, key, stop=None):
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        if stop is None:
+            stop = self.indexdata['index'][-1]
+        while key < stop:
+            base = self.indexdata['base'][key]
+            length = self.indexdata['len'][key]
+            if base == 0 and length == 0:
+                key += 1
+            else:
+                break
+        return key
+
+    def get_previous_nonzero_chunk(self, key, stop=None):
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        if stop is None:
+            stop = self.indexdata['index'][0]
+        while key > stop:
+            base = self.indexdata['base'][key]
+            length = self.indexdata['len'][key]
+            if base == 0 and length == 0:
+                key -= 1
+            else:
+                break
+        return key
+
     def iter_data(self, inds, fields):
         num_inds = len(inds)
         num_reads = 0
@@ -537,7 +567,7 @@
                     break
 
             chunk = slice(base, base+length)
-            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            print 'Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind)
             num_reads += 1
             if length > 0:
                 data = self.get_data(chunk, fields)
@@ -567,25 +597,17 @@
         max_key = self.indexdata['index'][-1]
         if left_key > max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        # These next two while loops are to squeeze the keys if they are empty. Would be better
-        # to go through and set base equal to the last non-zero base, i think.
-        while left_key < max_key:
-            lbase = self.indexdata['base'][left_key]
-            llen = self.indexdata['len'][left_key]
-            if lbase == 0 and llen == 0:
-                left_key += 1
-            else:
-                break
-        right_key = min(right_key, self.indexdata['index'][-1])
-        rbase = 0
-        rlen = 0
-        while right_key > left_key:
-            rbase = self.indexdata['base'][right_key]
-            rlen = self.indexdata['len'][right_key]
-            if rbase == 0 and rlen == 0:
-                right_key -= 1
-            else:
-                break
+        right_key = min(right_key, max_key)
+
+        left_key = self.get_next_nonzero_chunk(left_key)
+        right_key = self.get_previous_nonzero_chunk(right_key, left_key)
+
+        lbase = self.indexdata['base'][left_key]
+        llen = self.indexdata['len'][left_key]
+
+        rbase = self.indexdata['base'][right_key]
+        rlen = self.indexdata['len'][right_key]
+
         print "Left, right keys:", left_key, right_key
         length = rbase + rlen - lbase
         if length > 0:
@@ -690,7 +712,7 @@
         pbox[1, 0] -= pad[1]
         pbox[1, 1] += pad[1]
         pbox[2, 0] -= pad[2]
-        pbox[2, 1] = pbox[2, 0] + pad[2]
+        pbox[2, 1] = bbox[2, 0]
         for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
             data.append(dd)
         pbox[2, 0] = bbox[2, 1]
@@ -703,7 +725,7 @@
         pbox[0, 0] -= pad[0]
         pbox[0, 1] += pad[0]
         pbox[1, 0] -= pad[1]
-        pbox[1, 1] = pbox[1, 0] + pad[1]
+        pbox[1, 1] = bbox[1, 0]
         for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
             data.append(dd)
         pbox[1, 0] = bbox[1, 1]
@@ -714,7 +736,7 @@
         # Left & Right 
         pbox = bbox.copy()
         pbox[0, 0] -= pad[0]
-        pbox[0, 1] = pbox[0, 0] + pad[0]
+        pbox[0, 1] = bbox[0, 0]
         for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
             data.append(dd)
         pbox[0, 0] = bbox[0, 1]
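
The ibbox fix goes back to an inclusive integer mgrid and then slices off the trailing plane on each axis, so the grid covers the cells from ileft up to, but not including, iright rather than one extra cell per axis. A quick shape check with hypothetical bounds:

import numpy as np

ileft = np.array([0, 0, 0])
iright = np.array([3, 3, 3])

# The inclusive range yields one extra sample per axis...
Z, Y, X = np.mgrid[ileft[2]:iright[2] + 1,
                   ileft[1]:iright[1] + 1,
                   ileft[0]:iright[0] + 1]
print(X.shape)     # (4, 4, 4)

# ...so slicing off the last plane on every axis leaves exactly the
# cells from ileft up to, but not including, iright.
mask = slice(0, -1, None)
X = X[mask, mask, mask].astype('int64').ravel()
print(X.shape)     # (27,)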


https://bitbucket.org/yt_analysis/yt/commits/6d0e404f87c5/
Changeset:   6d0e404f87c5
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-07 00:58:57
Summary:     Merging in from matt, YTPositionArray
Affected #:  2 files

diff -r 2c9549b58b9f0b8d7789813474093b62f59d49c0 -r 6d0e404f87c594ae9d51cde84b38a1aaba4f771f yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -122,8 +122,7 @@
                 self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
-        else:
-            return self._sindex
+        return self._sindex
 
     def _set_code_unit_attributes(self):
         self.length_unit = self.quan(1.0, "kpc")

diff -r 2c9549b58b9f0b8d7789813474093b62f59d49c0 -r 6d0e404f87c594ae9d51cde84b38a1aaba4f771f yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -363,10 +363,40 @@
         self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
         print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
 
+    def spread_bits(self, ival, level=None):
+        if level is None:
+            level = self.level
+        res = 0
+        for i in range(level):
+            res |= ((ival>>i)&1)<<(i*3);
+        return res
+
     def get_key(self, iarr, level=None):
         if level is None:
             level = self.level
         i1, i2, i3 = iarr
+        return self.spread_bits(i1, level) | self.spread_bits(i2, level) << 1 | self.spread_bits(i3, level) << 2
+
+    def spread_bitsv(self, ival, level=None):
+        if level is None:
+            level = self.level
+        res = np.zeros_like(ival, dtype='int64')
+        for i in range(level):
+            res |= np.bitwise_and((ival>>i), 1)<<(i*3);
+        return res
+
+    def get_keyv(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
+        return np.bitwise_or(
+            np.bitwise_or(self.spread_bits(i1, level) , self.spread_bits(i2, level) << 1 ),
+            self.spread_bits(i3, level) << 2)
+
+    def get_key_slow(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
         rep1 = np.binary_repr(i1, width=self.level)
         rep2 = np.binary_repr(i2, width=self.level)
         rep3 = np.binary_repr(i3, width=self.level)
@@ -385,6 +415,13 @@
         expanded[self.dim_slices[dim]] = slb
         return int(expanded.tostring(), 2)
 
+    def get_ind_from_key(self, key, dim='r'):
+        ind = [0,0,0]
+        br = np.binary_repr(key, width=self.level*3)
+        for dim in range(3):
+            ind[dim] = int(br[self.dim_slices[dim]],2)
+        return ind
+
     def get_slice_chunks(self, slice_dim, slice_index):
         sl_key = self.get_slice_key(slice_index, dim=slice_dim)
         mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
@@ -418,15 +455,19 @@
         """
         mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
 
-        print 'Getting data from ileft to iright:',  ileft, iright
+        #print 'Getting data from ileft to iright:',  ileft, iright
 
-        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+        ix, iy, iz = (iright-ileft)*1j
+        print 'IBBOX:', ileft, iright, ix, iy, iz
+
+        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
                            ileft[1]:iright[1]+1,
-                           ileft[2]:iright[2]+1]
+                           ileft[0]:iright[0]+1]
 
-        X = X.ravel()
-        Y = Y.ravel()
-        Z = Z.ravel()
+        mask = slice(0, -1, None)
+        X = X[mask, mask, mask].astype('int64').ravel()
+        Y = Y[mask, mask, mask].astype('int64').ravel()
+        Z = Z[mask, mask, mask].astype('int64').ravel()
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
         X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
@@ -437,8 +478,12 @@
 
         print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
-        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
-        indices = indices[indices < self.indexdata['index'].shape[0]]
+        indices = self.get_keyv([X, Y, Z])
+        indices = indices[indices < self.indexdata['index'][-1]]
+        indices = indices[self.indexdata['len'][indices] > 0]
+        #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        # Here we sort the indices to batch consecutive reads together.
+        indices = np.sort(indices)
         return indices
 
     def get_bbox(self, left, right):
@@ -451,12 +496,53 @@
 
         return self.get_ibbox(ileft, iright)
 
+    def get_nparticles_bbox(self, left, right):
+        """
+        Given left and right edges, return total
+        number of particles present.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+        indices = self.get_ibbox(ileft, iright)
+        npart = 0
+        for ind in indices:
+            npart += self.indexdata['len'][ind]
+        return npart
+
     def get_data(self, chunk, fields):
         data = {}
         for field in fields:
             data[field] = self.sdfdata[field][chunk]
         return data
 
+    def get_next_nonzero_chunk(self, key, stop=None):
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        if stop is None:
+            stop = self.indexdata['index'][-1]
+        while key < stop:
+            base = self.indexdata['base'][key]
+            length = self.indexdata['len'][key]
+            if base == 0 and length == 0:
+                key += 1
+            else:
+                break
+        return key
+
+    def get_previous_nonzero_chunk(self, key, stop=None):
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        if stop is None:
+            stop = self.indexdata['index'][0]
+        while key > stop:
+            base = self.indexdata['base'][key]
+            length = self.indexdata['len'][key]
+            if base == 0 and length == 0:
+                key -= 1
+            else:
+                break
+        return key
+
     def iter_data(self, inds, fields):
         num_inds = len(inds)
         num_reads = 0
@@ -469,10 +555,10 @@
             # Concatenate aligned reads
             nexti = i+1
             combined = 0
-            while nexti < len(inds):
+            while nexti < num_inds:
                 nextind = inds[nexti]
                 #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
-                if base + length == self.indexdata['base'][nextind]:
+                if combined < 1024 and base + length == self.indexdata['base'][nextind]:
                     length += self.indexdata['len'][nextind]
                     i += 1
                     nexti += 1
@@ -481,11 +567,12 @@
                     break
 
             chunk = slice(base, base+length)
-            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            print 'Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind)
             num_reads += 1
-            data = self.get_data(chunk, fields)
-            yield data
-            del data
+            if length > 0:
+                data = self.get_data(chunk, fields)
+                yield data
+                del data
             i += 1
         print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
 
@@ -500,13 +587,39 @@
         return self.iter_data(inds, fields)
 
     def get_contiguous_chunk(self, left_key, right_key, fields):
+        print 'Getting contiguous chunk.'
+        liarr = self.get_ind_from_key(left_key)
+        riarr = self.get_ind_from_key(right_key)
+        print "From left to right:", liarr, riarr 
+
+        lbase=0
+        llen = 0
         max_key = self.indexdata['index'][-1]
         if left_key > max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        base = self.indexdata['base'][left_key]
-        right_key = min(right_key, self.indexdata['index'][-1])
-        length = self.indexdata['base'][right_key] + \
-            self.indexdata['len'][right_key] - base
+        right_key = min(right_key, max_key)
+
+        left_key = self.get_next_nonzero_chunk(left_key)
+        right_key = self.get_previous_nonzero_chunk(right_key, left_key)
+
+        lbase = self.indexdata['base'][left_key]
+        llen = self.indexdata['len'][left_key]
+
+        rbase = self.indexdata['base'][right_key]
+        rlen = self.indexdata['len'][right_key]
+
+        print "Left, right keys:", left_key, right_key
+        length = rbase + rlen - lbase
+        if length > 0:
+            print 'Getting contiguous chunk of size %i starting at %i' % (length, lbase)
+        return self.get_data(slice(lbase, lbase + length), fields)
+
+    def get_key_data(self, key, fields):
+        max_key = self.indexdata['index'][-1]
+        if key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, max_key))
+        base = self.indexdata['base'][key]
+        length = self.indexdata['len'][key] - base
         print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
         return self.get_data(slice(base, base + length), fields)
 
@@ -562,3 +675,76 @@
         cell_iarr = np.array(cell_iarr)
         lk, rk =self.get_key_bounds(level, cell_iarr)
         return self.get_contiguous_chunk(lk, rk, fields)
+
+    def get_cell_bbox(self, level, cell_iarr):
+        """Get floating point bounding box for a given sindex cell
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        cell_iarr = np.array(cell_iarr)
+        cell_width = self.get_cell_width(level)
+        le = self.rmin + cell_iarr*cell_width
+        re = le+cell_width
+        bbox = np.array([le, re]).T
+        assert bbox.shape == (3, 2)
+        return bbox
+
+    def get_padded_bbox_data(self, level, cell_iarr, pad, fields):
+        """Get floating point bounding box for a given sindex cell
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        bbox = self.get_cell_bbox(level, cell_iarr)
+        data = []
+        data.append(self.get_cell_data(level, cell_iarr, fields))
+        #for dd in self.iter_bbox_data(bbox[:,0], bbox[:,1], fields):
+        #    data.append(dd)
+        #assert data[0]['x'].shape[0] > 0
+
+        # Bottom & Top
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] += pad[1]
+        pbox[2, 0] -= pad[2]
+        pbox[2, 1] = bbox[2, 0]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+        pbox[2, 0] = bbox[2, 1]
+        pbox[2, 1] = pbox[2, 0] + pad[2]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+
+        # Front & Back 
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] = bbox[1, 0]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+        pbox[1, 0] = bbox[1, 1]
+        pbox[1, 1] = pbox[1, 0] + pad[1]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+
+        # Left & Right 
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] = bbox[0, 0]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+        pbox[0, 0] = bbox[0, 1]
+        pbox[0, 1] = pbox[0, 0] + pad[0]
+        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+            data.append(dd)
+
+        return data
+
+    def get_cell_width(self, level):
+        return self.domain_width / 2**level


https://bitbucket.org/yt_analysis/yt/commits/f8705cc5a957/
Changeset:   f8705cc5a957
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-07 01:05:11
Summary:     Quiet down the output by routing it through mylog.debug.
Affected #:  1 file

diff -r 6d0e404f87c594ae9d51cde84b38a1aaba4f771f -r f8705cc5a957afe85e8419c36570544791144e99 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -354,14 +354,14 @@
             f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
             if (f2 != ic_Nmesh):
                 expand_root = 1.0*f2/ic_Nmesh - 1.0;
-            print 'Expanding: ', f2, ic_Nmesh, expand_root
+            mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
         self.rmin *= 1.0 + expand_root
         self.rmax *= 1.0 + expand_root
         self.domain_width = self.rmax - self.rmin
         self.domain_dims = 1 << self.level
         self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
         self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
-        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
+        mylog.debug("SINDEX: %s, %s, %s " % (self.domain_width, self.domain_dims, self.domain_active_dims))
 
     def spread_bits(self, ival, level=None):
         if level is None:
@@ -458,7 +458,7 @@
         #print 'Getting data from ileft to iright:',  ileft, iright
 
         ix, iy, iz = (iright-ileft)*1j
-        print 'IBBOX:', ileft, iright, ix, iy, iz
+        #print 'IBBOX:', ileft, iright, ix, iy, iz
 
         Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
                            ileft[1]:iright[1]+1,
@@ -476,7 +476,7 @@
         Z[Z < self.domain_buffer] += self.domain_active_dims
         Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
 
-        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
+        #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
         indices = self.get_keyv([X, Y, Z])
         indices = indices[indices < self.indexdata['index'][-1]]
@@ -546,7 +546,7 @@
     def iter_data(self, inds, fields):
         num_inds = len(inds)
         num_reads = 0
-        print 'Reading %i chunks' % num_inds
+        mylog.debug('SINDEX Reading %i chunks' % num_inds)
         i = 0
         while (i < num_inds):
             ind = inds[i]
@@ -567,30 +567,28 @@
                     break
 
             chunk = slice(base, base+length)
-            print 'Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind)
+            mylog.debug('Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind))
             num_reads += 1
             if length > 0:
                 data = self.get_data(chunk, fields)
                 yield data
                 del data
             i += 1
-        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
+        mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
 
     def iter_bbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
+        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
         return self.iter_data(inds, fields)
 
     def iter_ibbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
+        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_ibbox(left, right)
         return self.iter_data(inds, fields)
 
     def get_contiguous_chunk(self, left_key, right_key, fields):
-        print 'Getting contiguous chunk.'
         liarr = self.get_ind_from_key(left_key)
         riarr = self.get_ind_from_key(right_key)
-        print "From left to right:", liarr, riarr 
 
         lbase=0
         llen = 0
@@ -608,10 +606,9 @@
         rbase = self.indexdata['base'][right_key]
         rlen = self.indexdata['len'][right_key]
 
-        print "Left, right keys:", left_key, right_key
         length = rbase + rlen - lbase
         if length > 0:
-            print 'Getting contiguous chunk of size %i starting at %i' % (length, lbase)
+            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, lbase))
         return self.get_data(slice(lbase, lbase + length), fields)
 
     def get_key_data(self, key, fields):
@@ -620,7 +617,8 @@
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, max_key))
         base = self.indexdata['base'][key]
         length = self.indexdata['len'][key] - base
-        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        if length > 0:
+            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, base))
         return self.get_data(slice(base, base + length), fields)
 
     def iter_slice_data(self, slice_dim, slice_index, fields):
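
The prints are routed through yt's module-level logger, so the SINDEX chatter only shows up when debug logging is enabled. A small usage sketch, assuming the usual mylog import from yt.funcs and that the attached handler lets debug records through:

# mylog is yt's shared logger (a standard logging.Logger), so the SINDEX
# messages above are only emitted when the level is lowered to DEBUG.
import logging
from yt.funcs import mylog

mylog.setLevel(logging.DEBUG)
mylog.debug('SINDEX Reading %i chunks' % 42)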


https://bitbucket.org/yt_analysis/yt/commits/418b45ed36cb/
Changeset:   418b45ed36cb
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-07 19:18:10
Summary:     Replacing ind with i, since ind goes above particle count.
Affected #:  1 file

diff -r f8705cc5a957afe85e8419c36570544791144e99 -r 418b45ed36cb32646f237f1061771f9fd51ba023 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -252,10 +252,11 @@
         cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
         fof_obj.num_p = 0
         j = 0
+        max_count = pind.shape[0]
         # We're going to do one iteration to get the most frequent value.
         for i in range(pind.shape[0]):
-            ind = pind[i]
-            local_tag = fof_tags[ind]
+            ind = i #pind[i]
+            local_tag = fof_tags[i]
             # Don't count the null group
             if local_tag == -1: continue
             if local_tag != last_fof_tag:
@@ -276,7 +277,7 @@
         cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
         free_halos()
         for i in range(pind.shape[0]):
-            ind = pind[i]
+            ind = i #pind[i]
             local_tag = fof_tags[ind]
             # Skip this one -- it means no group.
             if local_tag == -1:
@@ -284,7 +285,7 @@
             if i == pind.shape[0] - 1:
                 next_tag = local_tag + 1
             else:
-                next_tag = fof_tags[pind[i+1]]
+                next_tag = fof_tags[i+1]
             for k in range(3):
                 fof_obj.particles[j].pos[k] = pos[ind,k]
                 fof_obj.particles[j].pos[k+3] = vel[ind,k]


https://bitbucket.org/yt_analysis/yt/commits/715cc1184ccd/
Changeset:   715cc1184ccd
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-07 23:17:32
Summary:     Merging in from matt
Affected #:  1 file

diff -r 418b45ed36cb32646f237f1061771f9fd51ba023 -r 715cc1184ccde7afc89b03c570b4ba5a3c5e42da yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -751,7 +751,6 @@
                 c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
-                offset = pind[offset]
                 if c0.count < minimum_count:
                     contour_ids[offset] = -1
         free(container)


https://bitbucket.org/yt_analysis/yt/commits/dd7402239fba/
Changeset:   dd7402239fba
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-08 01:02:03
Summary:     Scale the epsilon padding on the bounds by the magnitude of the extrema to avoid floating-point fuzz.
Affected #:  1 file

diff -r 715cc1184ccde7afc89b03c570b4ba5a3c5e42da -r dd7402239fbafbc9e900807bd5c6152c3eb8b90f yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -342,8 +342,10 @@
     def morton(self):
         self.validate()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         morton = compute_morton(
             self[:,0], self[:,1], self[:,2],
             LE, RE)
@@ -354,8 +356,10 @@
         mi = self.morton
         mi.sort()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         octree = ParticleOctreeContainer(dims, LE, RE, 
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
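
The change swaps the absolute pad (machine epsilon times one code-length unit) for a pad scaled by the magnitude of the extrema, so that domains with large coordinate values still get a representable shift rather than having the epsilon rounded away. A quick numeric illustration with a hypothetical left edge:

import numpy as np

eps = np.finfo(np.float64).eps
LE = np.array([1.0e6, 1.0e6, 1.0e6])        # hypothetical left edge, large coordinates

# Absolute pad: eps times one code unit is far smaller than the spacing
# between representable doubles near 1e6, so the shift rounds away.
print((LE - eps * 1.0) == LE)               # [ True  True  True]

# Relative pad: scaling eps by |LE| always produces a representable shift.
print((LE - np.abs(LE) * eps) == LE)        # [False False False]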


https://bitbucket.org/yt_analysis/yt/commits/83cc4529599d/
Changeset:   83cc4529599d
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-08 01:19:21
Summary:     Merging
Affected #:  1 file

diff -r dd7402239fbafbc9e900807bd5c6152c3eb8b90f -r 83cc4529599d00b533e19a5abbf23c8be605185e yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,6 +6,10 @@
 from libc.stdlib cimport malloc, free
 import sys
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
@@ -235,8 +239,8 @@
     @cython.wraparound(False)
     def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
                                 np.ndarray[np.int64_t, ndim=1] fof_tags,
-                                np.ndarray[np.float64_t, ndim=2] pos,
-                                np.ndarray[np.float64_t, ndim=2] vel):
+                                np.ndarray[anyfloat, ndim=2] pos,
+                                np.ndarray[anyfloat, ndim=2] vel):
 
         # Define fof object
 


https://bitbucket.org/yt_analysis/yt/commits/1b0919cc7431/
Changeset:   1b0919cc7431
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-08 02:32:06
Summary:     Reverting bad pinds.
Affected #:  1 file

diff -r 83cc4529599d00b533e19a5abbf23c8be605185e -r 1b0919cc74316d863c14e0d26b94c53141c12f74 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -256,11 +256,10 @@
         cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
         fof_obj.num_p = 0
         j = 0
-        max_count = pind.shape[0]
         # We're going to do one iteration to get the most frequent value.
         for i in range(pind.shape[0]):
-            ind = i #pind[i]
-            local_tag = fof_tags[i]
+            ind = pind[i]
+            local_tag = fof_tags[ind]
             # Don't count the null group
             if local_tag == -1: continue
             if local_tag != last_fof_tag:
@@ -272,7 +271,7 @@
                 j += 1
         if j > max_count:
             max_count = j
-        #print >> sys.stderr, "Most frequent occurrance: %s" % max_count
+        print >> sys.stderr, "Most frequent occurrance: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0
         cdef int counter = 0, ndone = 0
@@ -281,7 +280,7 @@
         cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
         free_halos()
         for i in range(pind.shape[0]):
-            ind = i #pind[i]
+            ind = pind[i]
             local_tag = fof_tags[ind]
             # Skip this one -- it means no group.
             if local_tag == -1:
@@ -289,7 +288,7 @@
             if i == pind.shape[0] - 1:
                 next_tag = local_tag + 1
             else:
-                next_tag = fof_tags[i+1]
+                next_tag = fof_tags[pind[i+1]]
             for k in range(3):
                 fof_obj.particles[j].pos[k] = pos[ind,k]
                 fof_obj.particles[j].pos[k+3] = vel[ind,k]
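
The revert restores the indirection through pind: particles are visited in the order of a sort permutation over the FOF tags, so members of each group arrive consecutively even though the underlying arrays are unsorted. A small sketch of that access pattern, with a hypothetical fof_tags array:

import numpy as np

fof_tags = np.array([3, -1, 1, 3, 1, 2])     # hypothetical group tag per particle
pind = np.argsort(fof_tags, kind='stable')   # permutation that groups equal tags

groups = {}
for i in range(pind.shape[0]):
    ind = pind[i]                  # visit particles in tag order, not storage order
    local_tag = fof_tags[ind]
    if local_tag == -1:            # skip the null group, as in the commit
        continue
    groups.setdefault(int(local_tag), []).append(int(ind))

print(groups)                      # {1: [2, 4], 2: [5], 3: [0, 3]}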


https://bitbucket.org/yt_analysis/yt/commits/1daa76b39297/
Changeset:   1daa76b39297
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-08 22:47:41
Summary:     Add filtering of particles to be strictly within the bbox. Also find particles that may have wandered since the sort.
Affected #:  1 file

diff -r 1b0919cc74316d863c14e0d26b94c53141c12f74 -r 1daa76b3929733f3657bbe9f78c27068b2d8289e yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -355,6 +355,9 @@
             if (f2 != ic_Nmesh):
                 expand_root = 1.0*f2/ic_Nmesh - 1.0;
             mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
+        self.true_domain_left = self.rmin.copy()
+        self.true_domain_right = self.rmax.copy()
+        self.true_domain_width = self.rmax - self.rmin
         self.rmin *= 1.0 + expand_root
         self.rmax *= 1.0 + expand_root
         self.domain_width = self.rmax - self.rmin
@@ -448,7 +451,7 @@
         lengths = self.indexdata['len'][mask]
         return mask, offsets, lengths
 
-    def get_ibbox(self, ileft, iright):
+    def get_ibbox(self, ileft, iright, wandering_particles=True):
         """
         Given left and right indicies, return a mask and
         set of offsets+lengths into the sdf data.
@@ -468,6 +471,21 @@
         X = X[mask, mask, mask].astype('int64').ravel()
         Y = Y[mask, mask, mask].astype('int64').ravel()
         Z = Z[mask, mask, mask].astype('int64').ravel()
+
+        if wandering_particles:
+            # Need to get padded bbox around the border to catch
+            # wandering particles.
+            dmask = X == self.domain_buffer-1
+            dmask += Y == self.domain_buffer-1
+            dmask += Z == self.domain_buffer-1
+            dmask += X == self.domain_dims
+            dmask += Y == self.domain_dims
+            dmask += Z == self.domain_dims
+            dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
+            dinds = dinds[dinds < self.indexdata['index'][-1]]
+            dinds = dinds[self.indexdata['len'][dinds] > 0]
+            print 'Getting boundary layers for wanderers, cells: %i' % dinds.size
+
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
         X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
@@ -481,9 +499,13 @@
         indices = self.get_keyv([X, Y, Z])
         indices = indices[indices < self.indexdata['index'][-1]]
         indices = indices[self.indexdata['len'][indices] > 0]
+
         #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
         # Here we sort the indices to batch consecutive reads together.
-        indices = np.sort(indices)
+        if wandering_particles:
+            indices = np.sort(np.append(indices, dinds))
+        else:
+            indices = np.sort(indices)
         return indices
 
     def get_bbox(self, left, right):
@@ -576,6 +598,41 @@
             i += 1
         mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
 
+    def filter_bbox(self, left, right, iter):
+        """
+        Filter data by masking out data outside of a bbox defined
+        by left/right. Account for periodicity of data, allowing left/right
+        to be outside of the domain.
+        """
+        for data in iter:
+            mask = np.zeros_like(data, dtype='bool')
+            pos = np.array([data['x'], data['y'], data['z']]).T
+            # Now make pos periodic
+            for i in range(3):
+                pos[i][pos[i] < left[i]] += self.true_domain_width[i]
+                pos[i][pos[i] >= right[i]] -= self.true_domain_width[i]
+
+            # First mask out the particles outside the bbox
+            mask = np.all(pos >= left, axis=1) * \
+                np.all(pos < right, axis=1)
+
+            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
+
+            if not np.any(mask):
+                continue
+
+            filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
+            for f in data.keys():
+                if f in 'xyz': continue
+                filtered[f] = data[f][mask]
+
+            for i, ax in enumerate('xyz'):
+                print left, right
+                assert np.all(filtered[ax] >= left[i])
+                assert np.all(filtered[ax] < right[i])
+
+            yield filtered
+
     def iter_bbox_data(self, left, right, fields):
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
@@ -697,8 +754,14 @@
 
         """
         bbox = self.get_cell_bbox(level, cell_iarr)
+        filter_left = bbox[:, 0] - pad
+        filter_right = bbox[:, 1] + pad
+
         data = []
-        data.append(self.get_cell_data(level, cell_iarr, fields))
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            [self.get_cell_data(level, cell_iarr, fields)]):
+            data.append(dd)
         #for dd in self.iter_bbox_data(bbox[:,0], bbox[:,1], fields):
         #    data.append(dd)
         #assert data[0]['x'].shape[0] > 0
@@ -711,11 +774,15 @@
         pbox[1, 1] += pad[1]
         pbox[2, 0] -= pad[2]
         pbox[2, 1] = bbox[2, 0]
-        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
         pbox[2, 0] = bbox[2, 1]
         pbox[2, 1] = pbox[2, 0] + pad[2]
-        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
 
         # Front & Back 
@@ -724,22 +791,30 @@
         pbox[0, 1] += pad[0]
         pbox[1, 0] -= pad[1]
         pbox[1, 1] = bbox[1, 0]
-        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
         pbox[1, 0] = bbox[1, 1]
         pbox[1, 1] = pbox[1, 0] + pad[1]
-        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
 
         # Left & Right 
         pbox = bbox.copy()
         pbox[0, 0] -= pad[0]
         pbox[0, 1] = bbox[0, 0]
-        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
         pbox[0, 0] = bbox[0, 1]
         pbox[0, 1] = pbox[0, 0] + pad[0]
-        for dd in self.iter_bbox_data(pbox[:,0], pbox[:,1], fields):
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
 
         return data
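
filter_bbox masks each yielded chunk down to particles strictly inside the requested box, after wrapping positions by the true domain width so that edges lying outside the primary domain still select the right particles. A condensed standalone sketch of that filter (not the exact committed code), with a hypothetical data dict and a unit domain width:

import numpy as np

def filter_bbox(data, left, right, domain_width):
    """Keep only particles with left <= pos < right, wrapping positions periodically."""
    pos = np.array([data['x'], data['y'], data['z']]).T
    for i in range(3):
        # Wrap each coordinate so it can be compared against edges that
        # may lie outside the primary domain.
        pos[pos[:, i] < left[i], i] += domain_width[i]
        pos[pos[:, i] >= right[i], i] -= domain_width[i]
    mask = np.all(pos >= left, axis=1) & np.all(pos < right, axis=1)
    out = {ax: pos[mask, i] for i, ax in enumerate('xyz')}
    for f in data:
        if f not in 'xyz':
            out[f] = data[f][mask]
    return out

# A box that extends past the left domain edge picks up the particle at
# x = 0.95 through the periodic wrap.
data = {'x': np.array([0.05, 0.95]), 'y': np.array([0.10, 0.20]),
        'z': np.array([0.10, 0.20]), 'mass': np.array([1.0, 2.0])}
print(filter_bbox(data, np.array([-0.1, 0.0, 0.0]),
                  np.array([0.3, 0.3, 0.3]), domain_width=np.ones(3)))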


https://bitbucket.org/yt_analysis/yt/commits/05a9d2cffc1b/
Changeset:   05a9d2cffc1b
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-09 01:48:47
Summary:     Fix a few bugs with wandering particles and expanded boxes.
Affected #:  1 file

diff -r 1daa76b3929733f3657bbe9f78c27068b2d8289e -r 05a9d2cffc1bf557c11119b6037df91ee5737862 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -347,6 +347,9 @@
             self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
             self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
 
+        self.rmin *= self.sdfdata.parameters.get("a", 1.0)
+        self.rmax *= self.sdfdata.parameters.get("a", 1.0)
+
         #/* expand root for non-power-of-two */
         expand_root = 0.0
         ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
@@ -543,9 +546,8 @@
         if stop is None:
             stop = self.indexdata['index'][-1]
         while key < stop:
-            base = self.indexdata['base'][key]
-            length = self.indexdata['len'][key]
-            if base == 0 and length == 0:
+            if self.indexdata['index'][key] == 0:
+                #print 'Squeezing keys, incrementing'
                 key += 1
             else:
                 break
@@ -557,9 +559,9 @@
         if stop is None:
             stop = self.indexdata['index'][0]
         while key > stop:
-            base = self.indexdata['base'][key]
-            length = self.indexdata['len'][key]
-            if base == 0 and length == 0:
+            #self.indexdata['index'][-1]:
+            if self.indexdata['index'][key] == 0:
+                #print 'Squeezing keys, decrementing'
                 key -= 1
             else:
                 break


https://bitbucket.org/yt_analysis/yt/commits/c56a451d95ba/
Changeset:   c56a451d95ba
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-09 02:34:17
Summary:     Merging from mainline
Affected #:  31 files

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -15,11 +15,12 @@
 details on the relative differences between these halo finders see 
 :ref:`halo_finding`.
 
-.. code-block:: 
-    from yt.mods import *
-    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
@@ -28,9 +29,10 @@
 only specify the file output by the processor with ID 0. Note that the 
 argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
 
-.. code-block:: 
-    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-    hc = HaloCatalog(halos_pf=halos_pf)
+.. code-block:: python
+
+   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_pf=halos_pf)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
@@ -38,10 +40,11 @@
 with the dataset from which they were found, supply arguments to both 
 halos_pf and data_pf.
 
-.. code-block::
-    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-    hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+.. code-block:: python
+
+   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -72,9 +75,9 @@
 
 An example of adding a filter:
 
-.. code-block::
+.. code-block:: python
 
-    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+   hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
 
 Currently quantity_value is the only available filter, but more can be 
 added by the user by defining a function that accepts a halo object as 
@@ -85,20 +88,21 @@
 
 An example of defining your own filter:
 
-.. code-block::
-    def my_filter_function(halo):
-        
-        # Define condition for filter
-        filter_value = True
-        
-        # Return a boolean value 
-        return filter_value
+.. code-block:: python
 
-    # Add your filter to the filter registry
-    add_filter("my_filter", my_filter_function)
+   def my_filter_function(halo):
+       
+       # Define condition for filter
+       filter_value = True
+       
+       # Return a boolean value 
+       return filter_value
 
-    # ... Later on in your script
-    hc.add_filter("my_filter")
+   # Add your filter to the filter registry
+   add_filter("my_filter", my_filter_function)
+
+   # ... Later on in your script
+   hc.add_filter("my_filter")
 
 Quantities
 ----------
@@ -118,25 +122,26 @@
 
 An example of adding a quantity:
 
-.. code-block::
-    hc.add_quantity('center_of_mass')
+.. code-block:: python
+
+   hc.add_quantity('center_of_mass')
 
 An example of defining your own quantity:
 
-.. code-block::
+.. code-block:: python
 
-    def my_quantity_function(halo):
-        # Define quantity to return
-        quantity = 5
-        
-        return quantity
+   def my_quantity_function(halo):
+       # Define quantity to return
+       quantity = 5
+       
+       return quantity
 
-    # Add your filter to the filter registry
-    add_quantity('my_quantity', my_quantity_function)
+   # Add your filter to the filter registry
+   add_quantity('my_quantity', my_quantity_function)
 
 
-    # ... Later on in your script
-    hc.add_quantity("my_quantity") 
+   # ... Later on in your script
+   hc.add_quantity("my_quantity") 
 
 Callbacks
 ---------
@@ -150,8 +155,9 @@
 An example of using a pre-defined callback where we create a sphere for 
 each halo with a radius that is twice the saved “radius”.
 
-.. code-block::
-    hc.add_callback("sphere", factor=2.0)
+.. code-block:: python
+
+   hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
 yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may 
@@ -161,19 +167,19 @@
 
 An example of defining your own callback:
 
-.. code-block::
+.. code-block:: python
 
-    def my_callback_function(halo):
-        # Perform some callback actions here
-        x = 2
-        halo.x_val = x
+   def my_callback_function(halo):
+       # Perform some callback actions here
+       x = 2
+       halo.x_val = x
 
-    # Add the callback to the callback registry
-    add_callback('my_callback', my_callback_function)
+   # Add the callback to the callback registry
+   add_callback('my_callback', my_callback_function)
 
 
-    # ...  Later on in your script
-    hc.add_callback("my_callback")
+   # ...  Later on in your script
+   hc.add_callback("my_callback")
 
 Running Analysis
 ================
@@ -181,8 +187,9 @@
 After all callbacks, quantities, and filters have been added, the 
 analysis begins with a call to HaloCatalog.create.
 
-.. code-block::
-    hc.create()
+.. code-block:: python
+
+   hc.create()
 
 The save_halos keyword determines whether the actual Halo objects 
 are saved after analysis on them has completed or whether just the 
@@ -206,13 +213,14 @@
 standard call to load. Any side data, such as profiles, can be reloaded 
 with a load_profiles callback and a call to HaloCatalog.load.
 
-.. code-block::
-    hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-    hc = HaloCatalog(halos_pf=hpf,
-                     output_dir="halo_catalogs/catalog_0046")
-    hc.add_callback("load_profiles", output_dir="profiles",
-                    filename="virial_profiles")
-    hc.load()
+.. code-block:: python
+
+   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_pf=hpf,
+                    output_dir="halo_catalogs/catalog_0046")
+   hc.add_callback("load_profiles", output_dir="profiles",
+                   filename="virial_profiles")
+   hc.load()
 
 Summary
 =======
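
For reference, the separate snippets in the documentation above condense into a single end-to-end workflow (the dataset path is the doc's example file):

    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    # Find halos, filter them, attach quantities and callbacks, then run.
    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
    hc.add_quantity('center_of_mass')
    hc.add_callback('sphere', factor=2.0)
    hc.create()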

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:ded7d47bf5a74c9ea5431a37b6d371a631909d2b95214cd8053617762f62e2e4"
+  "signature": "sha256:2f774139560d94508c2c51b70930d46941d9ceef7228655de32a69634f6c6d83"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,14 +73,43 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Note that the x and y axes are in units of \"code length\", which in the case of FITS datasets are equal to the width of one pixel. Currently, the `yt` plotting routines don't understand datasets with non-length units on the axes (such as RA, Dec, velocity, etc.), so it defaults to the pixel scale. This will be changed in a future release. When making plots of FITS data, to see the image coordinates as they are in the file, it is helpful to set the keyword `origin = \"native\"`."
+      "The x and y axes are in units of the image pixel. When making plots of FITS data, to see the image coordinates as they are in the file, it is helpful to set the keyword `origin = \"native\"`. If you want to see the celestial coordinates along the axes, you can import the `PlotWindowWCS` class and feed it the `SlicePlot`. For this to work, the [WCSAxes](http://wcsaxes.readthedocs.org/en/latest/) package needs to be installed."
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import PlotWindowWCS\n",
+      "wcs_slc = PlotWindowWCS(slc)\n",
+      "wcs_slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+      "Generally, it is best to get the plot in the shape you want it before feeding it to `PlotWindowWCS`. Once it looks the way you want, you can save it just like a normal `PlotWindow` plot:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "wcs_slc.save()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
      ]
     },
     {
@@ -147,6 +176,44 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", \n",
+      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", \n",
+      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In these cases, we needed to set `aspect=\"auto\"` and explicitly declare a square `window_size` to get a figure that looks good. "
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},
@@ -298,6 +365,78 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can also take an existing [ds9](http://ds9.si.edu/site/Home.html) region and use it to create a \"cut region\" as well, using `ds9_region` (the [pyregion](http://leejjoon.github.io/pyregion/) package needs to be installed for this):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import ds9_region"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "For this example we'll create a ds9 region from scratch and load it up:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "region = 'galactic;box(+49:26:35.150,-0:30:04.410,1926.1927\",1483.3701\",0.0)'\n",
+      "box_reg = ds9_region(ds, region)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This region may now be used to compute derived quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print box_reg.quantities.extrema(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Or in projections:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], origin=\"native\", \n",
+      "                        data_source=box_reg, weight_field=\"ones\") # \"ones\" weights each cell by 1\n",
+      "prj.set_log(\"temperature\", True)\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/fits_xray_images.ipynb
--- a/doc/source/cookbook/fits_xray_images.ipynb
+++ b/doc/source/cookbook/fits_xray_images.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:564cb1986609d8bb76397a18219974504231b260f912bed483b87c1f896e92ac"
+  "signature": "sha256:650e3fc7f66951a5fcdb18332bbc625f6f6e449fc919acd01da01e1fbbf92ee1"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -71,19 +71,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds.index\n",
       "def _counts(field, data):\n",
       "    exposure_time = data.get_field_parameter(\"exposure_time\")\n",
       "    return data[\"flux\"]*data[\"pixel\"]*exposure_time\n",
-      "ds.field_info.add_field(name=\"counts\", function=_counts, units=\"counts\")\n",
+      "ds.add_field(name=\"counts\", function=_counts, units=\"counts\", take_log=False)\n",
       "\n",
       "def _pp(field, data):\n",
       "    return np.sqrt(data[\"counts\"])*data[\"projected_temperature\"]\n",
-      "ds.field_info.add_field(name=\"pseudo_pressure\", function=_pp, units=\"sqrt(counts)*keV\")\n",
+      "ds.add_field(name=\"pseudo_pressure\", function=_pp, units=\"sqrt(counts)*keV\", take_log=False)\n",
       "\n",
       "def _pe(field, data):\n",
       "    return data[\"projected_temperature\"]*data[\"counts\"]**(-1./3.)\n",
-      "ds.field_info.add_field(name=\"pseudo_entropy\", function=_pe, units=\"keV*(counts)**(-1/3)\")"
+      "ds.add_field(name=\"pseudo_entropy\", function=_pe, units=\"keV*(counts)**(-1/3)\", take_log=False)"
      ],
      "language": "python",
      "metadata": {},
@@ -131,6 +130,152 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To add the celestial coordinates to the image, we can use `PlotWindowWCS`, if you have the [WCSAxes](http://wcsaxes.readthedocs.org/en/latest/) package installed:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import PlotWindowWCS\n",
+      "wcs_slc = PlotWindowWCS(slc)\n",
+      "wcs_slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can make use of yt's facilities for profile plotting as well."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "v, c = ds.find_max(\"flux\") # Find the maxmimum flux and its center\n",
+      "my_sphere = ds.sphere(c, (100.,\"code_length\")) # Radius of 150 pixels\n",
+      "my_sphere.set_field_parameter(\"exposure_time\", exposure_time)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Such as a radial profile plot:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "radial_profile = yt.ProfilePlot(my_sphere, \"radius\", \n",
+      "                                [\"counts\",\"pseudo_pressure\",\"pseudo_entropy\"], \n",
+      "                                n_bins=50, weight_field=\"ones\")\n",
+      "radial_profile.set_log(\"counts\", True)\n",
+      "radial_profile.set_log(\"pseudo_pressure\", True)\n",
+      "radial_profile.set_log(\"pseudo_entropy\", True)\n",
+      "radial_profile.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Or a phase plot:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "phase_plot = yt.PhasePlot(my_sphere, \"pseudo_pressure\", \"pseudo_entropy\", [\"counts\"], weight_field=None)\n",
+      "phase_plot.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can also take an existing [ds9](http://ds9.si.edu/site/Home.html) region and use it to create a \"cut region\", using `ds9_region` (the [pyregion](http://leejjoon.github.io/pyregion/) package needs to be installed for this):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import ds9_region\n",
+      "reg_file = [\"# Region file format: DS9 version 4.1\\n\",\n",
+      "            \"global color=green dashlist=8 3 width=3 include=1 source=1 fk5\\n\",\n",
+      "            \"circle(15:16:44.817,+7:01:19.62,34.6256\\\")\"]\n",
+      "f = open(\"circle.reg\",\"w\")\n",
+      "f.writelines(reg_file)\n",
+      "f.close()\n",
+      "circle_reg = ds9_region(ds, \"circle.reg\", field_parameters={\"exposure_time\":exposure_time})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This region may now be used to compute derived quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print circle_reg.quantities.weighted_average_quantity(\"projected_temperature\", \"counts\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Or used in projections:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", \n",
+      "                   [\"flux\",\"projected_temperature\",\"pseudo_pressure\",\"pseudo_entropy\"], \n",
+      "                   origin=\"native\", field_parameters={\"exposure_time\":exposure_time},\n",
+      "                   data_source=circle_reg,\n",
+      "                   proj_style=\"sum\")\n",
+      "prj.set_log(\"flux\",True)\n",
+      "prj.set_log(\"pseudo_pressure\",False)\n",
+      "prj.set_log(\"pseudo_entropy\",False)\n",
+      "prj.set_width(250.)\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},
@@ -159,15 +304,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`setup_counts_fields` will take a list of energy bounds (emin, emax) in keV and create a new field from each where the photons in that energy range will be deposited onto the image grid. "
+      "`load` will handle the events file as FITS image files, and will set up a grid using the WCS information in the file. Optionally, the events may be reblocked to a new resolution. by setting the `\"reblock\"` parameter in the `parameters` dictionary in `load`. `\"reblock\"` must be a power of 2. "
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ebounds = [(0.1,2.0),(2.0,5.0)]\n",
-      "setup_counts_fields(ebounds)"
+      "ds2 = yt.load(\"xray_fits/acisf05356N003_evt2.fits.gz\", parameters={\"reblock\":2})"
      ],
      "language": "python",
      "metadata": {},
@@ -177,14 +321,15 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`load` will handle the events file as FITS image files, and will set up a grid using the WCS information in the file. Optionally, the events may be reblocked to a new resolution. by setting the `\"reblock\"` parameter in the `parameters` dictionary in `load`. `\"reblock\"` must be a power of 2. "
+      "`setup_counts_fields` will take a list of energy bounds (emin, emax) in keV and create a new field from each where the photons in that energy range will be deposited onto the image grid. "
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds2 = yt.load(\"xray_fits/acisf05356N003_evt2.fits.gz\", parameters={\"reblock\":2})"
+      "ebounds = [(0.1,2.0),(2.0,5.0)]\n",
+      "setup_counts_fields(ds2, ebounds)"
      ],
      "language": "python",
      "metadata": {},

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/halo_finding.py
--- a/doc/source/cookbook/halo_finding.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-This script shows the simplest way of getting halo information.  For more
-information, see :ref:`halo_finding`.
-"""
-import yt
-
-ds = yt.load("Enzo_64/DD0043/data0043")
-
-halos = yt.HaloFinder(ds)
-halos.write_out("%s_halos.txt" % ds)

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/halo_mass_info.py
--- a/doc/source/cookbook/halo_mass_info.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Title: Halo Mass Info
-Description: This recipe finds halos and then prints out information about
-             them.  Note that this recipe will take advantage of multiple CPUs
-             if executed with mpirun and supplied the --parallel command line
-             argument.  
-Outputs: [RedshiftOutput0006_halo_info.txt]
-"""
-from yt.mods import *
-
-fn = "Enzo_64/RD0006/RedshiftOutput0006" # parameter file to load
-pf = load(fn) # load data
-
-# First we run our halo finder to identify all the halos in the dataset.  This
-# can take arguments, but the default are pretty sane.
-halos = HaloFinder(pf)
-
-f = open("%s_halo_info.txt" % pf, "w")
-
-# Now, for every halo, we get the baryon data and examine it.
-for halo in halos:
-    # The halo has a property called 'get_sphere' that obtains a sphere
-    # centered on the point of maximum density (or the center of mass, if that
-    # argument is supplied) and with the radius the maximum particle radius of
-    # that halo.
-    sphere = halo.get_sphere()
-    # We use the quantities[] method to get the total mass in baryons and in
-    # particles.
-    baryon_mass, particle_mass = sphere.quantities["TotalQuantity"](
-            ["cell_mass", "particle_mass"])
-    # Now we print out this information, along with the ID.
-    f.write("Total mass in HOP group %s is %0.5e (gas = %0.5e / particles = %0.5e)\n" % \
-            (halo.id, baryon_mass + particle_mass, baryon_mass, particle_mass))
-f.close()

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/halo_particle_plotting.py
--- a/doc/source/cookbook/halo_particle_plotting.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-This is a simple mechanism for overplotting the particles belonging only to
-halos.  For more information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
-
-pf = load("Enzo_64/DD0043/data0043")
-
-halos = HaloFinder(pf)
-
-p = ProjectionPlot(pf, "x", "density")
-p.annotate_hop_circles(halos)
-p.annotate_hop_particles(halos, max_number=100)
-p.save()

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -4,10 +4,13 @@
 """
 from yt.mods import * # set up our namespace
 
-pf = load("Enzo_64/DD0043/data0043")
+data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
 
-halos = HaloFinder(pf)
+halo_pf = load('rockstar_halos/halos_0.0.bin')
 
-p = ProjectionPlot(pf, "z", "density")
-p.annotate_hop_circles(halos)
+hc = HaloCatalog(halos_pf = halo_pf)
+hc.load()
+
+p = ProjectionPlot(data_pf, "x", "density")
+p.annotate_halos(hc)
 p.save()

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/simple_contour_in_slice.py
--- a/doc/source/cookbook/simple_contour_in_slice.py
+++ b/doc/source/cookbook/simple_contour_in_slice.py
@@ -4,20 +4,20 @@
 pf = load("Sedov_3d/sedov_hdf5_chk_0002")
 
 # Make a traditional slice plot.
-sp = SlicePlot(pf,"x","dens")
+sp = SlicePlot(pf,"x","density")
 
 # Overlay the slice plot with thick red contours of density.
-sp.annotate_contour("dens", ncont=3, clim=(1e-2,1e-1), label=True,
+sp.annotate_contour("density", ncont=3, clim=(1e-2,1e-1), label=True,
                     plot_args={"colors": "red",
                                "linewidths": 2})
 
 # What about some nice temperature contours in blue?
-sp.annotate_contour("temp", ncont=3, clim=(1e-8,1e-6), label=True,
+sp.annotate_contour("temperature", ncont=3, clim=(1e-8,1e-6), label=True,
                     plot_args={"colors": "blue",
                                "linewidths": 2})
 
 # This is the plot object.
-po = sp.plots["dens"]
+po = sp.plots["density"]
 
 # Turn off the colormap image, leaving just the contours.
 po.axes.images[0].set_visible(False)

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -11,7 +11,7 @@
 # Get the angular momentum vector for the sphere.
 L = sp.quantities["AngularMomentumVector"]()
 
-print "Angular momentum vector: %s" % (L)
+print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
 p = OffAxisProjectionPlot(pf, L, "density", sp.center, (25, "kpc"))

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -4,5 +4,5 @@
 pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Create density slices of several fields along the x axis
-SlicePlot(pf, 'x', ['density','temperature','Pressure','VorticitySquared'], 
+SlicePlot(pf, 'x', ['density','temperature','pressure','vorticity_squared'], 
           width = (800.0, 'kpc')).save()

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -17,10 +17,9 @@
 right_corner = pf.domain_right_edge
 
 # Now adjust the size of the region along the line of sight (x axis).
-depth = 10.0 # in Mpc
-left_corner[0] = center[0] - 0.5 * depth / pf.units['mpc']
-left_corner[0] = center[0] + 0.5 * depth / pf.units['mpc']
-
+depth = pf.quan(10.0,'Mpc') 
+left_corner[0] = center[0] - 0.5 * depth 
+right_corner[0] = center[0] + 0.5 * depth
 # Create the region
 region = pf.region(center, left_corner, right_corner)
 

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -282,7 +282,7 @@
 
 .. code-block:: python
 
-    ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
+   ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
 
 You can specify alternate names, but note that this may cause problems with the
 field specification if none of the names match old names.
@@ -300,23 +300,23 @@
 
 .. code-block:: python
    
-    default      = (('Npart', 6, 'i'),
-                    ('Massarr', 6, 'd'),
-                    ('Time', 1, 'd'),
-                    ('Redshift', 1, 'd'),
-                    ('FlagSfr', 1, 'i'),
-                    ('FlagFeedback', 1, 'i'),
-                    ('Nall', 6, 'i'),
-                    ('FlagCooling', 1, 'i'),
-                    ('NumFiles', 1, 'i'),
-                    ('BoxSize', 1, 'd'),
-                    ('Omega0', 1, 'd'),
-                    ('OmegaLambda', 1, 'd'),
-                    ('HubbleParam', 1, 'd'),
-                    ('FlagAge', 1, 'i'),
-                    ('FlagMEtals', 1, 'i'),
-                    ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i'))
+   default      = (('Npart', 6, 'i'),
+                   ('Massarr', 6, 'd'),
+                   ('Time', 1, 'd'),
+                   ('Redshift', 1, 'd'),
+                   ('FlagSfr', 1, 'i'),
+                   ('FlagFeedback', 1, 'i'),
+                   ('Nall', 6, 'i'),
+                   ('FlagCooling', 1, 'i'),
+                   ('NumFiles', 1, 'i'),
+                   ('BoxSize', 1, 'd'),
+                   ('Omega0', 1, 'd'),
+                   ('OmegaLambda', 1, 'd'),
+                   ('HubbleParam', 1, 'd'),
+                   ('FlagAge', 1, 'i'),
+                   ('FlagMEtals', 1, 'i'),
+                   ('NallHW', 6, 'i'),
+                   ('unused', 16, 'i'))
 
 These items will all be accessible inside the object ``pf.parameters``, which
 is a dictionary.  You can add combinations of new items, specified in the same
@@ -371,7 +371,7 @@
 
 .. code-block:: python
 
-    ds = load("./halo1e11_run1.00400")
+   ds = load("./halo1e11_run1.00400")
 
 .. _specifying-cosmology-tipsy:
 
@@ -414,7 +414,7 @@
 
 .. code-block:: python
 
-    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
+   ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
 
 .. _loading-art-data:
 
@@ -551,21 +551,21 @@
   installations of this package and the `PyWCS <http://stsdas.stsci
   .edu/astrolib/pywcs/>`_ package are not supported.
 
-Though FITS a image is composed of one data cube in the FITS file,
+Though a FITS image is composed of a single array in the FITS file,
 upon being loaded into yt it is automatically decomposed into grids:
 
 .. code-block:: python
 
-  from yt.mods import *
-  ds = load("m33_hi.fits")
-  ds.print_stats()
+   from yt.mods import *
+   ds = load("m33_hi.fits")
+   ds.print_stats()
 
 .. parsed-literal::
 
-  level	  # grids	    # cells	   # cells^3
-  ----------------------------------------------
-    0	     512	  981940800	         994
-  ----------------------------------------------
+   level  # grids         # cells     # cells^3
+   ----------------------------------------------
+     0	     512	  981940800       994
+   ----------------------------------------------
              512	  981940800
 
 yt will generate its own domain decomposition, but the number of grids can be
@@ -573,7 +573,7 @@
 
 .. code-block:: python
 
-  ds = load("m33_hi.fits", nprocs=1024)
+   ds = load("m33_hi.fits", nprocs=1024)
 
 Making the Most of `yt` for FITS Data
 +++++++++++++++++++++++++++++++++++++
@@ -596,12 +596,12 @@
 
 .. code-block:: python
 
-    import astropy.io.fits as pyfits
-    f = pyfits.open("xray_flux_image.fits", mode="update")
-    f[0].header["BUNIT"] = "cts/s/pixel"
-    f[0].header["BTYPE"] = "flux"
-    f.flush()
-    f.close()
+   import astropy.io.fits as pyfits
+   f = pyfits.open("xray_flux_image.fits", mode="update")
+   f[0].header["BUNIT"] = "cts/s/pixel"
+   f[0].header["BTYPE"] = "flux"
+   f.flush()
+   f.close()
 
 FITS Coordinates
 ++++++++++++++++
@@ -651,7 +651,7 @@
 
 .. code-block:: python
 
-    ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
+   ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
 
 The image blocks in each of these files will be loaded as a separate field,
 provided they have the same dimensions as the image blocks in the main file.
@@ -681,22 +681,80 @@
 single floating-point number (applies to all fields) or a Python dictionary
 containing different mask values for different fields:
 
-.. code-block::
+.. code-block:: python
 
-  # passing a single float
-  ds = load("m33_hi.fits", nan_mask=0.0)
+   # passing a single float
+   ds = load("m33_hi.fits", nan_mask=0.0)
 
-  # passing a dict
-  ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
+   # passing a dict
+   ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
 
 Generally, AstroPy may generate a lot of warnings about individual FITS
 files, many of which you may want to ignore. If you want to see these
 warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
 
+Miscellaneous Tools for Use with FITS Data
+++++++++++++++++++++++++++++++++++++++++++
+
+A number of tools have been prepared for use with FITS data that enhance yt's visualization and
+analysis capabilities for this particular type of data. These are included in the ``yt.frontends.fits.misc`` module, and can be imported like so:
+
+.. code-block:: python
+
+  from yt.frontends.fits.misc import setup_counts_fields, PlotWindowWCS, ds9_region
+
+
+``setup_counts_fields``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This function can be used to create image fields from X-ray counts data in different energy bands:
+
+.. code-block:: python
+
+  ebounds = [(0.1,2.0),(2.0,5.0)] # Energies are in keV
+  setup_counts_fields(ds, ebounds)
+
+which would make two fields, ``"counts_0.1-2.0"`` and ``"counts_2.0-5.0"``,
+and add them to the field registry for the dataset ``ds``.
+
+
+``ds9_region``
+~~~~~~~~~~~~~~
+
+This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and creates a "cut region"
+data container from it that can be used to select the cells in the FITS dataset that fall within
+the region. To use this functionality, the `pyregion <http://leejjoon.github.io/pyregion/>`_
+package must be installed.
+
+.. code-block:: python
+
+  ds = yt.load("m33_hi.fits")
+  circle_region = ds9_region(ds, "circle.reg")
+  print circle_region.quantities.extrema("flux")
+
+
+``PlotWindowWCS``
+~~~~~~~~~~~~~~~~~
+
+This class takes an on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS data and adds celestial
+coordinates to the plot axes. To use it, the `WCSAxes <http://wcsaxes.readthedocs.org>`_
+package must be installed.
+
+.. code-block:: python
+
+  wcs_slc = PlotWindowWCS(slc)
+  wcs_slc.show() # for the IPython notebook
+  wcs_slc.save()
+
+``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
+utilized more here.
+
+
 Examples of Using FITS Data
 +++++++++++++++++++++++++++
 
-The following IPython notebooks show examples of working with FITS data in yt:
+The following IPython notebooks show examples of working with FITS data in yt,
+which we recommend you look at in the following order:
 
 * :ref:`radio_cubes`
 * :ref:`xray_fits`
@@ -798,9 +856,9 @@
 
 .. code-block:: python
 
-    for g in grid_data:
-        g["number_of_particles"] = 100000
-        g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+   for g in grid_data:
+       g["number_of_particles"] = 100000
+       g["particle_position_x"] = np.random.random((g["number_of_particles"]))
 
 .. rubric:: Caveats
 

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -200,10 +200,10 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.frontends.halo_catalogs.data_structures.RockstarBinaryFile
-   ~yt.frontends.halo_catalogs.data_structures.RockstarDataset
-   ~yt.frontends.halo_catalogs.fields.RockstarFieldInfo
-   ~yt.frontends.halo_catalogs.io.IOHandlerRockstarBinary
+   ~yt.frontends.halo_catalogs.rockstar.data_structures.RockstarBinaryFile
+   ~yt.frontends.halo_catalogs.rockstar.data_structures.RockstarDataset
+   ~yt.frontends.halo_catalogs.rockstar.fields.RockstarFieldInfo
+   ~yt.frontends.halo_catalogs.rockstar.io.IOHandlerRockstarBinary
 
 MOAB
 ^^^^
@@ -313,7 +313,7 @@
    ~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder
    ~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder
    ~yt.analysis_modules.halo_finding.halo_objects.parallelHF
-   ~yt.analysis_modules.halo_finding.rockstar.api.RockstarHaloFinder
+   ~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder
 
 You can also operate on the Halo and HaloList objects themselves:
 
@@ -616,11 +616,8 @@
    ~yt.visualization.plot_modifications.ArrowCallback
    ~yt.visualization.plot_modifications.ClumpContourCallback
    ~yt.visualization.plot_modifications.ContourCallback
-   ~yt.visualization.plot_modifications.CoordAxesCallback
    ~yt.visualization.plot_modifications.CuttingQuiverCallback
    ~yt.visualization.plot_modifications.GridBoundaryCallback
-   ~yt.visualization.plot_modifications.HopCircleCallback
-   ~yt.visualization.plot_modifications.HopParticleCallback
    ~yt.visualization.plot_modifications.LabelCallback
    ~yt.visualization.plot_modifications.LinePlotCallback
    ~yt.visualization.plot_modifications.MarkerAnnotateCallback
@@ -630,7 +627,6 @@
    ~yt.visualization.plot_modifications.SphereCallback
    ~yt.visualization.plot_modifications.TextLabelCallback
    ~yt.visualization.plot_modifications.TitleCallback
-   ~yt.visualization.plot_modifications.UnitBoundaryCallback
    ~yt.visualization.plot_modifications.VelocityCallback
 
 Function List

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -27,7 +27,7 @@
     FieldName)``.
   * Previously, yt would use "Enzo-isms" for field names.  We now very
     specifically define fields as lowercase with underscores.  For instance,
-    what used to be ``VelocityMagnitude`` would not be ``velocity_magnitude``.
+    what used to be ``VelocityMagnitude`` would now be ``velocity_magnitude``.
   * Particles are either named by their type or default to the type ``io``.
   * Axis names are now at the *end* of field names, not the beginning.
     ``x-velocity`` is now ``velocity_x``.

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -32,9 +32,9 @@
     
     Parameters
     ----------
-    filenames : list of strings
-        A time-sorted list of filenames to construct the DatasetSeries
-        object.
+    outputs : `yt.data_objects.time_series.DatasetSeries` or list of strings
+        DatasetSeries object, or a time-sorted list of filenames to
+        construct a new DatasetSeries object.
     indices : array_like
         An integer array of particle indices whose trajectories we
         want to track. If they are not sorted they will be sorted.
@@ -59,11 +59,14 @@
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
     """
-    def __init__(self, filenames, indices, fields=None) :
+    def __init__(self, outputs, indices, fields=None) :
 
         indices.sort() # Just in case the caller wasn't careful
         self.field_data = YTFieldData()
-        self.data_series = DatasetSeries.from_filenames(filenames)
+        if isinstance(outputs, DatasetSeries):
+            self.data_series = outputs
+        else:
+            self.data_series = DatasetSeries.from_filenames(outputs)
         self.masks = []
         self.sorts = []
         self.array_indices = []

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -154,7 +154,7 @@
     simulation domain.
 
     This object is typically accessed through the `proj` object that
-    hangs off of index objects.  AMRQuadProj is a projection of a
+    hangs off of index objects.  YTQuadTreeProj is a projection of a
     `field` along an `axis`.  The field can have an associated
     `weight_field`, in which case the values are multiplied by a weight
     before being summed, and then divided by the sum of that weight; the
@@ -185,18 +185,21 @@
     data_source : `yt.data_objects.api.AMRData`, optional
         If specified, this will be the data source used for selecting
         regions to project.
-    serialize : bool, optional
-        Whether we should store this projection in the .yt file or not.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
+    style : string, optional
+        The style of projection to be performed.
+        "integrate" : integration along the axis
+        "mip" : maximum intensity projection
+        "sum" : same as "integrate", except that we don't multiply by the path length
+    field_parameters : dict of items
+        Values to be passed as field parameters that can be
         accessed by generated fields.
 
     Examples
     --------
 
-    >>> pf = load("RedshiftOutput0005")
-    >>> qproj = pf.h.quad_proj(0, "Density")
-    >>> print qproj["Density"]
+    >>> ds = load("RedshiftOutput0005")
+    >>> prj = ds.proj(0, "density")
+    >>> print prj["density"]
     """
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
@@ -206,10 +209,15 @@
                  center = None, pf = None, data_source = None,
                  style = "integrate", field_parameters = None):
         YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
-        self.proj_style = style
+        if style == "sum":
+            self.proj_style = "integrate"
+            self._sum_only = True
+        else:
+            self.proj_style = style
+            self._sum_only = False
         if style == "mip":
             self.func = np.max
-        elif style == "integrate":
+        elif style == "integrate" or style == "sum":
             self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
@@ -343,7 +351,7 @@
         tree.initialize_chunk(i1, i2, ilevel)
 
     def _handle_chunk(self, chunk, fields, tree):
-        if self.proj_style == "mip":
+        if self.proj_style == "mip" or self._sum_only:
             dl = 1.0
         else:
             # This gets explicitly converted to cm
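
The new "sum" style above reuses the "integrate" machinery but forces the path length dl to 1, so cells are summed instead of being weighted by their depth along the axis. A toy numpy illustration of the difference, not yt code:

    import numpy as np

    values = np.array([1.0, 2.0, 3.0])   # cell values along the line of sight
    dl = np.array([0.5, 0.25, 0.25])     # per-cell path lengths

    print(np.sum(values * dl))   # "integrate": weight each cell by dl
    print(np.sum(values))        # "sum": dl treated as 1 for every cell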

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -853,7 +853,10 @@
         if fname is None:
             raise KeyError(field)
         else:
-            return self.field_data[fname].in_units(self.field_units[fname])
+            if getattr(self, 'fractional', False):
+                return self.field_data[fname]
+            else:
+                return self.field_data[fname].in_units(self.field_units[fname])
 
     def items(self):
         return [(k,self[k]) for k in self.field_data.keys()]

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,6 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
+from yt.geometry.ppv_coordinates import \
+    PPVCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -359,6 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
+        elif self.geometry == "ppv":
+            self.coordinates = PPVCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -11,8 +11,7 @@
 
         my_oray = pf.ortho_ray(ax, ocoord)
 
-        my_axes = range(3)
-        del my_axes[ax]
+        my_axes = pf.coordinates.x_axis[ax], pf.coordinates.y_axis[ax]
 
         # find the cells intersected by the ortho ray
         my_all = pf.h.all_data()

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -95,7 +95,7 @@
             # the right case by comparing against known units. This
             # only really works for common units.
             units = set(re.split(regex_pattern, field_units))
-            units.remove('')
+            if '' in units: units.remove('')
             n = int(0)
             for unit in units:
                 if unit in known_units:
@@ -494,6 +494,8 @@
     def _setup_ppv(self):
 
         self.ppv_data = True
+        self.geometry = "ppv"
+
         end = min(self.dimensionality+1,4)
         if self.events_data:
             ctypes = self.axis_names

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -54,7 +54,7 @@
         z = np.ones(x.shape)
         x = (x-0.5)/self.pf.reblock+0.5
         y = (y-0.5)/self.pf.reblock+0.5
-        mask = selector.select_points(x, y, z)
+        mask = selector.select_points(x, y, z, 0.0)
         if mask is None: return
         for field in field_list:
             fd = field.split("_")[-1]

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -11,10 +11,11 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.fields.api import add_field
 from yt.fields.derived_field import ValidateSpatial
-from yt.funcs import mylog
 from yt.utilities.on_demand_imports import _astropy
+from yt.funcs import mylog, get_image_suffix
+from yt.visualization._mpl_imports import FigureCanvasAgg
+import os
 
 def _make_counts(emin, emax):
     def _counts(field, data):
@@ -46,6 +47,8 @@
     ebounds : list of tuples
         A list of tuples, one for each field, with (emin, emax) as the
         energy bounds for the image.
+    ftype : string, optional
+        The field type of the resulting field. Defaults to "gas".
 
     Examples
     --------
@@ -60,4 +63,135 @@
         ds.add_field((ftype,fname), function=cfunc,
                      units="counts/pixel",
                      validators = [ValidateSpatial()],
-                     display_name="Counts (%s-%s keV)" % (emin, emax))
\ No newline at end of file
+                     display_name="Counts (%s-%s keV)" % (emin, emax))
+
+def ds9_region(ds, reg, obj=None, field_parameters=None):
+    r"""
+    Create a data container from a ds9 region file. Requires the pyregion
+    package (http://leejjoon.github.io/pyregion/) to be installed.
+
+    Parameters
+    ----------
+    ds : FITSDataset
+        The Dataset to create the region from.
+    reg : string
+        The filename of the ds9 region, or a region string to be parsed.
+    obj : data container, optional
+        The data container that will be used to create the new region.
+        Defaults to ds.all_data.
+    field_parameters : dictionary, optional
+        A set of field parameters to apply to the region.
+
+    Examples
+    --------
+
+    >>> ds = yt.load("m33_hi.fits")
+    >>> circle_region = ds9_region(ds, "circle.reg")
+    >>> print circle_region.quantities.extrema("flux")
+    """
+    import pyregion
+    if os.path.exists(reg):
+        r = pyregion.open(reg)
+    else:
+        r = pyregion.parse(reg)
+    filter = r.get_filter(header=ds.wcs_2d.to_header())
+    reg_name = reg.split(".")[0]
+    nx = ds.domain_dimensions[ds.lon_axis]
+    ny = ds.domain_dimensions[ds.lat_axis]
+    mask = filter.mask((ny,nx)).transpose()
+    def _reg_field(field, data):
+        i = data["xyz"[ds.lon_axis]].ndarray_view().astype("int")-1
+        j = data["xyz"[ds.lat_axis]].ndarray_view().astype("int")-1
+        new_mask = mask[i,j]
+        ret = data["zeros"].copy()
+        ret[new_mask] = 1.
+        return ret
+    ds.add_field(("gas",reg_name), function=_reg_field)
+    if obj is None:
+        obj = ds.all_data()
+    if field_parameters is not None:
+        for k,v in field_parameters.items():
+            obj.set_field_parameter(k,v)
+    return obj.cut_region(["obj['%s'] > 0" % (reg_name)])
+
+class PlotWindowWCS(object):
+    r"""
+    Use the wcsaxes library to plot celestial coordinates on the axes of an
+    on-axis PlotWindow plot. See http://wcsaxes.readthedocs.org for details.
+
+    Parameters
+    ----------
+    pw : on-axis PlotWindow instance
+        The PlotWindow instance to add celestial axes to.
+    """
+    def __init__(self, pw):
+        from wcsaxes import WCSAxes
+        if pw.oblique:
+            raise NotImplementedError("WCS axes are not implemented for oblique plots.")
+        if not hasattr(pw.pf, "wcs_2d"):
+            raise NotImplementedError("WCS axes are not implemented for this dataset.")
+        if pw.data_source.axis != pw.pf.vel_axis:
+            raise NotImplementedError("WCS axes are not implemented for this axis.")
+        self.pf = pw.pf
+        self.pw = pw
+        self.plots = {}
+        self.wcs_axes = []
+        for f in pw.plots:
+            rect = pw.plots[f]._get_best_layout()[1]
+            fig = pw.plots[f].figure
+            ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
+            fig.add_axes(ax)
+            self.wcs_axes.append(ax)
+        self._setup_plots()
+
+    def _setup_plots(self):
+        pw = self.pw
+        for f, ax in zip(pw.plots, self.wcs_axes):
+            wcs = ax.wcs.wcs
+            pw.plots[f].axes.get_xaxis().set_visible(False)
+            pw.plots[f].axes.get_yaxis().set_visible(False)
+            xax = pw.pf.coordinates.x_axis[pw.data_source.axis]
+            yax = pw.pf.coordinates.y_axis[pw.data_source.axis]
+            xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0],
+                                  wcs.cunit[xax])
+            ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0],
+                                  wcs.cunit[yax])
+            fp = pw._font_properties
+            ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
+            ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
+            ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
+            ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
+            ax.coords[0].ticklabels.set_fontproperties(fp)
+            ax.coords[1].ticklabels.set_fontproperties(fp)
+            self.plots[f] = pw.plots[f]
+        self.pw = pw
+        self.pf = self.pw.pf
+
+    def refresh(self):
+        self._setup_plots()
+
+    def keys(self):
+        return self.plots.keys()
+
+    def values(self):
+        return self.plots.values()
+
+    def items(self):
+        return self.plots.items()
+
+    def __getitem__(self, key):
+        for k in self.keys():
+            if k[1] == key:
+                return self.plots[k]
+
+    def show(self):
+        from IPython.core.display import display
+        for k, v in sorted(self.plots.iteritems()):
+            canvas = FigureCanvasAgg(v.figure)
+            display(v.figure)
+
+    def save(self, name=None, mpl_kwargs=None):
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
+        mpl_kwargs["bbox_inches"] = "tight"
+        self.pw.save(name=name, mpl_kwargs=mpl_kwargs)

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -110,11 +110,11 @@
     axis_id = { 'x' : 0, 'y' : 1, 'z' : 2,
                  0  : 0,  1  : 1,  2  : 2}
 
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
+    x_axis = { 'x' : 1, 'y' : 2, 'z' : 0,
+                0  : 1,  1  : 2,  2  : 0}
 
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}
+    y_axis = { 'x' : 2, 'y' : 0, 'z' : 1,
+                0  : 2,  1  : 0,  2  : 1}
 
     @property
     def period(self):
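
The updated x_axis/y_axis tables make each slice use a right-handed pair of image axes (slicing along x gives (y, z), along y gives (z, x), along z gives (x, y)). A quick standalone check of the new mapping:

    x_axis = {0: 1, 1: 2, 2: 0}
    y_axis = {0: 2, 1: 0, 2: 1}

    for ax in (0, 1, 2):
        # e.g. "0 -> (y, z)" for a slice along the x axis
        print("%d -> (%s, %s)" % (ax, "xyz"[x_axis[ax]], "xyz"[y_axis[ax]]))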

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/geometry/ppv_coordinates.py
--- /dev/null
+++ b/yt/geometry/ppv_coordinates.py
@@ -0,0 +1,77 @@
+"""
+PPV (position-position-velocity) coordinates
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class PPVCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(PPVCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+        self.x_axis = {}
+        self.y_axis = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
+                                   ["Image\ x", "Image\ y", pf.vel_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+            if axis == 0:
+                self.x_axis[axis] = 1
+                self.x_axis[lower_ax] = 1
+                self.x_axis[axis_name] = 1
+                self.y_axis[axis] = 2
+                self.y_axis[lower_ax] = 2
+                self.y_axis[axis_name] = 2
+            elif axis == 1:
+                self.x_axis[axis] = 2
+                self.x_axis[lower_ax] = 2
+                self.x_axis[axis_name] = 2
+                self.y_axis[axis] = 0
+                self.y_axis[lower_ax] = 0
+                self.y_axis[axis_name] = 0
+            elif axis == 2:
+                self.x_axis[axis] = 0
+                self.x_axis[lower_ax] = 0
+                self.x_axis[axis_name] = 0
+                self.y_axis[axis] = 1
+                self.y_axis[lower_ax] = 1
+                self.y_axis[axis_name] = 1
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.vel_axis] = pf.vel_unit
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -360,7 +360,8 @@
         for ex in self.expr.free_symbols:
             symbol_table[ex] = latex_symbol_lut[str(ex)]
         return latex(self.expr, symbol_names=symbol_table,
-                     fold_frac_powers=True, fold_short_frac=True)
+                     mul_symbol="dot", fold_frac_powers=True,
+                     fold_short_frac=True)
 #
 # Unit manipulation functions
 #
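
The latex() change above just adds mul_symbol="dot" so that products of unit symbols are rendered with an explicit dot. A minimal sympy check of those printer options, independent of yt's unit tables:

    from sympy import symbols, latex

    g, cm = symbols("g cm")
    # Renders roughly as "g \cdot cm^{2}" instead of "g cm^{2}".
    print(latex(g * cm**2, mul_symbol="dot",
                fold_frac_powers=True, fold_short_frac=True))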

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -17,7 +17,7 @@
 from ._mpl_imports import \
     FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 from yt.funcs import \
-    get_image_suffix, mylog
+    get_image_suffix, mylog, iterable
 import numpy as np
 
 class CallbackWrapper(object):
@@ -140,12 +140,16 @@
             top_buff_size = 0.0
 
         # Ensure the figure size along the long axis is always equal to _figure_size
-        if self._aspect >= 1.0:
-            x_fig_size = self._figure_size
-            y_fig_size = self._figure_size/self._aspect
-        if self._aspect < 1.0:
-            x_fig_size = self._figure_size*self._aspect
-            y_fig_size = self._figure_size
+        if iterable(self._figure_size):
+            x_fig_size = self._figure_size[0]
+            y_fig_size = self._figure_size[1]
+        else:
+            if self._aspect >= 1.0:
+                x_fig_size = self._figure_size
+                y_fig_size = self._figure_size/self._aspect
+            if self._aspect < 1.0:
+                x_fig_size = self._figure_size*self._aspect
+                y_fig_size = self._figure_size
 
         xbins = np.array([x_axis_size, x_fig_size, cb_size, cb_text_size])
         ybins = np.array([y_axis_size, y_fig_size, top_buff_size])

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -16,7 +16,7 @@
 
 from yt.funcs import \
     defaultdict, get_image_suffix, \
-    get_ipython_api_version
+    get_ipython_api_version, iterable
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
 from ._mpl_imports import FigureCanvasAgg
@@ -111,7 +111,10 @@
 
     def __init__(self, data_source, figure_size, fontsize):
         self.data_source = data_source
-        self.figure_size = float(figure_size)
+        if iterable(figure_size):
+            self.figure_size = float(figure_size[0]), float(figure_size[1])
+        else:
+            self.figure_size = float(figure_size)
         self.plots = PlotDictionary(data_source)
         self._callbacks = []
         self._field_transform = {}
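
The base_plot_types.py and plot_container.py hunks above let figure_size be either a single number or an explicit (width, height) pair: a pair is used directly, while a scalar gives the length of the long axis and the short axis is scaled by the aspect ratio. A minimal sketch of that selection logic, with a bare try/except standing in for yt's iterable() helper (names are illustrative):

    def figure_dimensions(figure_size, aspect):
        try:
            # (width, height) pair: use it as-is.
            return float(figure_size[0]), float(figure_size[1])
        except TypeError:
            pass
        # Scalar: the long axis gets figure_size, the short axis is scaled.
        if aspect >= 1.0:
            return float(figure_size), float(figure_size) / aspect
        return float(figure_size) * aspect, float(figure_size)

    print(figure_dimensions(8.0, 2.0))         # (8.0, 4.0)
    print(figure_dimensions((8.0, 6.0), 2.0))  # (8.0, 6.0)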

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -170,7 +170,7 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    if pf.geometry == "cartesian":
+    if pf.geometry == "cartesian" or pf.geometry == "ppv":
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
@@ -278,7 +278,7 @@
     frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False,
-                 window_size=8.0, fields=None, fontsize=18, setup=False):
+                 window_size=8.0, fields=None, fontsize=18, aspect=None, setup=False):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -290,6 +290,7 @@
         self.oblique = oblique
         self.buff_size = buff_size
         self.antialias = antialias
+        self.aspect = aspect
         skip = list(FixedResolutionBuffer._exclude_fields) + data_source._key_fields
         if fields is None:
             fields = []
@@ -633,7 +634,7 @@
         Examples
         --------
 
-        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p = ProjectionPlot(pf, "y", "density")
         >>> p.show()
         >>> p.set_axes_unit("kpc")
         >>> p.show()
@@ -751,7 +752,11 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            aspect = np.float64(self.pf.quan(1.0, unit_y)/self.pf.quan(1.0, unit_x))
+            # For some plots we may set aspect by hand, such as for PPV data.
+            # This will likely be replaced at some point by the coordinate handler
+            # setting plot aspect.
+            if self.aspect is None:
+                self.aspect = np.float64(self.pf.quan(1.0, unit_y)/(self.pf.quan(1.0, unit_x)))
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -792,12 +797,17 @@
                 image, self._field_transform[f].name,
                 self._colormaps[f], extent, zlim,
                 self.figure_size, fp.get_size(),
-                aspect, fig, axes, cax)
+                self.aspect, fig, axes, cax)
 
             axes_unit_labels = ['', '']
             comoving = False
             hinv = False
             for i, un in enumerate((unit_x, unit_y)):
+                if hasattr(self.pf.coordinates, "default_unit_label"):
+                    axax = getattr(self.pf.coordinates, "%s_axis" % ("xy"[i]))[axis_index]
+                    un = self.pf.coordinates.default_unit_label[axax]
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    continue
                 # Use sympy to factor h out of the unit.  In this context 'un'
                 # is a string, so we call the Unit constructor.
                 expr = Unit(un, registry=self.pf.unit_registry).expr
@@ -832,6 +842,9 @@
                 axis_names = self.pf.coordinates.axis_name
                 xax = self.pf.coordinates.x_axis[axis_index]
                 yax = self.pf.coordinates.y_axis[axis_index]
+                if hasattr(self.pf.coordinates, "axis_default_unit_label"):
+                    axes_unit_labels = [self.pf.coordinates.axis_default_unit_name[xax],
+                                        self.pf.coordinates.axis_default_unit_name[yax]]
                 labels = [r'$\rm{'+axis_names[xax]+axes_unit_labels[0] + r'}$',
                           r'$\rm{'+axis_names[yax]+axes_unit_labels[1] + r'}$']
 
@@ -1009,7 +1022,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window', fontsize=18, field_parameters=None):
+                 origin='center-window', fontsize=18, field_parameters=None,
+                 window_size=8.0, aspect=None):
         # this will handle time series data and controllers
         ts = self._initialize_dataset(pf)
         self.ts = ts
@@ -1021,7 +1035,8 @@
             field_parameters = field_parameters, center=center)
         slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin,
-                             fontsize=fontsize, fields=fields)
+                             fontsize=fontsize, fields=fields,
+                             window_size=window_size, aspect=aspect)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, pf)
         self.set_axes_unit(axes_unit)
@@ -1136,7 +1151,7 @@
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window',
                  fontsize=18, field_parameters=None, data_source=None,
-                 proj_style = "integrate"):
+                 proj_style = "integrate", window_size=8.0, aspect=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1147,7 +1162,7 @@
                          center=center, data_source=data_source,
                          field_parameters = field_parameters, style = proj_style)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
-                             fontsize=fontsize)
+                             fontsize=fontsize, window_size=window_size, aspect=aspect)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, pf)
         self.set_axes_unit(axes_unit)
@@ -1620,7 +1635,11 @@
         if fontscale < 1.0:
             fontscale = np.sqrt(fontscale)
 
-        self._cb_size = 0.0375*figure_size
+        if iterable(figure_size):
+            fsize = figure_size[0]
+        else:
+            fsize = figure_size
+        self._cb_size = 0.0375*fsize
         self._ax_text_size = [0.9*fontscale, 0.7*fontscale]
         self._top_buff_size = 0.30*fontscale
         self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2]))
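
The plot_window.py changes above default the plot aspect to the ratio of one y-axis unit to one x-axis unit while letting callers (such as PPV plots) override it by hand. A small illustrative sketch of that rule; the cm-per-unit numbers stand in for pf.quan and are not the changeset's code:

    def plot_aspect(aspect, cm_per_unit_x, cm_per_unit_y):
        # An explicitly requested aspect wins; otherwise use the unit ratio
        # so axes with unequal units keep their physical proportions.
        if aspect is not None:
            return aspect
        return cm_per_unit_y / cm_per_unit_x

    print(plot_aspect(None, 3.0857e21, 3.0857e24))  # kpc vs. Mpc -> 1000.0
    print(plot_aspect(1.0, 3.0857e21, 3.0857e24))   # manual override -> 1.0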

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -473,7 +473,7 @@
         scales = {True: 'log', False: 'linear'}
         return scales[x_log], scales[y_log]
 
-    def _get_field_label(self, field, field_info, field_unit):
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
         field_unit = field_unit.latex_representation()
         field_name = field_info.display_name
         if isinstance(field, tuple): field = field[1]
@@ -483,7 +483,9 @@
         elif field_name.find('$') == -1:
             field_name = field_name.replace(' ','\/')
             field_name = r'$\rm{'+field_name+r'}$'
-        if field_unit is None or field_unit == '':
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit == '':
             label = field_name
         else:
             label = field_name+r'$\/\/('+field_unit+r')$'
@@ -498,9 +500,10 @@
         yfi = pf._get_field_info(*yf)
         x_unit = profile.x.units
         y_unit = profile.field_units[field_y]
+        fractional = profile.fractional
         x_title = self.x_title or self._get_field_label(field_x, xfi, x_unit)
         y_title = self.y_title.get(field_y, None) or \
-                    self._get_field_label(field_y, yfi, y_unit)
+                    self._get_field_label(field_y, yfi, y_unit, fractional)
 
         return (x_title, y_title)
             
@@ -623,13 +626,14 @@
         x_unit = profile.x.units
         y_unit = profile.y.units
         z_unit = profile.field_units[field_z]
+        fractional = profile.fractional
         x_title = self.x_title or self._get_field_label(field_x, xfi, x_unit)
         y_title = self.y_title or self._get_field_label(field_y, yfi, y_unit)
         z_title = self.z_title.get(field_z, None) or \
-                    self._get_field_label(field_z, zfi, z_unit)
+                    self._get_field_label(field_z, zfi, z_unit, fractional)
         return (x_title, y_title, z_title)
 
-    def _get_field_label(self, field, field_info, field_unit):
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
         field_unit = field_unit.latex_representation()
         field_name = field_info.display_name
         if isinstance(field, tuple): field = field[1]
@@ -639,7 +643,9 @@
         elif field_name.find('$') == -1:
             field_name = field_name.replace(' ','\/')
             field_name = r'$\rm{'+field_name+r'}$'
-        if field_unit is None or field_unit == '':
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit == '':
             label = field_name
         else:
             label = field_name+r'$\/\/('+field_unit+r')$'
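
The profile_plotter.py changes thread the profile's fractional flag into _get_field_label so that normalized profiles are labeled as a probability density instead of carrying the field's units. A simplified, illustrative version of that labeling rule:

    def field_label(field_name, field_unit, fractional=False):
        # Fractional profiles are normalized, so drop the units in favor of
        # a generic "Probability Density" label.
        if fractional:
            return field_name + r'$\rm{\/Probability\/Density}$'
        if field_unit is None or field_unit == '':
            return field_name
        return field_name + r'$\/\/(' + field_unit + r')$'

    print(field_label(r'$\rm{Density}$', r'\rm{g}/\rm{cm}^{3}'))
    print(field_label(r'$\rm{Density}$', r'\rm{g}/\rm{cm}^{3}', fractional=True))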


https://bitbucket.org/yt_analysis/yt/commits/84ee1cdab7aa/
Changeset:   84ee1cdab7aa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-09 02:43:22
Summary:     Feeding anyfloat through the particle contour tree.
Affected #:  3 files

diff -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 -r 84ee1cdab7aa95a427505908eaa9e9ba9424744d yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -18,6 +18,10 @@
 from oct_visitors cimport Oct, OctVisitorData, \
     oct_visitor_function
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level

diff -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 -r 84ee1cdab7aa95a427505908eaa9e9ba9424744d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -36,10 +36,6 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
-ctypedef fused anyfloat:
-    np.float32_t
-    np.float64_t
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box

diff -r c56a451d95ba8c33bf12686a8c76003391e2dcb7 -r 84ee1cdab7aa95a427505908eaa9e9ba9424744d yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,7 +18,8 @@
 cimport cython
 from libc.stdlib cimport malloc, free, realloc
 from yt.geometry.selection_routines cimport \
-    SelectorObject, AlwaysSelector, OctreeSubsetSelector
+    SelectorObject, AlwaysSelector, OctreeSubsetSelector, \
+    anyfloat
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     OctreeContainer, OctInfo
@@ -620,22 +621,26 @@
     cdef np.float64_t linking_length, linking_length2
     cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
+    cdef int minimum_count
 
-    def __init__(self, linking_length):
+    def __init__(self, linking_length, periodicity = (True, True, True),
+                 int minimum_count = 8):
+        cdef int i
         self.linking_length = linking_length
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
+        for i in range(3):
+            self.periodicity[i] = periodicity[i]
+        self.minimum_count = minimum_count
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
                                 np.ndarray[np.int64_t, ndim=1] dom_ind,
-                                np.ndarray[np.float64_t, ndim=2] positions,
+                                np.ndarray[anyfloat, ndim=2] positions,
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
-                                int domain_id = -1, int domain_offset = 0,
-                                periodicity = (True, True, True),
-                                int minimum_count = 8):
+                                int domain_id, int domain_offset):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -657,7 +662,6 @@
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
             self.DLE[i] = octree.DLE[i]
             self.DRE[i] = octree.DRE[i]
-            self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
             counter += 1
             container[i] = NULL
@@ -671,7 +675,7 @@
             pdoms[i] = offset
         pind = np.argsort(pdoms)
         cdef np.int64_t *ipind = <np.int64_t*> pind.data
-        cdef np.float64_t *fpos = <np.float64_t*> positions.data
+        cdef anyfloat *fpos = <anyfloat*> positions.data
         # pind is now the pointer into the position and particle_ids array.
         for i in range(positions.shape[0]):
             offset = pdoms[pind[i]]
@@ -751,7 +755,7 @@
                 c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
-                if c0.count < minimum_count:
+                if c0.count < self.minimum_count:
                     contour_ids[offset] = -1
         free(container)
         del pind
@@ -761,7 +765,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
-                                   np.float64_t *positions,
+                                   anyfloat *positions,
                                    np.int64_t *pind,
                                    np.int64_t pcount, 
                                    np.int64_t noffset,
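
Moving the anyfloat fused type into selection_routines.pxd lets ContourFinding.pyx cimport it, so identify_contours and link_particles can accept either float32 or float64 particle positions without an upfront cast. A minimal standalone sketch of a Cython fused type (bare C float/double memoryview members here rather than yt's np.float32_t/np.float64_t; illustrative only, compile with cythonize):

    # fused_demo.pyx
    ctypedef fused anyfloat:
        float
        double

    def first_component(anyfloat[:, :] positions):
        # Cython emits one specialization per fused member, so float32 and
        # float64 arrays are both accepted without copying.
        return positions[0, 0]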


https://bitbucket.org/yt_analysis/yt/commits/e76f31595b1f/
Changeset:   e76f31595b1f
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-09 03:02:42
Summary:     Quiet down, and don't be so assertive.
Affected #:  1 file

diff -r 05a9d2cffc1bf557c11119b6037df91ee5737862 -r e76f31595b1f814b90075b2ad63ebd49813dc366 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -487,7 +487,7 @@
             dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
             dinds = dinds[dinds < self.indexdata['index'][-1]]
             dinds = dinds[self.indexdata['len'][dinds] > 0]
-            print 'Getting boundary layers for wanderers, cells: %i' % dinds.size
+            #print 'Getting boundary layers for wanderers, cells: %i' % dinds.size
 
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
@@ -628,10 +628,10 @@
                 if f in 'xyz': continue
                 filtered[f] = data[f][mask]
 
-            for i, ax in enumerate('xyz'):
-                print left, right
-                assert np.all(filtered[ax] >= left[i])
-                assert np.all(filtered[ax] < right[i])
+            #for i, ax in enumerate('xyz'):
+            #    print left, right
+            #    assert np.all(filtered[ax] >= left[i])
+            #    assert np.all(filtered[ax] < right[i])
 
             yield filtered
 


https://bitbucket.org/yt_analysis/yt/commits/ffcfa5425fc3/
Changeset:   ffcfa5425fc3
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-09 04:18:14
Summary:     Gah, this now respects periodicity.
Affected #:  1 file

diff -r e76f31595b1f814b90075b2ad63ebd49813dc366 -r ffcfa5425fc354c650dadb0aeb328bb10c1f5a32 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -471,19 +471,19 @@
                            ileft[0]:iright[0]+1]
 
         mask = slice(0, -1, None)
-        X = X[mask, mask, mask].astype('int64').ravel()
-        Y = Y[mask, mask, mask].astype('int64').ravel()
-        Z = Z[mask, mask, mask].astype('int64').ravel()
+        X = X[mask, mask, mask].astype('int32').ravel()
+        Y = Y[mask, mask, mask].astype('int32').ravel()
+        Z = Z[mask, mask, mask].astype('int32').ravel()
 
         if wandering_particles:
             # Need to get padded bbox around the border to catch
             # wandering particles.
-            dmask = X == self.domain_buffer-1
-            dmask += Y == self.domain_buffer-1
-            dmask += Z == self.domain_buffer-1
-            dmask += X == self.domain_dims
-            dmask += Y == self.domain_dims
-            dmask += Z == self.domain_dims
+            dmask = X < self.domain_buffer
+            dmask += Y < self.domain_buffer
+            dmask += Z < self.domain_buffer
+            dmask += X >= self.domain_dims
+            dmask += Y >= self.domain_dims
+            dmask += Z >= self.domain_dims
             dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
             dinds = dinds[dinds < self.indexdata['index'][-1]]
             dinds = dinds[self.indexdata['len'][dinds] > 0]
@@ -491,11 +491,11 @@
 
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
-        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
         Y[Y < self.domain_buffer] += self.domain_active_dims
-        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
         Z[Z < self.domain_buffer] += self.domain_active_dims
-        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        X[X >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
+        Y[Y >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
+        Z[Z >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
 
         #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
@@ -600,25 +600,36 @@
             i += 1
         mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
 
-    def filter_bbox(self, left, right, iter):
+    def filter_bbox(self, left, right, myiter):
         """
         Filter data by masking out data outside of a bbox defined
         by left/right. Account for periodicity of data, allowing left/right
         to be outside of the domain.
         """
-        for data in iter:
+        for data in myiter:
             mask = np.zeros_like(data, dtype='bool')
-            pos = np.array([data['x'], data['y'], data['z']]).T
-            # Now make pos periodic
-            for i in range(3):
-                pos[i][pos[i] < left[i]] += self.true_domain_width[i]
-                pos[i][pos[i] >= right[i]] -= self.true_domain_width[i]
+            pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T
 
-            # First mask out the particles outside the bbox
-            mask = np.all(pos >= left, axis=1) * \
-                np.all(pos < right, axis=1)
+            # Get count of particles already inside the bounds.
+            #mask = np.all(pos >= left, axis=1) * \
+            #    np.all(pos < right, axis=1)
+            #pre_fix = mask.sum()
 
-            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
+            tmp = np.mod(pos[:,0] - left[0], self.true_domain_width[0]) + left[0]
+            pmask = (tmp >= left[0]) * (tmp < right[0])
+            pos[:,0] = tmp
+            tmp = np.mod(pos[:,1] - left[1], self.true_domain_width[1]) + left[1]
+            pmask *= (tmp >= left[1]) * (tmp < right[1])
+            pos[:,1] = tmp
+            tmp = np.mod(pos[:,2] - left[2], self.true_domain_width[2]) + left[2]
+            pmask *= (tmp >= left[2]) * (tmp < right[2])
+            pos[:,2] = tmp
+
+            # Now get all particles that are within the bbox
+            mask = pmask
+            pre_fix = mask.sum()
+
+            mylog.info("Filtering particles, originally %i, now returning %i out of %i" % (pre_fix, mask.sum(), mask.shape[0]))
 
             if not np.any(mask):
                 continue
@@ -787,7 +798,7 @@
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
 
-        # Front & Back 
+        # Front & Back
         pbox = bbox.copy()
         pbox[0, 0] -= pad[0]
         pbox[0, 1] += pad[0]
@@ -804,7 +815,7 @@
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
             data.append(dd)
 
-        # Left & Right 
+        # Left & Right
         pbox = bbox.copy()
         pbox[0, 0] -= pad[0]
         pbox[0, 1] = bbox[0, 0]
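
The corrected periodicity handling above wraps padded (ghost) cell indices back into the active domain: indices below the buffer map to the top of the active region, and indices at or above buffer + active map to the bottom. An illustrative NumPy sketch of that wrap (the argument names mirror the attributes used above but are not yt API):

    import numpy as np

    def wrap_padded_indices(idx, domain_buffer, domain_active_dims):
        idx = np.asarray(idx).copy()
        # Ghost cells below the buffer wrap to the top of the active region.
        idx[idx < domain_buffer] += domain_active_dims
        # Ghost cells at or above buffer + active wrap to the bottom.
        idx[idx >= domain_buffer + domain_active_dims] -= domain_active_dims
        return idx

    print(wrap_padded_indices([0, 1, 5, 8, 9], 1, 8))  # [8 1 5 8 1]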


https://bitbucket.org/yt_analysis/yt/commits/5fc3b2a10665/
Changeset:   5fc3b2a10665
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-09 04:22:04
Summary:     Clean things up a bit.
Affected #:  1 file

diff -r ffcfa5425fc354c650dadb0aeb328bb10c1f5a32 -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -610,26 +610,16 @@
             mask = np.zeros_like(data, dtype='bool')
             pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T
 
-            # Get count of particles already inside the bounds.
-            #mask = np.all(pos >= left, axis=1) * \
-            #    np.all(pos < right, axis=1)
-            #pre_fix = mask.sum()
 
-            tmp = np.mod(pos[:,0] - left[0], self.true_domain_width[0]) + left[0]
-            pmask = (tmp >= left[0]) * (tmp < right[0])
-            pos[:,0] = tmp
-            tmp = np.mod(pos[:,1] - left[1], self.true_domain_width[1]) + left[1]
-            pmask *= (tmp >= left[1]) * (tmp < right[1])
-            pos[:,1] = tmp
-            tmp = np.mod(pos[:,2] - left[2], self.true_domain_width[2]) + left[2]
-            pmask *= (tmp >= left[2]) * (tmp < right[2])
-            pos[:,2] = tmp
+            # This hurts, but is useful for periodicity. Probably should check first
+            # if it is even needed for a given left/right
+            for i in range(3):
+                pos[:,i] = np.mod(pos[:,i] - left[i], self.true_domain_width[i]) + left[i]
 
             # Now get all particles that are within the bbox
-            mask = pmask
-            pre_fix = mask.sum()
+            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
 
-            mylog.info("Filtering particles, originally %i, now returning %i out of %i" % (pre_fix, mask.sum(), mask.shape[0]))
+            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
 
             if not np.any(mask):
                 continue
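
The cleaned-up filter_bbox above handles periodicity by shifting each coordinate into the window starting at left with np.mod and then masking particles that land inside [left, right). An illustrative standalone version (a sketch, not the yt method itself):

    import numpy as np

    def periodic_bbox_mask(pos, left, right, true_domain_width):
        pos = np.array(pos, dtype='float64')
        left, right = np.asarray(left), np.asarray(right)
        width = np.asarray(true_domain_width)
        for i in range(3):
            # Shift each coordinate into the periodic window starting at left[i].
            pos[:, i] = np.mod(pos[:, i] - left[i], width[i]) + left[i]
        return np.all(pos >= left, axis=1) & np.all(pos < right, axis=1)

    mask = periodic_bbox_mask([[0.95, 0.5, 0.5], [0.30, 0.5, 0.5]],
                              [-0.1, 0.4, 0.4], [0.1, 0.6, 0.6],
                              [1.0, 1.0, 1.0])
    print(mask)  # [ True False] -- x=0.95 wraps to x=-0.05, inside the bbox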


https://bitbucket.org/yt_analysis/yt/commits/b2d7be90a94c/
Changeset:   b2d7be90a94c
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-09 04:24:10
Summary:     Merging in anyfloat
Affected #:  34 files

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -15,11 +15,12 @@
 details on the relative differences between these halo finders see 
 :ref:`halo_finding`.
 
-.. code-block:: 
-    from yt.mods import *
-    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
@@ -28,9 +29,10 @@
 only specify the file output by the processor with ID 0. Note that the 
 argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
 
-.. code-block:: 
-    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-    hc = HaloCatalog(halos_pf=halos_pf)
+.. code-block:: python
+
+   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_pf=halos_pf)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
@@ -38,10 +40,11 @@
 with the dataset from which they were found, supply arguments to both 
 halos_pf and data_pf.
 
-.. code-block::
-    halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-    hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+.. code-block:: python
+
+   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -72,9 +75,9 @@
 
 An example of adding a filter:
 
-.. code-block::
+.. code-block:: python
 
-    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+   hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
 
 Currently quantity_value is the only available filter, but more can be 
 added by the user by defining a function that accepts a halo object as 
@@ -85,20 +88,21 @@
 
 An example of defining your own filter:
 
-.. code-block::
-    def my_filter_function(halo):
-        
-        # Define condition for filter
-        filter_value = True
-        
-        # Return a boolean value 
-        return filter_value
+.. code-block:: python
 
-    # Add your filter to the filter registry
-    add_filter("my_filter", my_filter_function)
+   def my_filter_function(halo):
+       
+       # Define condition for filter
+       filter_value = True
+       
+       # Return a boolean value 
+       return filter_value
 
-    # ... Later on in your script
-    hc.add_filter("my_filter")
+   # Add your filter to the filter registry
+   add_filter("my_filter", my_filter_function)
+
+   # ... Later on in your script
+   hc.add_filter("my_filter")
 
 Quantities
 ----------
@@ -118,25 +122,26 @@
 
 An example of adding a quantity:
 
-.. code-block::
-    hc.add_quantity('center_of_mass')
+.. code-block:: python
+
+   hc.add_quantity('center_of_mass')
 
 An example of defining your own quantity:
 
-.. code-block::
+.. code-block:: python
 
-    def my_quantity_function(halo):
-        # Define quantity to return
-        quantity = 5
-        
-        return quantity
+   def my_quantity_function(halo):
+       # Define quantity to return
+       quantity = 5
+       
+       return quantity
 
-    # Add your filter to the filter registry
-    add_quantity('my_quantity', my_quantity_function)
+   # Add your filter to the filter registry
+   add_quantity('my_quantity', my_quantity_function)
 
 
-    # ... Later on in your script
-    hc.add_quantity("my_quantity") 
+   # ... Later on in your script
+   hc.add_quantity("my_quantity") 
 
 Callbacks
 ---------
@@ -150,8 +155,9 @@
 An example of using a pre-defined callback where we create a sphere for 
 each halo with a radius that is twice the saved “radius”.
 
-.. code-block::
-    hc.add_callback("sphere", factor=2.0)
+.. code-block:: python
+
+   hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
 yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may 
@@ -161,19 +167,19 @@
 
 An example of defining your own callback:
 
-.. code-block::
+.. code-block:: python
 
-    def my_callback_function(halo):
-        # Perform some callback actions here
-        x = 2
-        halo.x_val = x
+   def my_callback_function(halo):
+       # Perform some callback actions here
+       x = 2
+       halo.x_val = x
 
-    # Add the callback to the callback registry
-    add_callback('my_callback', my_callback_function)
+   # Add the callback to the callback registry
+   add_callback('my_callback', my_callback_function)
 
 
-    # ...  Later on in your script
-    hc.add_callback("my_callback")
+   # ...  Later on in your script
+   hc.add_callback("my_callback")
 
 Running Analysis
 ================
@@ -181,8 +187,9 @@
 After all callbacks, quantities, and filters have been added, the 
 analysis begins with a call to HaloCatalog.create.
 
-.. code-block::
-    hc.create()
+.. code-block:: python
+
+   hc.create()
 
 The save_halos keyword determines whether the actual Halo objects 
 are saved after analysis on them has completed or whether just the 
@@ -206,13 +213,14 @@
 standard call to load. Any side data, such as profiles, can be reloaded 
 with a load_profiles callback and a call to HaloCatalog.load.
 
-.. code-block::
-    hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-    hc = HaloCatalog(halos_pf=hpf,
-                     output_dir="halo_catalogs/catalog_0046")
-    hc.add_callback("load_profiles", output_dir="profiles",
-                    filename="virial_profiles")
-    hc.load()
+.. code-block:: python
+
+   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_pf=hpf,
+                    output_dir="halo_catalogs/catalog_0046")
+   hc.add_callback("load_profiles", output_dir="profiles",
+                   filename="virial_profiles")
+   hc.load()
 
 Summary
 =======
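
Pulling the documentation snippets above together, a condensed sketch of the full workflow; the dataset path, filter threshold, and callback arguments are simply the examples used above:

    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
    hc.add_quantity('center_of_mass')
    hc.add_callback('sphere', factor=2.0)
    hc.create()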

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:ded7d47bf5a74c9ea5431a37b6d371a631909d2b95214cd8053617762f62e2e4"
+  "signature": "sha256:2f774139560d94508c2c51b70930d46941d9ceef7228655de32a69634f6c6d83"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,14 +73,43 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Note that the x and y axes are in units of \"code length\", which in the case of FITS datasets are equal to the width of one pixel. Currently, the `yt` plotting routines don't understand datasets with non-length units on the axes (such as RA, Dec, velocity, etc.), so it defaults to the pixel scale. This will be changed in a future release. When making plots of FITS data, to see the image coordinates as they are in the file, it is helpful to set the keyword `origin = \"native\"`."
+      "The x and y axes are in units of the image pixel. When making plots of FITS data, to see the image coordinates as they are in the file, it is helpful to set the keyword `origin = \"native\"`. If you want to see the celestial coordinates along the axes, you can import the `PlotWindowWCS` class and feed it the `SlicePlot`. For this to work, the [WCSAxes](http://wcsaxes.readthedocs.org/en/latest/) package needs to be installed."
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import PlotWindowWCS\n",
+      "wcs_slc = PlotWindowWCS(slc)\n",
+      "wcs_slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+      "Generally, it is best to get the plot in the shape you want it before feeding it to `PlotWindowWCS`. Once it looks the way you want, you can save it just like a normal `PlotWindow` plot:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "wcs_slc.save()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
      ]
     },
     {
@@ -147,6 +176,44 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", \n",
+      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", \n",
+      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In these cases, we needed to set `aspect=\"auto\"` and explicitly declare a square `window_size` to get a figure that looks good. "
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},
@@ -298,6 +365,78 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can also take an existing [ds9](http://ds9.si.edu/site/Home.html) region and use it to create a \"cut region\" as well, using `ds9_region` (the [pyregion](http://leejjoon.github.io/pyregion/) package needs to be installed for this):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import ds9_region"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "For this example we'll create a ds9 region from scratch and load it up:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "region = 'galactic;box(+49:26:35.150,-0:30:04.410,1926.1927\",1483.3701\",0.0)'\n",
+      "box_reg = ds9_region(ds, region)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This region may now be used to compute derived quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print box_reg.quantities.extrema(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Or in projections:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], origin=\"native\", \n",
+      "                        data_source=box_reg, weight_field=\"ones\") # \"ones\" weights each cell by 1\n",
+      "prj.set_log(\"temperature\", True)\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/fits_xray_images.ipynb
--- a/doc/source/cookbook/fits_xray_images.ipynb
+++ b/doc/source/cookbook/fits_xray_images.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:564cb1986609d8bb76397a18219974504231b260f912bed483b87c1f896e92ac"
+  "signature": "sha256:650e3fc7f66951a5fcdb18332bbc625f6f6e449fc919acd01da01e1fbbf92ee1"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -71,19 +71,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds.index\n",
       "def _counts(field, data):\n",
       "    exposure_time = data.get_field_parameter(\"exposure_time\")\n",
       "    return data[\"flux\"]*data[\"pixel\"]*exposure_time\n",
-      "ds.field_info.add_field(name=\"counts\", function=_counts, units=\"counts\")\n",
+      "ds.add_field(name=\"counts\", function=_counts, units=\"counts\", take_log=False)\n",
       "\n",
       "def _pp(field, data):\n",
       "    return np.sqrt(data[\"counts\"])*data[\"projected_temperature\"]\n",
-      "ds.field_info.add_field(name=\"pseudo_pressure\", function=_pp, units=\"sqrt(counts)*keV\")\n",
+      "ds.add_field(name=\"pseudo_pressure\", function=_pp, units=\"sqrt(counts)*keV\", take_log=False)\n",
       "\n",
       "def _pe(field, data):\n",
       "    return data[\"projected_temperature\"]*data[\"counts\"]**(-1./3.)\n",
-      "ds.field_info.add_field(name=\"pseudo_entropy\", function=_pe, units=\"keV*(counts)**(-1/3)\")"
+      "ds.add_field(name=\"pseudo_entropy\", function=_pe, units=\"keV*(counts)**(-1/3)\", take_log=False)"
      ],
      "language": "python",
      "metadata": {},
@@ -131,6 +130,152 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To add the celestial coordinates to the image, we can use `PlotWindowWCS`, if you have the [WCSAxes](http://wcsaxes.readthedocs.org/en/latest/) package installed:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import PlotWindowWCS\n",
+      "wcs_slc = PlotWindowWCS(slc)\n",
+      "wcs_slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can make use of yt's facilities for profile plotting as well."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "v, c = ds.find_max(\"flux\") # Find the maxmimum flux and its center\n",
+      "my_sphere = ds.sphere(c, (100.,\"code_length\")) # Radius of 150 pixels\n",
+      "my_sphere.set_field_parameter(\"exposure_time\", exposure_time)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Such as a radial profile plot:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "radial_profile = yt.ProfilePlot(my_sphere, \"radius\", \n",
+      "                                [\"counts\",\"pseudo_pressure\",\"pseudo_entropy\"], \n",
+      "                                n_bins=50, weight_field=\"ones\")\n",
+      "radial_profile.set_log(\"counts\", True)\n",
+      "radial_profile.set_log(\"pseudo_pressure\", True)\n",
+      "radial_profile.set_log(\"pseudo_entropy\", True)\n",
+      "radial_profile.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Or a phase plot:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "phase_plot = yt.PhasePlot(my_sphere, \"pseudo_pressure\", \"pseudo_entropy\", [\"counts\"], weight_field=None)\n",
+      "phase_plot.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can also take an existing [ds9](http://ds9.si.edu/site/Home.html) region and use it to create a \"cut region\", using `ds9_region` (the [pyregion](http://leejjoon.github.io/pyregion/) package needs to be installed for this):"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.misc import ds9_region\n",
+      "reg_file = [\"# Region file format: DS9 version 4.1\\n\",\n",
+      "            \"global color=green dashlist=8 3 width=3 include=1 source=1 fk5\\n\",\n",
+      "            \"circle(15:16:44.817,+7:01:19.62,34.6256\\\")\"]\n",
+      "f = open(\"circle.reg\",\"w\")\n",
+      "f.writelines(reg_file)\n",
+      "f.close()\n",
+      "circle_reg = ds9_region(ds, \"circle.reg\", field_parameters={\"exposure_time\":exposure_time})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This region may now be used to compute derived quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print circle_reg.quantities.weighted_average_quantity(\"projected_temperature\", \"counts\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Or used in projections:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", \n",
+      "                   [\"flux\",\"projected_temperature\",\"pseudo_pressure\",\"pseudo_entropy\"], \n",
+      "                   origin=\"native\", field_parameters={\"exposure_time\":exposure_time},\n",
+      "                   data_source=circle_reg,\n",
+      "                   proj_style=\"sum\")\n",
+      "prj.set_log(\"flux\",True)\n",
+      "prj.set_log(\"pseudo_pressure\",False)\n",
+      "prj.set_log(\"pseudo_entropy\",False)\n",
+      "prj.set_width(250.)\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},
@@ -159,15 +304,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`setup_counts_fields` will take a list of energy bounds (emin, emax) in keV and create a new field from each where the photons in that energy range will be deposited onto the image grid. "
+      "`load` will handle the events file as FITS image files, and will set up a grid using the WCS information in the file. Optionally, the events may be reblocked to a new resolution. by setting the `\"reblock\"` parameter in the `parameters` dictionary in `load`. `\"reblock\"` must be a power of 2. "
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ebounds = [(0.1,2.0),(2.0,5.0)]\n",
-      "setup_counts_fields(ebounds)"
+      "ds2 = yt.load(\"xray_fits/acisf05356N003_evt2.fits.gz\", parameters={\"reblock\":2})"
      ],
      "language": "python",
      "metadata": {},
@@ -177,14 +321,15 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`load` will handle the events file as FITS image files, and will set up a grid using the WCS information in the file. Optionally, the events may be reblocked to a new resolution. by setting the `\"reblock\"` parameter in the `parameters` dictionary in `load`. `\"reblock\"` must be a power of 2. "
+      "`setup_counts_fields` will take a list of energy bounds (emin, emax) in keV and create a new field from each where the photons in that energy range will be deposited onto the image grid. "
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds2 = yt.load(\"xray_fits/acisf05356N003_evt2.fits.gz\", parameters={\"reblock\":2})"
+      "ebounds = [(0.1,2.0),(2.0,5.0)]\n",
+      "setup_counts_fields(ds2, ebounds)"
      ],
      "language": "python",
      "metadata": {},

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/halo_finding.py
--- a/doc/source/cookbook/halo_finding.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-This script shows the simplest way of getting halo information.  For more
-information, see :ref:`halo_finding`.
-"""
-import yt
-
-ds = yt.load("Enzo_64/DD0043/data0043")
-
-halos = yt.HaloFinder(ds)
-halos.write_out("%s_halos.txt" % ds)

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/halo_mass_info.py
--- a/doc/source/cookbook/halo_mass_info.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Title: Halo Mass Info
-Description: This recipe finds halos and then prints out information about
-             them.  Note that this recipe will take advantage of multiple CPUs
-             if executed with mpirun and supplied the --parallel command line
-             argument.  
-Outputs: [RedshiftOutput0006_halo_info.txt]
-"""
-from yt.mods import *
-
-fn = "Enzo_64/RD0006/RedshiftOutput0006" # parameter file to load
-pf = load(fn) # load data
-
-# First we run our halo finder to identify all the halos in the dataset.  This
-# can take arguments, but the default are pretty sane.
-halos = HaloFinder(pf)
-
-f = open("%s_halo_info.txt" % pf, "w")
-
-# Now, for every halo, we get the baryon data and examine it.
-for halo in halos:
-    # The halo has a property called 'get_sphere' that obtains a sphere
-    # centered on the point of maximum density (or the center of mass, if that
-    # argument is supplied) and with the radius the maximum particle radius of
-    # that halo.
-    sphere = halo.get_sphere()
-    # We use the quantities[] method to get the total mass in baryons and in
-    # particles.
-    baryon_mass, particle_mass = sphere.quantities["TotalQuantity"](
-            ["cell_mass", "particle_mass"])
-    # Now we print out this information, along with the ID.
-    f.write("Total mass in HOP group %s is %0.5e (gas = %0.5e / particles = %0.5e)\n" % \
-            (halo.id, baryon_mass + particle_mass, baryon_mass, particle_mass))
-f.close()

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/halo_particle_plotting.py
--- a/doc/source/cookbook/halo_particle_plotting.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-This is a simple mechanism for overplotting the particles belonging only to
-halos.  For more information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
-
-pf = load("Enzo_64/DD0043/data0043")
-
-halos = HaloFinder(pf)
-
-p = ProjectionPlot(pf, "x", "density")
-p.annotate_hop_circles(halos)
-p.annotate_hop_particles(halos, max_number=100)
-p.save()

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -4,10 +4,13 @@
 """
 from yt.mods import * # set up our namespace
 
-pf = load("Enzo_64/DD0043/data0043")
+data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
 
-halos = HaloFinder(pf)
+halo_pf = load('rockstar_halos/halos_0.0.bin')
 
-p = ProjectionPlot(pf, "z", "density")
-p.annotate_hop_circles(halos)
+hc = HaloCatalog(halos_pf = halo_pf)
+hc.load()
+
+p = ProjectionPlot(data_pf, "x", "density")
+p.annotate_halos(hc)
 p.save()

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/simple_contour_in_slice.py
--- a/doc/source/cookbook/simple_contour_in_slice.py
+++ b/doc/source/cookbook/simple_contour_in_slice.py
@@ -4,20 +4,20 @@
 pf = load("Sedov_3d/sedov_hdf5_chk_0002")
 
 # Make a traditional slice plot.
-sp = SlicePlot(pf,"x","dens")
+sp = SlicePlot(pf,"x","density")
 
 # Overlay the slice plot with thick red contours of density.
-sp.annotate_contour("dens", ncont=3, clim=(1e-2,1e-1), label=True,
+sp.annotate_contour("density", ncont=3, clim=(1e-2,1e-1), label=True,
                     plot_args={"colors": "red",
                                "linewidths": 2})
 
 # What about some nice temperature contours in blue?
-sp.annotate_contour("temp", ncont=3, clim=(1e-8,1e-6), label=True,
+sp.annotate_contour("temperature", ncont=3, clim=(1e-8,1e-6), label=True,
                     plot_args={"colors": "blue",
                                "linewidths": 2})
 
 # This is the plot object.
-po = sp.plots["dens"]
+po = sp.plots["density"]
 
 # Turn off the colormap image, leaving just the contours.
 po.axes.images[0].set_visible(False)

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -11,7 +11,7 @@
 # Get the angular momentum vector for the sphere.
 L = sp.quantities["AngularMomentumVector"]()
 
-print "Angular momentum vector: %s" % (L)
+print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
 p = OffAxisProjectionPlot(pf, L, "density", sp.center, (25, "kpc"))

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -4,5 +4,5 @@
 pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Create density slices of several fields along the x axis
-SlicePlot(pf, 'x', ['density','temperature','Pressure','VorticitySquared'], 
+SlicePlot(pf, 'x', ['density','temperature','pressure','vorticity_squared'], 
           width = (800.0, 'kpc')).save()

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -17,10 +17,9 @@
 right_corner = pf.domain_right_edge
 
 # Now adjust the size of the region along the line of sight (x axis).
-depth = 10.0 # in Mpc
-left_corner[0] = center[0] - 0.5 * depth / pf.units['mpc']
-left_corner[0] = center[0] + 0.5 * depth / pf.units['mpc']
-
+depth = pf.quan(10.0,'Mpc') 
+left_corner[0] = center[0] - 0.5 * depth 
+right_corner[0] = center[0] + 0.5 * depth
 # Create the region
 region = pf.region(center, left_corner, right_corner)
 

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -282,7 +282,7 @@
 
 .. code-block:: python
 
-    ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
+   ( "Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry" )
 
 You can specify alternate names, but note that this may cause problems with the
 field specification if none of the names match old names.
@@ -300,23 +300,23 @@
 
 .. code-block:: python
    
-    default      = (('Npart', 6, 'i'),
-                    ('Massarr', 6, 'd'),
-                    ('Time', 1, 'd'),
-                    ('Redshift', 1, 'd'),
-                    ('FlagSfr', 1, 'i'),
-                    ('FlagFeedback', 1, 'i'),
-                    ('Nall', 6, 'i'),
-                    ('FlagCooling', 1, 'i'),
-                    ('NumFiles', 1, 'i'),
-                    ('BoxSize', 1, 'd'),
-                    ('Omega0', 1, 'd'),
-                    ('OmegaLambda', 1, 'd'),
-                    ('HubbleParam', 1, 'd'),
-                    ('FlagAge', 1, 'i'),
-                    ('FlagMEtals', 1, 'i'),
-                    ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i'))
+   default      = (('Npart', 6, 'i'),
+                   ('Massarr', 6, 'd'),
+                   ('Time', 1, 'd'),
+                   ('Redshift', 1, 'd'),
+                   ('FlagSfr', 1, 'i'),
+                   ('FlagFeedback', 1, 'i'),
+                   ('Nall', 6, 'i'),
+                   ('FlagCooling', 1, 'i'),
+                   ('NumFiles', 1, 'i'),
+                   ('BoxSize', 1, 'd'),
+                   ('Omega0', 1, 'd'),
+                   ('OmegaLambda', 1, 'd'),
+                   ('HubbleParam', 1, 'd'),
+                   ('FlagAge', 1, 'i'),
+                   ('FlagMEtals', 1, 'i'),
+                   ('NallHW', 6, 'i'),
+                   ('unused', 16, 'i'))
 
 These items will all be accessible inside the object ``pf.parameters``, which
 is a dictionary.  You can add combinations of new items, specified in the same
@@ -371,7 +371,7 @@
 
 .. code-block:: python
 
-    ds = load("./halo1e11_run1.00400")
+   ds = load("./halo1e11_run1.00400")
 
 .. _specifying-cosmology-tipsy:
 
@@ -414,7 +414,7 @@
 
 .. code-block:: python
 
-    ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
+   ds = load("./A11QR1/s11Qzm1h2_a1.0000.art")
 
 .. _loading-art-data:
 
@@ -551,21 +551,21 @@
   installations of this package and the `PyWCS <http://stsdas.stsci
   .edu/astrolib/pywcs/>`_ package are not supported.
 
-Though FITS a image is composed of one data cube in the FITS file,
+Though a FITS image is composed of a single array in the FITS file,
 upon being loaded into yt it is automatically decomposed into grids:
 
 .. code-block:: python
 
-  from yt.mods import *
-  ds = load("m33_hi.fits")
-  ds.print_stats()
+   from yt.mods import *
+   ds = load("m33_hi.fits")
+   ds.print_stats()
 
 .. parsed-literal::
 
-  level	  # grids	    # cells	   # cells^3
-  ----------------------------------------------
-    0	     512	  981940800	         994
-  ----------------------------------------------
+   level  # grids         # cells     # cells^3
+   ----------------------------------------------
+     0	     512	  981940800       994
+   ----------------------------------------------
              512	  981940800
 
 yt will generate its own domain decomposition, but the number of grids can be
@@ -573,7 +573,7 @@
 
 .. code-block:: python
 
-  ds = load("m33_hi.fits", nprocs=1024)
+   ds = load("m33_hi.fits", nprocs=1024)
 
 Making the Most of `yt` for FITS Data
 +++++++++++++++++++++++++++++++++++++
@@ -596,12 +596,12 @@
 
 .. code-block:: python
 
-    import astropy.io.fits as pyfits
-    f = pyfits.open("xray_flux_image.fits", mode="update")
-    f[0].header["BUNIT"] = "cts/s/pixel"
-    f[0].header["BTYPE"] = "flux"
-    f.flush()
-    f.close()
+   import astropy.io.fits as pyfits
+   f = pyfits.open("xray_flux_image.fits", mode="update")
+   f[0].header["BUNIT"] = "cts/s/pixel"
+   f[0].header["BTYPE"] = "flux"
+   f.flush()
+   f.close()
 
 FITS Coordinates
 ++++++++++++++++
@@ -651,7 +651,7 @@
 
 .. code-block:: python
 
-    ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
+   ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
 
 The image blocks in each of these files will be loaded as a separate field,
 provided they have the same dimensions as the image blocks in the main file.
@@ -681,22 +681,80 @@
 single floating-point number (applies to all fields) or a Python dictionary
 containing different mask values for different fields:
 
-.. code-block::
+.. code-block:: python
 
-  # passing a single float
-  ds = load("m33_hi.fits", nan_mask=0.0)
+   # passing a single float
+   ds = load("m33_hi.fits", nan_mask=0.0)
 
-  # passing a dict
-  ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
+   # passing a dict
+   ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
 
 Generally, AstroPy may generate a lot of warnings about individual FITS
 files, many of which you may want to ignore. If you want to see these
 warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
 
+Miscellaneous Tools for Use with FITS Data
+++++++++++++++++++++++++++++++++++++++++++
+
+A number of tools have been prepared for use with FITS data that enhance yt's visualization and
+analysis capabilities for this particular type of data. These are included in the ``yt.frontends.fits.misc`` module, and can be imported like so:
+
+.. code-block:: python
+
+  from yt.frontends.fits.misc import setup_counts_fields, PlotWindowWCS, ds9_region
+
+
+``setup_counts_fields``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This function can be used to create image fields from X-ray counts data in different energy bands:
+
+.. code-block:: python
+
+  ebounds = [(0.1,2.0),(2.0,5.0)] # Energies are in keV
+  setup_counts_fields(ds, ebounds)
+
+which would make two fields, ``"counts_0.1-2.0"`` and ``"counts_2.0-5.0"``,
+and add them to the field registry for the dataset ``ds``.
+
+
+``ds9_region``
+~~~~~~~~~~~~~~
+
+This function takes a `ds9 <http://ds9.si.edu/site/Home.html>`_ region and creates a "cut region"
+data container from it, which can be used to select the cells in the FITS dataset that fall within
+the region. To use this functionality, the `pyregion <http://leejjoon.github.io/pyregion/>`_
+package must be installed.
+
+.. code-block:: python
+
+  ds = yt.load("m33_hi.fits")
+  circle_region = ds9_region(ds, "circle.reg")
+  print circle_region.quantities.extrema("flux")
+
+
+``PlotWindowWCS``
+~~~~~~~~~~~~~~~~~
+
+This class takes an on-axis ``SlicePlot`` or ``ProjectionPlot`` of FITS data and adds celestial
+coordinates to the plot axes. To use it, the `WCSAxes <http://wcsaxes.readthedocs.org>`_
+package must be installed.
+
+.. code-block:: python
+
+  wcs_slc = PlotWindowWCS(slc)
+  wcs_slc.show() # for the IPython notebook
+  wcs_slc.save()
+
+``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
+utilized more here.
+
+
 Examples of Using FITS Data
 +++++++++++++++++++++++++++
 
-The following IPython notebooks show examples of working with FITS data in yt:
+The following IPython notebooks show examples of working with FITS data in yt,
+which we recommend you look at in the following order:
 
 * :ref:`radio_cubes`
 * :ref:`xray_fits`
@@ -798,9 +856,9 @@
 
 .. code-block:: python
 
-    for g in grid_data:
-        g["number_of_particles"] = 100000
-        g["particle_position_x"] = np.random.random((g["number_of_particles"]))
+   for g in grid_data:
+       g["number_of_particles"] = 100000
+       g["particle_position_x"] = np.random.random((g["number_of_particles"]))
 
 .. rubric:: Caveats
 

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -200,10 +200,10 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.frontends.halo_catalogs.data_structures.RockstarBinaryFile
-   ~yt.frontends.halo_catalogs.data_structures.RockstarDataset
-   ~yt.frontends.halo_catalogs.fields.RockstarFieldInfo
-   ~yt.frontends.halo_catalogs.io.IOHandlerRockstarBinary
+   ~yt.frontends.halo_catalogs.rockstar.data_structures.RockstarBinaryFile
+   ~yt.frontends.halo_catalogs.rockstar.data_structures.RockstarDataset
+   ~yt.frontends.halo_catalogs.rockstar.fields.RockstarFieldInfo
+   ~yt.frontends.halo_catalogs.rockstar.io.IOHandlerRockstarBinary
 
 MOAB
 ^^^^
@@ -313,7 +313,7 @@
    ~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder
    ~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder
    ~yt.analysis_modules.halo_finding.halo_objects.parallelHF
-   ~yt.analysis_modules.halo_finding.rockstar.api.RockstarHaloFinder
+   ~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder
 
 You can also operate on the Halo and HaloList objects themselves:
 
@@ -616,11 +616,8 @@
    ~yt.visualization.plot_modifications.ArrowCallback
    ~yt.visualization.plot_modifications.ClumpContourCallback
    ~yt.visualization.plot_modifications.ContourCallback
-   ~yt.visualization.plot_modifications.CoordAxesCallback
    ~yt.visualization.plot_modifications.CuttingQuiverCallback
    ~yt.visualization.plot_modifications.GridBoundaryCallback
-   ~yt.visualization.plot_modifications.HopCircleCallback
-   ~yt.visualization.plot_modifications.HopParticleCallback
    ~yt.visualization.plot_modifications.LabelCallback
    ~yt.visualization.plot_modifications.LinePlotCallback
    ~yt.visualization.plot_modifications.MarkerAnnotateCallback
@@ -630,7 +627,6 @@
    ~yt.visualization.plot_modifications.SphereCallback
    ~yt.visualization.plot_modifications.TextLabelCallback
    ~yt.visualization.plot_modifications.TitleCallback
-   ~yt.visualization.plot_modifications.UnitBoundaryCallback
    ~yt.visualization.plot_modifications.VelocityCallback
 
 Function List

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -27,7 +27,7 @@
     FieldName)``.
   * Previously, yt would use "Enzo-isms" for field names.  We now very
     specifically define fields as lowercase with underscores.  For instance,
-    what used to be ``VelocityMagnitude`` would not be ``velocity_magnitude``.
+    what used to be ``VelocityMagnitude`` would now be ``velocity_magnitude``.
   * Particles are either named by their type or default to the type ``io``.
   * Axis names are now at the *end* of field names, not the beginning.
     ``x-velocity`` is now ``velocity_x``.

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -32,9 +32,9 @@
     
     Parameters
     ----------
-    filenames : list of strings
-        A time-sorted list of filenames to construct the DatasetSeries
-        object.
+    outputs : `yt.data_objects.time_series.DatasetSeries` or list of strings
+        DatasetSeries object, or a time-sorted list of filenames to
+        construct a new DatasetSeries object.
     indices : array_like
         An integer array of particle indices whose trajectories we
         want to track. If they are not sorted they will be sorted.
@@ -59,11 +59,14 @@
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
     """
-    def __init__(self, filenames, indices, fields=None) :
+    def __init__(self, outputs, indices, fields=None) :
 
         indices.sort() # Just in case the caller wasn't careful
         self.field_data = YTFieldData()
-        self.data_series = DatasetSeries.from_filenames(filenames)
+        if isinstance(outputs, DatasetSeries):
+            self.data_series = outputs
+        else:
+            self.data_series = DatasetSeries.from_filenames(outputs)
         self.masks = []
         self.sorts = []
         self.array_indices = []
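
With the change above, the trajectory collection in this module accepts either an existing
``DatasetSeries`` or, as before, a time-sorted list of filenames. A usage sketch, assuming the
class is importable as ``ParticleTrajectories`` and using placeholder filenames and indices:

.. code-block:: python

   import numpy as np
   from yt.data_objects.time_series import DatasetSeries
   from yt.analysis_modules.particle_trajectories.particle_trajectories import \
       ParticleTrajectories

   my_fns = ["DD%04d/DD%04d" % (i, i) for i in range(10, 14)]  # placeholder outputs
   indices = np.arange(1000, 1010)

   # Passing filenames directly still works...
   trajs = ParticleTrajectories(my_fns, indices)

   # ...or build the DatasetSeries once and reuse it.
   ts = DatasetSeries.from_filenames(my_fns)
   trajs = ParticleTrajectories(ts, indices)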

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -154,7 +154,7 @@
     simulation domain.
 
     This object is typically accessed through the `proj` object that
-    hangs off of index objects.  AMRQuadProj is a projection of a
+    hangs off of index objects.  YTQuadTreeProj is a projection of a
     `field` along an `axis`.  The field can have an associated
     `weight_field`, in which case the values are multiplied by a weight
     before being summed, and then divided by the sum of that weight; the
@@ -185,18 +185,21 @@
     data_source : `yt.data_objects.api.AMRData`, optional
         If specified, this will be the data source used for selecting
         regions to project.
-    serialize : bool, optional
-        Whether we should store this projection in the .yt file or not.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
+    style : string, optional
+        The style of projection to be performed.
+        "integrate" : integration along the axis
+        "mip" : maximum intensity projection
+        "sum" : same as "integrate", except that we don't multiply by the path length
+    field_parameters : dict of items
+        Values to be passed as field parameters that can be
         accessed by generated fields.
 
     Examples
     --------
 
-    >>> pf = load("RedshiftOutput0005")
-    >>> qproj = pf.h.quad_proj(0, "Density")
-    >>> print qproj["Density"]
+    >>> ds = load("RedshiftOutput0005")
+    >>> prj = ds.proj(0, "density")
+    >>> print prj["density"]
     """
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
@@ -206,10 +209,15 @@
                  center = None, pf = None, data_source = None,
                  style = "integrate", field_parameters = None):
         YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
-        self.proj_style = style
+        if style == "sum":
+            self.proj_style = "integrate"
+            self._sum_only = True
+        else:
+            self.proj_style = style
+            self._sum_only = False
         if style == "mip":
             self.func = np.max
-        elif style == "integrate":
+        elif style == "integrate" or style == "sum":
             self.func = np.sum # for the future
         else:
             raise NotImplementedError(style)
@@ -343,7 +351,7 @@
         tree.initialize_chunk(i1, i2, ilevel)
 
     def _handle_chunk(self, chunk, fields, tree):
-        if self.proj_style == "mip":
+        if self.proj_style == "mip" or self._sum_only:
             dl = 1.0
         else:
             # This gets explicitly converted to cm
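
For orientation, a minimal NumPy sketch of the difference between the "integrate" and "sum"
projection styles introduced above: "sum" reuses the "integrate" machinery but with the path
length ``dl`` fixed to 1, so cell values are added without path-length weighting. The array
shapes and values below are invented for illustration and are not part of the changeset.

.. code-block:: python

   import numpy as np

   # A toy 4x4x4 field and made-up path lengths along the projection axis.
   field = np.random.random((4, 4, 4))
   dl = np.full(4, 0.25)

   # "integrate": weight each cell by its path length before summing.
   proj_integrate = (field * dl[:, None, None]).sum(axis=0)

   # "sum": same reduction, but with dl effectively equal to 1.0.
   proj_sum = field.sum(axis=0)

   print(proj_integrate.shape)  # (4, 4)
   print(proj_sum.shape)        # (4, 4)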

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -853,7 +853,10 @@
         if fname is None:
             raise KeyError(field)
         else:
-            return self.field_data[fname].in_units(self.field_units[fname])
+            if getattr(self, 'fractional', False):
+                return self.field_data[fname]
+            else:
+                return self.field_data[fname].in_units(self.field_units[fname])
 
     def items(self):
         return [(k,self[k]) for k in self.field_data.keys()]

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,6 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
+from yt.geometry.ppv_coordinates import \
+    PPVCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -359,6 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
+        elif self.geometry == "ppv":
+            self.coordinates = PPVCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/data_objects/tests/test_ortho_rays.py
--- a/yt/data_objects/tests/test_ortho_rays.py
+++ b/yt/data_objects/tests/test_ortho_rays.py
@@ -11,8 +11,7 @@
 
         my_oray = pf.ortho_ray(ax, ocoord)
 
-        my_axes = range(3)
-        del my_axes[ax]
+        my_axes = pf.coordinates.x_axis[ax], pf.coordinates.y_axis[ax]
 
         # find the cells intersected by the ortho ray
         my_all = pf.h.all_data()

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -95,7 +95,7 @@
             # the right case by comparing against known units. This
             # only really works for common units.
             units = set(re.split(regex_pattern, field_units))
-            units.remove('')
+            if '' in units: units.remove('')
             n = int(0)
             for unit in units:
                 if unit in known_units:
@@ -494,6 +494,8 @@
     def _setup_ppv(self):
 
         self.ppv_data = True
+        self.geometry = "ppv"
+
         end = min(self.dimensionality+1,4)
         if self.events_data:
             ctypes = self.axis_names

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -54,7 +54,7 @@
         z = np.ones(x.shape)
         x = (x-0.5)/self.pf.reblock+0.5
         y = (y-0.5)/self.pf.reblock+0.5
-        mask = selector.select_points(x, y, z)
+        mask = selector.select_points(x, y, z, 0.0)
         if mask is None: return
         for field in field_list:
             fd = field.split("_")[-1]

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -11,10 +11,11 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.fields.api import add_field
 from yt.fields.derived_field import ValidateSpatial
-from yt.funcs import mylog
 from yt.utilities.on_demand_imports import _astropy
+from yt.funcs import mylog, get_image_suffix
+from yt.visualization._mpl_imports import FigureCanvasAgg
+import os
 
 def _make_counts(emin, emax):
     def _counts(field, data):
@@ -46,6 +47,8 @@
     ebounds : list of tuples
         A list of tuples, one for each field, with (emin, emax) as the
         energy bounds for the image.
+    ftype : string, optional
+        The field type of the resulting field. Defaults to "gas".
 
     Examples
     --------
@@ -60,4 +63,135 @@
         ds.add_field((ftype,fname), function=cfunc,
                      units="counts/pixel",
                      validators = [ValidateSpatial()],
-                     display_name="Counts (%s-%s keV)" % (emin, emax))
\ No newline at end of file
+                     display_name="Counts (%s-%s keV)" % (emin, emax))
+
+def ds9_region(ds, reg, obj=None, field_parameters=None):
+    r"""
+    Create a data container from a ds9 region file. Requires the pyregion
+    package (http://leejjoon.github.io/pyregion/) to be installed.
+
+    Parameters
+    ----------
+    ds : FITSDataset
+        The Dataset to create the region from.
+    reg : string
+        The filename of the ds9 region, or a region string to be parsed.
+    obj : data container, optional
+        The data container that will be used to create the new region.
+        Defaults to ds.all_data.
+    field_parameters : dictionary, optional
+        A set of field parameters to apply to the region.
+
+    Examples
+    --------
+
+    >>> ds = yt.load("m33_hi.fits")
+    >>> circle_region = ds9_region(ds, "circle.reg")
+    >>> print circle_region.quantities.extrema("flux")
+    """
+    import pyregion
+    if os.path.exists(reg):
+        r = pyregion.open(reg)
+    else:
+        r = pyregion.parse(reg)
+    filter = r.get_filter(header=ds.wcs_2d.to_header())
+    reg_name = reg.split(".")[0]
+    nx = ds.domain_dimensions[ds.lon_axis]
+    ny = ds.domain_dimensions[ds.lat_axis]
+    mask = filter.mask((ny,nx)).transpose()
+    def _reg_field(field, data):
+        i = data["xyz"[ds.lon_axis]].ndarray_view().astype("int")-1
+        j = data["xyz"[ds.lat_axis]].ndarray_view().astype("int")-1
+        new_mask = mask[i,j]
+        ret = data["zeros"].copy()
+        ret[new_mask] = 1.
+        return ret
+    ds.add_field(("gas",reg_name), function=_reg_field)
+    if obj is None:
+        obj = ds.all_data()
+    if field_parameters is not None:
+        for k,v in field_parameters.items():
+            obj.set_field_parameter(k,v)
+    return obj.cut_region(["obj['%s'] > 0" % (reg_name)])
+
+class PlotWindowWCS(object):
+    r"""
+    Use the wcsaxes library to plot celestial coordinates on the axes of an
+    on-axis PlotWindow plot. See http://wcsaxes.readthedocs.org for details.
+
+    Parameters
+    ----------
+    pw : on-axis PlotWindow instance
+        The PlotWindow instance to add celestial axes to.
+    """
+    def __init__(self, pw):
+        from wcsaxes import WCSAxes
+        if pw.oblique:
+            raise NotImplementedError("WCS axes are not implemented for oblique plots.")
+        if not hasattr(pw.pf, "wcs_2d"):
+            raise NotImplementedError("WCS axes are not implemented for this dataset.")
+        if pw.data_source.axis != pw.pf.vel_axis:
+            raise NotImplementedError("WCS axes are not implemented for this axis.")
+        self.pf = pw.pf
+        self.pw = pw
+        self.plots = {}
+        self.wcs_axes = []
+        for f in pw.plots:
+            rect = pw.plots[f]._get_best_layout()[1]
+            fig = pw.plots[f].figure
+            ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
+            fig.add_axes(ax)
+            self.wcs_axes.append(ax)
+        self._setup_plots()
+
+    def _setup_plots(self):
+        pw = self.pw
+        for f, ax in zip(pw.plots, self.wcs_axes):
+            wcs = ax.wcs.wcs
+            pw.plots[f].axes.get_xaxis().set_visible(False)
+            pw.plots[f].axes.get_yaxis().set_visible(False)
+            xax = pw.pf.coordinates.x_axis[pw.data_source.axis]
+            yax = pw.pf.coordinates.y_axis[pw.data_source.axis]
+            xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0],
+                                  wcs.cunit[xax])
+            ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0],
+                                  wcs.cunit[yax])
+            fp = pw._font_properties
+            ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
+            ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
+            ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
+            ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
+            ax.coords[0].ticklabels.set_fontproperties(fp)
+            ax.coords[1].ticklabels.set_fontproperties(fp)
+            self.plots[f] = pw.plots[f]
+        self.pw = pw
+        self.pf = self.pw.pf
+
+    def refresh(self):
+        self._setup_plots(self)
+
+    def keys(self):
+        return self.plots.keys()
+
+    def values(self):
+        return self.plots.values()
+
+    def items(self):
+        return self.plots.items()
+
+    def __getitem__(self, key):
+        for k in self.keys():
+            if k[1] == key:
+                return self.plots[k]
+
+    def show(self):
+        from IPython.core.display import display
+        for k, v in sorted(self.plots.iteritems()):
+            canvas = FigureCanvasAgg(v.figure)
+            display(v.figure)
+
+    def save(self, name=None, mpl_kwargs=None):
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
+        mpl_kwargs["bbox_inches"] = "tight"
+        self.pw.save(name=name, mpl_kwargs=mpl_kwargs)

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -110,11 +110,11 @@
     axis_id = { 'x' : 0, 'y' : 1, 'z' : 2,
                  0  : 0,  1  : 1,  2  : 2}
 
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
+    x_axis = { 'x' : 1, 'y' : 2, 'z' : 0,
+                0  : 1,  1  : 2,  2  : 0}
 
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}
+    y_axis = { 'x' : 2, 'y' : 0, 'z' : 1,
+                0  : 2,  1  : 0,  2  : 1}
 
     @property
     def period(self):
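
The corrected ``x_axis``/``y_axis`` tables above encode a right-handed cyclic permutation of the
image-plane axes for each slicing axis. A small illustrative sketch (the modular formula is an
editorial restatement of the tables, not code from the commit):

.. code-block:: python

   # Slicing along axis a, the image plane uses axes (a+1)%3 and (a+2)%3:
   # a=0 (x) -> (1, 2); a=1 (y) -> (2, 0); a=2 (z) -> (0, 1),
   # matching the new x_axis and y_axis dictionaries.
   for a in range(3):
       print("slice axis %d -> image x: %d, image y: %d"
             % (a, (a + 1) % 3, (a + 2) % 3))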

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/geometry/ppv_coordinates.py
--- /dev/null
+++ b/yt/geometry/ppv_coordinates.py
@@ -0,0 +1,77 @@
+"""
+Cartesian fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class PPVCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(PPVCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+        self.x_axis = {}
+        self.y_axis = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
+                                   ["Image\ x", "Image\ y", pf.vel_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+            if axis == 0:
+                self.x_axis[axis] = 1
+                self.x_axis[lower_ax] = 1
+                self.x_axis[axis_name] = 1
+                self.y_axis[axis] = 2
+                self.y_axis[lower_ax] = 2
+                self.y_axis[axis_name] = 2
+            elif axis == 1:
+                self.x_axis[axis] = 2
+                self.x_axis[lower_ax] = 2
+                self.x_axis[axis_name] = 2
+                self.y_axis[axis] = 0
+                self.y_axis[lower_ax] = 0
+                self.y_axis[axis_name] = 0
+            elif axis == 2:
+                self.x_axis[axis] = 0
+                self.x_axis[lower_ax] = 0
+                self.x_axis[axis_name] = 0
+                self.y_axis[axis] = 1
+                self.y_axis[lower_ax] = 1
+                self.y_axis[axis_name] = 1
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.vel_axis] = pf.vel_unit
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -18,6 +18,10 @@
 from oct_visitors cimport Oct, OctVisitorData, \
     oct_visitor_function
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -36,10 +36,6 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
-ctypedef fused anyfloat:
-    np.float32_t
-    np.float64_t
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -360,7 +360,8 @@
         for ex in self.expr.free_symbols:
             symbol_table[ex] = latex_symbol_lut[str(ex)]
         return latex(self.expr, symbol_names=symbol_table,
-                     fold_frac_powers=True, fold_short_frac=True)
+                     mul_symbol="dot", fold_frac_powers=True,
+                     fold_short_frac=True)
 #
 # Unit manipulation functions
 #

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,7 +18,8 @@
 cimport cython
 from libc.stdlib cimport malloc, free, realloc
 from yt.geometry.selection_routines cimport \
-    SelectorObject, AlwaysSelector, OctreeSubsetSelector
+    SelectorObject, AlwaysSelector, OctreeSubsetSelector, \
+    anyfloat
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     OctreeContainer, OctInfo
@@ -620,22 +621,26 @@
     cdef np.float64_t linking_length, linking_length2
     cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
+    cdef int minimum_count
 
-    def __init__(self, linking_length):
+    def __init__(self, linking_length, periodicity = (True, True, True),
+                 int minimum_count = 8):
+        cdef int i
         self.linking_length = linking_length
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
+        for i in range(3):
+            self.periodicity[i] = periodicity[i]
+        self.minimum_count = minimum_count
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
                                 np.ndarray[np.int64_t, ndim=1] dom_ind,
-                                np.ndarray[np.float64_t, ndim=2] positions,
+                                np.ndarray[anyfloat, ndim=2] positions,
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
-                                int domain_id = -1, int domain_offset = 0,
-                                periodicity = (True, True, True),
-                                int minimum_count = 8):
+                                int domain_id, int domain_offset):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -657,7 +662,6 @@
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
             self.DLE[i] = octree.DLE[i]
             self.DRE[i] = octree.DRE[i]
-            self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
             counter += 1
             container[i] = NULL
@@ -671,7 +675,7 @@
             pdoms[i] = offset
         pind = np.argsort(pdoms)
         cdef np.int64_t *ipind = <np.int64_t*> pind.data
-        cdef np.float64_t *fpos = <np.float64_t*> positions.data
+        cdef anyfloat *fpos = <anyfloat*> positions.data
         # pind is now the pointer into the position and particle_ids array.
         for i in range(positions.shape[0]):
             offset = pdoms[pind[i]]
@@ -751,7 +755,7 @@
                 c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
-                if c0.count < minimum_count:
+                if c0.count < self.minimum_count:
                     contour_ids[offset] = -1
         free(container)
         del pind
@@ -761,7 +765,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
-                                   np.float64_t *positions,
+                                   anyfloat *positions,
                                    np.int64_t *pind,
                                    np.int64_t pcount, 
                                    np.int64_t noffset,

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -17,7 +17,7 @@
 from ._mpl_imports import \
     FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 from yt.funcs import \
-    get_image_suffix, mylog
+    get_image_suffix, mylog, iterable
 import numpy as np
 
 class CallbackWrapper(object):
@@ -140,12 +140,16 @@
             top_buff_size = 0.0
 
         # Ensure the figure size along the long axis is always equal to _figure_size
-        if self._aspect >= 1.0:
-            x_fig_size = self._figure_size
-            y_fig_size = self._figure_size/self._aspect
-        if self._aspect < 1.0:
-            x_fig_size = self._figure_size*self._aspect
-            y_fig_size = self._figure_size
+        if iterable(self._figure_size):
+            x_fig_size = self._figure_size[0]
+            y_fig_size = self._figure_size[1]
+        else:
+            if self._aspect >= 1.0:
+                x_fig_size = self._figure_size
+                y_fig_size = self._figure_size/self._aspect
+            if self._aspect < 1.0:
+                x_fig_size = self._figure_size*self._aspect
+                y_fig_size = self._figure_size
 
         xbins = np.array([x_axis_size, x_fig_size, cb_size, cb_text_size])
         ybins = np.array([y_axis_size, y_fig_size, top_buff_size])

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -16,7 +16,7 @@
 
 from yt.funcs import \
     defaultdict, get_image_suffix, \
-    get_ipython_api_version
+    get_ipython_api_version, iterable
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
 from ._mpl_imports import FigureCanvasAgg
@@ -111,7 +111,10 @@
 
     def __init__(self, data_source, figure_size, fontsize):
         self.data_source = data_source
-        self.figure_size = float(figure_size)
+        if iterable(figure_size):
+            self.figure_size = float(figure_size[0]), float(figure_size[1])
+        else:
+            self.figure_size = float(figure_size)
         self.plots = PlotDictionary(data_source)
         self._callbacks = []
         self._field_transform = {}

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -170,7 +170,7 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    if pf.geometry == "cartesian":
+    if pf.geometry == "cartesian" or pf.geometry == "ppv":
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
@@ -278,7 +278,7 @@
     frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False,
-                 window_size=8.0, fields=None, fontsize=18, setup=False):
+                 window_size=8.0, fields=None, fontsize=18, aspect=None, setup=False):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf)
@@ -290,6 +290,7 @@
         self.oblique = oblique
         self.buff_size = buff_size
         self.antialias = antialias
+        self.aspect = aspect
         skip = list(FixedResolutionBuffer._exclude_fields) + data_source._key_fields
         if fields is None:
             fields = []
@@ -633,7 +634,7 @@
         Examples
         --------
 
-        >>> p = ProjectionPlot(pf, "y", "Density")
+        >>> p = ProjectionPlot(pf, "y", "density")
         >>> p.show()
         >>> p.set_axes_unit("kpc")
         >>> p.show()
@@ -751,7 +752,11 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            aspect = np.float64(self.pf.quan(1.0, unit_y)/self.pf.quan(1.0, unit_x))
+            # For some plots we may set aspect by hand, such as for PPV data.
+            # This will likely be replaced at some point by the coordinate handler
+            # setting plot aspect.
+            if self.aspect is None:
+                self.aspect = np.float64(self.pf.quan(1.0, unit_y)/(self.pf.quan(1.0, unit_x)))
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -792,12 +797,17 @@
                 image, self._field_transform[f].name,
                 self._colormaps[f], extent, zlim,
                 self.figure_size, fp.get_size(),
-                aspect, fig, axes, cax)
+                self.aspect, fig, axes, cax)
 
             axes_unit_labels = ['', '']
             comoving = False
             hinv = False
             for i, un in enumerate((unit_x, unit_y)):
+                if hasattr(self.pf.coordinates, "default_unit_label"):
+                    axax = getattr(self.pf.coordinates, "%s_axis" % ("xy"[i]))[axis_index]
+                    un = self.pf.coordinates.default_unit_label[axax]
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    continue
                 # Use sympy to factor h out of the unit.  In this context 'un'
                 # is a string, so we call the Unit constructor.
                 expr = Unit(un, registry=self.pf.unit_registry).expr
@@ -832,6 +842,9 @@
                 axis_names = self.pf.coordinates.axis_name
                 xax = self.pf.coordinates.x_axis[axis_index]
                 yax = self.pf.coordinates.y_axis[axis_index]
+                if hasattr(self.pf.coordinates, "axis_default_unit_label"):
+                    axes_unit_labels = [self.pf.coordinates.axis_default_unit_name[xax],
+                                        self.pf.coordinates.axis_default_unit_name[yax]]
                 labels = [r'$\rm{'+axis_names[xax]+axes_unit_labels[0] + r'}$',
                           r'$\rm{'+axis_names[yax]+axes_unit_labels[1] + r'}$']
 
@@ -1009,7 +1022,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window', fontsize=18, field_parameters=None):
+                 origin='center-window', fontsize=18, field_parameters=None,
+                 window_size=8.0, aspect=None):
         # this will handle time series data and controllers
         ts = self._initialize_dataset(pf)
         self.ts = ts
@@ -1021,7 +1035,8 @@
             field_parameters = field_parameters, center=center)
         slc.get_data(fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin,
-                             fontsize=fontsize, fields=fields)
+                             fontsize=fontsize, fields=fields,
+                             window_size=window_size, aspect=aspect)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, pf)
         self.set_axes_unit(axes_unit)
@@ -1136,7 +1151,7 @@
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window',
                  fontsize=18, field_parameters=None, data_source=None,
-                 proj_style = "integrate"):
+                 proj_style = "integrate", window_size=8.0, aspect=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1147,7 +1162,7 @@
                          center=center, data_source=data_source,
                          field_parameters = field_parameters, style = proj_style)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
-                             fontsize=fontsize)
+                             fontsize=fontsize, window_size=window_size, aspect=aspect)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, pf)
         self.set_axes_unit(axes_unit)
@@ -1620,7 +1635,11 @@
         if fontscale < 1.0:
             fontscale = np.sqrt(fontscale)
 
-        self._cb_size = 0.0375*figure_size
+        if iterable(figure_size):
+            fsize = figure_size[0]
+        else:
+            fsize = figure_size
+        self._cb_size = 0.0375*fsize
         self._ax_text_size = [0.9*fontscale, 0.7*fontscale]
         self._top_buff_size = 0.30*fontscale
         self._aspect = ((extent[1] - extent[0])/(extent[3] - extent[2]))

diff -r 5fc3b2a10665e45a7bcc93c4f7f5c0cb6b380fdd -r b2d7be90a94c503ea9166de95d833f961e329d88 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -473,7 +473,7 @@
         scales = {True: 'log', False: 'linear'}
         return scales[x_log], scales[y_log]
 
-    def _get_field_label(self, field, field_info, field_unit):
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
         field_unit = field_unit.latex_representation()
         field_name = field_info.display_name
         if isinstance(field, tuple): field = field[1]
@@ -483,7 +483,9 @@
         elif field_name.find('$') == -1:
             field_name = field_name.replace(' ','\/')
             field_name = r'$\rm{'+field_name+r'}$'
-        if field_unit is None or field_unit == '':
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit == '':
             label = field_name
         else:
             label = field_name+r'$\/\/('+field_unit+r')$'
@@ -498,9 +500,10 @@
         yfi = pf._get_field_info(*yf)
         x_unit = profile.x.units
         y_unit = profile.field_units[field_y]
+        fractional = profile.fractional
         x_title = self.x_title or self._get_field_label(field_x, xfi, x_unit)
         y_title = self.y_title.get(field_y, None) or \
-                    self._get_field_label(field_y, yfi, y_unit)
+                    self._get_field_label(field_y, yfi, y_unit, fractional)
 
         return (x_title, y_title)
             
@@ -623,13 +626,14 @@
         x_unit = profile.x.units
         y_unit = profile.y.units
         z_unit = profile.field_units[field_z]
+        fractional = profile.fractional
         x_title = self.x_title or self._get_field_label(field_x, xfi, x_unit)
         y_title = self.y_title or self._get_field_label(field_y, yfi, y_unit)
         z_title = self.z_title.get(field_z, None) or \
-                    self._get_field_label(field_z, zfi, z_unit)
+                    self._get_field_label(field_z, zfi, z_unit, fractional)
         return (x_title, y_title, z_title)
 
-    def _get_field_label(self, field, field_info, field_unit):
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
         field_unit = field_unit.latex_representation()
         field_name = field_info.display_name
         if isinstance(field, tuple): field = field[1]
@@ -639,7 +643,9 @@
         elif field_name.find('$') == -1:
             field_name = field_name.replace(' ','\/')
             field_name = r'$\rm{'+field_name+r'}$'
-        if field_unit is None or field_unit == '':
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit is '':
             label = field_name
         else:
             label = field_name+r'$\/\/('+field_unit+r')$'


https://bitbucket.org/yt_analysis/yt/commits/4fc01852e3b2/
Changeset:   4fc01852e3b2
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-12 19:42:30
Summary:     Adding iter_sphere_data and iter_bbox_data to use a particle filtering mechanism. Functional, but maybe it should live somewhere else.
Affected #:  2 files

diff -r b2d7be90a94c503ea9166de95d833f961e329d88 -r 4fc01852e3b29c166b9bff527aea0e7782e494a4 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -27,6 +27,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
+from particle_filters import bbox_filter, sphere_filter
 CHUNKSIZE = 32**3
 
 class IOHandlerSDF(BaseIOHandler):
@@ -600,6 +601,19 @@
             i += 1
         mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
 
+
+    def filter_particles(self, myiter, myfilter):
+        for data in myiter:
+            mask = myfilter(data)
+
+            if mask.sum() == 0:
+                continue
+            filtered = {}
+            for f in data.keys():
+                filtered[f] = data[f][mask]
+
+            yield filtered
+
     def filter_bbox(self, left, right, myiter):
         """
         Filter data by masking out data outside of a bbox defined
@@ -639,7 +653,24 @@
     def iter_bbox_data(self, left, right, fields):
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
-        return self.iter_data(inds, fields)
+
+        my_filter = bbox_filter(left, right)
+
+        for dd in self.filter_particles(
+            self.iter_data(inds, fields),
+            my_filter):
+            yield dd
+
+    def iter_sphere_data(self, center, radius, fields):
+        mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
+        inds = self.get_bbox(center-radius, center+radius)
+
+        my_filter = sphere_filter(center, radius)
+
+        for dd in self.filter_particles(
+            self.iter_data(inds, fields),
+            my_filter):
+            yield dd
 
     def iter_ibbox_data(self, left, right, fields):
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))

diff -r b2d7be90a94c503ea9166de95d833f961e329d88 -r 4fc01852e3b29c166b9bff527aea0e7782e494a4 yt/frontends/sdf/particle_filters.py
--- /dev/null
+++ b/yt/frontends/sdf/particle_filters.py
@@ -0,0 +1,31 @@
+import numpy as np
+
+
+def bbox_filter(left, right):
+
+    def myfilter(chunk, mask=None):
+        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
+
+        # Now get all particles that are within the bbox
+        if mask is None:
+            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
+        else:
+            np.multiply(mask, np.all(pos >= left, axis=1), mask)
+            np.multiply(mask, np.all(pos < right, axis=1), mask)
+        return mask
+
+    return myfilter
+
+def sphere_filter(center, radius):
+
+    def myfilter(chunk, mask=None):
+        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
+
+        # Now get all particles that are within the radius
+        if mask is None:
+            mask = ((pos-center)**2).sum(axis=1)**0.5 < radius
+        else:
+            np.multiply(mask, np.linalg.norm(pos - center, 2) < radius, mask)
+        return mask
+
+    return myfilter
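
For reference, a self-contained sketch of the closure-based filtering pattern added above: a
filter factory returns a function that, given a chunk of particle data (assumed here to be a dict
of coordinate arrays, as in the SDF reader), produces a boolean mask, and a generator applies it
chunk by chunk. The toy chunk and box below are illustrative, not part of the commit.

.. code-block:: python

   import numpy as np

   def bbox_filter(left, right):
       # Closure selecting particles inside the axis-aligned box [left, right).
       def myfilter(chunk):
           pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
           return np.all(pos >= left, axis=1) & np.all(pos < right, axis=1)
       return myfilter

   def filter_particles(chunks, myfilter):
       # Yield each chunk with the mask applied, skipping empty results.
       for data in chunks:
           mask = myfilter(data)
           if mask.sum() == 0:
               continue
           yield dict((f, data[f][mask]) for f in data)

   chunk = dict((ax, np.random.random(1000)) for ax in 'xyz')
   keep = bbox_filter(np.array([0.2, 0.2, 0.2]), np.array([0.4, 0.4, 0.4]))
   for filtered in filter_particles([chunk], keep):
       print(filtered['x'].size)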


https://bitbucket.org/yt_analysis/yt/commits/9a440fe7a97b/
Changeset:   9a440fe7a97b
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-12 23:02:36
Summary:     Add sindex_sdf_dataset as a new datatype that is enabled when an sindex file is provided. When combined with a bounding_box, this will load up a small subset of the full dataset, allowing the creation of the index object.
Affected #:  2 files

diff -r 4fc01852e3b29c166b9bff527aea0e7782e494a4 -r 9a440fe7a97be0376dd47f595dd8be5ebb44b7ed yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -74,6 +74,8 @@
         self.idx_filename = idx_filename
         self.idx_header = idx_header
         self.idx_level = idx_level
+        if self.idx_filename is not None:
+            dataset_type = 'sindex_sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):

diff -r 4fc01852e3b29c166b9bff527aea0e7782e494a4 -r 9a440fe7a97be0376dd47f595dd8be5ebb44b7ed yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -113,6 +113,98 @@
         fields.append(("dark_matter", "mass"))
         return fields, {}
 
+class IOHandlerSIndexSDF(IOHandlerSDF):
+    _dataset_type = "sindex_sdf_particles"
+
+
+    def _read_particle_coords(self, chunks, ptf):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            yield "dark_matter", (
+                dd['x'], dd['y'], dd['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        required_fields = ['x','y','z']
+        for ptype, field_list in sorted(ptf.items()):
+            for field in field_list:
+                if field == "mass": continue
+                required_fields.append(field)
+
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            required_fields):
+
+            for ptype, field_list in sorted(ptf.items()):
+                x = dd['x']
+                y = dd['y']
+                z = dd['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = dd[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount = 0
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            pcount += dd['x'].size
+
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+
+        chunk_id = 0
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            npart = dd['x'].size
+            pos = np.empty((npart, 3), dtype=dd['x'].dtype)
+            pos[:,0] = dd['x']
+            pos[:,1] = dd['y']
+            pos[:,2] = dd['z']
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, chunk_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += npart
+        return morton
+
+    def _count_particles(self, data_file):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount = 0
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            pcount += dd['x'].size
+        return {'dark_matter': pcount}
+
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
+
 import re
 import os
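
The ``_initialize_index`` method above uses a two-pass pattern over ``iter_bbox_data``: one pass
to count particles so the Morton-key array can be preallocated, and a second pass to fill it
chunk by chunk. A stripped-down sketch of that pattern (the chunks and the stand-in for
``compute_morton`` are invented for illustration):

.. code-block:: python

   import numpy as np

   # Two toy chunks standing in for what iter_bbox_data() would yield.
   chunks = [{'x': np.random.random(100)}, {'x': np.random.random(250)}]

   # Pass 1: count, so the output array is allocated exactly once.
   pcount = sum(dd['x'].size for dd in chunks)
   keys = np.empty(pcount, dtype='uint64')

   # Pass 2: fill the preallocated array chunk by chunk.
   ind = 0
   for dd in chunks:
       npart = dd['x'].size
       # Stand-in for compute_morton(); just store running indices here.
       keys[ind:ind + npart] = np.arange(ind, ind + npart, dtype='uint64')
       ind += npart

   print(keys.size)  # 350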
 


https://bitbucket.org/yt_analysis/yt/commits/d406fcf2a3f6/
Changeset:   d406fcf2a3f6
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-13 00:07:18
Summary:     Periodicity. Do not really like this solution
Affected #:  2 files

diff -r 9a440fe7a97be0376dd47f595dd8be5ebb44b7ed -r d406fcf2a3f683b8d31e930da6895f9374a3ef48 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -746,7 +746,7 @@
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
 
-        my_filter = bbox_filter(left, right)
+        my_filter = bbox_filter(left, right, self.true_domain_width)
 
         for dd in self.filter_particles(
             self.iter_data(inds, fields),
@@ -757,7 +757,7 @@
         mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
         inds = self.get_bbox(center-radius, center+radius)
 
-        my_filter = sphere_filter(center, radius)
+        my_filter = sphere_filter(center, radius, self.true_domain_width)
 
         for dd in self.filter_particles(
             self.iter_data(inds, fields),

diff -r 9a440fe7a97be0376dd47f595dd8be5ebb44b7ed -r d406fcf2a3f683b8d31e930da6895f9374a3ef48 yt/frontends/sdf/particle_filters.py
--- a/yt/frontends/sdf/particle_filters.py
+++ b/yt/frontends/sdf/particle_filters.py
@@ -1,11 +1,16 @@
 import numpy as np
 
 
-def bbox_filter(left, right):
+def bbox_filter(left, right, domain_width):
 
     def myfilter(chunk, mask=None):
         pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
 
+        # This hurts, but is useful for periodicity. Probably should check first
+        # if it is even needed for a given left/right
+        for i in range(3):
+            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
+
         # Now get all particles that are within the bbox
         if mask is None:
             mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
@@ -16,11 +21,16 @@
 
     return myfilter
 
-def sphere_filter(center, radius):
+def sphere_filter(center, radius, domain_width):
 
     def myfilter(chunk, mask=None):
         pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
 
+        # This hurts, but is useful for periodicity. Probably should check first
+        # if it is even needed for a given left/right
+        for i in range(3):
+            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
+
         # Now get all particles that are within the radius
         if mask is None:
             mask = ((pos-center)**2).sum(axis=1)**0.5 < radius
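
The ``np.mod`` remapping above shifts every coordinate into the window
``[left, left + domain_width)`` before testing it, so a selection box that crosses the periodic
boundary still catches particles stored on the far side of the domain. A one-dimensional
illustration with made-up numbers:

.. code-block:: python

   import numpy as np

   domain_width = 1.0
   left, right = 0.9, 1.1           # selection window crossing the x = 1.0 boundary
   x = np.array([0.05, 0.5, 0.95])  # particle positions stored in [0, 1)

   # A naive test misses the particle at 0.05, which sits at 1.05
   # relative to this window.
   naive = (x >= left) & (x < right)

   # Remap into [left, left + domain_width) first, as in bbox_filter above.
   wrapped = np.mod(x - left, domain_width) + left
   periodic = (wrapped >= left) & (wrapped < right)

   print(naive)     # [False False  True]
   print(periodic)  # [ True False  True]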


https://bitbucket.org/yt_analysis/yt/commits/b86100922c42/
Changeset:   b86100922c42
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-09 17:57:14
Summary:     Fixing RAMSES density and mass units.
Affected #:  1 file

diff -r d6681af789175edcbcf4360dd03b1a8ec726ea25 -r b86100922c426d81953273ba8dc0c1761e414e83 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -489,7 +489,8 @@
         length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
         rho_u = self.parameters['unit_d']
         # We're not multiplying by the boxlength here.
-        mass_unit = rho_u * self.parameters['unit_l']**3
+        mass_unit = rho_u * (self.parameters['unit_l'] *
+                             self.parameters['boxlen'])**3
         time_unit = self.parameters['unit_t']
 
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /


https://bitbucket.org/yt_analysis/yt/commits/8847f04588b1/
Changeset:   8847f04588b1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-14 14:20:20
Summary:     Adding a code_density unit to RAMSES for boxlen!=1.0.
Affected #:  2 files

diff -r b86100922c426d81953273ba8dc0c1761e414e83 -r 8847f04588b1c548c06b72efec6fd2b82788e68b yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -486,11 +486,11 @@
         # Note that unit_l *already* converts to proper!
         # Also note that unit_l must be multiplied by the boxlen parameter to
         # ensure we are correctly set up for the current domain.
+        import yt.units.dimensions as dimensions
         length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
         rho_u = self.parameters['unit_d']
         # We're not multiplying by the boxlength here.
-        mass_unit = rho_u * (self.parameters['unit_l'] *
-                             self.parameters['boxlen'])**3
+        mass_unit = rho_u * self.parameters['unit_l']**3
         time_unit = self.parameters['unit_t']
 
         magnetic_unit = np.sqrt(4*np.pi * mass_unit /
@@ -500,6 +500,11 @@
         self.mass_unit = self.quan(mass_unit, "g")
         self.time_unit = self.quan(time_unit, "s")
         self.velocity_unit = self.length_unit / self.time_unit
+        self.unit_registry.add('code_density', 1.0,
+            dimensions.mass / dimensions.length**3)
+        self.density_unit = self.quan(rho_u * self.parameters["boxlen"]**3,
+                                      "g/cm**3")
+        self.unit_registry.modify("code_density", self.density_unit)
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r b86100922c426d81953273ba8dc0c1761e414e83 -r 8847f04588b1c548c06b72efec6fd2b82788e68b yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -32,7 +32,7 @@
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
-rho_units = "code_mass / code_length**3"
+rho_units = "code_density"
 vel_units = "code_length / code_time"
 
 known_species_masses = dict(
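
A small arithmetic sketch of why a separate ``code_density`` symbol is registered when ``boxlen``
is not 1: with ``length_unit = unit_l * boxlen`` and ``mass_unit = unit_d * unit_l**3`` as set
above, the composite ``code_mass / code_length**3`` matches neither the raw ``unit_d`` nor the new
``code_density`` (``unit_d * boxlen**3``). The numbers below are placeholders, not values from a
real RAMSES output.

.. code-block:: python

   unit_l = 3.0e24   # cm, placeholder
   unit_d = 2.0e-27  # g/cm**3, placeholder
   boxlen = 4.0

   length_unit = unit_l * boxlen      # code_length in cm
   mass_unit = unit_d * unit_l**3     # code_mass in g
   density_unit = unit_d * boxlen**3  # the new code_density in g/cm**3

   composite = mass_unit / length_unit**3  # code_mass / code_length**3 in g/cm**3

   print(composite / unit_d)     # 1 / boxlen**3 = 0.015625
   print(density_unit / unit_d)  # boxlen**3 = 64.0
   # The two coincide only when boxlen == 1, hence the dedicated unit
   # for density fields.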


https://bitbucket.org/yt_analysis/yt/commits/e2baba5c8637/
Changeset:   e2baba5c8637
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-15 07:56:58
Summary:     Update halo struct to current RC3+ version.
Enable dumping config.
Add interface to thresh_densities.
Don't free halos on output.
Affected #:  1 file

diff -r 8847f04588b1c548c06b72efec6fd2b82788e68b -r e2baba5c863759d697eca828d5d5e4051eacf896 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -38,7 +38,9 @@
         float alt_m[4]
         float Xoff, Voff, b_to_a, c_to_a
         float A[3]
-        float bullock_spin, kin_to_pot
+        float b_to_a2, c_to_a2
+        float A2[3]
+        float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
 
@@ -53,7 +55,8 @@
     float alt_m1, alt_m2, alt_m3, alt_m4
     float Xoff, Voff, b_to_a, c_to_a
     float A1, A2, A3
-    float bullock_spin, kin_to_pot
+    float b_to_a2, c_to_a2, A21, A22, A23
+    float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
     np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
     float min_pos_err, min_vel_err, min_bulkvel_err
 
@@ -68,6 +71,11 @@
     void free_particle_copies() nogil
     void alloc_particle_copies(np.int64_t total_copies) nogil
     void free_halos() nogil
+    float max_halo_radius(halo *h) nogil
+
+
+# global in groupies.c
+cdef extern double particle_thresh_dens[5]
 
 # For outputing halos, rockstar style
 
@@ -87,13 +95,21 @@
     np.float64_t PARTICLE_MASS
 
     char *MASS_DEFINITION
+    char *MASS_DEFINITION2
+    char *MASS_DEFINITION3
+    char *MASS_DEFINITION4
+    char *MASS_DEFINITION5
+    np.int64_t STRICT_SO_MASSES
     np.int64_t MIN_HALO_OUTPUT_SIZE
     np.float64_t FORCE_RES
+    np.float64_t FORCE_RES_PHYS_MAX
 
     np.float64_t SCALE_NOW
     np.float64_t h0
     np.float64_t Ol
     np.float64_t Om
+    np.float64_t W0
+    np.float64_t WA
 
     np.int64_t GADGET_ID_BYTES
     np.float64_t GADGET_MASS_CONVERSION
@@ -111,6 +127,7 @@
     char *INBASE
     char *FILENAME
     np.int64_t STARTING_SNAP
+    np.int64_t RESTART_SNAP
     np.int64_t NUM_SNAPS
     np.int64_t NUM_BLOCKS
     np.int64_t NUM_READERS
@@ -130,10 +147,13 @@
     np.int64_t FULL_PARTICLE_CHUNKS
     char *BGC2_SNAPNAMES
 
+    np.int64_t SHAPE_ITERATIONS
+    np.int64_t WEIGHTED_SHAPES
     np.int64_t BOUND_PROPS
     np.int64_t BOUND_OUT_TO_HALO_EDGE
     np.int64_t DO_MERGER_TREE_ONLY
     np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t EXACT_LL_CALC
     np.float64_t TRIM_OVERLAP
     np.float64_t ROUND_AFTER_TRIM
     np.int64_t LIGHTCONE
@@ -147,20 +167,20 @@
 
     np.int64_t SWAP_ENDIANNESS
     np.int64_t GADGET_VARIANT
+    np.int64_t ART_VARIANT
 
     np.float64_t FOF_FRACTION
     np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INITIAL_METRIC_SCALING
     np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
-    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
     np.int64_t TEMPORAL_HALO_FINDING
     np.int64_t MIN_HALO_PARTICLES
     np.float64_t UNBOUND_THRESHOLD
     np.int64_t ALT_NFW_METRIC
+    np.int64_t EXTRA_PROFILING
 
     np.int64_t TOTAL_PARTICLES
     np.float64_t BOX_SIZE
-    np.int64_t OUTPUT_HMAD
-    np.int64_t OUTPUT_PARTICLES
     np.int64_t OUTPUT_LEVELS
     np.float64_t DUMP_PARTICLES[3]
 
@@ -179,16 +199,18 @@
         self.pf = pf
 
     def setup_rockstar(self,
-                        particle_mass,
-                        int periodic = 1, force_res=None,
-                        int min_halo_size = 25, outbase = "None",
-                        callbacks = None):
+                       particle_mass,
+                       int periodic = 1, force_res = None,
+                       int min_halo_size = 25, outbase = "None",
+                       write_config = False,  exact_ll_calc = False,
+                       callbacks = None):
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
-        
+        global OUTPUT_FORMAT, EXTRA_PROFILING
+        global STRICT_SO_MASSES, EXACT_LL_CALC
 
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
@@ -218,6 +240,10 @@
         PERIODIC = periodic
         BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
 
+        if exact_ll_calc: EXACT_LL_CALC = 1
+        STRICT_SO_MASSES = 1    # presumably unused in our code path
+        EXTRA_PROFILING = 0
+
         # Set up the configuration options
         setup_config()
 
@@ -225,15 +251,33 @@
         # to calculate virial quantities properly
         calc_mass_definition()
 
+        if write_config: output_config(NULL)
+
+    def particle_thresh_dens(self):
+        cdef np.ndarray d = np.array([particle_thresh_dens[0],
+                                      particle_thresh_dens[1],
+                                      particle_thresh_dens[2],
+                                      particle_thresh_dens[3],
+                                      particle_thresh_dens[4]],
+                                     dtype=np.float64)
+        return d
+        
+    def max_halo_radius(self, int i):
+        return max_halo_radius(&halos[i])
+
     def output_halos(self):
         output_halos(0, 0, 0, NULL) 
 
+    def output_config(self):
+        output_config(NULL) 
+
     def return_halos(self):
         cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
-        rv = np.asarray(haloview).copy()
+        return np.asarray(haloview)
+
+    def finish(self):
         rockstar_cleanup()
         free_halos()
-        return rv
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -271,7 +315,7 @@
                 j += 1
         if j > max_count:
             max_count = j
-        #print >> sys.stderr, "Most frequent occurrance: %s" % max_count
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0
         cdef int counter = 0, ndone = 0

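Taken together, the interface changes above split halo output from cleanup: return_halos() now hands back a view of the halo buffer without copying or freeing it, and the new finish() does the cleanup explicitly. A rough sketch of the resulting call pattern, assuming the interface class defined in this file (here called RockstarGroupiesInterface) and eliding the particle-feeding step, which is not part of this diff:

rgi = RockstarGroupiesInterface(pf)
rgi.setup_rockstar(particle_mass, force_res=None, min_halo_size=25,
                   write_config=True, exact_ll_calc=True)

# ... hand particles to the finder here ...

halos = rgi.return_halos()        # a view into the halo buffer; use (or copy)
                                  # it before calling finish()
print rgi.particle_thresh_dens()  # the five threshold densities from groupies.c
rgi.output_halos()
rgi.output_config()
rgi.finish()                      # rockstar_cleanup() + free_halos()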

https://bitbucket.org/yt_analysis/yt/commits/542f700f5506/
Changeset:   542f700f5506
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-15 08:05:02
Summary:     Be more generous with _is_valid for SDF.
Handle Omega0_r and Omega0_fld.
Get time from header rather than cosmology.
Affected #:  1 file

diff -r e2baba5c863759d697eca828d5d5e4051eacf896 -r 542f700f55069d926c681117ea8cc528d544ca32 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -29,11 +29,10 @@
     ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
-from yt.utilities.physical_constants import \
-    G, \
+from yt.utilities.physical_ratios import \
     cm_per_kpc, \
-    mass_sun_cgs
-from yt.utilities.cosmology import Cosmology
+    mass_sun_grams, \
+    sec_per_Gyr
 from .fields import \
     SDFFieldInfo
 from .io import \
@@ -41,6 +40,12 @@
     SDFRead,\
     SDFIndex
 
+# currently specified by units_2HOT == 2 in header
+# in future will read directly from file
+units_2HOT_v2_length = 3.08567802e21
+units_2HOT_v2_mass = 1.98892e43
+units_2HOT_v2_time = 3.1558149984e16
+
 class SDFFile(ParticleFile):
     pass
 
@@ -104,11 +109,13 @@
         self.current_redshift = self.parameters.get("redshift", 0.0)
         self.omega_lambda = self.parameters["Omega0_lambda"]
         self.omega_matter = self.parameters["Omega0_m"]
+        if "Omega0_fld" in self.parameters:
+            self.omega_lambda += self.parameters["Omega0_fld"]
+        if "Omega0_r" in self.parameters:
+            # not correct, but most codes can't handle Omega0_r
+            self.omega_matter += self.parameters["Omega0_r"]
         self.hubble_constant = self.parameters["h_100"]
-        # Now we calculate our time based on the cosmology.
-        cosmo = Cosmology(self.hubble_constant,
-                          self.omega_matter, self.omega_lambda)
-        self.current_time = cosmo.hubble_time(self.current_redshift)
+        self.current_time = units_2HOT_v2_time * self.parameters["tpos"]
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
         self.filename_template = self.parameter_filename
         self.file_count = 1
@@ -122,8 +129,7 @@
                 self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
-        else:
-            return self._sindex
+        return self._sindex
 
     def _set_code_unit_attributes(self):
         self.length_unit = self.quan(1.0, "kpc")
@@ -136,4 +142,4 @@
         if not os.path.isfile(args[0]): return False
         with open(args[0], "r") as f:
             line = f.readline().strip()
-            return line == "# SDF 1.0"
+            return line[:5] == "# SDF"
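
The cosmology hunk folds Omega0_fld into omega_lambda and, as an acknowledged approximation, Omega0_r into omega_matter, and takes current_time straight from the tpos header field. The last hunk is the "more generous" _is_valid: any file whose first line starts with "# SDF" is accepted, not just the exact "# SDF 1.0" header. A standalone sketch of that check:

def looks_like_sdf(filename):
    # Accept "# SDF 1.0", "# SDF 2.0", and friends.
    with open(filename, "r") as f:
        line = f.readline().strip()
    return line[:5] == "# SDF"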


https://bitbucket.org/yt_analysis/yt/commits/cc6e6e252ebb/
Changeset:   cc6e6e252ebb
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-15 17:25:08
Summary:     merge samskillman/yt-dark
Affected #:  8 files

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -342,8 +342,10 @@
     def morton(self):
         self.validate()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         morton = compute_morton(
             self[:,0], self[:,1], self[:,2],
             LE, RE)
@@ -354,8 +356,10 @@
         mi = self.morton
         mi.sort()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         octree = ParticleOctreeContainer(dims, LE, RE, 
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
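
The octree_subset.py hunks replace the absolute epsilon padding on the bounding box with one scaled by the coordinate magnitude; for positions much larger than unity an absolute eps is smaller than one ULP and vanishes in the subtraction. A quick standalone check:

import numpy as np

eps = np.finfo("float64").eps
x = 3.0e22                           # e.g. a position in cm

print (x - eps == x)                 # True: absolute eps is below one ULP here
print (x - np.abs(x) * eps == x)     # False: relative padding actually moves the edge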

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -79,6 +79,8 @@
         self.idx_filename = idx_filename
         self.idx_header = idx_header
         self.idx_level = idx_level
+        if self.idx_filename is not None:
+            dataset_type = 'sindex_sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -27,6 +27,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
+from particle_filters import bbox_filter, sphere_filter
 CHUNKSIZE = 32**3
 
 class IOHandlerSDF(BaseIOHandler):
@@ -112,6 +113,98 @@
         fields.append(("dark_matter", "mass"))
         return fields, {}
 
+class IOHandlerSIndexSDF(IOHandlerSDF):
+    _dataset_type = "sindex_sdf_particles"
+
+
+    def _read_particle_coords(self, chunks, ptf):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            yield "dark_matter", (
+                dd['x'], dd['y'], dd['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        required_fields = ['x','y','z']
+        for ptype, field_list in sorted(ptf.items()):
+            for field in field_list:
+                if field == "mass": continue
+                required_fields.append(field)
+
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            required_fields):
+
+            for ptype, field_list in sorted(ptf.items()):
+                x = dd['x']
+                y = dd['y']
+                z = dd['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = dd[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount = 0
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            pcount += dd['x'].size
+
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+
+        chunk_id = 0
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            npart = dd['x'].size
+            pos = np.empty((npart, 3), dtype=dd['x'].dtype)
+            pos[:,0] = dd['x']
+            pos[:,1] = dd['y']
+            pos[:,2] = dd['z']
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, chunk_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += npart
+        return morton
+
+    def _count_particles(self, data_file):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount = 0
+        for dd in self.pf.sindex.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            pcount += dd['x'].size
+        return {'dark_matter': pcount}
+
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
+
 import re
 import os
 
@@ -347,6 +440,9 @@
             self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
             self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
 
+        self.rmin *= self.sdfdata.parameters.get("a", 1.0)
+        self.rmax *= self.sdfdata.parameters.get("a", 1.0)
+
         #/* expand root for non-power-of-two */
         expand_root = 0.0
         ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
@@ -354,19 +450,52 @@
             f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
             if (f2 != ic_Nmesh):
                 expand_root = 1.0*f2/ic_Nmesh - 1.0;
-            print 'Expanding: ', f2, ic_Nmesh, expand_root
+            mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
+        self.true_domain_left = self.rmin.copy()
+        self.true_domain_right = self.rmax.copy()
+        self.true_domain_width = self.rmax - self.rmin
         self.rmin *= 1.0 + expand_root
         self.rmax *= 1.0 + expand_root
         self.domain_width = self.rmax - self.rmin
         self.domain_dims = 1 << self.level
         self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
         self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
-        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
+        mylog.debug("SINDEX: %s, %s, %s " % (self.domain_width, self.domain_dims, self.domain_active_dims))
+
+    def spread_bits(self, ival, level=None):
+        if level is None:
+            level = self.level
+        res = 0
+        for i in range(level):
+            res |= ((ival>>i)&1)<<(i*3);
+        return res
 
     def get_key(self, iarr, level=None):
         if level is None:
             level = self.level
         i1, i2, i3 = iarr
+        return self.spread_bits(i1, level) | self.spread_bits(i2, level) << 1 | self.spread_bits(i3, level) << 2
+
+    def spread_bitsv(self, ival, level=None):
+        if level is None:
+            level = self.level
+        res = np.zeros_like(ival, dtype='int64')
+        for i in range(level):
+            res |= np.bitwise_and((ival>>i), 1)<<(i*3);
+        return res
+
+    def get_keyv(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
+        return np.bitwise_or(
+            np.bitwise_or(self.spread_bits(i1, level) , self.spread_bits(i2, level) << 1 ),
+            self.spread_bits(i3, level) << 2)
+
+    def get_key_slow(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
         rep1 = np.binary_repr(i1, width=self.level)
         rep2 = np.binary_repr(i2, width=self.level)
         rep3 = np.binary_repr(i3, width=self.level)
@@ -385,6 +514,13 @@
         expanded[self.dim_slices[dim]] = slb
         return int(expanded.tostring(), 2)
 
+    def get_ind_from_key(self, key, dim='r'):
+        ind = [0,0,0]
+        br = np.binary_repr(key, width=self.level*3)
+        for dim in range(3):
+            ind[dim] = int(br[self.dim_slices[dim]],2)
+        return ind
+
     def get_slice_chunks(self, slice_dim, slice_index):
         sl_key = self.get_slice_key(slice_index, dim=slice_dim)
         mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
@@ -411,34 +547,61 @@
         lengths = self.indexdata['len'][mask]
         return mask, offsets, lengths
 
-    def get_ibbox(self, ileft, iright):
+    def get_ibbox(self, ileft, iright, wandering_particles=True):
         """
         Given left and right indicies, return a mask and
         set of offsets+lengths into the sdf data.
         """
         mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
 
-        print 'Getting data from ileft to iright:',  ileft, iright
+        #print 'Getting data from ileft to iright:',  ileft, iright
 
-        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+        ix, iy, iz = (iright-ileft)*1j
+        #print 'IBBOX:', ileft, iright, ix, iy, iz
+
+        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
                            ileft[1]:iright[1]+1,
-                           ileft[2]:iright[2]+1]
+                           ileft[0]:iright[0]+1]
 
-        X = X.ravel()
-        Y = Y.ravel()
-        Z = Z.ravel()
+        mask = slice(0, -1, None)
+        X = X[mask, mask, mask].astype('int32').ravel()
+        Y = Y[mask, mask, mask].astype('int32').ravel()
+        Z = Z[mask, mask, mask].astype('int32').ravel()
+
+        if wandering_particles:
+            # Need to get padded bbox around the border to catch
+            # wandering particles.
+            dmask = X < self.domain_buffer
+            dmask += Y < self.domain_buffer
+            dmask += Z < self.domain_buffer
+            dmask += X >= self.domain_dims
+            dmask += Y >= self.domain_dims
+            dmask += Z >= self.domain_dims
+            dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
+            dinds = dinds[dinds < self.indexdata['index'][-1]]
+            dinds = dinds[self.indexdata['len'][dinds] > 0]
+            #print 'Getting boundary layers for wanderers, cells: %i' % dinds.size
+
         # Correct For periodicity
         X[X < self.domain_buffer] += self.domain_active_dims
-        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
         Y[Y < self.domain_buffer] += self.domain_active_dims
-        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
         Z[Z < self.domain_buffer] += self.domain_active_dims
-        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        X[X >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
+        Y[Y >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
+        Z[Z >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
 
-        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
+        #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
-        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
-        indices = indices[indices < self.indexdata['index'].shape[0]]
+        indices = self.get_keyv([X, Y, Z])
+        indices = indices[indices < self.indexdata['index'][-1]]
+        indices = indices[self.indexdata['len'][indices] > 0]
+
+        #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        # Here we sort the indices to batch consecutive reads together.
+        if wandering_particles:
+            indices = np.sort(np.append(indices, dinds))
+        else:
+            indices = np.sort(indices)
         return indices
 
     def get_bbox(self, left, right):
@@ -451,16 +614,56 @@
 
         return self.get_ibbox(ileft, iright)
 
+    def get_nparticles_bbox(self, left, right):
+        """
+        Given left and right edges, return total
+        number of particles present.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+        indices = self.get_ibbox(ileft, iright)
+        npart = 0
+        for ind in indices:
+            npart += self.indexdata['len'][ind]
+        return npart
+
     def get_data(self, chunk, fields):
         data = {}
         for field in fields:
             data[field] = self.sdfdata[field][chunk]
         return data
 
+    def get_next_nonzero_chunk(self, key, stop=None):
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        if stop is None:
+            stop = self.indexdata['index'][-1]
+        while key < stop:
+            if self.indexdata['index'][key] == 0:
+                #print 'Squeezing keys, incrementing'
+                key += 1
+            else:
+                break
+        return key
+
+    def get_previous_nonzero_chunk(self, key, stop=None):
+        # These next two while loops are to squeeze the keys if they are empty. Would be better
+        # to go through and set base equal to the last non-zero base, i think.
+        if stop is None:
+            stop = self.indexdata['index'][0]
+        while key > stop:
+            #self.indexdata['index'][-1]:
+            if self.indexdata['index'][key] == 0:
+                #print 'Squeezing keys, decrementing'
+                key -= 1
+            else:
+                break
+        return key
+
     def iter_data(self, inds, fields):
         num_inds = len(inds)
         num_reads = 0
-        print 'Reading %i chunks' % num_inds
+        mylog.debug('SINDEX Reading %i chunks' % num_inds)
         i = 0
         while (i < num_inds):
             ind = inds[i]
@@ -469,10 +672,10 @@
             # Concatenate aligned reads
             nexti = i+1
             combined = 0
-            while nexti < len(inds):
+            while nexti < num_inds:
                 nextind = inds[nexti]
                 #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
-                if base + length == self.indexdata['base'][nextind]:
+                if combined < 1024 and base + length == self.indexdata['base'][nextind]:
                     length += self.indexdata['len'][nextind]
                     i += 1
                     nexti += 1
@@ -481,33 +684,124 @@
                     break
 
             chunk = slice(base, base+length)
-            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            mylog.debug('Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind))
             num_reads += 1
-            data = self.get_data(chunk, fields)
-            yield data
-            del data
+            if length > 0:
+                data = self.get_data(chunk, fields)
+                yield data
+                del data
             i += 1
-        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
+        mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
+
+
+    def filter_particles(self, myiter, myfilter):
+        for data in myiter:
+            mask = myfilter(data)
+
+            if mask.sum() == 0:
+                continue
+            filtered = {}
+            for f in data.keys():
+                filtered[f] = data[f][mask]
+
+            yield filtered
+
+    def filter_bbox(self, left, right, myiter):
+        """
+        Filter data by masking out data outside of a bbox defined
+        by left/right. Account for periodicity of data, allowing left/right
+        to be outside of the domain.
+        """
+        for data in myiter:
+            mask = np.zeros_like(data, dtype='bool')
+            pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T
+
+
+            # This hurts, but is useful for periodicity. Probably should check first
+            # if it is even needed for a given left/right
+            for i in range(3):
+                pos[:,i] = np.mod(pos[:,i] - left[i], self.true_domain_width[i]) + left[i]
+
+            # Now get all particles that are within the bbox
+            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
+
+            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
+
+            if not np.any(mask):
+                continue
+
+            filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
+            for f in data.keys():
+                if f in 'xyz': continue
+                filtered[f] = data[f][mask]
+
+            #for i, ax in enumerate('xyz'):
+            #    print left, right
+            #    assert np.all(filtered[ax] >= left[i])
+            #    assert np.all(filtered[ax] < right[i])
+
+            yield filtered
 
     def iter_bbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
+        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
-        return self.iter_data(inds, fields)
+
+        my_filter = bbox_filter(left, right, self.true_domain_width)
+
+        for dd in self.filter_particles(
+            self.iter_data(inds, fields),
+            my_filter):
+            yield dd
+
+    def iter_sphere_data(self, center, radius, fields):
+        mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
+        inds = self.get_bbox(center-radius, center+radius)
+
+        my_filter = sphere_filter(center, radius, self.true_domain_width)
+
+        for dd in self.filter_particles(
+            self.iter_data(inds, fields),
+            my_filter):
+            yield dd
 
     def iter_ibbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
+        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_ibbox(left, right)
         return self.iter_data(inds, fields)
 
     def get_contiguous_chunk(self, left_key, right_key, fields):
+        liarr = self.get_ind_from_key(left_key)
+        riarr = self.get_ind_from_key(right_key)
+
+        lbase=0
+        llen = 0
         max_key = self.indexdata['index'][-1]
         if left_key > max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        base = self.indexdata['base'][left_key]
-        right_key = min(right_key, self.indexdata['index'][-1])
-        length = self.indexdata['base'][right_key] + \
-            self.indexdata['len'][right_key] - base
-        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        right_key = min(right_key, max_key)
+
+        left_key = self.get_next_nonzero_chunk(left_key)
+        right_key = self.get_previous_nonzero_chunk(right_key, left_key)
+
+        lbase = self.indexdata['base'][left_key]
+        llen = self.indexdata['len'][left_key]
+
+        rbase = self.indexdata['base'][right_key]
+        rlen = self.indexdata['len'][right_key]
+
+        length = rbase + rlen - lbase
+        if length > 0:
+            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, lbase))
+        return self.get_data(slice(lbase, lbase + length), fields)
+
+    def get_key_data(self, key, fields):
+        max_key = self.indexdata['index'][-1]
+        if key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, max_key))
+        base = self.indexdata['base'][key]
+        length = self.indexdata['len'][key] - base
+        if length > 0:
+            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, base))
         return self.get_data(slice(base, base + length), fields)
 
     def iter_slice_data(self, slice_dim, slice_index, fields):
@@ -562,3 +856,94 @@
         cell_iarr = np.array(cell_iarr)
         lk, rk =self.get_key_bounds(level, cell_iarr)
         return self.get_contiguous_chunk(lk, rk, fields)
+
+    def get_cell_bbox(self, level, cell_iarr):
+        """Get floating point bounding box for a given sindex cell
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        cell_iarr = np.array(cell_iarr)
+        cell_width = self.get_cell_width(level)
+        le = self.rmin + cell_iarr*cell_width
+        re = le+cell_width
+        bbox = np.array([le, re]).T
+        assert bbox.shape == (3, 2)
+        return bbox
+
+    def get_padded_bbox_data(self, level, cell_iarr, pad, fields):
+        """Get floating point bounding box for a given sindex cell
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        bbox = self.get_cell_bbox(level, cell_iarr)
+        filter_left = bbox[:, 0] - pad
+        filter_right = bbox[:, 1] + pad
+
+        data = []
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            [self.get_cell_data(level, cell_iarr, fields)]):
+            data.append(dd)
+        #for dd in self.iter_bbox_data(bbox[:,0], bbox[:,1], fields):
+        #    data.append(dd)
+        #assert data[0]['x'].shape[0] > 0
+
+        # Bottom & Top
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] += pad[1]
+        pbox[2, 0] -= pad[2]
+        pbox[2, 1] = bbox[2, 0]
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            data.append(dd)
+        pbox[2, 0] = bbox[2, 1]
+        pbox[2, 1] = pbox[2, 0] + pad[2]
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            data.append(dd)
+
+        # Front & Back
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] = bbox[1, 0]
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            data.append(dd)
+        pbox[1, 0] = bbox[1, 1]
+        pbox[1, 1] = pbox[1, 0] + pad[1]
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            data.append(dd)
+
+        # Left & Right
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] = bbox[0, 0]
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            data.append(dd)
+        pbox[0, 0] = bbox[0, 1]
+        pbox[0, 1] = pbox[0, 0] + pad[0]
+        for dd in self.filter_bbox(
+            filter_left, filter_right,
+            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            data.append(dd)
+
+        return data
+
+    def get_cell_width(self, level):
+        return self.domain_width / 2**level
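
The spread_bitsv/get_keyv pair above vectorizes the morton-key construction: each coordinate's bits are spread to every third bit position and the three results are interleaved into one key per cell. A small standalone version of the same idea, with illustrative inputs:

import numpy as np

def spread_bitsv(ival, level):
    # Place bit i of each input at bit position 3*i of the result.
    res = np.zeros_like(ival, dtype="int64")
    for i in range(level):
        res |= np.bitwise_and(ival >> i, 1) << (i * 3)
    return res

def get_keyv(i1, i2, i3, level):
    # Interleave x, y, z bits into one morton key per cell.
    return spread_bitsv(i1, level) | \
           (spread_bitsv(i2, level) << 1) | \
           (spread_bitsv(i3, level) << 2)

i1 = np.array([0, 1, 2, 3])
i2 = np.array([0, 1, 0, 1])
i3 = np.array([0, 0, 1, 1])
print get_keyv(i1, i2, i3, level=2)   # -> [ 0  3 12 15]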

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/frontends/sdf/particle_filters.py
--- /dev/null
+++ b/yt/frontends/sdf/particle_filters.py
@@ -0,0 +1,41 @@
+import numpy as np
+
+
+def bbox_filter(left, right, domain_width):
+
+    def myfilter(chunk, mask=None):
+        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
+
+        # This hurts, but is useful for periodicity. Probably should check first
+        # if it is even needed for a given left/right
+        for i in range(3):
+            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
+
+        # Now get all particles that are within the bbox
+        if mask is None:
+            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
+        else:
+            np.multiply(mask, np.all(pos >= left, axis=1), mask)
+            np.multiply(mask, np.all(pos < right, axis=1), mask)
+        return mask
+
+    return myfilter
+
+def sphere_filter(center, radius, domain_width):
+
+    def myfilter(chunk, mask=None):
+        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
+
+        # This hurts, but is useful for periodicity. Probably should check first
+        # if it is even needed for a given left/right
+        for i in range(3):
+            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
+
+        # Now get all particles that are within the radius
+        if mask is None:
+            mask = ((pos-center)**2).sum(axis=1)**0.5 < radius
+        else:
+            np.multiply(mask, np.linalg.norm(pos - center, 2) < radius, mask)
+        return mask
+
+    return myfilter
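
bbox_filter and sphere_filter return closures that re-wrap positions into the periodic image nearest the query region before masking. Note that, as committed, sphere_filter's inner function refers to left, which is not among its arguments, and np.linalg.norm(pos - center, 2) collapses the whole array to a single number rather than giving per-particle distances. A per-particle sketch of the intended behaviour (an illustrative helper, not the committed code):

import numpy as np

def sphere_filter_sketch(center, radius, domain_width):
    center = np.asarray(center, dtype="float64")

    def myfilter(chunk, mask=None):
        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
        # Wrap each coordinate into the periodic image nearest the center.
        for i in range(3):
            lo = center[i] - domain_width[i] / 2
            pos[:, i] = np.mod(pos[:, i] - lo, domain_width[i]) + lo
        dist = np.sqrt(((pos - center) ** 2).sum(axis=1))
        if mask is None:
            mask = dist < radius
        else:
            mask &= dist < radius
        return mask

    return myfilter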

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -18,6 +18,10 @@
 from oct_visitors cimport Oct, OctVisitorData, \
     oct_visitor_function
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -36,10 +36,6 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
-ctypedef fused anyfloat:
-    np.float32_t
-    np.float64_t
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box

diff -r 542f700f55069d926c681117ea8cc528d544ca32 -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -18,7 +18,8 @@
 cimport cython
 from libc.stdlib cimport malloc, free, realloc
 from yt.geometry.selection_routines cimport \
-    SelectorObject, AlwaysSelector, OctreeSubsetSelector
+    SelectorObject, AlwaysSelector, OctreeSubsetSelector, \
+    anyfloat
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     OctreeContainer, OctInfo
@@ -620,22 +621,26 @@
     cdef np.float64_t linking_length, linking_length2
     cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
+    cdef int minimum_count
 
-    def __init__(self, linking_length):
+    def __init__(self, linking_length, periodicity = (True, True, True),
+                 int minimum_count = 8):
+        cdef int i
         self.linking_length = linking_length
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
+        for i in range(3):
+            self.periodicity[i] = periodicity[i]
+        self.minimum_count = minimum_count
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
                                 np.ndarray[np.int64_t, ndim=1] dom_ind,
-                                np.ndarray[np.float64_t, ndim=2] positions,
+                                np.ndarray[anyfloat, ndim=2] positions,
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
-                                int domain_id = -1, int domain_offset = 0,
-                                periodicity = (True, True, True),
-                                int minimum_count = 8):
+                                int domain_id, int domain_offset):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -657,7 +662,6 @@
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
             self.DLE[i] = octree.DLE[i]
             self.DRE[i] = octree.DRE[i]
-            self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
             counter += 1
             container[i] = NULL
@@ -671,7 +675,7 @@
             pdoms[i] = offset
         pind = np.argsort(pdoms)
         cdef np.int64_t *ipind = <np.int64_t*> pind.data
-        cdef np.float64_t *fpos = <np.float64_t*> positions.data
+        cdef anyfloat *fpos = <anyfloat*> positions.data
         # pind is now the pointer into the position and particle_ids array.
         for i in range(positions.shape[0]):
             offset = pdoms[pind[i]]
@@ -751,7 +755,7 @@
                 c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
-                if c0.count < minimum_count:
+                if c0.count < self.minimum_count:
                     contour_ids[offset] = -1
         free(container)
         del pind
@@ -761,7 +765,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
-                                   np.float64_t *positions,
+                                   anyfloat *positions,
                                    np.int64_t *pind,
                                    np.int64_t pcount, 
                                    np.int64_t noffset,

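In ContourFinding.pyx the periodicity and minimum group size move from identify_contours arguments into the ParticleContourTree constructor, and positions become a fused anyfloat so float32 particle data no longer needs an up-cast. The resulting call pattern looks roughly like this (octree, dom_ind, positions and particle_ids are assumed to come from the surrounding halo-finder code):

pct = ParticleContourTree(linking_length,
                          periodicity=(True, True, True),
                          minimum_count=8)
tags = pct.identify_contours(octree, dom_ind, positions, particle_ids,
                             domain_id, domain_offset)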

https://bitbucket.org/yt_analysis/yt/commits/2a43724dfed5/
Changeset:   2a43724dfed5
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-18 15:23:23
Summary:     Rename velocity fields in haloflat.
Pass offset to output_halos.

Affected #:  1 file

diff -r cc6e6e252ebb2b28f8a2ebe40de0032c912b8118 -r 2a43724dfed5c29f09877f1f64a601948e082720 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -46,7 +46,7 @@
 
 ctypedef packed struct haloflat:
     np.int64_t id
-    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float pos_x, pos_y, pos_z, vel_x, vel_y, vel_z
     float corevel_x, corevel_y, corevel_z
     float bulkvel_x, bulkvel_y, bulkvel_z
     float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
@@ -265,8 +265,8 @@
     def max_halo_radius(self, int i):
         return max_halo_radius(&halos[i])
 
-    def output_halos(self):
-        output_halos(0, 0, 0, NULL) 
+    def output_halos(self, np.int64_t idoffset):
+        output_halos(idoffset, 0, 0, NULL) 
 
     def output_config(self):
         output_config(NULL) 


https://bitbucket.org/yt_analysis/yt/commits/39697406e012/
Changeset:   39697406e012
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-19 22:17:29
Summary:     Update rockstar frontend to current binary format
Affected #:  3 files

diff -r 2a43724dfed5c29f09877f1f64a601948e082720 -r 39697406e012d42f4ce823a3dfa78dd99ac04f55 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -49,13 +49,13 @@
     float pos_x, pos_y, pos_z, vel_x, vel_y, vel_z
     float corevel_x, corevel_y, corevel_z
     float bulkvel_x, bulkvel_y, bulkvel_z
-    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-    float J1, J2, J3
+    float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
+    float Jx, Jy, Jz
     float energy, spin
     float alt_m1, alt_m2, alt_m3, alt_m4
     float Xoff, Voff, b_to_a, c_to_a
-    float A1, A2, A3
-    float b_to_a2, c_to_a2, A21, A22, A23
+    float Ax, Ay, Az
+    float b_to_a2, c_to_a2, A2x, A2y, A2z
     float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
     np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
     float min_pos_err, min_vel_err, min_bulkvel_err
@@ -203,6 +203,7 @@
                        int periodic = 1, force_res = None,
                        int min_halo_size = 25, outbase = "None",
                        write_config = False,  exact_ll_calc = False,
+                       lightcone = False, lightcone_origin = [0,0,0],
                        callbacks = None):
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -211,6 +212,7 @@
         global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
         global OUTPUT_FORMAT, EXTRA_PROFILING
         global STRICT_SO_MASSES, EXACT_LL_CALC
+        global LIGHTCONE, LIGHTCONE_ORIGIN
 
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
@@ -219,7 +221,7 @@
         
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
-        OUTPUT_FORMAT = "ASCII"
+        OUTPUT_FORMAT = "BOTH"
         MIN_HALO_OUTPUT_SIZE=min_halo_size
         
         pf = self.pf
@@ -244,6 +246,12 @@
         STRICT_SO_MASSES = 1    # presumably unused in our code path
         EXTRA_PROFILING = 0
 
+        if lightcone:
+            LIGHTCONE = 1
+            LIGHTCONE_ORIGIN[0] = lightcone_origin[0]
+            LIGHTCONE_ORIGIN[1] = lightcone_origin[1]
+            LIGHTCONE_ORIGIN[2] = lightcone_origin[2]
+
         # Set up the configuration options
         setup_config()
 
@@ -265,8 +273,16 @@
     def max_halo_radius(self, int i):
         return max_halo_radius(&halos[i])
 
-    def output_halos(self, np.int64_t idoffset):
-        output_halos(idoffset, 0, 0, NULL) 
+    def output_halos(self, np.int64_t idoffset, np.ndarray[np.float32_t, ndim=2] bbox):
+        cdef float bounds[6]
+        if idoffset is None: idoffset = 0
+        if bbox is None:
+            output_halos(idoffset, 0, 0, NULL) 
+        else:
+            for i in range(3):
+                bounds[i] = bbox[i,0]
+                bounds[i+3] = bbox[i,1]
+            output_halos(idoffset, 0, 0, bounds) 
 
     def output_config(self):
         output_config(NULL) 

diff -r 2a43724dfed5c29f09877f1f64a601948e082720 -r 39697406e012d42f4ce823a3dfa78dd99ac04f55 yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -30,7 +30,9 @@
     ("box_size", 1, "f"),
     ("particle_mass", 1, "f"),
     ("particle_type", 1, "q"),
-    ("unused", BINARY_HEADER_SIZE - 4*12 - 8*6, "c")
+    ("format_revision", 1, "i"),
+    ("version", 12, "c"),
+    ("unused", BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12, "c")
 )
 
 halo_dt = np.dtype([
@@ -38,17 +40,17 @@
     ('particle_position_x', np.float32),
     ('particle_position_y', np.float32),
     ('particle_position_z', np.float32),
-    ('particle_mposition_x', np.float32),
-    ('particle_mposition_y', np.float32),
-    ('particle_mposition_z', np.float32),
     ('particle_velocity_x', np.float32),
     ('particle_velocity_y', np.float32),
     ('particle_velocity_z', np.float32),
-    ('particle_bvelocity_x', np.float32),
-    ('particle_bvelocity_y', np.float32),
-    ('particle_bvelocity_z', np.float32),
+    ('particle_corevel_x', np.float32),
+    ('particle_corevel_y', np.float32),
+    ('particle_corevel_z', np.float32),
+    ('particle_bulkvel_x', np.float32),
+    ('particle_bulkvel_y', np.float32),
+    ('particle_bulkvel_z', np.float32),
     ('particle_mass', np.float32),
-    ('virial_radius', np.float32),
+    ('radius', np.float32),
     ('child_r', np.float32),
     ('vmax_r', np.float32),
     ('mgrav', np.float32),
@@ -57,9 +59,9 @@
     ('rs', np.float32),
     ('klypin_rs', np.float32),
     ('vrms', np.float32),
-    ('JX', np.float32),
-    ('JY', np.float32),
-    ('JZ', np.float32),
+    ('Jx', np.float32),
+    ('Jy', np.float32),
+    ('Jz', np.float32),
     ('energy', np.float32),
     ('spin', np.float32),
     ('alt_m1', np.float32),
@@ -73,8 +75,15 @@
     ('Ax', np.float32),
     ('Ay', np.float32),
     ('Az', np.float32),
+    ('b_to_a2', np.float32),
+    ('c_to_a2', np.float32),
+    ('A2x', np.float32),
+    ('A2y', np.float32),
+    ('A2z', np.float32),
     ('bullock_spin', np.float32),
     ('kin_to_pot', np.float32),
+    ('m_pe_b', np.float32),
+    ('m_pe_d', np.float32),
     ('num_p', np.int64),
     ('num_child_particles', np.int64),
     ('p_start', np.int64),
@@ -84,7 +93,6 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-    ('padding2', np.float32),
 ])
 
 particle_dt = np.dtype([

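The header gains format_revision and a 12-byte version string, so the unused padding shrinks to keep the total at the fixed binary header size. Spelling out the arithmetic in the new padding expression (BINARY_HEADER_SIZE is 256 in this frontend):

BINARY_HEADER_SIZE = 256
# 4*12: twelve 4-byte floats; 4: format_revision; 8*6: six 8-byte integers;
# 12: the version string.  Whatever is left is padding.
unused = BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12
print unused   # 144 bytes of padding
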
diff -r 2a43724dfed5c29f09877f1f64a601948e082720 -r 39697406e012d42f4ce823a3dfa78dd99ac04f55 yt/frontends/halo_catalogs/rockstar/fields.py
--- a/yt/frontends/halo_catalogs/rockstar/fields.py
+++ b/yt/frontends/halo_catalogs/rockstar/fields.py
@@ -40,17 +40,17 @@
         ("particle_position_x", (p_units, [], None)),
         ("particle_position_y", (p_units, [], None)),
         ("particle_position_z", (p_units, [], None)),
-        ("particle_mposition_x", (p_units, [], None)),
-        ("particle_mposition_y", (p_units, [], None)),
-        ("particle_mposition_z", (p_units, [], None)),
         ("particle_velocity_x", (v_units, [], None)),
         ("particle_velocity_y", (v_units, [], None)),
         ("particle_velocity_z", (v_units, [], None)),
-        ("particle_bvelocity_x", (v_units, [], None)),
-        ("particle_bvelocity_y", (v_units, [], None)),
-        ("particle_bvelocity_z", (v_units, [], None)),
-        ("particle_mass", (m_units, [], "Virial Mass")),
-        ("virial_radius", (r_units, [], "Virial Radius")),
+        ("particle_corevel_x", (v_units, [], None)),
+        ("particle_corevel_y", (v_units, [], None)),
+        ("particle_corevel_z", (v_units, [], None)),
+        ("particle_bulkvel_x", (v_units, [], None)),
+        ("particle_bulkvel_y", (v_units, [], None)),
+        ("particle_bulkvel_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Mass")),
+        ("virial_radius", (r_units, [], "Radius")),
         ("child_r", (r_units, [], None)),
         ("vmax_r", (v_units, [], None)),
     # These fields I don't have good definitions for yet.
@@ -60,9 +60,9 @@
     ('rs', (r_units, [], "R_s")),
     ('klypin_rs', (r_units, [], "Klypin R_s")),
     ('vrms', (v_units, [], "V_{rms}")),
-    ('JX', ("", [], "J_x")),
-    ('JY', ("", [], "J_y")),
-    ('JZ', ("", [], "J_z")),
+    ('Jx', ("", [], "J_x")),
+    ('Jy', ("", [], "J_y")),
+    ('Jz', ("", [], "J_z")),
     ('energy', ("", [], None)),
     ('spin', ("", [], "Spin Parameter")),
     ('alt_m1', (m_units, [], None)),
@@ -76,8 +76,15 @@
     ('Ax', ("", [], "A_x")),
     ('Ay', ("", [], "A_y")),
     ('Az', ("", [], "A_z")),
+    ('b_to_a2', ("", [], None)),
+    ('c_to_a2', ("", [], None)),
+    ('A2x', ("", [], "A2_x")),
+    ('A2y', ("", [], "A2_y")),
+    ('A2z', ("", [], "A2_z")),
     ('bullock_spin', ("", [], "Bullock Spin Parameter")),
     ('kin_to_pot', ("", [], "Kinetic to Potential")),
+    ('m_pe_b', ("", [], None)),
+    ('m_pe_d', ("", [], None)),
     ('num_p', ("", [], "Number of Particles")),
     ('num_child_particles', ("", [], "Number of Child Particles")),
     ('p_start', ("", [], None)),


https://bitbucket.org/yt_analysis/yt/commits/121428f8a618/
Changeset:   121428f8a618
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-20 00:56:41
Summary:     Set periodicity
Affected #:  1 file

diff -r 39697406e012d42f4ce823a3dfa78dd99ac04f55 -r 121428f8a6184b22a4b0e3bf492e64ea6fb926c2 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -104,7 +104,10 @@
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
-        self.periodicity = (True, True, True)
+        if "do_periodic" in self.parameters and self.parameters["do_periodic"]:
+            self.periodicity = (True, True, True)
+        else:
+            self.periodicity = (False, False, False)
 
         self.cosmological_simulation = 1
 


https://bitbucket.org/yt_analysis/yt/commits/ce385f1aa394/
Changeset:   ce385f1aa394
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-20 03:12:59
Summary:     Adding key iterator for a padded bbox.

keys = []
for k in sindex.iter_padded_bbox_keys(level, iarr, pad):
    keys.append(k)
Affected #:  1 file

diff -r 121428f8a6184b22a4b0e3bf492e64ea6fb926c2 -r ce385f1aa39470e82ef66878a44594c3762e44e8 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -947,3 +947,60 @@
 
     def get_cell_width(self, level):
         return self.domain_width / 2**level
+
+    def iter_padded_bbox_keys(self, level, cell_iarr, pad):
+        """
+
+        Returns:
+            bbox: array-like, shape (3,2)
+
+        """
+        bbox = self.get_cell_bbox(level, cell_iarr)
+        filter_left = bbox[:, 0] - pad
+        filter_right = bbox[:, 1] + pad
+
+        # Need to get all of these
+        low_key, high_key = self.get_key_bounds(level, cell_iarr)
+        for k in xrange(low_key, high_key):
+            yield k
+
+        # Bottom & Top
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] += pad[1]
+        pbox[2, 0] -= pad[2]
+        pbox[2, 1] = bbox[2, 0]
+        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
+            yield k
+
+        pbox[2, 0] = bbox[2, 1]
+        pbox[2, 1] = pbox[2, 0] + pad[2]
+        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
+            yield k
+
+        # Front & Back
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] += pad[0]
+        pbox[1, 0] -= pad[1]
+        pbox[1, 1] = bbox[1, 0]
+        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
+            yield k
+        pbox[1, 0] = bbox[1, 1]
+        pbox[1, 1] = pbox[1, 0] + pad[1]
+        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
+            yield k
+
+        # Left & Right
+        pbox = bbox.copy()
+        pbox[0, 0] -= pad[0]
+        pbox[0, 1] = bbox[0, 0]
+        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
+            yield k
+        pbox[0, 0] = bbox[0, 1]
+        pbox[0, 1] = pbox[0, 0] + pad[0]
+        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
+            yield k
+


https://bitbucket.org/yt_analysis/yt/commits/b41d5e789a35/
Changeset:   b41d5e789a35
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-21 20:07:03
Summary:     Allow for sindex to reference a non-valid (different level) index file. Also put wandering_particles in sindex attrs so that it can be turned off.
Affected #:  1 file

diff -r ce385f1aa39470e82ef66878a44594c3762e44e8 -r b41d5e789a35ce959ecb1d56187ffad172be1f65 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -398,6 +398,8 @@
         self.domain_buffer = 0
         self.domain_dims = 0
         self.domain_active_dims = 0
+        self.wandering_particles = True
+        self.valid_indexdata = True
         self.masks = {
             "p" : int("011"*level, 2),
             "t" : int("101"*level, 2),
@@ -547,13 +549,11 @@
         lengths = self.indexdata['len'][mask]
         return mask, offsets, lengths
 
-    def get_ibbox(self, ileft, iright, wandering_particles=True):
+    def get_ibbox(self, ileft, iright):
         """
         Given left and right indicies, return a mask and
         set of offsets+lengths into the sdf data.
         """
-        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-
         #print 'Getting data from ileft to iright:',  ileft, iright
 
         ix, iy, iz = (iright-ileft)*1j
@@ -568,7 +568,7 @@
         Y = Y[mask, mask, mask].astype('int32').ravel()
         Z = Z[mask, mask, mask].astype('int32').ravel()
 
-        if wandering_particles:
+        if self.wandering_particles:
             # Need to get padded bbox around the border to catch
             # wandering particles.
             dmask = X < self.domain_buffer
@@ -593,12 +593,15 @@
         #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
         indices = self.get_keyv([X, Y, Z])
-        indices = indices[indices < self.indexdata['index'][-1]]
-        indices = indices[self.indexdata['len'][indices] > 0]
+#       # Only mask out if we are actually getting data rather than getting indices into
+        # a space.
+        if self.valid_indexdata:
+            indices = indices[indices < self.indexdata['index'][-1]]
+            indices = indices[self.indexdata['len'][indices] > 0]
 
         #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
         # Here we sort the indices to batch consecutive reads together.
-        if wandering_particles:
+        if self.wandering_particles:
             indices = np.sort(np.append(indices, dinds))
         else:
             indices = np.sort(indices)
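
Both switches now live on the index object itself, so they can be flipped per dataset. A short sketch, assuming an SDF dataset pf whose spatial index has already been built:

import numpy as np

sindex = pf.sindex
sindex.wandering_particles = False   # skip the padded boundary-cell lookups
sindex.valid_indexdata = False       # index file was built for another level:
                                     # keep all candidate keys instead of masking
left = np.array([0.0, 0.0, 0.0])     # illustrative code-length edges
right = np.array([10.0, 10.0, 10.0])
inds = sindex.get_bbox(left, right)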


https://bitbucket.org/yt_analysis/yt/commits/dce14ea907ea/
Changeset:   dce14ea907ea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-10 00:09:12
Summary:     Initial pass at a more intelligent FOF particle linking scheme.
Affected #:  1 file

diff -r b2d7be90a94c503ea9166de95d833f961e329d88 -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -38,7 +38,7 @@
     node.contour_id = contour_id
     node.next = node.parent = NULL
     node.prev = prev
-    node.count = 0
+    node.count = 1
     if prev != NULL: prev.next = node
     return node
 
@@ -66,10 +66,23 @@
 cdef inline void contour_union(ContourID *node1, ContourID *node2):
     node1 = contour_find(node1)
     node2 = contour_find(node2)
-    if node1.contour_id < node2.contour_id:
-        node2.parent = node1
-    elif node2.contour_id < node1.contour_id:
-        node1.parent = node2
+    cdef ContourID *pri, *sec
+    if node1.count > node2.count:
+        pri = node1
+        sec = node2
+    elif node2.count > node1.count:
+        pri = node2
+        sec = node1
+    # might be a tie
+    elif node1.contour_id < node2.contour_id:
+        pri = node1
+        sec = node2
+    else:
+        pri = node2
+        sec = node1
+    pri.count += sec.count
+    sec.count = 0
+    sec.parent = pri
 
 cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
@@ -617,6 +630,12 @@
                         contour_ids[ci,cj,ck] = j + 1
                         break
 
+cdef class FOFNode:
+    cdef np.int64_t tag, count
+    def __init__(self, np.int64_t tag):
+        self.tag = tag
+        self.count = 0
+
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
     cdef np.float64_t DW[3], DLE[3], DRE[3]
@@ -759,6 +778,12 @@
                     contour_ids[offset] = -1
         free(container)
         del pind
+        # We can now remake our contour IDs, count the number of them, and
+        # reassign.
+        cdef np.ndarray[np.int64_t, ndim=1] ufof_tags = np.unique(contour_ids)
+        cdef np.int64_t nfof_tags = ufof_tags.size
+        # This is, at most, how many tags we'll have.  Now we just need to
+        # assign to them.
         return contour_ids
 
     @cython.cdivision(True)
@@ -781,7 +806,7 @@
         # Note that pind0 will not monotonically increase, but 
         c0 = container[pind0]
         if c0 == NULL:
-            c0 = container[pind0] = contour_create(poffset, self.last)
+            c0 = container[pind0] = contour_create(pind0, self.last)
             self.last = c0
             if self.first == NULL:
                 self.first = c0
@@ -811,6 +836,7 @@
             if link == 0: continue
             if c1 == NULL:
                 container[pind1] = c0
+                c0.count += 1
             elif c0.contour_id != c1.contour_id:
                 contour_union(c0, c1)
                 c0 = container[pind1] = container[pind0] = contour_find(c0)
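
contour_union now merges by size: the root with more particles absorbs the smaller one, with the lower contour_id breaking ties, and contour_create starts every node at count 1 so the counts stay meaningful. A pure-Python sketch of the same rule (Node and find are illustrative stand-ins for the C structs above):

class Node(object):
    def __init__(self, contour_id):
        self.contour_id = contour_id
        self.count = 1
        self.parent = None

def find(node):
    while node.parent is not None:
        node = node.parent
    return node

def union(node1, node2):
    n1, n2 = find(node1), find(node2)
    if n1 is n2:
        return
    if n1.count > n2.count:
        pri, sec = n1, n2
    elif n2.count > n1.count:
        pri, sec = n2, n1
    elif n1.contour_id < n2.contour_id:   # tie: lower id wins
        pri, sec = n1, n2
    else:
        pri, sec = n2, n1
    pri.count += sec.count
    sec.count = 0
    sec.parent = pri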


https://bitbucket.org/yt_analysis/yt/commits/d1f4551ba70e/
Changeset:   d1f4551ba70e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-10 00:09:17
Summary:     Merging
Affected #:  16 files

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -36,10 +36,15 @@
      "input": [
       "from yt.mods import *\n",
       "from yt.analysis_modules.halo_analysis.api import *\n",
-      "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+      "import tempfile\n",
+      "import shutil\n",
+      "import os\n",
+      "\n",
+      "# Create temporary directory for storing files\n",
+      "tmpdir = tempfile.mkdtemp()\n",
       "\n",
       "# Load the data set with the full simulation information\n",
-      "data_pf = load(path+'Enzo_64/RD0006/RedshiftOutput0006')"
+      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
      ],
      "language": "python",
      "metadata": {},
@@ -57,7 +62,7 @@
      "collapsed": false,
      "input": [
       "# Load the rockstar data files\n",
-      "halos_pf = load(path+'rockstar_halos/halos_0.0.bin')"
+      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
      ],
      "language": "python",
      "metadata": {},
@@ -76,7 +81,7 @@
      "input": [
       "# Instantiate a catalog using those two paramter files\n",
       "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
-      "                 output_dir = path+'halo_catalog')"
+      "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
      "metadata": {},
@@ -202,8 +207,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "hc.add_callback('sphere', radius_field='radius_200', factor = 5,\n",
-      "        field_parameters = dict(virial_radius=('quantity','radius_200')))"
+      "hc.add_callback('sphere', radius_field='radius_200', factor=5,\n",
+      "                field_parameters=dict(virial_radius=('quantity', 'radius_200')))"
      ],
      "language": "python",
      "metadata": {},
@@ -221,9 +226,9 @@
      "collapsed": false,
      "input": [
       "hc.add_callback('profile', 'virial_radius', [('gas','temperature')],\n",
-      "        storage = 'virial_profiles',\n",
-      "        weight_field = 'cell_mass', \n",
-      "        accumulation=False, output_dir='profiles')\n"
+      "                storage='virial_profiles',\n",
+      "                weight_field='cell_mass', \n",
+      "                accumulation=False, output_dir='profiles')\n"
      ],
      "language": "python",
      "metadata": {},
@@ -290,9 +295,10 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_pf =  load(path+'halo_catalog/halo_catalog.0.h5')\n",
+      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
-      "hc_reloaded = HaloCatalog(halos_pf=halos_pf, output_dir=path+'halo_catalog')"
+      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
      "metadata": {},
@@ -309,8 +315,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "hc_reloaded.add_callback('load_profiles',storage='virial_profiles',\n",
-      "        output_dir='profiles')"
+      "hc_reloaded.add_callback('load_profiles', storage='virial_profiles',\n",
+      "                         output_dir='profiles')"
      ],
      "language": "python",
      "metadata": {},
@@ -362,7 +368,10 @@
       "halo = hc_reloaded.halo_list[0]\n",
       "\n",
       "radius = halo.virial_profiles['virial_radius']\n",
-      "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]"
+      "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]\n",
+      "\n",
+      "# Remove output files that are no longer needed\n",
+      "shutil.rmtree(tmpdir)"
      ],
      "language": "python",
      "metadata": {},
@@ -382,7 +391,7 @@
       "%matplotlib inline\n",
       "import matplotlib.pyplot as plt\n",
       "\n",
-      "plt.plot(radius,temperature)\n",
+      "plt.plot(radius, temperature)\n",
       "\n",
       "plt.semilogy()\n",
       "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",

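The notebook now writes the halo catalog and its profiles into a throw-away directory rather than alongside the test data; stripped of the yt calls, the pattern is roughly:

    import os
    import shutil
    import tempfile

    # Scratch directory for everything the catalog writes out.
    tmpdir = tempfile.mkdtemp()
    output_dir = os.path.join(tmpdir, 'halo_catalog')

    # ... create the HaloCatalog with output_dir, run callbacks,
    # reload the profiles from output_dir ...

    # Delete the scratch directory (and all output files) when finished.
    shutil.rmtree(tmpdir)
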
diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,7 +58,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('RD0006/RD0006')
+  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
   halo_list = parallelHF(pf)
   halo_list.dump('MyHaloList')
 
@@ -69,7 +69,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('RD0006/RD0006')
+  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
   haloes = LoadHaloes(pf, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/analyzing/particles.rst
--- a/doc/source/analyzing/particles.rst
+++ b/doc/source/analyzing/particles.rst
@@ -28,8 +28,7 @@
 the quantities (:ref:`derived-quantities`) in those objects will operate on
 particle fields.
 
-(For information on halo finding, see :ref:`cookbook-halo_finding` and
-:ref:`cookbook-halo_mass_info`.)
+(For information on halo finding, see :ref:`cookbook-halo_finding`)
 
 .. warning:: If you use the built-in methods of interacting with particles, you
              should be well off.  Otherwise, there are caveats!

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -4,14 +4,6 @@
 These scripts demonstrate some basic and more advanced analysis that can be 
 performed on cosmological simulations.
 
-.. _cookbook-halo_finding:
-
-Simple Halo Finding
-~~~~~~~~~~~~~~~~~~~
-This script shows how to create a halo catalog for a single dataset.
-
-.. yt_cookbook:: halo_finding.py
-
 Plotting Halos
 ~~~~~~~~~~~~~~
 This is a mechanism for plotting circles representing identified particle halos
@@ -19,20 +11,7 @@
 
 .. yt_cookbook:: halo_plotting.py
 
-Plotting Halo Particles
-~~~~~~~~~~~~~~~~~~~~~~~
-This is a simple mechanism for overplotting the particles belonging only to
-halos.
-
-.. yt_cookbook:: halo_particle_plotting.py
-
-.. _cookbook-halo_mass_info:
-
-Halo Information
-~~~~~~~~~~~~~~~~
-This recipe finds halos and then prints out information about them.
-
-.. yt_cookbook:: halo_mass_info.py
+.. _cookbook-halo_finding:
 
 Halo Profiling and Custom Analysis
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -21,9 +21,8 @@
 If you want to take a look at more complex recipes, or submit your own,
 check out the `yt Hub <http://hub.yt-project.org>`_.
 
-.. note:: To contribute your own recipes, please 
-   `fork <http://bitbucket.org/yt_analysis/yt-doc/fork>`_
-   the documentation repository!
+.. note:: To contribute your own recipes, please follow the instructions 
+    on how to contribute documentation code: :ref:`writing_documentation`.
 
 Example Scripts
 ---------------

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -1,8 +1,8 @@
 .. _docs_build:
 
-=================
-Building the Docs
-=================
+==========================
+Building the Documentation
+==========================
 
 The yt documentation makes heavy use of the sphinx documentation automation
 suite.  Sphinx, written in python, was originally created for the documentation
@@ -11,13 +11,64 @@
 
 While much of the yt documentation is static text, we make heavy use of
 cross-referencing with API documentation that is automatically generated at
-build time by sphinx.  We also use sphinx to run code snippets and embed
-resulting images and example data.
+build time by sphinx.  We also use sphinx to run code snippets (e.g. the 
+cookbook and the notebooks) and embed resulting images and example data.
 
-yt Sphinx extensions
---------------------
+Quick versus full documentation builds
+--------------------------------------
 
-The documentation makes heavy use of custom sphinx extensions to transform
+Building the entire set of yt documentation is a laborious task, since you 
+need to have a large number of packages in order to successfully execute
+and render all of the notebooks and yt recipes drawing from every corner
+of the yt source.  As a quick alternative, one can do a ``quick`` build
+of the documentation, which eschews the need for downloading all of these
+dependencies, but it only produces the static docs.  The static docs do 
+not include the cookbook outputs and the notebooks, but this is usually
+enough for checking whether your documentation contributions look OK
+before submitting them to the yt repository.
+
+If you want to create the full documentation locally, then you'll need
+to follow the instructions for building the ``full`` docs, so that you can
+dynamically execute and render the cookbook recipes, the notebooks, etc.
+
+Building the docs (quick)
+-------------------------
+
+You will need to have the yt repository available on your computer, which
+is done by default if you have yt installed.  In addition, you need a 
+current version of Sphinx_ (1.1.3) documentation software installed.
+
+In order to tell sphinx not to do all of the dynamical building, you must
+set the ``$READTHEDOCS`` environment variable to be True by typing this at 
+the command line:
+
+.. code-block:: bash
+
+   export READTHEDOCS=True  # for bash
+   setenv READTHEDOCS True  # for csh
+
+This variable is set for automated builds on the free ReadTheDocs service but
+can be used by anyone to force a quick, minimal build.
+
+Now all you need to do is execute sphinx on the yt doc source.  Go to the 
+documentation directory and build the docs:
+
+.. code-block:: bash
+
+   cd $YT_DEST/src/yt-hg/doc
+   make html
+
+This will produce an html version of the documentation locally in the 
+``$YT_DEST/src/yt-hg/doc/build/html`` directory.  You can now go there and open
+up ``index.html`` or whatever file you wish in your web browser.
+
+Building the docs (full)
+------------------------
+
+As alluded to earlier, building the full documentation is a bit more involved
+than simply building the static documentation.  
+
+The full documentation makes heavy use of custom sphinx extensions to transform
 recipes, notebooks, and inline code snippets into python scripts, IPython_
 notebooks, or notebook cells that are executed when the docs are built.
 
@@ -30,12 +81,9 @@
 .. _runipy: https://github.com/paulgb/runipy
 .. _IPython: http://ipython.org/
 
-Dependencies
-------------
-
-To build the docs, you will need yt, IPython, runipy, and all supplementary yt
-analysis modules installed. The following dependencies were used to generate the
-yt documentation during the release of yt 2.6 in late 2013.
+To build the full documentation, you will need yt, IPython, runipy, and all 
+supplementary yt analysis modules installed. The following dependencies were 
+used to generate the yt documentation during the release of yt 2.6 in late 2013.
 
 - Sphinx_ 1.1.3
 - IPython_ 1.1
@@ -58,49 +106,32 @@
 <http://yt-project.org/data/>`_, including the larger datasets that are not used
 in the answer tests.
 
-Building the docs
------------------
-
-First, you will need to ensure that your testing configuration is properly
+You will need to ensure that your testing configuration is properly
 configured and that all of the yt test data is in the testing directory.  See
 :ref:`run_answer_testing` for more details on how to set up the testing
 configuration.
 
-Next, clone the yt-doc repository, navigate to the root of the repository, and
-do :code:`make html`.
+Now that you have everything set up properly, go to the documentation directory
+and build it using sphinx:
 
 .. code-block:: bash
 
-   hg clone https://bitbucket.org/yt_analysis/yt-doc ./yt-doc
-   cd yt-doc
+   cd $YT_DEST/src/yt-hg/doc
    make html
 
 If all of the dependencies are installed and all of the test data is in the
-testing directory, this should churn away for a while and eventually generate a
-docs build.  This process is lengthy but shouldn't last more than an hour.  We
-suggest setting :code:`suppressStreamLogging = True` in your yt configuration
-(See :ref:`configuration-file`) to suppress large amounts of debug output from
+testing directory, this should churn away for a while (~ 1 hour) and 
+eventually generate a docs build.  We suggest setting 
+:code:`suppressStreamLogging = True` in your yt configuration (See 
+:ref:`configuration-file`) to suppress large amounts of debug output from
 yt.
 
 To clean the docs build, use :code:`make clean`.  By default, :code:`make clean`
 will not delete the autogenerated API docs, so use :code:`make fullclean` to
 delete those as well.
 
-
-Quick docs builds
------------------
-
-Clearly, building the complete docs is something of an undertaking.  If you are
-adding new static content building the complete docs build is probably
-overkill.  To skip some of the lengthier operations, you can do the following
-from the bash prompt:
-
-.. code-block:: bash
-
-   export READTHEDOCS=True
-
-This variable is set for automated builds on the free ReadTheDocs service but
-can be used by anyone to force a quick, minimal build.
+Building the docs (hybrid)
+--------------------------
 
 It's also possible to create a custom sphinx build that builds a restricted set
 of notebooks or scripts.  This can be accomplished by editing the Sphinx

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -211,16 +211,22 @@
   #. Update your pull request by visiting
      https://bitbucket.org/YourUsername/yt/pull-request/new
 
+.. _writing_documentation:
+
 How to Write Documentation
 ++++++++++++++++++++++++++
 
 The process for writing documentation is identical to the above, except that
-instead of ``yt_analysis/yt`` you should be forking and pushing to
-``yt_analysis/yt-doc``.  All the source for the documentation is written in
+you're modifying source files in the doc directory (i.e. ``$YT_DEST/src/yt-hg/doc``) 
+instead of the src directory (i.e. ``$YT_DEST/src/yt-hg/yt``) of the yt repository.
+All the source for the documentation is written in 
 `Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.
 
 Cookbook recipes go in ``source/cookbook/`` and must be added to one of the
-``.rst`` files in that directory.
+``.rst`` files in that directory.  
+
+For more information on how to build the documentation to make sure it looks
+the way you expect it to after modifying it, see :ref:`docs_build`.
 
 How To Get The Source Code For Editing
 --------------------------------------

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a doc/source/developing/intro.rst
--- a/doc/source/developing/intro.rst
+++ b/doc/source/developing/intro.rst
@@ -66,10 +66,8 @@
 typo or grammatical fixes, adding a FAQ, or increasing coverage of
 functionality, it would be very helpful if you wanted to help out.
 
-The easiest way to help out is to fork the repository:
-
-http://hg.yt-project.org/yt-doc/fork
-
+The easiest way to help out is to fork the main yt repository (where 
+the documentation lives in the ``$YT_DEST/src/yt-hg/doc`` directory),
 and then make your changes in your own fork.  When you are done, issue a pull
 request through the website for your new fork, and we can comment back and
 forth and eventually accept your changes.

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -12,7 +12,6 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import h5py as h5
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.units.yt_array import YTArray
 
@@ -26,7 +25,7 @@
     Parameters
     ----------
     input_array: array_like
-        A numpy ndarray, or list. 
+        A numpy ndarray, or list.
 
     Other Parameters
     ----------------
@@ -35,7 +34,7 @@
 
     Returns
     -------
-    obj: ImageArray object 
+    obj: ImageArray object
 
     Raises
     ------
@@ -55,15 +54,15 @@
     --------
     These are written in doctest format, and should illustrate how to
     use the function.  Use the variables 'pf' for the parameter file, 'pc' for
-    a plot collection, 'c' for a center, and 'L' for a vector. 
+    a plot collection, 'c' for a center, and 'L' for a vector.
 
     >>> im = np.zeros([64,128,3])
     >>> for i in xrange(im.shape[0]):
     ...     for k in xrange(im.shape[2]):
     ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
     ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
     >>> im_arr = ImageArray(im, info=myinfo)
@@ -84,38 +83,36 @@
         super(ImageArray, self).__array_finalize__(obj)
         self.info = getattr(obj, 'info', None)
 
-    def write_hdf5(self, filename):
+    def write_hdf5(self, filename, dataset_name=None):
         r"""Writes ImageArray to hdf5 file.
 
         Parameters
         ----------
         filename: string
-            Note filename not be modified.
-       
+            The filename to create and write a dataset to
+
+        dataset_name: string
+            The name of the dataset to create in the file.
+
         Examples
-        -------- 
+        --------
         >>> im = np.zeros([64,128,3])
         >>> for i in xrange(im.shape[0]):
         ...     for k in xrange(im.shape[2]):
         ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
         ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_hdf5('test_ImageArray.h5')
 
         """
-        array_name = self.info.get("name","image")
-
-        f = h5.File(filename)
-        if array_name in f.keys():
-            del f[array_name]
-        d = f.create_dataset(array_name, data=self)
-        for k, v in self.info.iteritems():
-            d.attrs.create(k, v)
-        f.close()
+        if dataset_name is None:
+            dataset_name = self.info.get("name", "image")
+        super(ImageArray, self).write_hdf5(filename, dataset_name=dataset_name,
+                                           info=self.info)
 
     def add_background_color(self, background='black', inline=True):
         r"""Adds a background color to a 4-channel ImageArray
@@ -126,7 +123,7 @@
 
         Parameters
         ----------
-        background: 
+        background:
             This can be used to set a background color for the image, and can
             take several types of values:
 
@@ -144,7 +141,7 @@
         -------
         out: ImageArray
             The modified ImageArray with a background color added.
-       
+
         Examples
         --------
         >>> im = np.zeros([64,128,4])
@@ -160,8 +157,8 @@
         >>> im_arr.write_png('black_bg.png')
         """
         assert(self.shape[-1] == 4)
-        
-        if background == None:
+
+        if background is None:
             background = (0., 0., 0., 0.)
         elif background == 'white':
             background = (1., 1., 1., 1.)
@@ -175,11 +172,10 @@
             out = self.copy()
 
         for i in range(3):
-            out[:,:,i] = self[:,:,i]*self[:,:,3] + \
-                    background[i]*background[3]*(1.0-self[:,:,3])
-        out[:,:,3] = self[:,:,3] + background[3]*(1.0-self[:,:,3]) 
-        return out 
-
+            out[:, :, i] = self[:, :, i]*self[:, :, 3]
+            out[:, :, i] += background[i]*background[3]*(1.0-self[:, :, 3])
+        out[:, :, 3] = self[:, :, 3]+background[3]*(1.0-self[:, :, 3])
+        return out
 
     def rescale(self, cmax=None, amax=None, inline=True):
         r"""Rescales the image to be in [0,1] range.
@@ -194,7 +190,7 @@
             corresponding to using the maximum value in the alpha channel.
         inline: boolean, optional
             Specifies whether or not the rescaling is done inline. If false,
-            a new copy of the ImageArray will be created, returned. 
+            a new copy of the ImageArray will be created, returned.
             Default:True.
 
         Returns
@@ -207,17 +203,18 @@
         This requires that the shape of the ImageArray to have a length of 3,
         and for the third dimension to be >= 3.  If the third dimension has
         a shape of 4, the alpha channel will also be rescaled.
-       
+
         Examples
-        -------- 
+        --------
         >>> im = np.zeros([64,128,4])
         >>> for i in xrange(im.shape[0]):
         ...     for k in xrange(im.shape[2]):
         ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-        >>> im_arr.write_png('original.png')
-        >>> im_arr.rescale()
-        >>> im_arr.write_png('normalized.png')
+        >>> im = ImageArray(im)
+        >>> im.write_png('original.png')
+        >>> im.rescale()
+        >>> im.write_png('normalized.png')
 
         """
         assert(len(self.shape) == 3)
@@ -226,22 +223,22 @@
             out = self
         else:
             out = self.copy()
-        if cmax is None: 
-            cmax = self[:,:,:3].sum(axis=2).max()
+        if cmax is None:
+            cmax = self[:, :, :3].sum(axis=2).max()
 
-        np.multiply(self[:,:,:3], 1./cmax, out[:,:,:3])
+        np.multiply(self[:, :, :3], 1.0/cmax, out[:, :, :3])
 
         if self.shape[2] == 4:
             if amax is None:
-                amax = self[:,:,3].max()
+                amax = self[:, :, 3].max()
             if amax > 0.0:
-                np.multiply(self[:,:,3], 1./amax, out[:,:,3])
-        
+                np.multiply(self[:, :, 3], 1.0/amax, out[:, :, 3])
+
         np.clip(out, 0.0, 1.0, out)
         return out
 
     def write_png(self, filename, clip_ratio=None, background='black',
-                 rescale=True):
+                  rescale=True):
         r"""Writes ImageArray to png file.
 
         Parameters
@@ -250,9 +247,9 @@
             Note filename not be modified.
         clip_ratio: float, optional
             Image will be clipped before saving to the standard deviation
-            of the image multiplied by this value.  Useful for enhancing 
+            of the image multiplied by this value.  Useful for enhancing
             images. Default: None
-        background: 
+        background:
             This can be used to set a background color for the image, and can
             take several types of values:
 
@@ -265,7 +262,7 @@
         rescale: boolean, optional
             If True, will write out a rescaled image (without modifying the
             original image). Default: True
-       
+
         Examples
         --------
         >>> im = np.zeros([64,128,4])
@@ -292,25 +289,25 @@
         else:
             out = scaled
 
-        if filename[-4:] != '.png': 
+        if filename[-4:] != '.png':
             filename += '.png'
 
         if clip_ratio is not None:
-            nz = out[:,:,:3][out[:,:,:3].nonzero()]
+            nz = out[:, :, :3][out[:, :, :3].nonzero()]
             return write_bitmap(out.swapaxes(0, 1), filename,
-                                nz.mean() + \
-                                clip_ratio * nz.std())
+                                nz.mean() + clip_ratio*nz.std())
         else:
             return write_bitmap(out.swapaxes(0, 1), filename)
 
-    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+    def write_image(self, filename, color_bounds=None, channel=None,
+                    cmap_name="algae", func=lambda x: x):
         r"""Writes a single channel of the ImageArray to a png file.
 
         Parameters
         ----------
         filename: string
             Note filename not be modified.
-       
+
         Other Parameters
         ----------------
         channel: int
@@ -323,43 +320,44 @@
             An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         func : function, optional
-            A function to transform the buffer before applying a colormap. 
+            A function to transform the buffer before applying a colormap.
 
         Returns
         -------
         scaled_image : uint8 image that has been saved
-        
+
         Examples
         --------
-        
+
         >>> im = np.zeros([64,128])
         >>> for i in xrange(im.shape[0]):
-        ...     im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
         ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_image('test_ImageArray.png')
 
         """
-        if filename[-4:] != '.png': 
+        if filename[-4:] != '.png':
             filename += '.png'
 
+        #TODO: Write info dict as png metadata
         if channel is None:
-            return write_image(self.swapaxes(0,1).to_ndarray(), filename,
+            return write_image(self.swapaxes(0, 1).to_ndarray(), filename,
                                color_bounds=color_bounds, cmap_name=cmap_name,
                                func=func)
         else:
-            return write_image(self.swapaxes(0,1)[:,:,channel].to_ndarray(),
+            return write_image(self.swapaxes(0, 1)[:, :, channel].to_ndarray(),
                                filename,
-                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               color_bounds=color_bounds, cmap_name=cmap_name,
                                func=func)
 
     def save(self, filename, png=True, hdf5=True):
         """
-        Saves ImageArray. 
+        Saves ImageArray.
 
         Arguments:
           filename: string
@@ -380,6 +378,3 @@
                 self.write_image("%s.png" % filename)
         if hdf5:
             self.write_hdf5("%s.h5" % filename)
-
-    __doc__ += np.ndarray.__doc__
-

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a yt/frontends/halo_catalogs/setup.py
--- a/yt/frontends/halo_catalogs/setup.py
+++ b/yt/frontends/halo_catalogs/setup.py
@@ -5,6 +5,7 @@
 def configuration(parent_package='', top_path=None):
     config = Configuration('halo_catalogs', parent_package, top_path)
     config.add_subpackage("halo_catalog")
+    config.add_subpackage("owls_subfind")
     config.add_subpackage("rockstar")
     config.make_config_py()
     return config

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -14,7 +14,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+import copy
+import cPickle as pickle
+import itertools
+import numpy as np
+import operator
 import os
+import shutil
+import tempfile
+
 from nose.tools import assert_true
 from numpy.testing import \
     assert_array_equal, \
@@ -28,12 +36,6 @@
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_pf, requires_module
 from yt.funcs import fix_length
-import numpy as np
-import copy
-import operator
-import cPickle as pickle
-import tempfile
-import itertools
 
 
 def operate_and_compare(a, b, op, answer):
@@ -675,3 +677,54 @@
     yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
 
 
+def test_subclass():
+
+    class YTASubclass(YTArray):
+        pass
+
+    a = YTASubclass([4, 5, 6], 'g')
+    b = YTASubclass([7, 8, 9], 'kg')
+    nu = YTASubclass([10, 11, 12], '')
+    nda = np.array([3, 4, 5])
+    yta = YTArray([6, 7, 8], 'mg')
+    ytq = YTQuantity(4, 'cm')
+    ndf = np.float64(3)
+
+    def op_comparison(op, inst1, inst2, compare_class):
+        assert_isinstance(op(inst1, inst2), compare_class)
+        assert_isinstance(op(inst2, inst1), compare_class)
+
+    for op in (operator.mul, operator.div, operator.truediv):
+        for inst in (b, ytq, ndf, yta, nda):
+            yield op_comparison, op, a, inst, YTASubclass
+
+        yield op_comparison, op, ytq, nda, YTArray
+        yield op_comparison, op, ytq, yta, YTArray
+
+    for op in (operator.add, operator.sub):
+        yield op_comparison, op, nu, nda, YTASubclass
+        yield op_comparison, op, a, b, YTASubclass
+        yield op_comparison, op, a, yta, YTASubclass
+
+    yield assert_isinstance, a[0], YTQuantity
+    yield assert_isinstance, a[:], YTASubclass
+    yield assert_isinstance, a[:2], YTASubclass
+
+def test_h5_io():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = fake_random_pf(64, nprocs=1, length_unit=10)
+
+    warr = ds.arr(np.random.random((256, 256)), 'code_length')
+
+    warr.write_hdf5('test.h5')
+
+    iarr = YTArray.from_hdf5('test.h5')
+
+    yield assert_equal, warr, iarr
+    yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r dce14ea907ead9d4e36f17f4cfad8d7d10e58f33 -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -74,7 +74,8 @@
         if ret.shape == ():
             return YTQuantity(ret, units)
         else:
-            return YTArray(ret, units)
+            # This could be a subclass, so don't call YTArray directly.
+            return type(args[0])(ret, units)
     return wrapped
 
 def sqrt_unit(unit):
@@ -464,6 +465,92 @@
     # End unit conversion methods
     #
 
+    def write_hdf5(self, filename, dataset_name=None, info=None):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to create and write a dataset to
+
+        dataset_name: string
+            The name of the dataset to create in the file.
+
+        info: dictionary
+            A dictionary of supplementary info to write to append as attributes
+            to the dataset.
+
+        Examples
+        --------
+        >>> a = YTArray([1,2,3], 'cm')
+
+        >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
+
+        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
+        ...              info=myinfo)
+
+        """
+        import h5py
+        from yt.extern.six.moves import cPickle as pickle
+        if info is None:
+            info = {}
+
+        info['units'] = str(self.units)
+        info['unit_registry'] = pickle.dumps(self.units.registry.lut)
+
+        if dataset_name is None:
+            dataset_name = 'array_data'
+
+        f = h5py.File(filename)
+        if dataset_name in f.keys():
+            d = f[dataset_name]
+            # Overwrite without deleting if we can get away with it.
+            if d.shape == self.shape and d.dtype == self.dtype:
+                d[:] = self
+                for k in d.attrs.keys():
+                    del d.attrs[k]
+            else:
+                del f[dataset_name]
+                d = f.create_dataset(dataset_name, data=self)
+        else:
+            d = f.create_dataset(dataset_name, data=self)
+
+        for k, v in info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    @classmethod
+    def from_hdf5(cls, filename, dataset_name=None):
+        r"""Attempts to read in and convert a dataset in an hdf5 file into a YTArray.
+
+        Parameters
+        ----------
+        filename: string
+            The filename of the hdf5 file.
+
+        dataset_name: string
+            The name of the dataset to read from.  If the dataset has a units
+            attribute, attempt to infer units as well.
+
+        """
+        import h5py
+        from yt.extern.six.moves import cPickle as pickle
+
+        if dataset_name is None:
+            dataset_name = 'array_data'
+
+        f = h5py.File(filename)
+        dataset = f[dataset_name]
+        data = dataset[:]
+        units = dataset.attrs.get('units', '')
+        if 'unit_registry' in dataset.attrs.keys():
+            unit_lut = pickle.loads(dataset.attrs['unit_registry'])
+        else:
+            unit_lut = None
+
+        registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
+        return cls(data, units, registry=registry)
+
     #
     # Start convenience methods
     #
@@ -766,7 +853,7 @@
 
     @return_arr
     def prod(self, axis=None, dtype=None, out=None):
-        if axis:
+        if axis is not None:
             units = self.units**self.shape[axis]
         else:
             units = self.units**self.size
@@ -814,9 +901,13 @@
             # Raise YTUnitOperationError up here since we know the context now
             except RuntimeError:
                 raise YTUnitOperationError(context[0], u)
+            ret_class = type(self)
         elif context[0] in binary_operators:
             unit1 = getattr(context[1][0], 'units', None)
             unit2 = getattr(context[1][1], 'units', None)
+            cls1 = type(context[1][0])
+            cls2 = type(context[1][1])
+            ret_class = get_binary_op_return_class(cls1, cls2)
             if unit1 is None:
                 unit1 = Unit(registry=getattr(unit2, 'registry', None))
             if unit2 is None and context[0] is not power:
@@ -849,10 +940,15 @@
             out_arr = np.array(out_arr)
             return out_arr
         out_arr.units = unit
-        if out_arr.size > 1:
-            return YTArray(np.array(out_arr), unit)
+        if out_arr.size == 1:
+            return YTQuantity(np.array(out_arr), unit)
         else:
-            return YTQuantity(np.array(out_arr), unit)
+            if ret_class is YTQuantity:
+                # This happens if you do ndarray * YTQuantity. Explicitly
+                # casting to YTArray avoids creating a YTQuantity with size > 1
+                return YTArray(np.array(out_arr), unit)
+            return ret_class(np.array(out_arr), unit)
+
 
     def __reduce__(self):
         """Pickle reduction method
@@ -929,3 +1025,22 @@
         return data.pf.arr(x, units)
     else:
         return data.pf.quan(x, units)
+
+def get_binary_op_return_class(cls1, cls2):
+    if cls1 is cls2:
+        return cls1
+    if cls1 is np.ndarray or issubclass(cls1, numeric_type):
+        return cls2
+    if cls2 is np.ndarray or issubclass(cls2, numeric_type):
+        return cls1
+    if issubclass(cls1, YTQuantity):
+        return cls2
+    if issubclass(cls2, YTQuantity):
+        return cls1
+    if issubclass(cls1, cls2):
+        return cls1
+    if issubclass(cls2, cls1):
+        return cls2
+    else:
+        raise RuntimeError("Operations are only defined on pairs of objects"
+                           "in which one is a subclass of the other")

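The write_hdf5/from_hdf5 pair added to YTArray stores the unit string and a pickled unit registry as dataset attributes, so units survive a round trip through disk. A short usage sketch along the lines of test_h5_io above (file and dataset names are arbitrary):

    import numpy as np
    from yt.units.yt_array import YTArray

    a = YTArray(np.random.random((256, 256)), 'cm')

    # Data, units and the unit registry all end up in the dataset.
    a.write_hdf5('test_array_data.h5', dataset_name='my_array',
                 info={'field': 'dinosaurs'})

    # Units are rebuilt from the stored registry on the way back in.
    b = YTArray.from_hdf5('test_array_data.h5', dataset_name='my_array')
    assert (a == b).all()
    assert str(b.units) == 'cm'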

https://bitbucket.org/yt_analysis/yt/commits/6367f4780571/
Changeset:   6367f4780571
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-10 01:07:41
Summary:     Attempting to name contours after friendliest particle.
Affected #:  1 file

diff -r d1f4551ba70ed874f0f272fc5c58e9c262e56f7a -r 6367f47805712fff6d85636d85ee66de59c90ce4 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -765,17 +765,8 @@
                 offset = pind[doff[i] + j]
                 c1 = container[offset]
                 c0 = contour_find(c1)
-                contour_ids[offset] = c0.contour_id
-                c0.count += 1
-        for i in range(doff.shape[0]):
-            if doff[i] < 0: continue
-            for j in range(pcount[i]):
-                offset = pind[doff[i] + j]
-                c1 = container[offset]
-                if c1 == NULL: continue
-                c0 = contour_find(c1)
-                if c0.count < self.minimum_count:
-                    contour_ids[offset] = -1
+                # Set to the ID of the friendliest particle.
+                contour_ids[offset] = particle_ids[pind[c0.contour_id]]
         free(container)
         del pind
         # We can now remake our contour IDs, count the number of them, and
@@ -806,7 +797,7 @@
         # Note that pind0 will not monotonically increase, but 
         c0 = container[pind0]
         if c0 == NULL:
-            c0 = container[pind0] = contour_create(pind0, self.last)
+            c0 = container[pind0] = contour_create(poffset, self.last)
             self.last = c0
             if self.first == NULL:
                 self.first = c0


https://bitbucket.org/yt_analysis/yt/commits/995b50f1d233/
Changeset:   995b50f1d233
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-10 16:37:21
Summary:     Adding period and nMembers to RunFOF arguments.
Affected #:  1 file

diff -r 6367f47805712fff6d85636d85ee66de59c90ce4 -r 995b50f1d2332d1ddb24de9ee198ce6f01ee3ef8 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -32,11 +32,15 @@
     PyArrayObject    *xpos, *ypos, *zpos;
     xpos=ypos=zpos=NULL;
     float link = 0.2;
+    float fPeriod[3] = {1.0, 1.0, 1.0};
+    int nMembers = 8;
 
     int i;
 
-    if (!PyArg_ParseTuple(args, "OOO|f",
-        &oxpos, &oypos, &ozpos, &link))
+    if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
+        &oxpos, &oypos, &ozpos, &link,
+        &fPeriod[0], &fPeriod[1], &fPeriod[2],
+        &nMembers))
     return PyErr_Format(_FOFerror,
             "EnzoFOF: Invalid parameters.");
 
@@ -74,8 +78,8 @@
 
 	KDFOF kd;
 	int nBucket,j;
-	float fPeriod[3],fEps;
-	int nMembers,nGroup,bVerbose=1;
+	float fEps;
+	int nGroup,bVerbose=1;
 	int sec,usec;
 	
 	/* linking length */
@@ -83,9 +87,6 @@
 	fEps = link;
 	
 	nBucket = 16;
-	nMembers = 8;
-
-	for (j=0;j<3;++j) fPeriod[j] = 1.0;
 
     /* initialize the kd FOF structure */
 

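The new format string "OOO|f(fff)i" makes the linking length, the per-axis period and the minimum group size optional. Assuming the compiled extension exposes RunFOF at the import path below (the path is not shown in this diff), a Python-side call would look roughly like:

    import numpy as np
    # Hypothetical import path; the compiled module sits next to EnzoFOF.c.
    from yt.analysis_modules.halo_finding.fof.EnzoFOF import RunFOF

    x, y, z = (np.random.random(10000) for _ in range(3))

    # Positions only: link, fPeriod and nMembers keep their C defaults,
    # i.e. 0.2, (1.0, 1.0, 1.0) and 8.
    groups = RunFOF(x, y, z)

    # Fully specified: linking length, per-axis period, minimum members.
    groups = RunFOF(x, y, z, 0.2, (1.0, 1.0, 1.0), 8)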

https://bitbucket.org/yt_analysis/yt/commits/eeb2352baab8/
Changeset:   eeb2352baab8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-10 16:54:29
Summary:     Enable minimum count.
Affected #:  1 file

diff -r 995b50f1d2332d1ddb24de9ee198ce6f01ee3ef8 -r eeb2352baab8c5636814f53ecdec72cff5fbef5a yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -765,8 +765,9 @@
                 offset = pind[doff[i] + j]
                 c1 = container[offset]
                 c0 = contour_find(c1)
-                # Set to the ID of the friendliest particle.
-                contour_ids[offset] = particle_ids[pind[c0.contour_id]]
+                if c0.count >= self.minimum_count:
+                    # Set to the ID of the friendliest particle.
+                    contour_ids[offset] = particle_ids[pind[c0.contour_id]]
         free(container)
         del pind
         # We can now remake our contour IDs, count the number of them, and


https://bitbucket.org/yt_analysis/yt/commits/01a26e2dbf4f/
Changeset:   01a26e2dbf4f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-10 18:54:43
Summary:     Make our FOF tags work again.
Affected #:  1 file

diff -r eeb2352baab8c5636814f53ecdec72cff5fbef5a -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -59,13 +59,19 @@
     # root.
     while node.parent != NULL:
         temp = node.parent
+        root.count += node.count
+        node.count = 0
         node.parent = root
         node = temp
     return root
 
 cdef inline void contour_union(ContourID *node1, ContourID *node2):
+    if node1 == node2:
+        return
     node1 = contour_find(node1)
     node2 = contour_find(node2)
+    if node1 == node2:
+        return
     cdef ContourID *pri, *sec
     if node1.count > node2.count:
         pri = node1
@@ -758,24 +764,18 @@
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
         contour_ids = np.ones(positions.shape[0], dtype="int64")
         contour_ids *= -1
-        # Sort on our particle IDs.
-        for i in range(doff.shape[0]):
-            if doff[i] < 0: continue
-            for j in range(pcount[i]):
-                offset = pind[doff[i] + j]
-                c1 = container[offset]
-                c0 = contour_find(c1)
-                if c0.count >= self.minimum_count:
-                    # Set to the ID of the friendliest particle.
-                    contour_ids[offset] = particle_ids[pind[c0.contour_id]]
+        # Perform one last contour_find on each.  Note that we no longer need
+        # to look at any of the doff or internal offset stuff.
+        for i in range(positions.shape[0]):
+            if container[i] == NULL: continue
+            container[i] = contour_find(container[i])
+        for i in range(positions.shape[0]):
+            if container[i] == NULL: continue
+            c0 = container[i]
+            if c0.count < self.minimum_count: continue
+            contour_ids[i] = particle_ids[pind[c0.contour_id]]
         free(container)
         del pind
-        # We can now remake our contour IDs, count the number of them, and
-        # reassign.
-        cdef np.ndarray[np.int64_t, ndim=1] ufof_tags = np.unique(contour_ids)
-        cdef np.int64_t nfof_tags = ufof_tags.size
-        # This is, at most, how many tags we'll have.  Now we just need to
-        # assign to them.
         return contour_ids
 
     @cython.cdivision(True)
@@ -827,8 +827,8 @@
                                 self.linking_length2, edges)
             if link == 0: continue
             if c1 == NULL:
+                c0.count += 1
                 container[pind1] = c0
-                c0.count += 1
             elif c0.contour_id != c1.contour_id:
                 contour_union(c0, c1)
                 c0 = container[pind1] = container[pind0] = contour_find(c0)

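With the unions done, the final loop collapses every particle's node to its root once and keeps only roots with at least minimum_count members; the tag written out is the ID of the "friendliest" particle, recovered by sending the root's contour_id back through the pind sort. A plain-Python sketch of that pass, assuming nodes shaped like the union-find sketch earlier with the merged count already sitting on the root:

    import numpy as np

    def _root(node):
        # Follow parent links to the top of the union-find tree.
        while node.parent is not None:
            node = node.parent
        return node

    def assign_fof_tags(container, particle_ids, pind, minimum_count):
        # container[i]: union-find node for particle i, or None if unlinked;
        # pind: the sort order used when the contour nodes were created.
        contour_ids = -np.ones(len(container), dtype="int64")
        for i, node in enumerate(container):
            if node is None:
                continue
            root = _root(node)
            if root.count < minimum_count:
                continue
            # Tag with the ID of the particle that seeded the surviving contour.
            contour_ids[i] = particle_ids[pind[root.contour_id]]
        return contour_ids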

https://bitbucket.org/yt_analysis/yt/commits/220e0757d55f/
Changeset:   220e0757d55f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-12 21:26:37
Summary:     Micro-optimizations for 2x speedup in sphere selection.
Affected #:  2 files

diff -r 80b6a3399f45da6e13828a947813cbe0e057f63d -r 220e0757d55f8285eba7819112c20561a1f35867 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -34,7 +34,7 @@
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
+        np.multiply(r, r, r)
         np.add(radius2, r, radius2)
         if data.pf.dimensionality < i+1:
             break

diff -r 80b6a3399f45da6e13828a947813cbe0e057f63d -r 220e0757d55f8285eba7819112c20561a1f35867 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -592,16 +592,25 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
+        if right_edge[0] < self.bbox[0][0] or \
+           right_edge[1] < self.bbox[1][0] or \
+           right_edge[2] < self.bbox[2][0]:
+            return 0
+        if left_edge[0] > self.bbox[0][1] or \
+           left_edge[1] > self.bbox[1][1] or \
+           left_edge[2] > self.bbox[2][1]:
+            return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):
+            # Early terminate
             box_center = (right_edge[i] + left_edge[i])/2.0
             relcenter = self.difference(box_center, self.center[i], i)
             edge = right_edge[i] - left_edge[i]
             closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
             dist += closest*closest
-        if dist <= self.radius2: return 1
-        return 0
+            if dist > self.radius2: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.radius, self.radius2,

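The select_bbox change is the standard sphere/AABB overlap test (clamp the center to the box and accumulate the squared distance), now preceded by a reject against the sphere's own bounding box and cut short as soon as the running distance exceeds the squared radius. A sketch of the same test, ignoring the periodic-image handling that self.difference performs, where bbox[i] holds (center[i] - radius, center[i] + radius):

    def sphere_overlaps_box(center, radius2, bbox, left_edge, right_edge):
        # Quick accept: the box contains the sphere center.
        if all(left_edge[i] <= center[i] <= right_edge[i] for i in range(3)):
            return True
        # Quick reject: the box lies entirely outside the sphere's bounding box.
        for i in range(3):
            if right_edge[i] < bbox[i][0] or left_edge[i] > bbox[i][1]:
                return False
        # Clamp the center to the box, accumulating the squared distance and
        # bailing out as soon as it exceeds the squared radius.
        dist2 = 0.0
        for i in range(3):
            closest = min(max(center[i], left_edge[i]), right_edge[i])
            d = center[i] - closest
            dist2 += d * d
            if dist2 > radius2:
                return False
        return True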

https://bitbucket.org/yt_analysis/yt/commits/5ab289b26c4e/
Changeset:   5ab289b26c4e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-12 21:58:22
Summary:     Fixing some partial commits.
Affected #:  1 file

diff -r 220e0757d55f8285eba7819112c20561a1f35867 -r 5ab289b26c4e6a8ee89d9b0e418ab3f863b4a4bf yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -336,9 +336,9 @@
         # is too.
         cdef np.float64_t rel = x1 - x2
         if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -538,12 +538,16 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
 
     def __init__(self, dobj):
-        for i in range(3):
-            self.center[i] = dobj.center[i]
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -563,10 +567,12 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
+            if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
+                return 0
             dist = self.difference(pos[i], self.center[i], i)
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/a32018489046/
Changeset:   a32018489046
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-13 02:51:14
Summary:     Disable the bounding-box checks when the sphere wraps a periodic boundary.
Affected #:  1 file

diff -r 5ab289b26c4e6a8ee89d9b0e418ab3f863b4a4bf -r a32018489046fe837d6905c1a4b9fa1d92ceccaf yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -539,15 +539,24 @@
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
     cdef np.float64_t bbox[3][2]
+    cdef bint check_box[3]
 
     def __init__(self, dobj):
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
         center = _ensure_code(dobj.center)
+        cdef np.float64_t mi = np.finfo("float64").min
+        cdef np.float64_t ma = np.finfo("float64").max
         for i in range(3):
             self.center[i] = center[i]
             self.bbox[i][0] = self.center[i] - self.radius
             self.bbox[i][1] = self.center[i] + self.radius
+            if self.bbox[i][0] < dobj.pf.domain_left_edge[i]:
+                self.check_box[i] = False
+            elif self.bbox[i][1] > dobj.pf.domain_right_edge[i]:
+                self.check_box[i] = False
+            else:
+                self.check_box[i] = True
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -568,7 +577,7 @@
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
             if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
-                return 0
+                if self.check_box[i]: return 0
             dist = self.difference(pos[i], self.center[i], i)
             dist2 += dist*dist
             if dist2 > self.radius2: return 0
@@ -598,14 +607,11 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
-        if right_edge[0] < self.bbox[0][0] or \
-           right_edge[1] < self.bbox[1][0] or \
-           right_edge[2] < self.bbox[2][0]:
-            return 0
-        if left_edge[0] > self.bbox[0][1] or \
-           left_edge[1] > self.bbox[1][1] or \
-           left_edge[2] > self.bbox[2][1]:
-            return 0
+        for i in range(3):
+            if not self.check_box[i]: continue
+            if right_edge[i] < self.bbox[i][0] or \
+               left_edge[i] > self.bbox[i][1]:
+                return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):

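The check_box flags record, per axis, whether the sphere's bounding box lies entirely inside the domain. Only then is the cheap bounding-box rejection valid: a sphere that pokes past a periodic edge can still contain points whose coordinates sit on the far side of the domain. Roughly:

    def make_check_box(center, radius, domain_left_edge, domain_right_edge):
        check_box = []
        for i in range(3):
            lo, hi = center[i] - radius, center[i] + radius
            # Disable the fast bbox reject on any axis where the sphere
            # would wrap around a periodic boundary.
            check_box.append(domain_left_edge[i] <= lo and hi <= domain_right_edge[i])
        return check_box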

https://bitbucket.org/yt_analysis/yt/commits/e773029e60d5/
Changeset:   e773029e60d5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-12 18:39:10
Summary:     Merging from mainline
Affected #:  16 files

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -3,10 +3,10 @@
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
-sp = ds.sphere("center", (15.0, "kpc"))
+# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
+# that this sphere is very small compared to the size of our final plot,
+# and it has a non-axially aligned L vector.
+sp = ds.sphere("m", (1.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
@@ -14,5 +14,5 @@
 print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
 p.save()

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2f774139560d94508c2c51b70930d46941d9ceef7228655de32a69634f6c6d83"
+  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -45,7 +45,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0)"
+      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0, z_axis_decomp=True)"
      ],
      "language": "python",
      "metadata": {},
@@ -179,6 +179,31 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "We can also make a projection of all the emission along the line of sight:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -66,13 +66,13 @@
         if isinstance(outputs, DatasetSeries):
             self.data_series = outputs
         else:
-            self.data_series = DatasetSeries.from_filenames(outputs)
+            self.data_series = DatasetSeries(outputs)
         self.masks = []
         self.sorts = []
         self.array_indices = []
         self.indices = indices
         self.num_indices = len(indices)
-        self.num_steps = len(filenames)
+        self.num_steps = len(outputs)
         self.times = []
 
         # Default fields 

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -317,7 +317,7 @@
             finfo = self.pf._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             units = finfo.units
-            if self.weight_field is None:
+            if self.weight_field is None and not self._sum_only:
                 # See _handle_chunk where we mandate cm
                 if units == '':
                     input_units = "cm"
@@ -329,7 +329,7 @@
             self[field] = YTArray(field_data[fi].ravel(),
                                   input_units=input_units,
                                   registry=self.pf.unit_registry)
-            if self.weight_field is None:
+            if self.weight_field is None and not self._sum_only:
                 u_obj = Unit(units, registry=self.pf.unit_registry)
                 if u_obj.is_code_unit and input_units != units \
                     or self.pf.no_cgs_equiv_length:

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -139,12 +139,14 @@
             return
         elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
             center = self.pf.arr(center, 'code_length')
-        elif center in ("c", "center"):
-            center = self.pf.domain_center
-        elif center == ("max"): # is this dangerous for race conditions?
-            center = self.pf.h.find_max("density")[1]
-        elif center.startswith("max_"):
-            center = self.pf.h.find_max(center[4:])[1]
+        elif isinstance(center, basestring):
+            if center.lower() in ("c", "center"):
+                center = self.pf.domain_center
+             # is this dangerous for race conditions?
+            elif center.lower() in ("max", "m"):
+                center = self.pf.h.find_max(("gas", "density"))[1]
+            elif center.startswith("max_"):
+                center = self.pf.h.find_max(center[4:])[1]
         else:
             center = np.array(center, dtype='float64')
         self.center = self.pf.arr(center, 'code_length')

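The rewritten branch above makes the string shorthands for `center` case-insensitive and adds "m" as an alias for "max" (the location of the maximum ("gas", "density")). A short sketch, assuming `ds` is an already-loaded dataset:

    sp_c = ds.sphere("c", (10.0, "kpc"))               # domain center
    sp_m = ds.sphere("m", (1.0, "kpc"))                # same as "max": peak gas density
    sp_t = ds.sphere("max_temperature", (1.0, "kpc"))  # peak of another field
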
diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -447,12 +447,12 @@
         >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
-            assert_valid_width_tuple(width)
+            validate_width_tuple(width)
             width = self.pf.quan(width[0], width[1])
         if height is None:
             height = width
         elif iterable(height):
-            assert_valid_width_tuple(height)
+            validate_width_tuple(height)
             height = self.pf.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -17,6 +17,7 @@
 import weakref
 import warnings
 import re
+import uuid
 
 from yt.config import ytcfg
 from yt.funcs import *
@@ -200,37 +201,49 @@
             self.parameter_file.field_units[k] = self.parameter_file.field_units[primary_fname]
 
     def _count_grids(self):
-        self.num_grids = self.pf.nprocs
+        self.num_grids = self.pf.parameters["nprocs"]
 
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
 
         # If nprocs > 1, decompose the domain into virtual grids
-        if pf.nprocs > 1:
-            bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
-                                                       pf.domain_right_edge)])
-            dims = np.array(pf.domain_dimensions)
-            # If we are creating a dataset of lines, only decompose along the position axes
-            if len(pf.line_database) > 0:
-                dims[pf.vel_axis] = 1
-            psize = get_psize(dims, pf.nprocs)
-            gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
-            self.grid_left_edge = self.pf.arr(gle, "code_length")
-            self.grid_right_edge = self.pf.arr(gre, "code_length")
-            self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
-            # If we are creating a dataset of lines, only decompose along the position axes
-            if len(pf.line_database) > 0:
-                self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
-
+        if self.num_grids > 1:
+            if self.pf.z_axis_decomp:
+                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
+                                                            self.num_grids).astype("int")
+                self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
+                self.grid_left_edge[0,2] = pf.domain_left_edge[2]
+                self.grid_left_edge[1:,2] = pf.domain_left_edge[2] + \
+                                            np.cumsum(self.grid_dimensions[:-1,2])*dz
+                self.grid_right_edge[:,2] = self.grid_left_edge[:,2]+self.grid_dimensions[:,2]*dz
+                self.grid_left_edge[:,:2] = pf.domain_left_edge[:2]
+                self.grid_right_edge[:,:2] = pf.domain_right_edge[:2]
+                self.grid_dimensions[:,:2] = pf.domain_dimensions[:2]
+            else:
+                bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
+                                                           pf.domain_right_edge)])
+                dims = np.array(pf.domain_dimensions)
+                # If we are creating a dataset of lines, only decompose along the position axes
+                if len(pf.line_database) > 0:
+                    dims[pf.vel_axis] = 1
+                psize = get_psize(dims, self.num_grids)
+                gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
+                self.grid_left_edge = self.pf.arr(gle, "code_length")
+                self.grid_right_edge = self.pf.arr(gre, "code_length")
+                self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
+                # If we are creating a dataset of lines, only decompose along the position axes
+                if len(pf.line_database) > 0:
+                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
+                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
+                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
 
-        if self.pf.events_data:
+        if pf.events_data:
             try:
                 self.grid_particle_count[:] = pf.primary_header["naxis2"]
             except KeyError:
@@ -290,6 +303,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
                  suppress_astropy_warnings = True,
@@ -297,8 +311,11 @@
 
         if parameters is None:
             parameters = {}
+        parameters["nprocs"] = nprocs
         self.specified_parameters = parameters
 
+        self.z_axis_decomp = z_axis_decomp
+
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
@@ -322,11 +339,15 @@
             self.nan_mask = {"all":nan_mask}
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
-        self.nprocs = nprocs
-        self._handle = _astropy.pyfits.open(self.filenames[0],
-                                      memmap=True,
-                                      do_not_scale_image_data=True,
-                                      ignore_blank=True)
+        if isinstance(self.filenames[0], _astropy.pyfits.PrimaryHDU):
+            self._handle = _astropy.pyfits.HDUList(self.filenames[0])
+            fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+        else:
+            self._handle = _astropy.pyfits.open(self.filenames[0],
+                                                memmap=True,
+                                                do_not_scale_image_data=True,
+                                                ignore_blank=True)
+            fn = self.filenames[0]
         self._fits_files = [self._handle]
         if self.num_files > 1:
             for fits_file in auxiliary_files:
@@ -387,7 +408,7 @@
 
         self.refine_by = 2
 
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, fn, dataset_type)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):
@@ -435,8 +456,11 @@
 
     def _parse_parameter_file(self):
 
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        if self.parameter_filename.startswith("InMemory"):
+            self.unique_identifier = time.time()
+        else:
+            self.unique_identifier = \
+                int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
         # Determine dimensionality
 
@@ -472,14 +496,26 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
-        # If this is a 2D events file, no need to decompose
-        if self.events_data: self.nprocs = 1
+        if self.dimensionality == 2 and self.z_axis_decomp:
+            mylog.warning("You asked to decompose along the z-axis, but this is a 2D dataset. " +
+                          "Ignoring.")
+            self.z_axis_decomp = False
+
+        if self.events_data: self.specified_parameters["nprocs"] = 1
 
         # If nprocs is None, do some automatic decomposition of the domain
-        if self.nprocs is None:
-            self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                    32**self.dimensionality).astype("int")
-            self.nprocs = max(min(self.nprocs, 512), 1)
+        if self.specified_parameters["nprocs"] is None:
+            if len(self.line_database) > 0:
+                dims = 2
+            else:
+                dims = self.dimensionality
+            if self.z_axis_decomp:
+                nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
+            else:
+                nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+            self.parameters["nprocs"] = max(min(nprocs, 512), 1)
+        else:
+            self.parameters["nprocs"] = self.specified_parameters["nprocs"]
 
         self.reversed = False
 

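The new `z_axis_decomp` keyword splits a FITS cube into slabs along the z axis (the spectral axis of a PPV cube) instead of performing a full 3D decomposition, and `nprocs` now travels in the parameters dict. A one-line usage sketch, mirroring the cookbook notebook change later in this digest:

    ds = yt.load("radio_fits/m33_hi.fits", nan_mask=0.0, z_axis_decomp=True)
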
diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -88,7 +88,7 @@
             for chunk in chunks:
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
-                    end = ((g.RightEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
+                    end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
                         my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
                         my_off = my_off - 0.5*self.pf.line_width

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -660,17 +660,14 @@
     if not os.path.exists(path):
         only_on_root(os.makedirs, path)
     return path
-        
-def assert_valid_width_tuple(width):
-    try:
-        assert iterable(width) and len(width) == 2, \
-            "width (%s) is not a two element tuple" % width
-        valid = isinstance(width[0], numeric_type) and isinstance(width[1], str)
+
+def validate_width_tuple(width):
+    if not iterable(width) or len(width) != 2:
+        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
+    if not (isinstance(width[0], numeric_type) and isinstance(width[1], basestring)):
         msg = "width (%s) is invalid. " % str(width)
         msg += "Valid widths look like this: (12, 'au')"
-        assert valid, msg
-    except AssertionError, e:
-        raise YTInvalidWidthError(e)
+        raise YTInvalidWidthError(msg)
 
 def camelcase_to_underscore(name):
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ b/yt/geometry/ppv_coordinates.py
@@ -25,8 +25,6 @@
 
         self.axis_name = {}
         self.axis_id = {}
-        self.x_axis = {}
-        self.y_axis = {}
 
         for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
                                    ["Image\ x", "Image\ y", pf.vel_name]):
@@ -42,28 +40,6 @@
             self.axis_id[axis] = axis
             self.axis_id[axis_name] = axis
 
-            if axis == 0:
-                self.x_axis[axis] = 1
-                self.x_axis[lower_ax] = 1
-                self.x_axis[axis_name] = 1
-                self.y_axis[axis] = 2
-                self.y_axis[lower_ax] = 2
-                self.y_axis[axis_name] = 2
-            elif axis == 1:
-                self.x_axis[axis] = 2
-                self.x_axis[lower_ax] = 2
-                self.x_axis[axis_name] = 2
-                self.y_axis[axis] = 0
-                self.y_axis[lower_ax] = 0
-                self.y_axis[axis_name] = 0
-            elif axis == 2:
-                self.x_axis[axis] = 0
-                self.x_axis[lower_ax] = 0
-                self.x_axis[axis_name] = 0
-                self.y_axis[axis] = 1
-                self.y_axis[lower_ax] = 1
-                self.y_axis[axis_name] = 1
-
         self.default_unit_label = {}
         self.default_unit_label[pf.lon_axis] = "pixel"
         self.default_unit_label[pf.lat_axis] = "pixel"
@@ -75,3 +51,8 @@
     def convert_from_cylindrical(self, coord):
         raise NotImplementedError
 
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/utilities/lib/ragged_arrays.pyx
--- /dev/null
+++ b/yt/utilities/lib/ragged_arrays.pyx
@@ -0,0 +1,97 @@
+"""
+Some simple operations for operating on ragged arrays
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef fused numpy_dt:
+    np.float32_t
+    np.float64_t
+    np.int32_t
+    np.int64_t
+
+cdef numpy_dt r_min(numpy_dt a, numpy_dt b):
+    if a < b: return a
+    return b
+
+cdef numpy_dt r_max(numpy_dt a, numpy_dt b):
+    if a > b: return a
+    return b
+
+cdef numpy_dt r_add(numpy_dt a, numpy_dt b):
+    return a + b
+
+cdef numpy_dt r_subtract(numpy_dt a, numpy_dt b):
+    return a - b
+
+cdef numpy_dt r_multiply(numpy_dt a, numpy_dt b):
+    return a * b
+
+@cython.cdivision(True)
+cdef numpy_dt r_divide(numpy_dt a, numpy_dt b):
+    return a / b
+
+def index_unop(np.ndarray[numpy_dt, ndim=1] values,
+              np.ndarray[np.int64_t, ndim=1] indices,
+              np.ndarray[np.int64_t, ndim=1] sizes,
+              operation):
+    cdef numpy_dt mi, ma
+    if numpy_dt == np.float32_t:
+        dt = "float32"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.float64_t:
+        dt = "float64"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.int32_t:
+        dt = "int32"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    elif numpy_dt == np.int64_t:
+        dt = "int64"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    cdef np.ndarray[numpy_dt] out_values = np.zeros(sizes.size, dtype=dt)
+    cdef numpy_dt (*func)(numpy_dt a, numpy_dt b)
+    # Now we figure out our function.  At present, we only allow addition and
+    # multiplication, because they are commutative and easy to bootstrap.
+    cdef numpy_dt ival, val
+    if operation == "sum":
+        ival = 0
+        func = r_add
+    elif operation == "prod":
+        ival = 1
+        func = r_multiply
+    elif operation == "max":
+        ival = mi
+        func = r_max
+    elif operation == "min":
+        ival = ma
+        func = r_min
+    else:
+        raise NotImplementedError
+    cdef np.int64_t i, j, ind_ind, ind_arr
+    ind_ind = 0
+    for i in range(sizes.size):
+        # Each entry in sizes is the size of the array
+        val = ival
+        for j in range(sizes[i]):
+            ind_arr = indices[ind_ind]
+            val = func(val, values[ind_arr])
+            ind_ind += 1
+        out_values[i] = val
+    return out_values

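The new `index_unop` helper reduces a "ragged" array: `indices` selects values, `sizes` gives the length of each consecutive segment, and each segment is collapsed with the requested operation ("sum", "prod", "max" or "min"). A minimal usage sketch, assuming the extension has been rebuilt so the module is importable:

    import numpy as np
    from yt.utilities.lib.ragged_arrays import index_unop

    values  = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype="float64")
    indices = np.array([0, 1, 2, 3, 4], dtype="int64")
    sizes   = np.array([3, 2], dtype="int64")         # segments: [1,2,3] and [4,5]
    print index_unop(values, indices, sizes, "sum")   # -> [ 6.  9.]
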
diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -139,6 +139,8 @@
           )
     config.add_extension("write_array",
                          ["yt/utilities/lib/write_array.pyx"])
+    config.add_extension("ragged_arrays",
+                         ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/utilities/lib/tests/test_ragged_arrays.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_ragged_arrays.py
@@ -0,0 +1,36 @@
+from yt.testing import *
+import numpy as np
+from yt.utilities.lib.ragged_arrays import index_unop
+
+operations = ((np.sum, "sum"),
+              (np.prod, "prod"),
+              (np.max, "max"),
+              (np.min, "min"))
+dtypes = ((-1e8, 1e8, "float32"),
+          (-1e8, 1e8, "float64"),
+          (-10000, 10000, "int32"),
+          (-100000000, 100000000, "int64"))
+
+def test_index_unop():
+    np.random.seed(0x4d3d3d3)
+    indices = np.arange(1000)
+    np.random.shuffle(indices)
+    sizes = np.array([
+        200, 50, 50, 100, 32, 32, 32, 32, 32, 64, 376], dtype="int64")
+    for mi, ma, dtype in dtypes:
+        for op, operation in operations:
+            # Create a random set of values
+            values = np.random.random(1000)
+            if operation != "prod":
+                values = values * ma + (ma - mi)
+            if operation == "prod" and dtype.startswith("int"):
+                values = values.astype(dtype)
+                values[values != 0] = 1
+                values[values == 0] = -1
+            values = values.astype(dtype)
+            out_values = index_unop(values, indices, sizes, operation)
+            i = 0
+            for j, v in enumerate(sizes):
+                arr = values[indices[i:i+v]]
+                yield assert_equal, op(arr), out_values[j]
+                i += v

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -1,3 +1,17 @@
+"""
+A base class for "image" plots with colorbars.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import __builtin__
 import base64
 import numpy as np

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -17,8 +17,6 @@
 import matplotlib
 import types
 import sys
-import os
-from yt.extern.six.moves import builtins, StringIO
 import warnings
 
 from matplotlib.delaunay.triangulate import Triangulation as triang
@@ -39,12 +37,19 @@
     ImagePlotContainer, log_transform, linear_transform, \
     invalidate_data, invalidate_plot, apply_callback
 
+from yt.data_objects.time_series import \
+    DatasetSeries
+from yt.extern.six.moves import \
+    StringIO
 from yt.funcs import \
     mylog, iterable, ensure_list, \
-    fix_axis, assert_valid_width_tuple
-from yt.units.unit_object import Unit
+    fix_axis, validate_width_tuple
+from yt.units.unit_object import \
+    Unit
 from yt.units.unit_registry import \
-     UnitParseError
+    UnitParseError
+from yt.units.yt_array import \
+    YTArray, YTQuantity
 from yt.utilities.png_writer import \
     write_png_to_string
 from yt.utilities.definitions import \
@@ -57,10 +62,6 @@
     YTCannotParseUnitDisplayName, \
     YTUnitConversionError
 
-from yt.data_objects.time_series import \
-    DatasetSeries
-from yt.units.yt_array import YTArray, YTQuantity
-
 # Some magic for dealing with pyparsing being included or not
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
@@ -82,18 +83,10 @@
     else:
         return u
 
-def assert_valid_width_tuple(width):
-    if not iterable(width) or len(width) != 2:
-        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
-    if not isinstance(width[0], Number) and isinstance(width[1], basestring):
-        msg = "width (%s) is invalid. " % str(width)
-        msg += "Valid widths look like this: (12, 'au')"
-        raise YTInvalidWidthError(msg)
-
 def validate_iterable_width(width, pf, unit=None):
     if isinstance(width[0], tuple) and isinstance(width[1], tuple):
-        assert_valid_width_tuple(width[0])
-        assert_valid_width_tuple(width[1])
+        validate_width_tuple(width[0])
+        validate_width_tuple(width[1])
         return (pf.quan(width[0][0], fix_unitary(width[0][1])),
                 pf.quan(width[1][0], fix_unitary(width[1][1])))
     elif isinstance(width[0], Number) and isinstance(width[1], Number):
@@ -102,11 +95,11 @@
     elif isinstance(width[0], YTQuantity) and isinstance(width[1], YTQuantity):
         return (pf.quan(width[0]), pf.quan(width[1]))
     else:
-        assert_valid_width_tuple(width)
+        validate_width_tuple(width)
         # If width and unit are both valid width tuples, we
         # assume width controls x and unit controls y
         try:
-            assert_valid_width_tuple(unit)
+            validate_width_tuple(unit)
             return (pf.quan(width[0], fix_unitary(width[1])),
                     pf.quan(unit[0], fix_unitary(unit[1])))
         except YTInvalidWidthError:
@@ -137,7 +130,7 @@
         raise YTInvalidWidthError(width)
     if depth is not None:
         if iterable(depth):
-            assert_valid_width_tuple(depth)
+            validate_width_tuple(depth)
             depth = (pf.quan(depth[0], fix_unitary(depth[1])), )
         elif isinstance(depth, Number):
             depth = (pf.quan(depth, 'code_length',
@@ -180,8 +173,8 @@
     elif pf.geometry == "spherical":
         if axis == 0:
             width = pf.domain_width[1], pf.domain_width[2]
-            center = 0.5*(pf.domain_left_edge +
-                pf.domain_right_edge).in_units("code_length")
+            center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
+            center.convert_to_units("code_length")
         else:
             # Our default width here is the full domain
             width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
@@ -217,7 +210,8 @@
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
 
-    bounds = tuple( ( (2*(i%2))-1)*width[i//2]/2 for i in range(len(width)*2))
+    w = tuple(el.in_units('unitary') for el in width)
+    bounds = tuple(((2*(i % 2))-1)*w[i//2]/2 for i in range(len(w)*2))
 
     return (bounds, center)
 
@@ -343,10 +337,9 @@
             bounds = self.xlim+self.ylim
         if self._frb_generator is ObliqueFixedResolutionBuffer:
             bounds = np.array(bounds)
-        self.frb = self._frb_generator(self.data_source,
-                                        bounds, self.buff_size,
-                                        self.antialias,
-                                        periodic=self._periodic)
+
+        self.frb = self._frb_generator(self.data_source, bounds, self.buff_size,
+                                       self.antialias, periodic=self._periodic)
         if old_fields is None:
             self.frb._get_data_source_fields()
         else:
@@ -400,8 +393,7 @@
         if len(deltas) != 2:
             raise RuntimeError(
                 "The pan function accepts a two-element sequence.\n"
-                "Received %s." % (deltas, )
-                )
+                "Received %s." % (deltas, ))
         if isinstance(deltas[0], Number) and isinstance(deltas[1], Number):
             deltas = (self.pf.quan(deltas[0], 'code_length'),
                       self.pf.quan(deltas[1], 'code_length'))
@@ -413,8 +405,7 @@
         else:
             raise RuntimeError(
                 "The arguments of the pan function must be a sequence of floats,\n"
-                "quantities, or (float, unit) tuples. Received %s." % (deltas, )
-                )
+                "quantities, or (float, unit) tuples. Received %s." % (deltas, ))
         self.xlim = (self.xlim[0] + deltas[0], self.xlim[1] + deltas[0])
         self.ylim = (self.ylim[0] + deltas[1], self.ylim[1] + deltas[1])
         return self
@@ -480,10 +471,10 @@
             self.ylim = tuple(bounds[2:4])
             if len(bounds) == 6:
                 self.zlim = tuple(bounds[4:6])
-        mylog.info("xlim = %f %f" %self.xlim)
-        mylog.info("ylim = %f %f" %self.ylim)
+        mylog.info("xlim = %f %f" % self.xlim)
+        mylog.info("ylim = %f %f" % self.ylim)
         if hasattr(self,'zlim'):
-            mylog.info("zlim = %f %f" %self.zlim)
+            mylog.info("zlim = %f %f" % self.zlim)
 
     @invalidate_data
     def set_width(self, width, unit = None):
@@ -634,12 +625,11 @@
         Examples
         --------
 
-        >>> p = ProjectionPlot(pf, "y", "density")
-        >>> p.show()
+        >>> from yt import load
+        >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> p = ProjectionPlot(ds, "y", "Density")
         >>> p.set_axes_unit("kpc")
-        >>> p.show()
-        >>> p.set_axes_unit(None)
-        >>> p.show()
+
         """
         # blind except because it could be in conversion_factors or units
         if unit_name is not None:
@@ -694,8 +684,8 @@
             xllim, xrlim = self.xlim
             yllim, yrlim = self.ylim
         elif origin[2] == 'domain':
-            xax = pf.coordinates.x_axis[axis_index]
-            yax = pf.coordinates.y_axis[axis_index]
+            xax = self.pf.coordinates.x_axis[axis_index]
+            yax = self.pf.coordinates.y_axis[axis_index]
             xllim = self.pf.domain_left_edge[xax]
             xrlim = self.pf.domain_right_edge[xax]
             yllim = self.pf.domain_left_edge[yax]
@@ -706,8 +696,8 @@
         else:
             mylog.warn("origin = {0}".format(origin))
             msg = \
-              ('origin keyword "{0}" not recognized, must declare "domain" '
-               'or "center" as the last term in origin.').format(self.origin)
+                ('origin keyword "{0}" not recognized, must declare "domain" '
+                 'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -756,7 +746,8 @@
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:
-                self.aspect = np.float64(self.pf.quan(1.0, unit_y)/(self.pf.quan(1.0, unit_x)))
+                self.aspect = np.float64(self.pf.quan(1.0, unit_y) /
+                                         self.pf.quan(1.0, unit_x))
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -771,11 +762,10 @@
             image = self.frb[f]
 
             if image.max() == image.min():
-              if self._field_transform[f] == log_transform:
-                mylog.warning("Plot image for field %s has zero dynamic " \
-                              "range. Min = Max = %d." % \
-                              (f, image.max()))
-                mylog.warning("Switching to linear colorbar scaling.")
+                if self._field_transform[f] == log_transform:
+                    mylog.warning("Plot image for field %s has zero dynamic "
+                                  "range. Min = Max = %d." % (f, image.max()))
+                    mylog.warning("Switching to linear colorbar scaling.")
                 self._field_transform[f] = linear_transform
 
             fp = self._font_properties
@@ -893,10 +883,9 @@
             if self._font_color is not None:
                 ax = self.plots[f].axes
                 cbax = self.plots[f].cb.ax
-                labels = \
-                  ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() + \
-                  cbax.yaxis.get_ticklabels() + \
-                  [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
+                labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
+                labels += cbax.yaxis.get_ticklabels()
+                labels += [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
                 for label in labels:
                     label.set_color(self._font_color)
 
@@ -1013,8 +1002,9 @@
 
     This will save an image to the file 'sliceplot_Density
 
-    >>> pf = load('galaxy0030/galaxy0030')
-    >>> p = SlicePlot(pf,2,'Density','c',(20,'kpc'))
+    >>> from yt import load
+    >>> ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> p = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
     >>> p.save('sliceplot')
 
     """
@@ -1138,11 +1128,12 @@
     Examples
     --------
 
-    This is a very simple way of creating a projection plot.
+    Create a projection plot with a width of 20 kiloparsecs centered on the
+    center of the simulation box:
 
-    >>> pf = load('galaxy0030/galaxy0030')
-    >>> p = ProjectionPlot(pf,2,'Density','c',(20,'kpc'))
-    >>> p.save('sliceplot')
+    >>> from yt import load
+    >>> ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> p = ProjectionPlot(ds, "z", "density", width=(20, "kpc"))
 
     """
     _plot_type = 'Projection'
@@ -1159,8 +1150,8 @@
         (bounds, center) = get_window_parameters(axis, center, width, pf)
         if field_parameters is None: field_parameters = {}
         proj = pf.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source,
-                         field_parameters = field_parameters, style = proj_style)
+                       center=center, data_source=data_source,
+                       field_parameters = field_parameters, style = proj_style)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize, window_size=window_size, aspect=aspect)
         if axes_unit is None:
@@ -1409,14 +1400,14 @@
         else:
             fields = self.frb.data.keys()
             addl_keys = {}
-        if self._colorbar_valid == False:
+        if self._colorbar_valid is False:
             addl_keys['colorbar_image'] = self._get_cbar_image()
             self._colorbar_valid = True
         min_zoom = 200*self.pf.index.get_smallest_dx() * self.pf['unitary']
         for field in fields:
             to_plot = apply_colormap(self.frb[field],
-                func = self._field_transform[field],
-                cmap_name = self._colormaps[field])
+                                     func = self._field_transform[field],
+                                     cmap_name = self._colormaps[field])
             pngs = self._apply_modifications(to_plot)
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
@@ -1482,7 +1473,7 @@
         nx = self.frb.buff_size[0]/skip
         ny = self.frb.buff_size[1]/skip
         new_frb = FixedResolutionBuffer(self.frb.data_source,
-                        self.frb.bounds, (nx,ny))
+                                        self.frb.bounds, (nx,ny))
 
         axis = self.frb.data_source.axis
         xax = self.frb.data_source.pf.coordinates.x_axis[axis]
@@ -1543,17 +1534,16 @@
         self.set_center((new_x, new_y))
 
     def get_field_units(self, field, strip_mathml = True):
-        ds = self.frb.data_source
-        pf = self.pf
+        source = self.data_source
         field = self._check_field(field)
-        finfo = self.data_source.pf._get_field_info(*field)
-        if ds._type_name in ("slice", "cutting"):
+        finfo = source.pf._get_field_info(*field)
+        if source._type_name in ("slice", "cutting"):
             units = finfo.get_units()
-        elif ds._type_name == "proj" and (ds.weight_field is not None or 
-                                        ds.proj_style == "mip"):
-            units = finfo.get_units()
-        elif ds._type_name == "proj":
-            units = finfo.get_projected_units()
+        elif source._type_name == "proj":
+            if source.weight_field is not None or source.proj_style in ("mip", "sum"):
+                units = finfo.get_units()
+            else:
+                units = finfo.get_projected_units()
         else:
             units = ""
         if strip_mathml:
@@ -1686,7 +1676,7 @@
     axis : int or one of 'x', 'y', 'z'
          An int corresponding to the axis to slice along (0=x, 1=y, 2=z)
          or the axis name itself.  If specified, this will replace normal.
-         
+
     The following are nominally keyword arguments passed onto the respective
     slice plot objects generated by this function.
 
@@ -1772,10 +1762,12 @@
     Examples
     --------
 
-    >>> slc = SlicePlot(pf, "x", "Density", center=[0.2,0.3,0.4])
-    >>> slc = SlicePlot(pf, 2, "Temperature")
-    >>> slc = SlicePlot(pf, [0.4,0.2,-0.1], "Pressure",
-                        north_vector=[0.2,-0.3,0.1])
+    >>> from yt import load
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> slc = SlicePlot(ds, "x", "density", center=[0.2,0.3,0.4])
+    >>>
+    >>> slc = SlicePlot(ds, [0.4, 0.2, -0.1], "pressure",
+    ...                 north_vector=[0.2,-0.3,0.1])
 
     """
     # Make sure we are passed a normal
@@ -1797,23 +1789,23 @@
         else:
             normal = np.array(normal)
             np.divide(normal, np.dot(normal,normal), normal)
-        
+
     # by now the normal should be properly set to get either a On/Off Axis plot
     if iterable(normal) and not isinstance(normal, basestring):
         # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
-        if 'origin' in kwargs: 
+        if 'origin' in kwargs:
             msg = "Ignoring 'origin' keyword as it is ill-defined for " \
                   "an OffAxisSlicePlot object."
             mylog.warn(msg)
             del kwargs['origin']
-        
+
         return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
     else:
         # north_vector not used in AxisAlignedSlicePlots; remove it if in kwargs
-        if 'north_vector' in kwargs: 
+        if 'north_vector' in kwargs:
             msg = "Ignoring 'north_vector' keyword as it is ill-defined for " \
                   "an AxisAlignedSlicePlot object."
             mylog.warn(msg)
             del kwargs['north_vector']
-        
+
         return AxisAlignedSlicePlot(pf, normal, fields, *args, **kwargs)

diff -r 01a26e2dbf4f4dbd78242b34b93d3959f0c5210b -r e773029e60d5faded262db6946ad5cdf6d2e8e3a yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -16,6 +16,7 @@
 
 import __builtin__
 import base64
+import os
 import types
 
 from functools import wraps
@@ -230,7 +231,8 @@
             The output file keyword.
         
         """
-        if not self._plot_valid: self._setup_plots()
+        if not self._plot_valid:
+            self._setup_plots()
         unique = set(self.figures.values())
         if len(unique) < len(self.figures):
             figiter = izip(xrange(len(unique)), sorted(unique))
@@ -677,9 +679,11 @@
             cax = None
             draw_colorbar = True
             draw_axes = True
+            zlim = (None, None)
             if f in self.plots:
                 draw_colorbar = self.plots[f]._draw_colorbar
                 draw_axes = self.plots[f]._draw_axes
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
                 if self.plots[f].figure is not None:
                     fig = self.plots[f].figure
                     axes = self.plots[f].axes
@@ -688,13 +692,14 @@
             x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
             x_title, y_title, z_title = self._get_field_title(f, self.profile)
 
-            if z_scale == 'log':
-                zmin = data[data > 0.0].min()
-                self._field_transform[f] = log_transform
-            else:
-                zmin = data.min()
-                self._field_transform[f] = linear_transform
-            zlim = [zmin, data.max()]
+            if zlim == (None, None):
+                if z_scale == 'log':
+                    zmin = data[data > 0.0].min()
+                    self._field_transform[f] = log_transform
+                else:
+                    zmin = data.min()
+                    self._field_transform[f] = linear_transform
+                zlim = [zmin, data.max()]
 
             fp = self._font_properties
             f = self.profile.data_source._determine_fields(f)[0]
@@ -740,9 +745,11 @@
         >>> plot.save(mpl_kwargs={'bbox_inches':'tight'})
         
         """
-
-        if not self._plot_valid: self._setup_plots()
-        if mpl_kwargs is None: mpl_kwargs = {}
+        names = []
+        if not self._plot_valid:
+            self._setup_plots()
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
         xfn = self.profile.x_field
         yfn = self.profile.y_field
         if isinstance(xfn, types.TupleType):
@@ -751,17 +758,25 @@
             yfn = yfn[1]
         for f in self.profile.field_data:
             _f = f
-            if isinstance(f, types.TupleType): _f = _f[1]
+            if isinstance(f, types.TupleType):
+                _f = _f[1]
             middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
             if name is None:
                 prefix = self.profile.pf
-                name = "%s.png" % prefix
+            if name[-1] == os.sep and not os.path.isdir(name):
+                os.mkdir(name)
+            if os.path.isdir(name) and name != str(self.pf):
+                prefix = name + (os.sep if name[-1] != os.sep else '') + str(self.pf)
             suffix = get_image_suffix(name)
-            prefix = name[:name.rfind(suffix)]
+            if suffix != '':
+                for k, v in self.plots.iteritems():
+                    names.append(v.save(name, mpl_kwargs))
+                return names
+
             fn = "%s_%s%s" % (prefix, middle, suffix)
-            if not suffix:
-                suffix = ".png"
+            names.append(fn)
             self.plots[f].save(fn, mpl_kwargs)
+        return names
 
     @invalidate_plot
     def set_title(self, field, title):

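With this change `save()` returns the list of filenames it wrote, and a name ending in a path separator is treated as an output directory (created if necessary). A brief sketch, where `plot` stands for an already-constructed 2D phase/profile plot from this module:

    # Hypothetical plot object; only the save() call is the point here.
    names = plot.save("profiles/")   # writes into profiles/, returns the filenames
    print names
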

https://bitbucket.org/yt_analysis/yt/commits/32c06345184a/
Changeset:   32c06345184a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-12 20:53:08
Summary:     Speed up sphere selection by roughly 2x: precompute a bounding box and exit the point test early.
Affected #:  1 file

diff -r 80b6a3399f45da6e13828a947813cbe0e057f63d -r 32c06345184a87dd1941196e1cfa97ac41eb8030 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -336,9 +336,9 @@
         # is too.
         cdef np.float64_t rel = x1 - x2
         if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -538,12 +538,16 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
 
     def __init__(self, dobj):
-        for i in range(3):
-            self.center[i] = dobj.center[i]
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -563,10 +567,12 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
+            if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
+                return 0
             dist = self.difference(pos[i], self.center[i], i)
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)

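The speedup comes from precomputing the sphere's bounding box and bailing out of the point test as early as possible. A pure-Python illustration of the logic in `select_point` above (the real code is Cython and also handles periodicity):

    def sphere_select_point(pos, center, radius):
        radius2 = radius * radius
        dist2 = 0.0
        for i in range(3):
            # Reject anything outside the bounding box before doing arithmetic.
            if pos[i] < center[i] - radius or pos[i] > center[i] + radius:
                return 0
            d = pos[i] - center[i]   # no periodic wrap in this sketch
            dist2 += d * d
            # Compare the running distance against r^2 so misses exit early.
            if dist2 > radius2:
                return 0
        return 1
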

https://bitbucket.org/yt_analysis/yt/commits/a741f5ca6bea/
Changeset:   a741f5ca6bea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-12 20:56:14
Summary:     Merging from speedup
Affected #:  1 file

diff -r e773029e60d5faded262db6946ad5cdf6d2e8e3a -r a741f5ca6bea439a689d245e85e4f8b86836aef1 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -332,9 +332,9 @@
         # is too.
         cdef np.float64_t rel = x1 - x2
         if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -534,12 +534,16 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
 
     def __init__(self, dobj):
-        for i in range(3):
-            self.center[i] = dobj.center[i]
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -559,10 +563,12 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
+            if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
+                return 0
             dist = self.difference(pos[i], self.center[i], i)
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/587939f91b1e/
Changeset:   587939f91b1e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-21 20:27:08
Summary:     Merging periodicity check
Affected #:  2 files

diff -r a741f5ca6bea439a689d245e85e4f8b86836aef1 -r 587939f91b1e51cfae22b0d095d213509dd486ec yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -34,7 +34,7 @@
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
+        np.multiply(r, r, r)
         np.add(radius2, r, radius2)
         if data.pf.dimensionality < i+1:
             break

diff -r a741f5ca6bea439a689d245e85e4f8b86836aef1 -r 587939f91b1e51cfae22b0d095d213509dd486ec yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -535,15 +535,24 @@
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
     cdef np.float64_t bbox[3][2]
+    cdef bint check_box[3]
 
     def __init__(self, dobj):
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
         center = _ensure_code(dobj.center)
+        cdef np.float64_t mi = np.finfo("float64").min
+        cdef np.float64_t ma = np.finfo("float64").max
         for i in range(3):
             self.center[i] = center[i]
             self.bbox[i][0] = self.center[i] - self.radius
             self.bbox[i][1] = self.center[i] + self.radius
+            if self.bbox[i][0] < dobj.pf.domain_left_edge[i]:
+                self.check_box[i] = False
+            elif self.bbox[i][1] > dobj.pf.domain_right_edge[i]:
+                self.check_box[i] = False
+            else:
+                self.check_box[i] = True
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -564,7 +573,7 @@
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
             if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
-                return 0
+                if self.check_box[i]: return 0
             dist = self.difference(pos[i], self.center[i], i)
             dist2 += dist*dist
             if dist2 > self.radius2: return 0
@@ -594,16 +603,22 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
+        for i in range(3):
+            if not self.check_box[i]: continue
+            if right_edge[i] < self.bbox[i][0] or \
+               left_edge[i] > self.bbox[i][1]:
+                return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):
+            # Early terminate
             box_center = (right_edge[i] + left_edge[i])/2.0
             relcenter = self.difference(box_center, self.center[i], i)
             edge = right_edge[i] - left_edge[i]
             closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
             dist += closest*closest
-        if dist <= self.radius2: return 1
-        return 0
+            if dist > self.radius2: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.radius, self.radius2,

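This changeset also guards the bounding-box shortcut: `check_box[i]` is set to False along any axis where the sphere's box crosses the domain edge, because the periodic minimum-image separation used by `difference()` can then be smaller than the naive one. A sketch of that minimum-image convention:

    def periodic_difference(x1, x2, width):
        # Map the separation along a periodic axis of length `width` into [-width/2, width/2].
        rel = x1 - x2
        if rel > 0.5 * width:
            rel -= width
        elif rel < -0.5 * width:
            rel += width
        return rel
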

https://bitbucket.org/yt_analysis/yt/commits/18866bc72d64/
Changeset:   18866bc72d64
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-21 21:51:35
Summary:     Merging in sphere speedups
Affected #:  24 files

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -3,10 +3,10 @@
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
-sp = ds.sphere("center", (15.0, "kpc"))
+# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
+# that this sphere is very small compared to the size of our final plot,
+# and it has a non-axially aligned L vector.
+sp = ds.sphere("m", (1.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
@@ -14,5 +14,5 @@
 print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
 p.save()

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2f774139560d94508c2c51b70930d46941d9ceef7228655de32a69634f6c6d83"
+  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -45,7 +45,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0)"
+      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0, z_axis_decomp=True)"
      ],
      "language": "python",
      "metadata": {},
@@ -179,6 +179,31 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "We can also make a projection of all the emission along the line of sight:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -32,11 +32,15 @@
     PyArrayObject    *xpos, *ypos, *zpos;
     xpos=ypos=zpos=NULL;
     float link = 0.2;
+    float fPeriod[3] = {1.0, 1.0, 1.0};
+	int nMembers = 8;
 
     int i;
 
-    if (!PyArg_ParseTuple(args, "OOO|f",
-        &oxpos, &oypos, &ozpos, &link))
+    if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
+        &oxpos, &oypos, &ozpos, &link,
+        &fPeriod[0], &fPeriod[1], &fPeriod[2],
+        &nMembers))
     return PyErr_Format(_FOFerror,
             "EnzoFOF: Invalid parameters.");
 
@@ -74,8 +78,8 @@
 
 	KDFOF kd;
 	int nBucket,j;
-	float fPeriod[3],fEps;
-	int nMembers,nGroup,bVerbose=1;
+	float fEps;
+	int nGroup,bVerbose=1;
 	int sec,usec;
 	
 	/* linking length */
@@ -83,9 +87,6 @@
 	fEps = link;
 	
 	nBucket = 16;
-	nMembers = 8;
-
-	for (j=0;j<3;++j) fPeriod[j] = 1.0;
 
     /* initialize the kd FOF structure */
 

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -66,13 +66,13 @@
         if isinstance(outputs, DatasetSeries):
             self.data_series = outputs
         else:
-            self.data_series = DatasetSeries.from_filenames(outputs)
+            self.data_series = DatasetSeries(outputs)
         self.masks = []
         self.sorts = []
         self.array_indices = []
         self.indices = indices
         self.num_indices = len(indices)
-        self.num_steps = len(filenames)
+        self.num_steps = len(outputs)
         self.times = []
 
         # Default fields 

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -317,7 +317,7 @@
             finfo = self.pf._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             units = finfo.units
-            if self.weight_field is None:
+            if self.weight_field is None and not self._sum_only:
                 # See _handle_chunk where we mandate cm
                 if units == '':
                     input_units = "cm"
@@ -329,7 +329,7 @@
             self[field] = YTArray(field_data[fi].ravel(),
                                   input_units=input_units,
                                   registry=self.pf.unit_registry)
-            if self.weight_field is None:
+            if self.weight_field is None and not self._sum_only:
                 u_obj = Unit(units, registry=self.pf.unit_registry)
                 if u_obj.is_code_unit and input_units != units \
                     or self.pf.no_cgs_equiv_length:

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -139,12 +139,14 @@
             return
         elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
             center = self.pf.arr(center, 'code_length')
-        elif center in ("c", "center"):
-            center = self.pf.domain_center
-        elif center == ("max"): # is this dangerous for race conditions?
-            center = self.pf.h.find_max("density")[1]
-        elif center.startswith("max_"):
-            center = self.pf.h.find_max(center[4:])[1]
+        elif isinstance(center, basestring):
+            if center.lower() in ("c", "center"):
+                center = self.pf.domain_center
+             # is this dangerous for race conditions?
+            elif center.lower() in ("max", "m"):
+                center = self.pf.h.find_max(("gas", "density"))[1]
+            elif center.startswith("max_"):
+                center = self.pf.h.find_max(center[4:])[1]
         else:
             center = np.array(center, dtype='float64')
         self.center = self.pf.arr(center, 'code_length')

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -12,7 +12,6 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import h5py as h5
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.units.yt_array import YTArray
 
@@ -26,7 +25,7 @@
     Parameters
     ----------
     input_array: array_like
-        A numpy ndarray, or list. 
+        A numpy ndarray, or list.
 
     Other Parameters
     ----------------
@@ -35,7 +34,7 @@
 
     Returns
     -------
-    obj: ImageArray object 
+    obj: ImageArray object
 
     Raises
     ------
@@ -55,15 +54,15 @@
     --------
     These are written in doctest format, and should illustrate how to
     use the function.  Use the variables 'pf' for the parameter file, 'pc' for
-    a plot collection, 'c' for a center, and 'L' for a vector. 
+    a plot collection, 'c' for a center, and 'L' for a vector.
 
     >>> im = np.zeros([64,128,3])
     >>> for i in xrange(im.shape[0]):
     ...     for k in xrange(im.shape[2]):
     ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
     ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
     >>> im_arr = ImageArray(im, info=myinfo)
@@ -84,38 +83,36 @@
         super(ImageArray, self).__array_finalize__(obj)
         self.info = getattr(obj, 'info', None)
 
-    def write_hdf5(self, filename):
+    def write_hdf5(self, filename, dataset_name=None):
         r"""Writes ImageArray to hdf5 file.
 
         Parameters
         ----------
         filename: string
-            Note filename not be modified.
-       
+        The filename to create and write a dataset to
+
+        dataset_name: string
+            The name of the dataset to create in the file.
+
         Examples
-        -------- 
+        --------
         >>> im = np.zeros([64,128,3])
         >>> for i in xrange(im.shape[0]):
         ...     for k in xrange(im.shape[2]):
         ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
         ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_hdf5('test_ImageArray.h5')
 
         """
-        array_name = self.info.get("name","image")
-
-        f = h5.File(filename)
-        if array_name in f.keys():
-            del f[array_name]
-        d = f.create_dataset(array_name, data=self)
-        for k, v in self.info.iteritems():
-            d.attrs.create(k, v)
-        f.close()
+        if dataset_name is None:
+            dataset_name = self.info.get("name", "image")
+        super(ImageArray, self).write_hdf5(filename, dataset_name=dataset_name,
+                                           info=self.info)
 
     def add_background_color(self, background='black', inline=True):
         r"""Adds a background color to a 4-channel ImageArray
@@ -126,7 +123,7 @@
 
         Parameters
         ----------
-        background: 
+        background:
             This can be used to set a background color for the image, and can
             take several types of values:
 
@@ -144,7 +141,7 @@
         -------
         out: ImageArray
             The modified ImageArray with a background color added.
-       
+
         Examples
         --------
         >>> im = np.zeros([64,128,4])
@@ -160,8 +157,8 @@
         >>> im_arr.write_png('black_bg.png')
         """
         assert(self.shape[-1] == 4)
-        
-        if background == None:
+
+        if background is None:
             background = (0., 0., 0., 0.)
         elif background == 'white':
             background = (1., 1., 1., 1.)
@@ -175,11 +172,10 @@
             out = self.copy()
 
         for i in range(3):
-            out[:,:,i] = self[:,:,i]*self[:,:,3] + \
-                    background[i]*background[3]*(1.0-self[:,:,3])
-        out[:,:,3] = self[:,:,3] + background[3]*(1.0-self[:,:,3]) 
-        return out 
-
+            out[:, :, i] = self[:, :, i]*self[:, :, 3]
+            out[:, :, i] += background[i]*background[3]*(1.0-self[:, :, 3])
+        out[:, :, 3] = self[:, :, 3]+background[3]*(1.0-self[:, :, 3])
+        return out
 
     def rescale(self, cmax=None, amax=None, inline=True):
         r"""Rescales the image to be in [0,1] range.
@@ -194,7 +190,7 @@
             corresponding to using the maximum value in the alpha channel.
         inline: boolean, optional
             Specifies whether or not the rescaling is done inline. If false,
-            a new copy of the ImageArray will be created, returned. 
+            a new copy of the ImageArray will be created, returned.
             Default:True.
 
         Returns
@@ -207,17 +203,18 @@
         This requires that the shape of the ImageArray to have a length of 3,
         and for the third dimension to be >= 3.  If the third dimension has
         a shape of 4, the alpha channel will also be rescaled.
-       
+
         Examples
-        -------- 
+        --------
         >>> im = np.zeros([64,128,4])
         >>> for i in xrange(im.shape[0]):
         ...     for k in xrange(im.shape[2]):
         ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-        >>> im_arr.write_png('original.png')
-        >>> im_arr.rescale()
-        >>> im_arr.write_png('normalized.png')
+        >>> im = ImageArray(im)
+        >>> im.write_png('original.png')
+        >>> im.rescale()
+        >>> im.write_png('normalized.png')
 
         """
         assert(len(self.shape) == 3)
@@ -226,22 +223,22 @@
             out = self
         else:
             out = self.copy()
-        if cmax is None: 
-            cmax = self[:,:,:3].sum(axis=2).max()
+        if cmax is None:
+            cmax = self[:, :, :3].sum(axis=2).max()
 
-        np.multiply(self[:,:,:3], 1./cmax, out[:,:,:3])
+        np.multiply(self[:, :, :3], 1.0/cmax, out[:, :, :3])
 
         if self.shape[2] == 4:
             if amax is None:
-                amax = self[:,:,3].max()
+                amax = self[:, :, 3].max()
             if amax > 0.0:
-                np.multiply(self[:,:,3], 1./amax, out[:,:,3])
-        
+                np.multiply(self[:, :, 3], 1.0/amax, out[:, :, 3])
+
         np.clip(out, 0.0, 1.0, out)
         return out
 
     def write_png(self, filename, clip_ratio=None, background='black',
-                 rescale=True):
+                  rescale=True):
         r"""Writes ImageArray to png file.
 
         Parameters
@@ -250,9 +247,9 @@
             Note: the filename may be modified (".png" is appended if missing).
         clip_ratio: float, optional
             Image will be clipped before saving to the standard deviation
-            of the image multiplied by this value.  Useful for enhancing 
+            of the image multiplied by this value.  Useful for enhancing
             images. Default: None
-        background: 
+        background:
             This can be used to set a background color for the image, and can
             take several types of values:
 
@@ -265,7 +262,7 @@
         rescale: boolean, optional
             If True, will write out a rescaled image (without modifying the
             original image). Default: True
-       
+
         Examples
         --------
         >>> im = np.zeros([64,128,4])
@@ -292,25 +289,25 @@
         else:
             out = scaled
 
-        if filename[-4:] != '.png': 
+        if filename[-4:] != '.png':
             filename += '.png'
 
         if clip_ratio is not None:
-            nz = out[:,:,:3][out[:,:,:3].nonzero()]
+            nz = out[:, :, :3][out[:, :, :3].nonzero()]
             return write_bitmap(out.swapaxes(0, 1), filename,
-                                nz.mean() + \
-                                clip_ratio * nz.std())
+                                nz.mean() + clip_ratio*nz.std())
         else:
             return write_bitmap(out.swapaxes(0, 1), filename)
 
-    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+    def write_image(self, filename, color_bounds=None, channel=None,
+                    cmap_name="algae", func=lambda x: x):
         r"""Writes a single channel of the ImageArray to a png file.
 
         Parameters
         ----------
         filename: string
             Note: the filename may be modified (".png" is appended if missing).
-       
+
         Other Parameters
         ----------------
         channel: int
@@ -323,43 +320,44 @@
             An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         func : function, optional
-            A function to transform the buffer before applying a colormap. 
+            A function to transform the buffer before applying a colormap.
 
         Returns
         -------
         scaled_image : uint8 image that has been saved
-        
+
         Examples
         --------
-        
+
         >>> im = np.zeros([64,128])
         >>> for i in xrange(im.shape[0]):
-        ...     im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
         ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_image('test_ImageArray.png')
 
         """
-        if filename[-4:] != '.png': 
+        if filename[-4:] != '.png':
             filename += '.png'
 
+        #TODO: Write info dict as png metadata
         if channel is None:
-            return write_image(self.swapaxes(0,1).to_ndarray(), filename,
+            return write_image(self.swapaxes(0, 1).to_ndarray(), filename,
                                color_bounds=color_bounds, cmap_name=cmap_name,
                                func=func)
         else:
-            return write_image(self.swapaxes(0,1)[:,:,channel].to_ndarray(),
+            return write_image(self.swapaxes(0, 1)[:, :, channel].to_ndarray(),
                                filename,
-                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               color_bounds=color_bounds, cmap_name=cmap_name,
                                func=func)
 
     def save(self, filename, png=True, hdf5=True):
         """
-        Saves ImageArray. 
+        Saves ImageArray.
 
         Arguments:
           filename: string
@@ -380,6 +378,3 @@
                 self.write_image("%s.png" % filename)
         if hdf5:
             self.write_hdf5("%s.h5" % filename)
-
-    __doc__ += np.ndarray.__doc__
-

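For reference, a minimal sketch of the new dataset_name handling, following the
doctests above rather than any shipped script:

    import numpy as np
    from yt.data_objects.image_array import ImageArray

    # Build a small synthetic rendering, as in the doctests above.
    im = np.zeros([64, 128, 3])
    for i in range(im.shape[0]):
        for k in range(im.shape[2]):
            im[i, :, k] = np.linspace(0., 0.3*k, im.shape[1])

    im_arr = ImageArray(im, info={'field': 'dinosaurs', 'type': 'rendering'})
    # dataset_name can now be passed explicitly; when omitted it still falls
    # back to info.get("name", "image") before delegating to YTArray.write_hdf5.
    im_arr.write_hdf5('test_ImageArray.h5', dataset_name='my_rendering')
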
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -447,12 +447,12 @@
         >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
-            assert_valid_width_tuple(width)
+            validate_width_tuple(width)
             width = self.pf.quan(width[0], width[1])
         if height is None:
             height = width
         elif iterable(height):
-            assert_valid_width_tuple(height)
+            validate_width_tuple(height)
             height = self.pf.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -34,7 +34,7 @@
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
+        np.multiply(r, r, r)
         np.add(radius2, r, radius2)
         if data.pf.dimensionality < i+1:
             break

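The change above swaps the generic power ufunc for an in-place multiply; a quick
equivalence check (illustration only, not part of the changeset):

    import numpy as np

    r = np.random.random(8)
    a = r.copy(); np.power(a, 2.0, a)      # old form: generic power ufunc
    b = r.copy(); np.multiply(b, b, b)     # new form: in-place square
    assert np.allclose(a, b)
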
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -17,6 +17,7 @@
 import weakref
 import warnings
 import re
+import uuid
 
 from yt.config import ytcfg
 from yt.funcs import *
@@ -200,37 +201,49 @@
             self.parameter_file.field_units[k] = self.parameter_file.field_units[primary_fname]
 
     def _count_grids(self):
-        self.num_grids = self.pf.nprocs
+        self.num_grids = self.pf.parameters["nprocs"]
 
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
 
         # If nprocs > 1, decompose the domain into virtual grids
-        if pf.nprocs > 1:
-            bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
-                                                       pf.domain_right_edge)])
-            dims = np.array(pf.domain_dimensions)
-            # If we are creating a dataset of lines, only decompose along the position axes
-            if len(pf.line_database) > 0:
-                dims[pf.vel_axis] = 1
-            psize = get_psize(dims, pf.nprocs)
-            gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
-            self.grid_left_edge = self.pf.arr(gle, "code_length")
-            self.grid_right_edge = self.pf.arr(gre, "code_length")
-            self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
-            # If we are creating a dataset of lines, only decompose along the position axes
-            if len(pf.line_database) > 0:
-                self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
-
+        if self.num_grids > 1:
+            if self.pf.z_axis_decomp:
+                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
+                                                            self.num_grids).astype("int")
+                self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
+                self.grid_left_edge[0,2] = pf.domain_left_edge[2]
+                self.grid_left_edge[1:,2] = pf.domain_left_edge[2] + \
+                                            np.cumsum(self.grid_dimensions[:-1,2])*dz
+                self.grid_right_edge[:,2] = self.grid_left_edge[:,2]+self.grid_dimensions[:,2]*dz
+                self.grid_left_edge[:,:2] = pf.domain_left_edge[:2]
+                self.grid_right_edge[:,:2] = pf.domain_right_edge[:2]
+                self.grid_dimensions[:,:2] = pf.domain_dimensions[:2]
+            else:
+                bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
+                                                           pf.domain_right_edge)])
+                dims = np.array(pf.domain_dimensions)
+                # If we are creating a dataset of lines, only decompose along the position axes
+                if len(pf.line_database) > 0:
+                    dims[pf.vel_axis] = 1
+                psize = get_psize(dims, self.num_grids)
+                gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
+                self.grid_left_edge = self.pf.arr(gle, "code_length")
+                self.grid_right_edge = self.pf.arr(gre, "code_length")
+                self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
+                # If we are creating a dataset of lines, only decompose along the position axes
+                if len(pf.line_database) > 0:
+                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
+                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
+                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
 
-        if self.pf.events_data:
+        if pf.events_data:
             try:
                 self.grid_particle_count[:] = pf.primary_header["naxis2"]
             except KeyError:
@@ -290,6 +303,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
                  suppress_astropy_warnings = True,
@@ -297,8 +311,11 @@
 
         if parameters is None:
             parameters = {}
+        parameters["nprocs"] = nprocs
         self.specified_parameters = parameters
 
+        self.z_axis_decomp = z_axis_decomp
+
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
@@ -322,11 +339,15 @@
             self.nan_mask = {"all":nan_mask}
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
-        self.nprocs = nprocs
-        self._handle = _astropy.pyfits.open(self.filenames[0],
-                                      memmap=True,
-                                      do_not_scale_image_data=True,
-                                      ignore_blank=True)
+        if isinstance(self.filenames[0], _astropy.pyfits.PrimaryHDU):
+            self._handle = _astropy.pyfits.HDUList(self.filenames[0])
+            fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+        else:
+            self._handle = _astropy.pyfits.open(self.filenames[0],
+                                                memmap=True,
+                                                do_not_scale_image_data=True,
+                                                ignore_blank=True)
+            fn = self.filenames[0]
         self._fits_files = [self._handle]
         if self.num_files > 1:
             for fits_file in auxiliary_files:
@@ -387,7 +408,7 @@
 
         self.refine_by = 2
 
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, fn, dataset_type)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):
@@ -435,8 +456,11 @@
 
     def _parse_parameter_file(self):
 
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        if self.parameter_filename.startswith("InMemory"):
+            self.unique_identifier = time.time()
+        else:
+            self.unique_identifier = \
+                int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
         # Determine dimensionality
 
@@ -472,14 +496,26 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
-        # If this is a 2D events file, no need to decompose
-        if self.events_data: self.nprocs = 1
+        if self.dimensionality == 2 and self.z_axis_decomp:
+            mylog.warning("You asked to decompose along the z-axis, but this is a 2D dataset. " +
+                          "Ignoring.")
+            self.z_axis_decomp = False
+
+        if self.events_data: self.specified_parameters["nprocs"] = 1
 
         # If nprocs is None, do some automatic decomposition of the domain
-        if self.nprocs is None:
-            self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                    32**self.dimensionality).astype("int")
-            self.nprocs = max(min(self.nprocs, 512), 1)
+        if self.specified_parameters["nprocs"] is None:
+            if len(self.line_database) > 0:
+                dims = 2
+            else:
+                dims = self.dimensionality
+            if self.z_axis_decomp:
+                nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
+            else:
+                nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+            self.parameters["nprocs"] = max(min(nprocs, 512), 1)
+        else:
+            self.parameters["nprocs"] = self.specified_parameters["nprocs"]
 
         self.reversed = False
 

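For reference, the new keyword is passed straight through yt.load; "cube.fits" below
is a hypothetical spectral cube, not a file shipped with yt:

    from yt import load

    # Decompose the cube into virtual grids along the velocity (z) axis only,
    # roughly one grid per 8 planes when nprocs is left unset, instead of the
    # default block decomposition over all axes.
    ds = load("cube.fits", z_axis_decomp=True)
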
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -88,7 +88,7 @@
             for chunk in chunks:
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
-                    end = ((g.RightEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
+                    end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
                         my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
                         my_off = my_off - 0.5*self.pf.line_width

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -549,7 +549,7 @@
         lengths = self.indexdata['len'][mask]
         return mask, offsets, lengths
 
-    def get_ibbox(self, ileft, iright):
+    def get_ibbox(self, ileft, iright, wandering_particles=True):
         """
         Given left and right indicies, return a mask and
         set of offsets+lengths into the sdf data.
@@ -593,7 +593,7 @@
         #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
         indices = self.get_keyv([X, Y, Z])
-#       # Only mask out if we are actually getting data rather than getting indices into
+        # Only mask out if we are actually getting data rather than getting indices into
         # a space.
         if self.valid_indexdata:
             indices = indices[indices < self.indexdata['index'][-1]]
@@ -696,7 +696,6 @@
             i += 1
         mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
 
-
     def filter_particles(self, myiter, myfilter):
         for data in myiter:
             mask = myfilter(data)

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -660,17 +660,14 @@
     if not os.path.exists(path):
         only_on_root(os.makedirs, path)
     return path
-        
-def assert_valid_width_tuple(width):
-    try:
-        assert iterable(width) and len(width) == 2, \
-            "width (%s) is not a two element tuple" % width
-        valid = isinstance(width[0], numeric_type) and isinstance(width[1], str)
+
+def validate_width_tuple(width):
+    if not iterable(width) or len(width) != 2:
+        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
+    if not (isinstance(width[0], numeric_type) and isinstance(width[1], basestring)):
         msg = "width (%s) is invalid. " % str(width)
         msg += "Valid widths look like this: (12, 'au')"
-        assert valid, msg
-    except AssertionError, e:
-        raise YTInvalidWidthError(e)
+        raise YTInvalidWidthError(msg)
 
 def camelcase_to_underscore(name):
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)

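A short illustration of the new helper (names taken from the diff above; the exception
class is assumed to live in yt.utilities.exceptions):

    from yt.funcs import validate_width_tuple
    from yt.utilities.exceptions import YTInvalidWidthError

    validate_width_tuple((12, 'au'))     # a (number, unit string) pair passes silently
    try:
        validate_width_tuple((12, 14))   # second element is not a unit string
    except YTInvalidWidthError:
        pass
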
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ b/yt/geometry/ppv_coordinates.py
@@ -25,8 +25,6 @@
 
         self.axis_name = {}
         self.axis_id = {}
-        self.x_axis = {}
-        self.y_axis = {}
 
         for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
                                    ["Image\ x", "Image\ y", pf.vel_name]):
@@ -42,28 +40,6 @@
             self.axis_id[axis] = axis
             self.axis_id[axis_name] = axis
 
-            if axis == 0:
-                self.x_axis[axis] = 1
-                self.x_axis[lower_ax] = 1
-                self.x_axis[axis_name] = 1
-                self.y_axis[axis] = 2
-                self.y_axis[lower_ax] = 2
-                self.y_axis[axis_name] = 2
-            elif axis == 1:
-                self.x_axis[axis] = 2
-                self.x_axis[lower_ax] = 2
-                self.x_axis[axis_name] = 2
-                self.y_axis[axis] = 0
-                self.y_axis[lower_ax] = 0
-                self.y_axis[axis_name] = 0
-            elif axis == 2:
-                self.x_axis[axis] = 0
-                self.x_axis[lower_ax] = 0
-                self.x_axis[axis_name] = 0
-                self.y_axis[axis] = 1
-                self.y_axis[lower_ax] = 1
-                self.y_axis[axis_name] = 1
-
         self.default_unit_label = {}
         self.default_unit_label[pf.lon_axis] = "pixel"
         self.default_unit_label[pf.lat_axis] = "pixel"
@@ -75,3 +51,8 @@
     def convert_from_cylindrical(self, coord):
         raise NotImplementedError
 
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

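The per-instance x_axis/y_axis bookkeeping becomes two class-level lookups; spelled out
with the values copied from the diff:

    x_axis = {'x': 1, 'y': 0, 'z': 0, 0: 1, 1: 0, 2: 0}
    y_axis = {'x': 2, 'y': 2, 'z': 1, 0: 2, 1: 2, 2: 1}

    # An image along axis 0 uses axes (1, 2) for (x, y), axis 1 uses (0, 2),
    # and axis 2 uses (0, 1), whether keyed by integer index or by axis name.
    assert (x_axis[0], y_axis[0]) == (1, 2)
    assert (x_axis['z'], y_axis['z']) == (0, 1)
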
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -332,9 +332,9 @@
         # is too.
         cdef np.float64_t rel = x1 - x2
         if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -534,12 +534,25 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
+    cdef bint check_box[3]
 
     def __init__(self, dobj):
-        for i in range(3):
-            self.center[i] = dobj.center[i]
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        cdef np.float64_t mi = np.finfo("float64").min
+        cdef np.float64_t ma = np.finfo("float64").max
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
+            if self.bbox[i][0] < dobj.pf.domain_left_edge[i]:
+                self.check_box[i] = False
+            elif self.bbox[i][1] > dobj.pf.domain_right_edge[i]:
+                self.check_box[i] = False
+            else:
+                self.check_box[i] = True
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -559,10 +572,12 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
+            if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
+                if self.check_box[i]: return 0
             dist = self.difference(pos[i], self.center[i], i)
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -588,16 +603,22 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
+        for i in range(3):
+            if not self.check_box[i]: continue
+            if right_edge[i] < self.bbox[i][0] or \
+               left_edge[i] > self.bbox[i][1]:
+                return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):
+            # Early terminate
             box_center = (right_edge[i] + left_edge[i])/2.0
             relcenter = self.difference(box_center, self.center[i], i)
             edge = right_edge[i] - left_edge[i]
             closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
             dist += closest*closest
-        if dist <= self.radius2: return 1
-        return 0
+            if dist > self.radius2: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.radius, self.radius2,

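A plain-Python sketch (not the Cython code itself) of the per-point test the sphere
selector now performs, assuming a non-periodic domain so the bounding-box check applies
on every axis:

    def select_point(pos, center, radius):
        radius2 = radius * radius
        dist2 = 0.0
        for i in range(3):
            # Cheap axis-aligned bounding-box rejection before any arithmetic.
            if pos[i] < center[i] - radius or pos[i] > center[i] + radius:
                return 0
            d = pos[i] - center[i]
            dist2 += d * d
            # Early termination once the running distance exceeds the radius.
            if dist2 > radius2:
                return 0
        return 1
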
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -14,7 +14,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+import copy
+import cPickle as pickle
+import itertools
+import numpy as np
+import operator
 import os
+import shutil
+import tempfile
+
 from nose.tools import assert_true
 from numpy.testing import \
     assert_array_equal, \
@@ -28,12 +36,6 @@
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_pf, requires_module
 from yt.funcs import fix_length
-import numpy as np
-import copy
-import operator
-import cPickle as pickle
-import tempfile
-import itertools
 
 
 def operate_and_compare(a, b, op, answer):
@@ -675,3 +677,54 @@
     yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
 
 
+def test_subclass():
+
+    class YTASubclass(YTArray):
+        pass
+
+    a = YTASubclass([4, 5, 6], 'g')
+    b = YTASubclass([7, 8, 9], 'kg')
+    nu = YTASubclass([10, 11, 12], '')
+    nda = np.array([3, 4, 5])
+    yta = YTArray([6, 7, 8], 'mg')
+    ytq = YTQuantity(4, 'cm')
+    ndf = np.float64(3)
+
+    def op_comparison(op, inst1, inst2, compare_class):
+        assert_isinstance(op(inst1, inst2), compare_class)
+        assert_isinstance(op(inst2, inst1), compare_class)
+
+    for op in (operator.mul, operator.div, operator.truediv):
+        for inst in (b, ytq, ndf, yta, nda):
+            yield op_comparison, op, a, inst, YTASubclass
+
+        yield op_comparison, op, ytq, nda, YTArray
+        yield op_comparison, op, ytq, yta, YTArray
+
+    for op in (operator.add, operator.sub):
+        yield op_comparison, op, nu, nda, YTASubclass
+        yield op_comparison, op, a, b, YTASubclass
+        yield op_comparison, op, a, yta, YTASubclass
+
+    yield assert_isinstance, a[0], YTQuantity
+    yield assert_isinstance, a[:], YTASubclass
+    yield assert_isinstance, a[:2], YTASubclass
+
+def test_h5_io():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = fake_random_pf(64, nprocs=1, length_unit=10)
+
+    warr = ds.arr(np.random.random((256, 256)), 'code_length')
+
+    warr.write_hdf5('test.h5')
+
+    iarr = YTArray.from_hdf5('test.h5')
+
+    yield assert_equal, warr, iarr
+    yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -74,7 +74,8 @@
         if ret.shape == ():
             return YTQuantity(ret, units)
         else:
-            return YTArray(ret, units)
+            # This could be a subclass, so don't call YTArray directly.
+            return type(args[0])(ret, units)
     return wrapped
 
 def sqrt_unit(unit):
@@ -464,6 +465,92 @@
     # End unit conversion methods
     #
 
+    def write_hdf5(self, filename, dataset_name=None, info=None):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to create and write a dataset to
+
+        dataset_name: string
+            The name of the dataset to create in the file.
+
+        info: dictionary
+            A dictionary of supplementary info to write to append as attributes
+            to the dataset.
+
+        Examples
+        --------
+        >>> a = YTArray([1,2,3], 'cm')
+
+        >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
+
+        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
+        ...              info=myinfo)
+
+        """
+        import h5py
+        from yt.extern.six.moves import cPickle as pickle
+        if info is None:
+            info = {}
+
+        info['units'] = str(self.units)
+        info['unit_registry'] = pickle.dumps(self.units.registry.lut)
+
+        if dataset_name is None:
+            dataset_name = 'array_data'
+
+        f = h5py.File(filename)
+        if dataset_name in f.keys():
+            d = f[dataset_name]
+            # Overwrite without deleting if we can get away with it.
+            if d.shape == self.shape and d.dtype == self.dtype:
+                d[:] = self
+                for k in d.attrs.keys():
+                    del d.attrs[k]
+            else:
+                del f[dataset_name]
+                d = f.create_dataset(dataset_name, data=self)
+        else:
+            d = f.create_dataset(dataset_name, data=self)
+
+        for k, v in info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    @classmethod
+    def from_hdf5(cls, filename, dataset_name=None):
+        r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray.
+
+        Parameters
+        ----------
+        filename: string
+            The filename of the hdf5 file.
+
+        dataset_name: string
+            The name of the dataset to read from.  If the dataset has a units
+            attribute, attempt to infer units as well.
+
+        """
+        import h5py
+        from yt.extern.six.moves import cPickle as pickle
+
+        if dataset_name is None:
+            dataset_name = 'array_data'
+
+        f = h5py.File(filename)
+        dataset = f[dataset_name]
+        data = dataset[:]
+        units = dataset.attrs.get('units', '')
+        if 'unit_registry' in dataset.attrs.keys():
+            unit_lut = pickle.loads(dataset.attrs['unit_registry'])
+        else:
+            unit_lut = None
+
+        registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
+        return cls(data, units, registry=registry)
+
     #
     # Start convenience methods
     #
@@ -766,7 +853,7 @@
 
     @return_arr
     def prod(self, axis=None, dtype=None, out=None):
-        if axis:
+        if axis is not None:
             units = self.units**self.shape[axis]
         else:
             units = self.units**self.size
@@ -814,9 +901,13 @@
             # Raise YTUnitOperationError up here since we know the context now
             except RuntimeError:
                 raise YTUnitOperationError(context[0], u)
+            ret_class = type(self)
         elif context[0] in binary_operators:
             unit1 = getattr(context[1][0], 'units', None)
             unit2 = getattr(context[1][1], 'units', None)
+            cls1 = type(context[1][0])
+            cls2 = type(context[1][1])
+            ret_class = get_binary_op_return_class(cls1, cls2)
             if unit1 is None:
                 unit1 = Unit(registry=getattr(unit2, 'registry', None))
             if unit2 is None and context[0] is not power:
@@ -849,10 +940,15 @@
             out_arr = np.array(out_arr)
             return out_arr
         out_arr.units = unit
-        if out_arr.size > 1:
-            return YTArray(np.array(out_arr), unit)
+        if out_arr.size == 1:
+            return YTQuantity(np.array(out_arr), unit)
         else:
-            return YTQuantity(np.array(out_arr), unit)
+            if ret_class is YTQuantity:
+                # This happens if you do ndarray * YTQuantity. Explicitly
+                # casting to YTArray avoids creating a YTQuantity with size > 1
+                return YTArray(np.array(out_arr), unit)
+            return ret_class(np.array(out_arr), unit)
+
 
     def __reduce__(self):
         """Pickle reduction method
@@ -929,3 +1025,22 @@
         return data.pf.arr(x, units)
     else:
         return data.pf.quan(x, units)
+
+def get_binary_op_return_class(cls1, cls2):
+    if cls1 is cls2:
+        return cls1
+    if cls1 is np.ndarray or issubclass(cls1, numeric_type):
+        return cls2
+    if cls2 is np.ndarray or issubclass(cls2, numeric_type):
+        return cls1
+    if issubclass(cls1, YTQuantity):
+        return cls2
+    if issubclass(cls2, YTQuantity):
+        return cls1
+    if issubclass(cls1, cls2):
+        return cls1
+    if issubclass(cls2, cls1):
+        return cls2
+    else:
+        raise RuntimeError("Operations are only defined on pairs of objects"
+                           "in which one is a subclass of the other")

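For reference, a round-trip sketch of the two new helpers, mirroring the test_h5_io
test added above:

    import numpy as np
    from yt.units.yt_array import YTArray

    a = YTArray(np.random.random((16, 16)), 'cm')
    a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
                 info={'field': 'dinosaurs', 'type': 'field_data'})
    b = YTArray.from_hdf5('test_array_data.h5', dataset_name='dinosaurs')
    assert np.array_equal(np.array(a), np.array(b))
    assert str(a.units) == str(b.units)
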
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -38,7 +38,7 @@
     node.contour_id = contour_id
     node.next = node.parent = NULL
     node.prev = prev
-    node.count = 0
+    node.count = 1
     if prev != NULL: prev.next = node
     return node
 
@@ -59,17 +59,36 @@
     # root.
     while node.parent != NULL:
         temp = node.parent
+        root.count += node.count
+        node.count = 0
         node.parent = root
         node = temp
     return root
 
 cdef inline void contour_union(ContourID *node1, ContourID *node2):
+    if node1 == node2:
+        return
     node1 = contour_find(node1)
     node2 = contour_find(node2)
-    if node1.contour_id < node2.contour_id:
-        node2.parent = node1
-    elif node2.contour_id < node1.contour_id:
-        node1.parent = node2
+    if node1 == node2:
+        return
+    cdef ContourID *pri, *sec
+    if node1.count > node2.count:
+        pri = node1
+        sec = node2
+    elif node2.count > node1.count:
+        pri = node2
+        sec = node1
+    # might be a tie
+    elif node1.contour_id < node2.contour_id:
+        pri = node1
+        sec = node2
+    else:
+        pri = node2
+        sec = node1
+    pri.count += sec.count
+    sec.count = 0
+    sec.parent = pri
 
 cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
@@ -617,6 +636,12 @@
                         contour_ids[ci,cj,ck] = j + 1
                         break
 
+cdef class FOFNode:
+    cdef np.int64_t tag, count
+    def __init__(self, np.int64_t tag):
+        self.tag = tag
+        self.count = 0
+
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
     cdef np.float64_t DW[3], DLE[3], DRE[3]
@@ -739,24 +764,16 @@
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
         contour_ids = np.ones(positions.shape[0], dtype="int64")
         contour_ids *= -1
-        # Sort on our particle IDs.
-        for i in range(doff.shape[0]):
-            if doff[i] < 0: continue
-            for j in range(pcount[i]):
-                offset = pind[doff[i] + j]
-                c1 = container[offset]
-                c0 = contour_find(c1)
-                contour_ids[offset] = c0.contour_id
-                c0.count += 1
-        for i in range(doff.shape[0]):
-            if doff[i] < 0: continue
-            for j in range(pcount[i]):
-                offset = pind[doff[i] + j]
-                c1 = container[offset]
-                if c1 == NULL: continue
-                c0 = contour_find(c1)
-                if c0.count < self.minimum_count:
-                    contour_ids[offset] = -1
+        # Perform one last contour_find on each.  Note that we no longer need
+        # to look at any of the doff or internal offset stuff.
+        for i in range(positions.shape[0]):
+            if container[i] == NULL: continue
+            container[i] = contour_find(container[i])
+        for i in range(positions.shape[0]):
+            if container[i] == NULL: continue
+            c0 = container[i]
+            if c0.count < self.minimum_count: continue
+            contour_ids[i] = particle_ids[pind[c0.contour_id]]
         free(container)
         del pind
         return contour_ids
@@ -810,6 +827,7 @@
                                 self.linking_length2, edges)
             if link == 0: continue
             if c1 == NULL:
+                c0.count += 1
                 container[pind1] = c0
             elif c0.contour_id != c1.contour_id:
                 contour_union(c0, c1)

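contour_union now merges by tree size instead of by raw contour_id; a plain-Python
sketch of the rule (the Cython contour_find also shifts counts up to the root during
path compression, which is elided here):

    class Node(object):
        def __init__(self, contour_id):
            self.contour_id = contour_id
            self.count = 1        # every new contour starts as a tree of one
            self.parent = None

    def find(node):
        root = node
        while root.parent is not None:
            root = root.parent
        while node.parent is not None:   # path compression
            node.parent, node = root, node.parent
        return root

    def union(a, b):
        a, b = find(a), find(b)
        if a is b:
            return
        # The larger tree absorbs the smaller; ties go to the smaller contour_id.
        if (b.count > a.count) or (b.count == a.count and b.contour_id < a.contour_id):
            a, b = b, a
        a.count += b.count
        b.count = 0
        b.parent = a
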
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/utilities/lib/ragged_arrays.pyx
--- /dev/null
+++ b/yt/utilities/lib/ragged_arrays.pyx
@@ -0,0 +1,97 @@
+"""
+Some simple operations for operating on ragged arrays
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef fused numpy_dt:
+    np.float32_t
+    np.float64_t
+    np.int32_t
+    np.int64_t
+
+cdef numpy_dt r_min(numpy_dt a, numpy_dt b):
+    if a < b: return a
+    return b
+
+cdef numpy_dt r_max(numpy_dt a, numpy_dt b):
+    if a > b: return a
+    return b
+
+cdef numpy_dt r_add(numpy_dt a, numpy_dt b):
+    return a + b
+
+cdef numpy_dt r_subtract(numpy_dt a, numpy_dt b):
+    return a - b
+
+cdef numpy_dt r_multiply(numpy_dt a, numpy_dt b):
+    return a * b
+
+@cython.cdivision(True)
+cdef numpy_dt r_divide(numpy_dt a, numpy_dt b):
+    return a / b
+
+def index_unop(np.ndarray[numpy_dt, ndim=1] values,
+              np.ndarray[np.int64_t, ndim=1] indices,
+              np.ndarray[np.int64_t, ndim=1] sizes,
+              operation):
+    cdef numpy_dt mi, ma
+    if numpy_dt == np.float32_t:
+        dt = "float32"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.float64_t:
+        dt = "float64"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.int32_t:
+        dt = "int32"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    elif numpy_dt == np.int64_t:
+        dt = "int64"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    cdef np.ndarray[numpy_dt] out_values = np.zeros(sizes.size, dtype=dt)
+    cdef numpy_dt (*func)(numpy_dt a, numpy_dt b)
+    # Now we figure out our function.  At present, we only allow addition and
+    # multiplication, because they are commutative and easy to bootstrap.
+    cdef numpy_dt ival, val
+    if operation == "sum":
+        ival = 0
+        func = r_add
+    elif operation == "prod":
+        ival = 1
+        func = r_multiply
+    elif operation == "max":
+        ival = mi
+        func = r_max
+    elif operation == "min":
+        ival = ma
+        func = r_min
+    else:
+        raise NotImplementedError
+    cdef np.int64_t i, j, ind_ind, ind_arr
+    ind_ind = 0
+    for i in range(sizes.size):
+        # Each entry in sizes is the size of the array
+        val = ival
+        for j in range(sizes[i]):
+            ind_arr = indices[ind_ind]
+            val = func(val, values[ind_arr])
+            ind_ind += 1
+        out_values[i] = val
+    return out_values

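A minimal use of index_unop once the extension is built: three ragged sub-arrays of
sizes 2, 3 and 1, addressed through a flat index array and reduced with "sum":

    import numpy as np
    from yt.utilities.lib.ragged_arrays import index_unop

    values = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    indices = np.arange(6, dtype="int64")
    sizes = np.array([2, 3, 1], dtype="int64")

    out = index_unop(values, indices, sizes, "sum")
    # out == [3., 12., 6.]: sums over values[0:2], values[2:5] and values[5:6].
    assert np.allclose(out, [3.0, 12.0, 6.0])
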
diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -139,6 +139,8 @@
           )
     config.add_extension("write_array",
                          ["yt/utilities/lib/write_array.pyx"])
+    config.add_extension("ragged_arrays",
+                         ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/utilities/lib/tests/test_ragged_arrays.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_ragged_arrays.py
@@ -0,0 +1,36 @@
+from yt.testing import *
+import numpy as np
+from yt.utilities.lib.ragged_arrays import index_unop
+
+operations = ((np.sum, "sum"),
+              (np.prod, "prod"),
+              (np.max, "max"),
+              (np.min, "min"))
+dtypes = ((-1e8, 1e8, "float32"),
+          (-1e8, 1e8, "float64"),
+          (-10000, 10000, "int32"),
+          (-100000000, 100000000, "int64"))
+
+def test_index_unop():
+    np.random.seed(0x4d3d3d3)
+    indices = np.arange(1000)
+    np.random.shuffle(indices)
+    sizes = np.array([
+        200, 50, 50, 100, 32, 32, 32, 32, 32, 64, 376], dtype="int64")
+    for mi, ma, dtype in dtypes:
+        for op, operation in operations:
+            # Create a random set of values
+            values = np.random.random(1000)
+            if operation != "prod":
+                values = values * ma + (ma - mi)
+            if operation == "prod" and dtype.startswith("int"):
+                values = values.astype(dtype)
+                values[values != 0] = 1
+                values[values == 0] = -1
+            values = values.astype(dtype)
+            out_values = index_unop(values, indices, sizes, operation)
+            i = 0
+            for j, v in enumerate(sizes):
+                arr = values[indices[i:i+v]]
+                yield assert_equal, op(arr), out_values[j]
+                i += v

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -1,3 +1,17 @@
+"""
+A base class for "image" plots with colorbars.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import __builtin__
 import base64
 import numpy as np

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -17,8 +17,6 @@
 import matplotlib
 import types
 import sys
-import os
-from yt.extern.six.moves import builtins, StringIO
 import warnings
 
 from matplotlib.delaunay.triangulate import Triangulation as triang
@@ -39,12 +37,19 @@
     ImagePlotContainer, log_transform, linear_transform, \
     invalidate_data, invalidate_plot, apply_callback
 
+from yt.data_objects.time_series import \
+    DatasetSeries
+from yt.extern.six.moves import \
+    StringIO
 from yt.funcs import \
     mylog, iterable, ensure_list, \
-    fix_axis, assert_valid_width_tuple
-from yt.units.unit_object import Unit
+    fix_axis, validate_width_tuple
+from yt.units.unit_object import \
+    Unit
 from yt.units.unit_registry import \
-     UnitParseError
+    UnitParseError
+from yt.units.yt_array import \
+    YTArray, YTQuantity
 from yt.utilities.png_writer import \
     write_png_to_string
 from yt.utilities.definitions import \
@@ -57,10 +62,6 @@
     YTCannotParseUnitDisplayName, \
     YTUnitConversionError
 
-from yt.data_objects.time_series import \
-    DatasetSeries
-from yt.units.yt_array import YTArray, YTQuantity
-
 # Some magic for dealing with pyparsing being included or not
 # included in matplotlib (not in gentoo, yes in everything else)
 # Also accounting for the fact that in 1.2.0, pyparsing got renamed.
@@ -82,18 +83,10 @@
     else:
         return u
 
-def assert_valid_width_tuple(width):
-    if not iterable(width) or len(width) != 2:
-        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
-    if not isinstance(width[0], Number) and isinstance(width[1], basestring):
-        msg = "width (%s) is invalid. " % str(width)
-        msg += "Valid widths look like this: (12, 'au')"
-        raise YTInvalidWidthError(msg)
-
 def validate_iterable_width(width, pf, unit=None):
     if isinstance(width[0], tuple) and isinstance(width[1], tuple):
-        assert_valid_width_tuple(width[0])
-        assert_valid_width_tuple(width[1])
+        validate_width_tuple(width[0])
+        validate_width_tuple(width[1])
         return (pf.quan(width[0][0], fix_unitary(width[0][1])),
                 pf.quan(width[1][0], fix_unitary(width[1][1])))
     elif isinstance(width[0], Number) and isinstance(width[1], Number):
@@ -102,11 +95,11 @@
     elif isinstance(width[0], YTQuantity) and isinstance(width[1], YTQuantity):
         return (pf.quan(width[0]), pf.quan(width[1]))
     else:
-        assert_valid_width_tuple(width)
+        validate_width_tuple(width)
         # If width and unit are both valid width tuples, we
         # assume width controls x and unit controls y
         try:
-            assert_valid_width_tuple(unit)
+            validate_width_tuple(unit)
             return (pf.quan(width[0], fix_unitary(width[1])),
                     pf.quan(unit[0], fix_unitary(unit[1])))
         except YTInvalidWidthError:
@@ -137,7 +130,7 @@
         raise YTInvalidWidthError(width)
     if depth is not None:
         if iterable(depth):
-            assert_valid_width_tuple(depth)
+            validate_width_tuple(depth)
             depth = (pf.quan(depth[0], fix_unitary(depth[1])), )
         elif isinstance(depth, Number):
             depth = (pf.quan(depth, 'code_length',
@@ -180,8 +173,8 @@
     elif pf.geometry == "spherical":
         if axis == 0:
             width = pf.domain_width[1], pf.domain_width[2]
-            center = 0.5*(pf.domain_left_edge +
-                pf.domain_right_edge).in_units("code_length")
+            center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
+            center.convert_to_units("code_length")
         else:
             # Our default width here is the full domain
             width = [pf.domain_right_edge[0]*2.0, pf.domain_right_edge[0]*2.0]
@@ -217,7 +210,8 @@
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
 
-    bounds = tuple( ( (2*(i%2))-1)*width[i//2]/2 for i in range(len(width)*2))
+    w = tuple(el.in_units('unitary') for el in width)
+    bounds = tuple(((2*(i % 2))-1)*w[i//2]/2 for i in range(len(w)*2))
 
     return (bounds, center)
 
@@ -343,10 +337,9 @@
             bounds = self.xlim+self.ylim
         if self._frb_generator is ObliqueFixedResolutionBuffer:
             bounds = np.array(bounds)
-        self.frb = self._frb_generator(self.data_source,
-                                        bounds, self.buff_size,
-                                        self.antialias,
-                                        periodic=self._periodic)
+
+        self.frb = self._frb_generator(self.data_source, bounds, self.buff_size,
+                                       self.antialias, periodic=self._periodic)
         if old_fields is None:
             self.frb._get_data_source_fields()
         else:
@@ -400,8 +393,7 @@
         if len(deltas) != 2:
             raise RuntimeError(
                 "The pan function accepts a two-element sequence.\n"
-                "Received %s." % (deltas, )
-                )
+                "Received %s." % (deltas, ))
         if isinstance(deltas[0], Number) and isinstance(deltas[1], Number):
             deltas = (self.pf.quan(deltas[0], 'code_length'),
                       self.pf.quan(deltas[1], 'code_length'))
@@ -413,8 +405,7 @@
         else:
             raise RuntimeError(
                 "The arguments of the pan function must be a sequence of floats,\n"
-                "quantities, or (float, unit) tuples. Received %s." % (deltas, )
-                )
+                "quantities, or (float, unit) tuples. Received %s." % (deltas, ))
         self.xlim = (self.xlim[0] + deltas[0], self.xlim[1] + deltas[0])
         self.ylim = (self.ylim[0] + deltas[1], self.ylim[1] + deltas[1])
         return self
@@ -480,10 +471,10 @@
             self.ylim = tuple(bounds[2:4])
             if len(bounds) == 6:
                 self.zlim = tuple(bounds[4:6])
-        mylog.info("xlim = %f %f" %self.xlim)
-        mylog.info("ylim = %f %f" %self.ylim)
+        mylog.info("xlim = %f %f" % self.xlim)
+        mylog.info("ylim = %f %f" % self.ylim)
         if hasattr(self,'zlim'):
-            mylog.info("zlim = %f %f" %self.zlim)
+            mylog.info("zlim = %f %f" % self.zlim)
 
     @invalidate_data
     def set_width(self, width, unit = None):
@@ -634,12 +625,11 @@
         Examples
         --------
 
-        >>> p = ProjectionPlot(pf, "y", "density")
-        >>> p.show()
+        >>> from yt import load
+        >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> p = ProjectionPlot(ds, "y", "Density")
         >>> p.set_axes_unit("kpc")
-        >>> p.show()
-        >>> p.set_axes_unit(None)
-        >>> p.show()
+
         """
         # blind except because it could be in conversion_factors or units
         if unit_name is not None:
@@ -694,8 +684,8 @@
             xllim, xrlim = self.xlim
             yllim, yrlim = self.ylim
         elif origin[2] == 'domain':
-            xax = pf.coordinates.x_axis[axis_index]
-            yax = pf.coordinates.y_axis[axis_index]
+            xax = self.pf.coordinates.x_axis[axis_index]
+            yax = self.pf.coordinates.y_axis[axis_index]
             xllim = self.pf.domain_left_edge[xax]
             xrlim = self.pf.domain_right_edge[xax]
             yllim = self.pf.domain_left_edge[yax]
@@ -706,8 +696,8 @@
         else:
             mylog.warn("origin = {0}".format(origin))
             msg = \
-              ('origin keyword "{0}" not recognized, must declare "domain" '
-               'or "center" as the last term in origin.').format(self.origin)
+                ('origin keyword "{0}" not recognized, must declare "domain" '
+                 'or "center" as the last term in origin.').format(self.origin)
             raise RuntimeError(msg)
 
         if origin[0] == 'lower':
@@ -756,7 +746,8 @@
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:
-                self.aspect = np.float64(self.pf.quan(1.0, unit_y)/(self.pf.quan(1.0, unit_x)))
+                self.aspect = np.float64(self.pf.quan(1.0, unit_y) /
+                                         self.pf.quan(1.0, unit_x))
 
             extentx = [(self.xlim[i] - xc).in_units(unit_x) for i in (0, 1)]
             extenty = [(self.ylim[i] - yc).in_units(unit_y) for i in (0, 1)]
@@ -771,11 +762,10 @@
             image = self.frb[f]
 
             if image.max() == image.min():
-              if self._field_transform[f] == log_transform:
-                mylog.warning("Plot image for field %s has zero dynamic " \
-                              "range. Min = Max = %d." % \
-                              (f, image.max()))
-                mylog.warning("Switching to linear colorbar scaling.")
+                if self._field_transform[f] == log_transform:
+                    mylog.warning("Plot image for field %s has zero dynamic "
+                                  "range. Min = Max = %d." % (f, image.max()))
+                    mylog.warning("Switching to linear colorbar scaling.")
                 self._field_transform[f] = linear_transform
 
             fp = self._font_properties
@@ -893,10 +883,9 @@
             if self._font_color is not None:
                 ax = self.plots[f].axes
                 cbax = self.plots[f].cb.ax
-                labels = \
-                  ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() + \
-                  cbax.yaxis.get_ticklabels() + \
-                  [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
+                labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
+                labels += cbax.yaxis.get_ticklabels()
+                labels += [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
                 for label in labels:
                     label.set_color(self._font_color)
 
@@ -1013,8 +1002,9 @@
 
     This will save an image to the file 'sliceplot_Density
 
-    >>> pf = load('galaxy0030/galaxy0030')
-    >>> p = SlicePlot(pf,2,'Density','c',(20,'kpc'))
+    >>> from yt import load
+    >>> ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> p = SlicePlot(ds, 2, 'density', 'c', (20, 'kpc'))
     >>> p.save('sliceplot')
 
     """
@@ -1138,11 +1128,12 @@
     Examples
     --------
 
-    This is a very simple way of creating a projection plot.
+    Create a projection plot with a width of 20 kiloparsecs centered on the
+    center of the simulation box:
 
-    >>> pf = load('galaxy0030/galaxy0030')
-    >>> p = ProjectionPlot(pf,2,'Density','c',(20,'kpc'))
-    >>> p.save('sliceplot')
+    >>> from yt import load
+    >>> ds = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> p = ProjectionPlot(ds, "z", "density", width=(20, "kpc"))
 
     """
     _plot_type = 'Projection'
@@ -1159,8 +1150,8 @@
         (bounds, center) = get_window_parameters(axis, center, width, pf)
         if field_parameters is None: field_parameters = {}
         proj = pf.proj(fields, axis, weight_field=weight_field,
-                         center=center, data_source=data_source,
-                         field_parameters = field_parameters, style = proj_style)
+                       center=center, data_source=data_source,
+                       field_parameters = field_parameters, style = proj_style)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
                              fontsize=fontsize, window_size=window_size, aspect=aspect)
         if axes_unit is None:
@@ -1409,14 +1400,14 @@
         else:
             fields = self.frb.data.keys()
             addl_keys = {}
-        if self._colorbar_valid == False:
+        if self._colorbar_valid is False:
             addl_keys['colorbar_image'] = self._get_cbar_image()
             self._colorbar_valid = True
         min_zoom = 200*self.pf.index.get_smallest_dx() * self.pf['unitary']
         for field in fields:
             to_plot = apply_colormap(self.frb[field],
-                func = self._field_transform[field],
-                cmap_name = self._colormaps[field])
+                                     func = self._field_transform[field],
+                                     cmap_name = self._colormaps[field])
             pngs = self._apply_modifications(to_plot)
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
@@ -1482,7 +1473,7 @@
         nx = self.frb.buff_size[0]/skip
         ny = self.frb.buff_size[1]/skip
         new_frb = FixedResolutionBuffer(self.frb.data_source,
-                        self.frb.bounds, (nx,ny))
+                                        self.frb.bounds, (nx,ny))
 
         axis = self.frb.data_source.axis
         xax = self.frb.data_source.pf.coordinates.x_axis[axis]
@@ -1543,17 +1534,16 @@
         self.set_center((new_x, new_y))
 
     def get_field_units(self, field, strip_mathml = True):
-        ds = self.frb.data_source
-        pf = self.pf
+        source = self.data_source
         field = self._check_field(field)
-        finfo = self.data_source.pf._get_field_info(*field)
-        if ds._type_name in ("slice", "cutting"):
+        finfo = source.pf._get_field_info(*field)
+        if source._type_name in ("slice", "cutting"):
             units = finfo.get_units()
-        elif ds._type_name == "proj" and (ds.weight_field is not None or 
-                                        ds.proj_style == "mip"):
-            units = finfo.get_units()
-        elif ds._type_name == "proj":
-            units = finfo.get_projected_units()
+        elif source._type_name == "proj":
+            if source.weight_field is not None or source.proj_style in ("mip", "sum"):
+                units = finfo.get_units()
+            else:
+                units = finfo.get_projected_units()
         else:
             units = ""
         if strip_mathml:
@@ -1686,7 +1676,7 @@
     axis : int or one of 'x', 'y', 'z'
          An int corresponding to the axis to slice along (0=x, 1=y, 2=z)
          or the axis name itself.  If specified, this will replace normal.
-         
+
     The following are nominally keyword arguments passed onto the respective
     slice plot objects generated by this function.
 
@@ -1772,10 +1762,12 @@
     Examples
     --------
 
-    >>> slc = SlicePlot(pf, "x", "Density", center=[0.2,0.3,0.4])
-    >>> slc = SlicePlot(pf, 2, "Temperature")
-    >>> slc = SlicePlot(pf, [0.4,0.2,-0.1], "Pressure",
-                        north_vector=[0.2,-0.3,0.1])
+    >>> from yt import load
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> slc = SlicePlot(ds, "x", "density", center=[0.2,0.3,0.4])
+    >>>
+    >>> slc = SlicePlot(ds, [0.4, 0.2, -0.1], "pressure",
+    ...                 north_vector=[0.2,-0.3,0.1])
 
     """
     # Make sure we are passed a normal
@@ -1797,23 +1789,23 @@
         else:
             normal = np.array(normal)
             np.divide(normal, np.dot(normal,normal), normal)
-        
+
     # by now the normal should be properly set to get either a On/Off Axis plot
     if iterable(normal) and not isinstance(normal, basestring):
         # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs
-        if 'origin' in kwargs: 
+        if 'origin' in kwargs:
             msg = "Ignoring 'origin' keyword as it is ill-defined for " \
                   "an OffAxisSlicePlot object."
             mylog.warn(msg)
             del kwargs['origin']
-        
+
         return OffAxisSlicePlot(pf, normal, fields, *args, **kwargs)
     else:
         # north_vector not used in AxisAlignedSlicePlots; remove it if in kwargs
-        if 'north_vector' in kwargs: 
+        if 'north_vector' in kwargs:
             msg = "Ignoring 'north_vector' keyword as it is ill-defined for " \
                   "an AxisAlignedSlicePlot object."
             mylog.warn(msg)
             del kwargs['north_vector']
-        
+
         return AxisAlignedSlicePlot(pf, normal, fields, *args, **kwargs)

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 18866bc72d646944545cc637984a69b727f81344 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -16,6 +16,7 @@
 
 import __builtin__
 import base64
+import os
 import types
 
 from functools import wraps
@@ -230,7 +231,8 @@
             The output file keyword.
         
         """
-        if not self._plot_valid: self._setup_plots()
+        if not self._plot_valid:
+            self._setup_plots()
         unique = set(self.figures.values())
         if len(unique) < len(self.figures):
             figiter = izip(xrange(len(unique)), sorted(unique))
@@ -677,9 +679,11 @@
             cax = None
             draw_colorbar = True
             draw_axes = True
+            zlim = (None, None)
             if f in self.plots:
                 draw_colorbar = self.plots[f]._draw_colorbar
                 draw_axes = self.plots[f]._draw_axes
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
                 if self.plots[f].figure is not None:
                     fig = self.plots[f].figure
                     axes = self.plots[f].axes
@@ -688,13 +692,14 @@
             x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)
             x_title, y_title, z_title = self._get_field_title(f, self.profile)
 
-            if z_scale == 'log':
-                zmin = data[data > 0.0].min()
-                self._field_transform[f] = log_transform
-            else:
-                zmin = data.min()
-                self._field_transform[f] = linear_transform
-            zlim = [zmin, data.max()]
+            if zlim == (None, None):
+                if z_scale == 'log':
+                    zmin = data[data > 0.0].min()
+                    self._field_transform[f] = log_transform
+                else:
+                    zmin = data.min()
+                    self._field_transform[f] = linear_transform
+                zlim = [zmin, data.max()]
 
             fp = self._font_properties
             f = self.profile.data_source._determine_fields(f)[0]
@@ -740,9 +745,11 @@
         >>> plot.save(mpl_kwargs={'bbox_inches':'tight'})
         
         """
-
-        if not self._plot_valid: self._setup_plots()
-        if mpl_kwargs is None: mpl_kwargs = {}
+        names = []
+        if not self._plot_valid:
+            self._setup_plots()
+        if mpl_kwargs is None:
+            mpl_kwargs = {}
         xfn = self.profile.x_field
         yfn = self.profile.y_field
         if isinstance(xfn, types.TupleType):
@@ -751,17 +758,25 @@
             yfn = yfn[1]
         for f in self.profile.field_data:
             _f = f
-            if isinstance(f, types.TupleType): _f = _f[1]
+            if isinstance(f, types.TupleType):
+                _f = _f[1]
             middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
             if name is None:
                 prefix = self.profile.pf
-                name = "%s.png" % prefix
+            if name[-1] == os.sep and not os.path.isdir(name):
+                os.mkdir(name)
+            if os.path.isdir(name) and name != str(self.pf):
+                prefix = name + (os.sep if name[-1] != os.sep else '') + str(self.pf)
             suffix = get_image_suffix(name)
-            prefix = name[:name.rfind(suffix)]
+            if suffix != '':
+                for k, v in self.plots.iteritems():
+                    names.append(v.save(name, mpl_kwargs))
+                return names
+
             fn = "%s_%s%s" % (prefix, middle, suffix)
-            if not suffix:
-                suffix = ".png"
+            names.append(fn)
             self.plots[f].save(fn, mpl_kwargs)
+        return names
 
     @invalidate_plot
     def set_title(self, field, title):
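
With the save() rewrite above, PhasePlot.save collects and returns the list of
filenames it writes, and an explicit filename short-circuits the prefix logic.
A hedged usage sketch against that behaviour (the dataset path and field names
are placeholders from the yt sample data, not part of this changeset):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()
    plot = yt.PhasePlot(ad, "density", "temperature", "cell_mass")
    # Passing a full filename exercises the new early-return branch; the
    # list of files actually written comes back to the caller.
    names = plot.save("galaxy_phase.png")
    print(names)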


https://bitbucket.org/yt_analysis/yt/commits/67a2ffab402e/
Changeset:   67a2ffab402e
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-22 17:09:44
Summary:     If you only have one cell, get one more so that the slice is correct.
Affected #:  1 file
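
The change below widens degenerate selections by one cell. A minimal NumPy
sketch of the same bookkeeping (the helper name and signature here are
illustrative, not the SDF frontend's API):

    import numpy as np

    def bbox_to_index_range(left, right, rmin, domain_width, domain_dims):
        # Map physical bounds onto integer indices of the index grid.
        ileft = np.floor((left - rmin) / domain_width * domain_dims)
        iright = np.floor((right - rmin) / domain_width * domain_dims)
        # If the selection collapses to a single cell along an axis, widen
        # it by one cell so a slice through that cell still selects data.
        iright[iright <= ileft + 1] += 1
        return ileft, iright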

diff -r 121428f8a6184b22a4b0e3bf492e64ea6fb926c2 -r 67a2ffab402e6a6e934936b58b4eced659b27375 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -611,6 +611,7 @@
         """
         ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
         iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+        iright[iright <= ileft+1] += 1
 
         return self.get_ibbox(ileft, iright)
 


https://bitbucket.org/yt_analysis/yt/commits/3499697af593/
Changeset:   3499697af593
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-22 17:09:44
Summary:     If you only have one cell, get one more so that the slice is correct.
Affected #:  1 file

diff -r b41d5e789a35ce959ecb1d56187ffad172be1f65 -r 3499697af5933ca592074b251f3ef028343dad12 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -614,6 +614,7 @@
         """
         ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
         iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+        iright[iright <= ileft+1] += 1
 
         return self.get_ibbox(ileft, iright)
 


https://bitbucket.org/yt_analysis/yt/commits/a6a3676b6f2a/
Changeset:   a6a3676b6f2a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-22 03:50:09
Summary:     More optimizations for spheres.
Affected #:  3 files
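
The pxd hunk below adds an inline _periodic_dist helper used by the sphere
selector; it is the per-axis minimum-image distance. A rough NumPy equivalent,
for reference (the standalone function name here is made up):

    import numpy as np

    def periodic_dist(x1, x2, dw, periodic):
        # Signed separation under the minimum-image convention: on a
        # periodic axis, wrap separations larger than half the domain
        # width dw back into [-dw/2, dw/2].
        rel = np.asarray(x1, dtype="float64") - x2
        if not periodic:
            return rel
        rel = np.where(rel > 0.5 * dw, rel - dw, rel)
        rel = np.where(rel < -0.5 * dw, rel + dw, rel)
        return rel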

diff -r 18866bc72d646944545cc637984a69b727f81344 -r a6a3676b6f2a32cda01ad891cd235096867b4851 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -57,3 +57,13 @@
 cdef class OctreeSubsetSelector(SelectorObject):
     cdef SelectorObject base_selector
     cdef public np.int64_t domain_id
+
+cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,
+                                        np.float64_t dw, bint periodic) nogil:
+    cdef np.float64_t rel = x1 - x2
+    if not periodic: return rel
+    if rel > dw * 0.5:
+        rel -= dw
+    elif rel < -dw * 0.5:
+        rel += dw
+    return rel

diff -r 18866bc72d646944545cc637984a69b727f81344 -r a6a3676b6f2a32cda01ad891cd235096867b4851 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -24,6 +24,7 @@
 from .oct_visitors cimport cind
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
+from yt.data_objects.octree_subset import YTPositionArray
 
 cdef extern from "math.h":
     double exp(double x) nogil
@@ -331,7 +332,7 @@
         # domain_width is already in code units, and we assume what is fed in
         # is too.
         cdef np.float64_t rel = x1 - x2
-        if self.periodicity[d] :
+        if self.periodicity[d]:
             if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
             elif rel < -self.domain_width[d] * 0.5:
@@ -487,11 +488,12 @@
         cdef int i
         cdef np.float64_t pos[3]
         cdef np.ndarray[np.uint8_t, ndim=1] mask 
-        mask = np.zeros(x.shape[0], dtype='uint8')
+        mask = np.empty(x.shape[0], dtype='uint8')
         _ensure_code(x)
         _ensure_code(y)
         _ensure_code(z)
 
+
         # this is to allow selectors to optimize the point vs
         # 0-radius sphere case.  These two may have different 
         # effects for 0-volume selectors, however (collision 
@@ -513,7 +515,7 @@
                     mask[i] = self.select_sphere(pos, radius)
                     count += mask[i]
         if count == 0: return None
-        return mask.astype("bool")
+        return mask.view("bool")
 
     def __hash__(self):
         return hash(self._hash_vals() + self._base_hash())
@@ -572,9 +574,12 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
-            if pos[i] < self.bbox[i][0] or pos[i] > self.bbox[i][1]:
-                if self.check_box[i]: return 0
-            dist = self.difference(pos[i], self.center[i], i)
+            if self.check_box[i] and \
+              (pos[i] < self.bbox[i][0] or 
+               pos[i] > self.bbox[i][1]):
+                return 0
+            dist = _periodic_dist(pos[i], self.center[i], self.domain_width[i],
+                                  self.periodicity[i])
             dist2 += dist*dist
             if dist2 > self.radius2: return 0
         return 1

diff -r 18866bc72d646944545cc637984a69b727f81344 -r a6a3676b6f2a32cda01ad891cd235096867b4851 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -252,7 +252,7 @@
                     "Perhaps you meant to do something like this instead: \n"
                     "ds.arr(%s, \"%s\")" % (input_array, input_units)
                     )
-        if _astropy.units is not None:
+        if _astropy._units is not None:
             if isinstance(input_array, _astropy.units.quantity.Quantity):
                 return cls.from_astropy(input_array)
         if isinstance(input_array, YTArray):


https://bitbucket.org/yt_analysis/yt/commits/4b1007418f90/
Changeset:   4b1007418f90
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-22 04:15:59
Summary:     A few more micro-opts, and a disabled alternate radius calculation.
Affected #:  3 files
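
The experimental _particle_radius below does the same periodic correction with
array broadcasting over (N, 3) position arrays instead of per-axis scalars. A
sketch of that idea using the conventional full-domain-width shift (names and
the exact offset convention are illustrative, not the committed code):

    import numpy as np

    def periodic_radius(pos, center, dw, periodicity):
        # pos: (N, 3) positions; center, dw, periodicity: length-3 arrays.
        dist = pos - center
        # Pull any component that has wrapped by more than half a domain
        # width back toward the center; non-periodic axes are untouched.
        dist -= periodicity * (dist > 0.5 * dw) * dw
        dist += periodicity * (dist < -0.5 * dw) * dw
        return np.sqrt((dist * dist).sum(axis=1))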

diff -r a6a3676b6f2a32cda01ad891cd235096867b4851 -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -328,6 +328,15 @@
         get_radius
 
     def _particle_radius(field, data):
+        dist = data["particle_position"] - data.get_field_parameter("center")
+        dw = data.pf.domain_width
+        offset = dist.copy()
+        offset[:] = 0.0
+        offset += data.pf.periodicity * (dist > dw/2.0) * -dw/2.0
+        offset += data.pf.periodicity * (dist < dw/2.0) *  dw/2.0
+        dist += offset
+        dist = np.sqrt((dist * dist).sum(axis=1))
+        return dist
         return get_radius(data, "particle_position_")
     registry.add_field((ptype, "particle_radius"),
               function=_particle_radius,

diff -r a6a3676b6f2a32cda01ad891cd235096867b4851 -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -54,6 +54,7 @@
     )
 
     known_particle_fields = (
+        ("particle_position", ("code_length", [], None)),
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),

diff -r a6a3676b6f2a32cda01ad891cd235096867b4851 -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -96,6 +96,7 @@
 
 class StreamParticleIOHandler(BaseIOHandler):
 
+    _vector_fields = ("particle_position", "particle_velocity")
     _dataset_type = "stream_particles"
 
     def __init__(self, pf):
@@ -124,8 +125,13 @@
         for data_file in data_files:
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
-                x, y, z = (f[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz')
+                if (ptype, "particle_position") in f:
+                    x = f[ptype, "particle_position"][:,0]
+                    y = f[ptype, "particle_position"][:,1]
+                    z = f[ptype, "particle_position"][:,2]
+                else:
+                    x, y, z = (f[ptype, "particle_position_%s" % ax]
+                               for ax in 'xyz')
                 mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:


https://bitbucket.org/yt_analysis/yt/commits/d4bf4fed7f06/
Changeset:   d4bf4fed7f06
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-22 15:11:15
Summary:     Allow for over-allocating particle fields based on oct index.
Affected #:  4 files
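
The io_handler.py hunk below switches _read_particle_selection to an
allocate/fill/truncate pattern: estimate the per-type count (possibly too
high), allocate the result arrays once, fill them chunk by chunk, then trim.
A minimal sketch of that pattern (the helper is illustrative, not yt API):

    import numpy as np

    def read_with_overallocation(chunk_iter, estimated_count):
        values = np.empty(estimated_count, dtype="float64")
        filled = 0
        for vals in chunk_iter:          # each chunk yields a 1D array
            values[filled:filled + vals.size] = vals
            filled += vals.size
        # Truncate to what was actually stored, since the estimate may
        # overshoot.
        return values[:filled]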

diff -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 -r d4bf4fed7f06b7df096ea372ffa7783b3bc6563b yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -332,8 +332,8 @@
         dw = data.pf.domain_width
         offset = dist.copy()
         offset[:] = 0.0
-        offset += data.pf.periodicity * (dist > dw/2.0) * -dw/2.0
-        offset += data.pf.periodicity * (dist < dw/2.0) *  dw/2.0
+        offset += data.pf.periodicity * (dist >  dw/2.0) * -dw/2.0
+        offset += data.pf.periodicity * (dist < -dw/2.0) *  dw/2.0
         dist += offset
         dist = np.sqrt((dist * dist).sum(axis=1))
         return dist

diff -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 -r d4bf4fed7f06b7df096ea372ffa7783b3bc6563b yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -117,6 +117,19 @@
                               f[ptype, "particle_position_y"],
                               f[ptype, "particle_position_z"])
             
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        # This is allowed to over-estimate.  We probably *will*, too, because
+        # we're going to count *all* of the particles, not just individual
+        # types.
+        count = 0
+        psize = {}
+        for chunk in chunks:
+            for obj in chunk.objs:
+                count += selector.count_octs(obj.oct_handler, obj.domain_id)
+        for ptype in ptf:
+            psize[ptype] = self.pf.n_ref * count / float(obj.nz)
+        return psize
+
     def _read_particle_fields(self, chunks, ptf, selector):
         data_files = set([])
         for chunk in chunks:

diff -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 -r d4bf4fed7f06b7df096ea372ffa7783b3bc6563b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -24,7 +24,6 @@
 from .oct_visitors cimport cind
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
-from yt.data_objects.octree_subset import YTPositionArray
 
 cdef extern from "math.h":
     double exp(double x) nogil
@@ -51,6 +50,7 @@
 def _ensure_code(arr):
     if hasattr(arr, "convert_to_units"):
         arr.convert_to_units("code_length")
+        return arr.d
     return arr
 
 @cython.boundscheck(False)

diff -r 4b1007418f90719b3564b999d38d3ef6e6b4ff55 -r d4bf4fed7f06b7df096ea372ffa7783b3bc6563b yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -116,12 +116,17 @@
     def _read_chunk_data(self, chunk, fields):
         return {}
 
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
+        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+            psize[ptype] += selector.count_points(x, y, z, 0.0)
+        return dict(psize.items())
+
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
         ind = {}
         # We first need a set of masks for each particle type
         ptf = defaultdict(list)        # ON-DISK TO READ
-        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
         fsize = defaultdict(lambda: 0) # COUNT RV
         field_maps = defaultdict(list) # ptypes -> fields
         chunks = list(chunks)
@@ -139,17 +144,10 @@
                 ptf[ftype].append(fname)
                 field_maps[field].append(field)
         # We can't hash chunks, but otherwise this is a neat idea.
-        if 0 and hash(selector) == self._last_selector_id and \
-           all(ptype in self._last_selector_counts for ptype in ptf):
-            psize.update(self._last_selector_counts)
-        else:
-            # Now we have our full listing.
-            # Here, ptype_map means which particles contribute to a given type.
-            # And ptf is the actual fields from disk to read.
-            for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
-                psize[ptype] += selector.count_points(x, y, z, 0.0)
-            self._last_selector_counts = dict(**psize)
-            self._last_selector_id = hash(selector)
+        # Now we have our full listing.
+        # Here, ptype_map means which particles contribute to a given type.
+        # And ptf is the actual fields from disk to read.
+        psize = self._count_particles_chunks(chunks, ptf, selector)
         # Now we allocate
         # ptf, remember, is our mapping of what we want to read
         #for ptype in ptf:
@@ -175,6 +173,10 @@
                 #    field_f, my_ind, my_ind+vals.shape[0], field_r)
                 rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
                 ind[field_f] += vals.shape[0]
+        # Now we need to truncate all our fields, since we allow for
+        # over-estimating.
+        for field_f in ind:
+            rv[field_f] = rv[field_f][:ind[field_f]]
         return rv
 
 class IOHandlerExtracted(BaseIOHandler):


https://bitbucket.org/yt_analysis/yt/commits/9a0248df765f/
Changeset:   9a0248df765f
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-22 17:35:16
Summary:     Merging in latest sphere updates
Affected #:  29 files

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -3,10 +3,10 @@
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
-sp = ds.sphere("center", (15.0, "kpc"))
+# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
+# that this sphere is very small compared to the size of our final plot,
+# and it has a non-axially aligned L vector.
+sp = ds.sphere("m", (1.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
@@ -14,5 +14,5 @@
 print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
 p.save()

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2f774139560d94508c2c51b70930d46941d9ceef7228655de32a69634f6c6d83"
+  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -45,7 +45,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0)"
+      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0, z_axis_decomp=True)"
      ],
      "language": "python",
      "metadata": {},
@@ -179,6 +179,31 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "We can also make a projection of all the emission along the line of sight:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -32,11 +32,15 @@
     PyArrayObject    *xpos, *ypos, *zpos;
     xpos=ypos=zpos=NULL;
     float link = 0.2;
+    float fPeriod[3] = {1.0, 1.0, 1.0};
+	int nMembers = 8;
 
     int i;
 
-    if (!PyArg_ParseTuple(args, "OOO|f",
-        &oxpos, &oypos, &ozpos, &link))
+    if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
+        &oxpos, &oypos, &ozpos, &link,
+        &fPeriod[0], &fPeriod[1], &fPeriod[2],
+        &nMembers))
     return PyErr_Format(_FOFerror,
             "EnzoFOF: Invalid parameters.");
 
@@ -74,8 +78,8 @@
 
 	KDFOF kd;
 	int nBucket,j;
-	float fPeriod[3],fEps;
-	int nMembers,nGroup,bVerbose=1;
+	float fEps;
+	int nGroup,bVerbose=1;
 	int sec,usec;
 	
 	/* linking length */
@@ -83,9 +87,6 @@
 	fEps = link;
 	
 	nBucket = 16;
-	nMembers = 8;
-
-	for (j=0;j<3;++j) fPeriod[j] = 1.0;
 
     /* initialize the kd FOF structure */
 

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -66,13 +66,13 @@
         if isinstance(outputs, DatasetSeries):
             self.data_series = outputs
         else:
-            self.data_series = DatasetSeries.from_filenames(outputs)
+            self.data_series = DatasetSeries(outputs)
         self.masks = []
         self.sorts = []
         self.array_indices = []
         self.indices = indices
         self.num_indices = len(indices)
-        self.num_steps = len(filenames)
+        self.num_steps = len(outputs)
         self.times = []
 
         # Default fields 

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -317,7 +317,7 @@
             finfo = self.pf._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             units = finfo.units
-            if self.weight_field is None:
+            if self.weight_field is None and not self._sum_only:
                 # See _handle_chunk where we mandate cm
                 if units == '':
                     input_units = "cm"
@@ -329,7 +329,7 @@
             self[field] = YTArray(field_data[fi].ravel(),
                                   input_units=input_units,
                                   registry=self.pf.unit_registry)
-            if self.weight_field is None:
+            if self.weight_field is None and not self._sum_only:
                 u_obj = Unit(units, registry=self.pf.unit_registry)
                 if u_obj.is_code_unit and input_units != units \
                     or self.pf.no_cgs_equiv_length:

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -139,12 +139,14 @@
             return
         elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
             center = self.pf.arr(center, 'code_length')
-        elif center in ("c", "center"):
-            center = self.pf.domain_center
-        elif center == ("max"): # is this dangerous for race conditions?
-            center = self.pf.h.find_max("density")[1]
-        elif center.startswith("max_"):
-            center = self.pf.h.find_max(center[4:])[1]
+        elif isinstance(center, basestring):
+            if center.lower() in ("c", "center"):
+                center = self.pf.domain_center
+             # is this dangerous for race conditions?
+            elif center.lower() in ("max", "m"):
+                center = self.pf.h.find_max(("gas", "density"))[1]
+            elif center.startswith("max_"):
+                center = self.pf.h.find_max(center[4:])[1]
         else:
             center = np.array(center, dtype='float64')
         self.center = self.pf.arr(center, 'code_length')
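
The rewritten center parsing above accepts case-insensitive "c"/"center" and
"m"/"max" shorthands as well as "max_<field>". A usage sketch, assuming ds is
a loaded dataset as in the cookbook script above (field names are placeholders):

    sp_center = ds.sphere("c", (10.0, "kpc"))            # domain center
    sp_dense = ds.sphere("m", (1.0, "kpc"))              # peak of ("gas", "density")
    sp_hot = ds.sphere("max_temperature", (5.0, "kpc"))  # peak of another field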

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -12,7 +12,6 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import h5py as h5
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.units.yt_array import YTArray
 
@@ -26,7 +25,7 @@
     Parameters
     ----------
     input_array: array_like
-        A numpy ndarray, or list. 
+        A numpy ndarray, or list.
 
     Other Parameters
     ----------------
@@ -35,7 +34,7 @@
 
     Returns
     -------
-    obj: ImageArray object 
+    obj: ImageArray object
 
     Raises
     ------
@@ -55,15 +54,15 @@
     --------
     These are written in doctest format, and should illustrate how to
     use the function.  Use the variables 'pf' for the parameter file, 'pc' for
-    a plot collection, 'c' for a center, and 'L' for a vector. 
+    a plot collection, 'c' for a center, and 'L' for a vector.
 
     >>> im = np.zeros([64,128,3])
     >>> for i in xrange(im.shape[0]):
     ...     for k in xrange(im.shape[2]):
     ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
     ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
     >>> im_arr = ImageArray(im, info=myinfo)
@@ -84,38 +83,36 @@
         super(ImageArray, self).__array_finalize__(obj)
         self.info = getattr(obj, 'info', None)
 
-    def write_hdf5(self, filename):
+    def write_hdf5(self, filename, dataset_name=None):
         r"""Writes ImageArray to hdf5 file.
 
         Parameters
         ----------
         filename: string
-            Note filename not be modified.
-       
+        The filename to create and write a dataset to
+
+        dataset_name: string
+            The name of the dataset to create in the file.
+
         Examples
-        -------- 
+        --------
         >>> im = np.zeros([64,128,3])
         >>> for i in xrange(im.shape[0]):
         ...     for k in xrange(im.shape[2]):
         ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
         ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_hdf5('test_ImageArray.h5')
 
         """
-        array_name = self.info.get("name","image")
-
-        f = h5.File(filename)
-        if array_name in f.keys():
-            del f[array_name]
-        d = f.create_dataset(array_name, data=self)
-        for k, v in self.info.iteritems():
-            d.attrs.create(k, v)
-        f.close()
+        if dataset_name is None:
+            dataset_name = self.info.get("name", "image")
+        super(ImageArray, self).write_hdf5(filename, dataset_name=dataset_name,
+                                           info=self.info)
 
     def add_background_color(self, background='black', inline=True):
         r"""Adds a background color to a 4-channel ImageArray
@@ -126,7 +123,7 @@
 
         Parameters
         ----------
-        background: 
+        background:
             This can be used to set a background color for the image, and can
             take several types of values:
 
@@ -144,7 +141,7 @@
         -------
         out: ImageArray
             The modified ImageArray with a background color added.
-       
+
         Examples
         --------
         >>> im = np.zeros([64,128,4])
@@ -160,8 +157,8 @@
         >>> im_arr.write_png('black_bg.png')
         """
         assert(self.shape[-1] == 4)
-        
-        if background == None:
+
+        if background is None:
             background = (0., 0., 0., 0.)
         elif background == 'white':
             background = (1., 1., 1., 1.)
@@ -175,11 +172,10 @@
             out = self.copy()
 
         for i in range(3):
-            out[:,:,i] = self[:,:,i]*self[:,:,3] + \
-                    background[i]*background[3]*(1.0-self[:,:,3])
-        out[:,:,3] = self[:,:,3] + background[3]*(1.0-self[:,:,3]) 
-        return out 
-
+            out[:, :, i] = self[:, :, i]*self[:, :, 3]
+            out[:, :, i] += background[i]*background[3]*(1.0-self[:, :, 3])
+        out[:, :, 3] = self[:, :, 3]+background[3]*(1.0-self[:, :, 3])
+        return out
 
     def rescale(self, cmax=None, amax=None, inline=True):
         r"""Rescales the image to be in [0,1] range.
@@ -194,7 +190,7 @@
             corresponding to using the maximum value in the alpha channel.
         inline: boolean, optional
             Specifies whether or not the rescaling is done inline. If false,
-            a new copy of the ImageArray will be created, returned. 
+            a new copy of the ImageArray will be created, returned.
             Default:True.
 
         Returns
@@ -207,17 +203,18 @@
         This requires that the shape of the ImageArray to have a length of 3,
         and for the third dimension to be >= 3.  If the third dimension has
         a shape of 4, the alpha channel will also be rescaled.
-       
+
         Examples
-        -------- 
+        --------
         >>> im = np.zeros([64,128,4])
         >>> for i in xrange(im.shape[0]):
         ...     for k in xrange(im.shape[2]):
         ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
-        >>> im_arr.write_png('original.png')
-        >>> im_arr.rescale()
-        >>> im_arr.write_png('normalized.png')
+        >>> im = ImageArray(im)
+        >>> im.write_png('original.png')
+        >>> im.rescale()
+        >>> im.write_png('normalized.png')
 
         """
         assert(len(self.shape) == 3)
@@ -226,22 +223,22 @@
             out = self
         else:
             out = self.copy()
-        if cmax is None: 
-            cmax = self[:,:,:3].sum(axis=2).max()
+        if cmax is None:
+            cmax = self[:, :, :3].sum(axis=2).max()
 
-        np.multiply(self[:,:,:3], 1./cmax, out[:,:,:3])
+        np.multiply(self[:, :, :3], 1.0/cmax, out[:, :, :3])
 
         if self.shape[2] == 4:
             if amax is None:
-                amax = self[:,:,3].max()
+                amax = self[:, :, 3].max()
             if amax > 0.0:
-                np.multiply(self[:,:,3], 1./amax, out[:,:,3])
-        
+                np.multiply(self[:, :, 3], 1.0/amax, out[:, :, 3])
+
         np.clip(out, 0.0, 1.0, out)
         return out
 
     def write_png(self, filename, clip_ratio=None, background='black',
-                 rescale=True):
+                  rescale=True):
         r"""Writes ImageArray to png file.
 
         Parameters
@@ -250,9 +247,9 @@
             Note filename not be modified.
         clip_ratio: float, optional
             Image will be clipped before saving to the standard deviation
-            of the image multiplied by this value.  Useful for enhancing 
+            of the image multiplied by this value.  Useful for enhancing
             images. Default: None
-        background: 
+        background:
             This can be used to set a background color for the image, and can
             take several types of values:
 
@@ -265,7 +262,7 @@
         rescale: boolean, optional
             If True, will write out a rescaled image (without modifying the
             original image). Default: True
-       
+
         Examples
         --------
         >>> im = np.zeros([64,128,4])
@@ -292,25 +289,25 @@
         else:
             out = scaled
 
-        if filename[-4:] != '.png': 
+        if filename[-4:] != '.png':
             filename += '.png'
 
         if clip_ratio is not None:
-            nz = out[:,:,:3][out[:,:,:3].nonzero()]
+            nz = out[:, :, :3][out[:, :, :3].nonzero()]
             return write_bitmap(out.swapaxes(0, 1), filename,
-                                nz.mean() + \
-                                clip_ratio * nz.std())
+                                nz.mean() + clip_ratio*nz.std())
         else:
             return write_bitmap(out.swapaxes(0, 1), filename)
 
-    def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
+    def write_image(self, filename, color_bounds=None, channel=None,
+                    cmap_name="algae", func=lambda x: x):
         r"""Writes a single channel of the ImageArray to a png file.
 
         Parameters
         ----------
         filename: string
             Note filename not be modified.
-       
+
         Other Parameters
         ----------------
         channel: int
@@ -323,43 +320,44 @@
             An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         func : function, optional
-            A function to transform the buffer before applying a colormap. 
+            A function to transform the buffer before applying a colormap.
 
         Returns
         -------
         scaled_image : uint8 image that has been saved
-        
+
         Examples
         --------
-        
+
         >>> im = np.zeros([64,128])
         >>> for i in xrange(im.shape[0]):
-        ...     im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     im[i,:] = np.linspace(0.,0.3*i, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]),
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),
         ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_image('test_ImageArray.png')
 
         """
-        if filename[-4:] != '.png': 
+        if filename[-4:] != '.png':
             filename += '.png'
 
+        #TODO: Write info dict as png metadata
         if channel is None:
-            return write_image(self.swapaxes(0,1).to_ndarray(), filename,
+            return write_image(self.swapaxes(0, 1).to_ndarray(), filename,
                                color_bounds=color_bounds, cmap_name=cmap_name,
                                func=func)
         else:
-            return write_image(self.swapaxes(0,1)[:,:,channel].to_ndarray(),
+            return write_image(self.swapaxes(0, 1)[:, :, channel].to_ndarray(),
                                filename,
-                               color_bounds=color_bounds, cmap_name=cmap_name, 
+                               color_bounds=color_bounds, cmap_name=cmap_name,
                                func=func)
 
     def save(self, filename, png=True, hdf5=True):
         """
-        Saves ImageArray. 
+        Saves ImageArray.
 
         Arguments:
           filename: string
@@ -380,6 +378,3 @@
                 self.write_image("%s.png" % filename)
         if hdf5:
             self.write_hdf5("%s.h5" % filename)
-
-    __doc__ += np.ndarray.__doc__
-

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -447,12 +447,12 @@
         >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
         """
         if iterable(width):
-            assert_valid_width_tuple(width)
+            validate_width_tuple(width)
             width = self.pf.quan(width[0], width[1])
         if height is None:
             height = width
         elif iterable(height):
-            assert_valid_width_tuple(height)
+            validate_width_tuple(height)
             height = self.pf.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -34,7 +34,7 @@
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
+        np.multiply(r, r, r)
         np.add(radius2, r, radius2)
         if data.pf.dimensionality < i+1:
             break

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -328,6 +328,15 @@
         get_radius
 
     def _particle_radius(field, data):
+        dist = data["particle_position"] - data.get_field_parameter("center")
+        dw = data.pf.domain_width
+        offset = dist.copy()
+        offset[:] = 0.0
+        offset += data.pf.periodicity * (dist >  dw/2.0) * -dw/2.0
+        offset += data.pf.periodicity * (dist < -dw/2.0) *  dw/2.0
+        dist += offset
+        dist = np.sqrt((dist * dist).sum(axis=1))
+        return dist
         return get_radius(data, "particle_position_")
     registry.add_field((ptype, "particle_radius"),
               function=_particle_radius,

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -17,6 +17,7 @@
 import weakref
 import warnings
 import re
+import uuid
 
 from yt.config import ytcfg
 from yt.funcs import *
@@ -200,37 +201,49 @@
             self.parameter_file.field_units[k] = self.parameter_file.field_units[primary_fname]
 
     def _count_grids(self):
-        self.num_grids = self.pf.nprocs
+        self.num_grids = self.pf.parameters["nprocs"]
 
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
 
         # If nprocs > 1, decompose the domain into virtual grids
-        if pf.nprocs > 1:
-            bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
-                                                       pf.domain_right_edge)])
-            dims = np.array(pf.domain_dimensions)
-            # If we are creating a dataset of lines, only decompose along the position axes
-            if len(pf.line_database) > 0:
-                dims[pf.vel_axis] = 1
-            psize = get_psize(dims, pf.nprocs)
-            gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
-            self.grid_left_edge = self.pf.arr(gle, "code_length")
-            self.grid_right_edge = self.pf.arr(gre, "code_length")
-            self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
-            # If we are creating a dataset of lines, only decompose along the position axes
-            if len(pf.line_database) > 0:
-                self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
-
+        if self.num_grids > 1:
+            if self.pf.z_axis_decomp:
+                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
+                                                            self.num_grids).astype("int")
+                self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
+                self.grid_left_edge[0,2] = pf.domain_left_edge[2]
+                self.grid_left_edge[1:,2] = pf.domain_left_edge[2] + \
+                                            np.cumsum(self.grid_dimensions[:-1,2])*dz
+                self.grid_right_edge[:,2] = self.grid_left_edge[:,2]+self.grid_dimensions[:,2]*dz
+                self.grid_left_edge[:,:2] = pf.domain_left_edge[:2]
+                self.grid_right_edge[:,:2] = pf.domain_right_edge[:2]
+                self.grid_dimensions[:,:2] = pf.domain_dimensions[:2]
+            else:
+                bbox = np.array([[le,re] for le, re in zip(pf.domain_left_edge,
+                                                           pf.domain_right_edge)])
+                dims = np.array(pf.domain_dimensions)
+                # If we are creating a dataset of lines, only decompose along the position axes
+                if len(pf.line_database) > 0:
+                    dims[pf.vel_axis] = 1
+                psize = get_psize(dims, self.num_grids)
+                gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
+                self.grid_left_edge = self.pf.arr(gle, "code_length")
+                self.grid_right_edge = self.pf.arr(gre, "code_length")
+                self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
+                # If we are creating a dataset of lines, only decompose along the position axes
+                if len(pf.line_database) > 0:
+                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
+                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
+                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
 
-        if self.pf.events_data:
+        if pf.events_data:
             try:
                 self.grid_particle_count[:] = pf.primary_header["naxis2"]
             except KeyError:
@@ -290,6 +303,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
                  suppress_astropy_warnings = True,
@@ -297,8 +311,11 @@
 
         if parameters is None:
             parameters = {}
+        parameters["nprocs"] = nprocs
         self.specified_parameters = parameters
 
+        self.z_axis_decomp = z_axis_decomp
+
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
@@ -322,11 +339,15 @@
             self.nan_mask = {"all":nan_mask}
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
-        self.nprocs = nprocs
-        self._handle = _astropy.pyfits.open(self.filenames[0],
-                                      memmap=True,
-                                      do_not_scale_image_data=True,
-                                      ignore_blank=True)
+        if isinstance(self.filenames[0], _astropy.pyfits.PrimaryHDU):
+            self._handle = _astropy.pyfits.HDUList(self.filenames[0])
+            fn = "InMemoryFITSImage_%s" % (uuid.uuid4().hex)
+        else:
+            self._handle = _astropy.pyfits.open(self.filenames[0],
+                                                memmap=True,
+                                                do_not_scale_image_data=True,
+                                                ignore_blank=True)
+            fn = self.filenames[0]
         self._fits_files = [self._handle]
         if self.num_files > 1:
             for fits_file in auxiliary_files:
@@ -387,7 +408,7 @@
 
         self.refine_by = 2
 
-        Dataset.__init__(self, filename, dataset_type)
+        Dataset.__init__(self, fn, dataset_type)
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):
@@ -435,8 +456,11 @@
 
     def _parse_parameter_file(self):
 
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        if self.parameter_filename.startswith("InMemory"):
+            self.unique_identifier = time.time()
+        else:
+            self.unique_identifier = \
+                int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
         # Determine dimensionality
 
@@ -472,14 +496,26 @@
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
 
-        # If this is a 2D events file, no need to decompose
-        if self.events_data: self.nprocs = 1
+        if self.dimensionality == 2 and self.z_axis_decomp:
+            mylog.warning("You asked to decompose along the z-axis, but this is a 2D dataset. " +
+                          "Ignoring.")
+            self.z_axis_decomp = False
+
+        if self.events_data: self.specified_parameters["nprocs"] = 1
 
         # If nprocs is None, do some automatic decomposition of the domain
-        if self.nprocs is None:
-            self.nprocs = np.around(np.prod(self.domain_dimensions) /
-                                    32**self.dimensionality).astype("int")
-            self.nprocs = max(min(self.nprocs, 512), 1)
+        if self.specified_parameters["nprocs"] is None:
+            if len(self.line_database) > 0:
+                dims = 2
+            else:
+                dims = self.dimensionality
+            if self.z_axis_decomp:
+                nprocs = np.around(self.domain_dimensions[2]/8).astype("int")
+            else:
+                nprocs = np.around(np.prod(self.domain_dimensions)/32**dims).astype("int")
+            self.parameters["nprocs"] = max(min(nprocs, 512), 1)
+        else:
+            self.parameters["nprocs"] = self.specified_parameters["nprocs"]
 
         self.reversed = False
 

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -88,7 +88,7 @@
             for chunk in chunks:
                 for g in chunk.objs:
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
-                    end = ((g.RightEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
+                    end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
                         my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
                         my_off = my_off - 0.5*self.pf.line_width

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -549,7 +549,7 @@
         lengths = self.indexdata['len'][mask]
         return mask, offsets, lengths
 
-    def get_ibbox(self, ileft, iright):
+    def get_ibbox(self, ileft, iright, wandering_particles=True):
         """
         Given left and right indicies, return a mask and
         set of offsets+lengths into the sdf data.
@@ -593,7 +593,7 @@
         #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
 
         indices = self.get_keyv([X, Y, Z])
-#       # Only mask out if we are actually getting data rather than getting indices into
+        # Only mask out if we are actually getting data rather than getting indices into
         # a space.
         if self.valid_indexdata:
             indices = indices[indices < self.indexdata['index'][-1]]
@@ -697,7 +697,6 @@
             i += 1
         mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
 
-
     def filter_particles(self, myiter, myfilter):
         for data in myiter:
             mask = myfilter(data)

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -54,6 +54,7 @@
     )
 
     known_particle_fields = (
+        ("particle_position", ("code_length", [], None)),
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -96,6 +96,7 @@
 
 class StreamParticleIOHandler(BaseIOHandler):
 
+    _vector_fields = ("particle_position", "particle_velocity")
     _dataset_type = "stream_particles"
 
     def __init__(self, pf):
@@ -116,6 +117,19 @@
                               f[ptype, "particle_position_y"],
                               f[ptype, "particle_position_z"])
             
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        # This is allowed to over-estimate.  We probably *will*, too, because
+        # we're going to count *all* of the particles, not just individual
+        # types.
+        count = 0
+        psize = {}
+        for chunk in chunks:
+            for obj in chunk.objs:
+                count += selector.count_octs(obj.oct_handler, obj.domain_id)
+        for ptype in ptf:
+            psize[ptype] = self.pf.n_ref * count / float(obj.nz)
+        return psize
+
     def _read_particle_fields(self, chunks, ptf, selector):
         data_files = set([])
         for chunk in chunks:
@@ -124,8 +138,13 @@
         for data_file in data_files:
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
-                x, y, z = (f[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz')
+                if (ptype, "particle_position") in f:
+                    x = f[ptype, "particle_position"][:,0]
+                    y = f[ptype, "particle_position"][:,1]
+                    z = f[ptype, "particle_position"][:,2]
+                else:
+                    x, y, z = (f[ptype, "particle_position_%s" % ax]
+                               for ax in 'xyz')
                 mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -660,17 +660,14 @@
     if not os.path.exists(path):
         only_on_root(os.makedirs, path)
     return path
-        
-def assert_valid_width_tuple(width):
-    try:
-        assert iterable(width) and len(width) == 2, \
-            "width (%s) is not a two element tuple" % width
-        valid = isinstance(width[0], numeric_type) and isinstance(width[1], str)
+
+def validate_width_tuple(width):
+    if not iterable(width) or len(width) != 2:
+        raise YTInvalidWidthError("width (%s) is not a two element tuple" % width)
+    if not isinstance(width[0], numeric_type) and isinstance(width[1], basestring):
         msg = "width (%s) is invalid. " % str(width)
         msg += "Valid widths look like this: (12, 'au')"
-        assert valid, msg
-    except AssertionError, e:
-        raise YTInvalidWidthError(e)
+        raise YTInvalidWidthError(msg)
 
 def camelcase_to_underscore(name):
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ b/yt/geometry/ppv_coordinates.py
@@ -25,8 +25,6 @@
 
         self.axis_name = {}
         self.axis_id = {}
-        self.x_axis = {}
-        self.y_axis = {}
 
         for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
                                    ["Image\ x", "Image\ y", pf.vel_name]):
@@ -42,28 +40,6 @@
             self.axis_id[axis] = axis
             self.axis_id[axis_name] = axis
 
-            if axis == 0:
-                self.x_axis[axis] = 1
-                self.x_axis[lower_ax] = 1
-                self.x_axis[axis_name] = 1
-                self.y_axis[axis] = 2
-                self.y_axis[lower_ax] = 2
-                self.y_axis[axis_name] = 2
-            elif axis == 1:
-                self.x_axis[axis] = 2
-                self.x_axis[lower_ax] = 2
-                self.x_axis[axis_name] = 2
-                self.y_axis[axis] = 0
-                self.y_axis[lower_ax] = 0
-                self.y_axis[axis_name] = 0
-            elif axis == 2:
-                self.x_axis[axis] = 0
-                self.x_axis[lower_ax] = 0
-                self.x_axis[axis_name] = 0
-                self.y_axis[axis] = 1
-                self.y_axis[lower_ax] = 1
-                self.y_axis[axis_name] = 1
-
         self.default_unit_label = {}
         self.default_unit_label[pf.lon_axis] = "pixel"
         self.default_unit_label[pf.lat_axis] = "pixel"
@@ -75,3 +51,8 @@
     def convert_from_cylindrical(self, coord):
         raise NotImplementedError
 
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

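For illustration only (not part of the changeset), the new class-level tables can be read as: for a slice along a given axis, which of the remaining axes lands on the plot's x and y. A minimal sketch:

    x_axis = {'x': 1, 'y': 0, 'z': 0, 0: 1, 1: 0, 2: 0}
    y_axis = {'x': 2, 'y': 2, 'z': 1, 0: 2, 1: 2, 2: 1}

    for ax in (0, 1, 2):
        print "slice along axis %i -> image (x, y) = axes (%i, %i)" % \
            (ax, x_axis[ax], y_axis[ax])
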
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -57,3 +57,13 @@
 cdef class OctreeSubsetSelector(SelectorObject):
     cdef SelectorObject base_selector
     cdef public np.int64_t domain_id
+
+cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,
+                                        np.float64_t dw, bint periodic) nogil:
+    cdef np.float64_t rel = x1 - x2
+    if not periodic: return rel
+    if rel > dw * 0.5:
+        rel -= dw
+    elif rel < -dw * 0.5:
+        rel += dw
+    return rel

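For reference, a minimal NumPy sketch (not from the changeset; the function name is just illustrative) of the minimum-image convention that _periodic_dist applies per axis:

    import numpy as np

    def periodic_dist(x1, x2, domain_width, periodic=True):
        # Signed separation x1 - x2, wrapped into [-dw/2, dw/2] when periodic.
        rel = np.asarray(x1, dtype="float64") - np.asarray(x2, dtype="float64")
        if not periodic:
            return rel
        rel = np.where(rel > 0.5 * domain_width, rel - domain_width, rel)
        rel = np.where(rel < -0.5 * domain_width, rel + domain_width, rel)
        return rel

    # periodic_dist(0.95, 0.05, 1.0) -> -0.1 rather than 0.9
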
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -50,6 +50,7 @@
 def _ensure_code(arr):
     if hasattr(arr, "convert_to_units"):
         arr.convert_to_units("code_length")
+        return arr.d
     return arr
 
 @cython.boundscheck(False)
@@ -331,10 +332,10 @@
         # domain_width is already in code units, and we assume what is fed in
         # is too.
         cdef np.float64_t rel = x1 - x2
-        if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+        if self.periodicity[d]:
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -487,11 +488,12 @@
         cdef int i
         cdef np.float64_t pos[3]
         cdef np.ndarray[np.uint8_t, ndim=1] mask 
-        mask = np.zeros(x.shape[0], dtype='uint8')
+        mask = np.empty(x.shape[0], dtype='uint8')
         _ensure_code(x)
         _ensure_code(y)
         _ensure_code(z)
 
+
         # this is to allow selectors to optimize the point vs
         # 0-radius sphere case.  These two may have different 
         # effects for 0-volume selectors, however (collision 
@@ -513,7 +515,7 @@
                     mask[i] = self.select_sphere(pos, radius)
                     count += mask[i]
         if count == 0: return None
-        return mask.astype("bool")
+        return mask.view("bool")
 
     def __hash__(self):
         return hash(self._hash_vals() + self._base_hash())
@@ -534,12 +536,25 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
+    cdef bint check_box[3]
 
     def __init__(self, dobj):
-        for i in range(3):
-            self.center[i] = dobj.center[i]
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        cdef np.float64_t mi = np.finfo("float64").min
+        cdef np.float64_t ma = np.finfo("float64").max
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
+            if self.bbox[i][0] < dobj.pf.domain_left_edge[i]:
+                self.check_box[i] = False
+            elif self.bbox[i][1] > dobj.pf.domain_right_edge[i]:
+                self.check_box[i] = False
+            else:
+                self.check_box[i] = True
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -559,10 +574,15 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
-            dist = self.difference(pos[i], self.center[i], i)
+            if self.check_box[i] and \
+              (pos[i] < self.bbox[i][0] or 
+               pos[i] > self.bbox[i][1]):
+                return 0
+            dist = _periodic_dist(pos[i], self.center[i], self.domain_width[i],
+                                  self.periodicity[i])
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -588,16 +608,22 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
+        for i in range(3):
+            if not self.check_box[i]: continue
+            if right_edge[i] < self.bbox[i][0] or \
+               left_edge[i] > self.bbox[i][1]:
+                return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):
+            # Early terminate
             box_center = (right_edge[i] + left_edge[i])/2.0
             relcenter = self.difference(box_center, self.center[i], i)
             edge = right_edge[i] - left_edge[i]
             closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
             dist += closest*closest
-        if dist <= self.radius2: return 1
-        return 0
+            if dist > self.radius2: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.radius, self.radius2,

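As a rough guide to the new early-exit logic above, here is a pure-Python sketch (illustrative only, ignoring the periodicity that the selector handles through its difference/_periodic_dist helpers) of the sphere/box overlap test from the linked gamedev article:

    def sphere_overlaps_box(center, radius, left_edge, right_edge):
        # Accumulate the squared distance from the sphere center to the
        # closest point of the box; bail out once it exceeds radius**2.
        dist2 = 0.0
        for i in range(3):
            closest = min(max(center[i], left_edge[i]), right_edge[i])
            d = center[i] - closest
            dist2 += d * d
            if dist2 > radius * radius:
                return False
        return True
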
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -14,7 +14,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
+import copy
+import cPickle as pickle
+import itertools
+import numpy as np
+import operator
 import os
+import shutil
+import tempfile
+
 from nose.tools import assert_true
 from numpy.testing import \
     assert_array_equal, \
@@ -28,12 +36,6 @@
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_pf, requires_module
 from yt.funcs import fix_length
-import numpy as np
-import copy
-import operator
-import cPickle as pickle
-import tempfile
-import itertools
 
 
 def operate_and_compare(a, b, op, answer):
@@ -675,3 +677,54 @@
     yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
 
 
+def test_subclass():
+
+    class YTASubclass(YTArray):
+        pass
+
+    a = YTASubclass([4, 5, 6], 'g')
+    b = YTASubclass([7, 8, 9], 'kg')
+    nu = YTASubclass([10, 11, 12], '')
+    nda = np.array([3, 4, 5])
+    yta = YTArray([6, 7, 8], 'mg')
+    ytq = YTQuantity(4, 'cm')
+    ndf = np.float64(3)
+
+    def op_comparison(op, inst1, inst2, compare_class):
+        assert_isinstance(op(inst1, inst2), compare_class)
+        assert_isinstance(op(inst2, inst1), compare_class)
+
+    for op in (operator.mul, operator.div, operator.truediv):
+        for inst in (b, ytq, ndf, yta, nda):
+            yield op_comparison, op, a, inst, YTASubclass
+
+        yield op_comparison, op, ytq, nda, YTArray
+        yield op_comparison, op, ytq, yta, YTArray
+
+    for op in (operator.add, operator.sub):
+        yield op_comparison, op, nu, nda, YTASubclass
+        yield op_comparison, op, a, b, YTASubclass
+        yield op_comparison, op, a, yta, YTASubclass
+
+    yield assert_isinstance, a[0], YTQuantity
+    yield assert_isinstance, a[:], YTASubclass
+    yield assert_isinstance, a[:2], YTASubclass
+
+def test_h5_io():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = fake_random_pf(64, nprocs=1, length_unit=10)
+
+    warr = ds.arr(np.random.random((256, 256)), 'code_length')
+
+    warr.write_hdf5('test.h5')
+
+    iarr = YTArray.from_hdf5('test.h5')
+
+    yield assert_equal, warr, iarr
+    yield assert_equal, warr.units.registry['code_length'], iarr.units.registry['code_length']
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -74,7 +74,8 @@
         if ret.shape == ():
             return YTQuantity(ret, units)
         else:
-            return YTArray(ret, units)
+            # This could be a subclass, so don't call YTArray directly.
+            return type(args[0])(ret, units)
     return wrapped
 
 def sqrt_unit(unit):
@@ -251,7 +252,7 @@
                     "Perhaps you meant to do something like this instead: \n"
                     "ds.arr(%s, \"%s\")" % (input_array, input_units)
                     )
-        if _astropy.units is not None:
+        if _astropy._units is not None:
             if isinstance(input_array, _astropy.units.quantity.Quantity):
                 return cls.from_astropy(input_array)
         if isinstance(input_array, YTArray):
@@ -464,6 +465,92 @@
     # End unit conversion methods
     #
 
+    def write_hdf5(self, filename, dataset_name=None, info=None):
+        r"""Writes ImageArray to hdf5 file.
+
+        Parameters
+        ----------
+        filename: string
+            The filename to create and write a dataset to
+
+        dataset_name: string
+            The name of the dataset to create in the file.
+
+        info: dictionary
+            A dictionary of supplementary info to append as attributes
+            to the dataset.
+
+        Examples
+        --------
+        >>> a = YTArray([1,2,3], 'cm')
+
+        >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}
+
+        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
+        ...              info=myinfo)
+
+        """
+        import h5py
+        from yt.extern.six.moves import cPickle as pickle
+        if info is None:
+            info = {}
+
+        info['units'] = str(self.units)
+        info['unit_registry'] = pickle.dumps(self.units.registry.lut)
+
+        if dataset_name is None:
+            dataset_name = 'array_data'
+
+        f = h5py.File(filename)
+        if dataset_name in f.keys():
+            d = f[dataset_name]
+            # Overwrite without deleting if we can get away with it.
+            if d.shape == self.shape and d.dtype == self.dtype:
+                d[:] = self
+                for k in d.attrs.keys():
+                    del d.attrs[k]
+            else:
+                del f[dataset_name]
+                d = f.create_dataset(dataset_name, data=self)
+        else:
+            d = f.create_dataset(dataset_name, data=self)
+
+        for k, v in info.iteritems():
+            d.attrs.create(k, v)
+        f.close()
+
+    @classmethod
+    def from_hdf5(cls, filename, dataset_name=None):
+        r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray.
+
+        Parameters
+        ----------
+        filename: string
+            The filename of the HDF5 file.
+
+        dataset_name: string
+            The name of the dataset to read from.  If the dataset has a units
+            attribute, attempt to infer units as well.
+
+        """
+        import h5py
+        from yt.extern.six.moves import cPickle as pickle
+
+        if dataset_name is None:
+            dataset_name = 'array_data'
+
+        f = h5py.File(filename)
+        dataset = f[dataset_name]
+        data = dataset[:]
+        units = dataset.attrs.get('units', '')
+        if 'unit_registry' in dataset.attrs.keys():
+            unit_lut = pickle.loads(dataset.attrs['unit_registry'])
+        else:
+            unit_lut = None
+
+        registry = UnitRegistry(lut=unit_lut, add_default_symbols=False)
+        return cls(data, units, registry=registry)
+
     #
     # Start convenience methods
     #
@@ -766,7 +853,7 @@
 
     @return_arr
     def prod(self, axis=None, dtype=None, out=None):
-        if axis:
+        if axis is not None:
             units = self.units**self.shape[axis]
         else:
             units = self.units**self.size
@@ -814,9 +901,13 @@
             # Raise YTUnitOperationError up here since we know the context now
             except RuntimeError:
                 raise YTUnitOperationError(context[0], u)
+            ret_class = type(self)
         elif context[0] in binary_operators:
             unit1 = getattr(context[1][0], 'units', None)
             unit2 = getattr(context[1][1], 'units', None)
+            cls1 = type(context[1][0])
+            cls2 = type(context[1][1])
+            ret_class = get_binary_op_return_class(cls1, cls2)
             if unit1 is None:
                 unit1 = Unit(registry=getattr(unit2, 'registry', None))
             if unit2 is None and context[0] is not power:
@@ -849,10 +940,15 @@
             out_arr = np.array(out_arr)
             return out_arr
         out_arr.units = unit
-        if out_arr.size > 1:
-            return YTArray(np.array(out_arr), unit)
+        if out_arr.size == 1:
+            return YTQuantity(np.array(out_arr), unit)
         else:
-            return YTQuantity(np.array(out_arr), unit)
+            if ret_class is YTQuantity:
+                # This happens if you do ndarray * YTQuantity. Explicitly
+                # casting to YTArray avoids creating a YTQuantity with size > 1
+                return YTArray(np.array(out_arr), unit)
+            return ret_class(np.array(out_arr), unit)
+
 
     def __reduce__(self):
         """Pickle reduction method
@@ -929,3 +1025,22 @@
         return data.pf.arr(x, units)
     else:
         return data.pf.quan(x, units)
+
+def get_binary_op_return_class(cls1, cls2):
+    if cls1 is cls2:
+        return cls1
+    if cls1 is np.ndarray or issubclass(cls1, numeric_type):
+        return cls2
+    if cls2 is np.ndarray or issubclass(cls2, numeric_type):
+        return cls1
+    if issubclass(cls1, YTQuantity):
+        return cls2
+    if issubclass(cls2, YTQuantity):
+        return cls1
+    if issubclass(cls1, cls2):
+        return cls1
+    if issubclass(cls2, cls1):
+        return cls2
+    else:
+        raise RuntimeError("Operations are only defined on pairs of objects"
+                           "in which one is a subclass of the other")

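A small usage sketch (not from the changeset) of how the new get_binary_op_return_class resolution plays out; MyArray here is a hypothetical subclass, analogous to YTASubclass in the tests above:

    import numpy as np
    from yt.units.yt_array import YTArray, YTQuantity

    class MyArray(YTArray):   # hypothetical subclass for illustration
        pass

    a = MyArray([1.0, 2.0, 3.0], 'g')
    q = YTQuantity(2.0, 'cm')

    print type(a * q)           # MyArray: the subclass wins over YTQuantity
    print type(np.ones(3) * q)  # YTArray: results with size > 1 are never YTQuantity
    print type(a[0])            # YTQuantity: scalar elements come back as quantities
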
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -116,12 +116,17 @@
     def _read_chunk_data(self, chunk, fields):
         return {}
 
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
+        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+            psize[ptype] += selector.count_points(x, y, z, 0.0)
+        return dict(psize.items())
+
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
         ind = {}
         # We first need a set of masks for each particle type
         ptf = defaultdict(list)        # ON-DISK TO READ
-        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
         fsize = defaultdict(lambda: 0) # COUNT RV
         field_maps = defaultdict(list) # ptypes -> fields
         chunks = list(chunks)
@@ -139,17 +144,10 @@
                 ptf[ftype].append(fname)
                 field_maps[field].append(field)
         # We can't hash chunks, but otherwise this is a neat idea.
-        if 0 and hash(selector) == self._last_selector_id and \
-           all(ptype in self._last_selector_counts for ptype in ptf):
-            psize.update(self._last_selector_counts)
-        else:
-            # Now we have our full listing.
-            # Here, ptype_map means which particles contribute to a given type.
-            # And ptf is the actual fields from disk to read.
-            for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
-                psize[ptype] += selector.count_points(x, y, z, 0.0)
-            self._last_selector_counts = dict(**psize)
-            self._last_selector_id = hash(selector)
+        # Now we have our full listing.
+        # Here, ptype_map means which particles contribute to a given type.
+        # And ptf is the actual fields from disk to read.
+        psize = self._count_particles_chunks(chunks, ptf, selector)
         # Now we allocate
         # ptf, remember, is our mapping of what we want to read
         #for ptype in ptf:
@@ -175,6 +173,10 @@
                 #    field_f, my_ind, my_ind+vals.shape[0], field_r)
                 rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
                 ind[field_f] += vals.shape[0]
+        # Now we need to truncate all our fields, since we allow for
+        # over-estimating.
+        for field_f in ind:
+            rv[field_f] = rv[field_f][:ind[field_f]]
         return rv
 
 class IOHandlerExtracted(BaseIOHandler):

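The reading strategy above now tolerates over-estimates from _count_particles_chunks: result arrays are allocated at the estimated size, filled as values arrive, and trimmed afterwards. A minimal sketch of that pattern (illustrative only; the names are made up):

    import numpy as np

    def read_with_overestimate(estimated_size, value_chunks):
        out = np.empty(estimated_size, dtype="float64")
        filled = 0
        for vals in value_chunks:          # e.g. one array per chunk read from disk
            out[filled:filled + vals.size] = vals
            filled += vals.size
        return out[:filled]                # truncate the over-estimate
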
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -38,7 +38,7 @@
     node.contour_id = contour_id
     node.next = node.parent = NULL
     node.prev = prev
-    node.count = 0
+    node.count = 1
     if prev != NULL: prev.next = node
     return node
 
@@ -59,17 +59,36 @@
     # root.
     while node.parent != NULL:
         temp = node.parent
+        root.count += node.count
+        node.count = 0
         node.parent = root
         node = temp
     return root
 
 cdef inline void contour_union(ContourID *node1, ContourID *node2):
+    if node1 == node2:
+        return
     node1 = contour_find(node1)
     node2 = contour_find(node2)
-    if node1.contour_id < node2.contour_id:
-        node2.parent = node1
-    elif node2.contour_id < node1.contour_id:
-        node1.parent = node2
+    if node1 == node2:
+        return
+    cdef ContourID *pri, *sec
+    if node1.count > node2.count:
+        pri = node1
+        sec = node2
+    elif node2.count > node1.count:
+        pri = node2
+        sec = node1
+    # might be a tie
+    elif node1.contour_id < node2.contour_id:
+        pri = node1
+        sec = node2
+    else:
+        pri = node2
+        sec = node1
+    pri.count += sec.count
+    sec.count = 0
+    sec.parent = pri
 
 cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
@@ -617,6 +636,12 @@
                         contour_ids[ci,cj,ck] = j + 1
                         break
 
+cdef class FOFNode:
+    cdef np.int64_t tag, count
+    def __init__(self, np.int64_t tag):
+        self.tag = tag
+        self.count = 0
+
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
     cdef np.float64_t DW[3], DLE[3], DRE[3]
@@ -739,24 +764,16 @@
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
         contour_ids = np.ones(positions.shape[0], dtype="int64")
         contour_ids *= -1
-        # Sort on our particle IDs.
-        for i in range(doff.shape[0]):
-            if doff[i] < 0: continue
-            for j in range(pcount[i]):
-                offset = pind[doff[i] + j]
-                c1 = container[offset]
-                c0 = contour_find(c1)
-                contour_ids[offset] = c0.contour_id
-                c0.count += 1
-        for i in range(doff.shape[0]):
-            if doff[i] < 0: continue
-            for j in range(pcount[i]):
-                offset = pind[doff[i] + j]
-                c1 = container[offset]
-                if c1 == NULL: continue
-                c0 = contour_find(c1)
-                if c0.count < self.minimum_count:
-                    contour_ids[offset] = -1
+        # Perform one last contour_find on each.  Note that we no longer need
+        # to look at any of the doff or internal offset stuff.
+        for i in range(positions.shape[0]):
+            if container[i] == NULL: continue
+            container[i] = contour_find(container[i])
+        for i in range(positions.shape[0]):
+            if container[i] == NULL: continue
+            c0 = container[i]
+            if c0.count < self.minimum_count: continue
+            contour_ids[i] = particle_ids[pind[c0.contour_id]]
         free(container)
         del pind
         return contour_ids
@@ -810,6 +827,7 @@
                                 self.linking_length2, edges)
             if link == 0: continue
             if c1 == NULL:
+                c0.count += 1
                 container[pind1] = c0
             elif c0.contour_id != c1.contour_id:
                 contour_union(c0, c1)

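The rewritten contour_union now unions by size (the larger contour absorbs the smaller, with ties broken by the lower contour_id), and contour_find folds counts into the root. A pure-Python sketch of the same rule, for reference only:

    class Node(object):
        def __init__(self, contour_id):
            self.contour_id = contour_id
            self.parent = None
            self.count = 1              # new contours start with count = 1

    def find(node):
        while node.parent is not None:  # the Cython version also folds counts
            node = node.parent          # into the root as it walks up
        return node

    def union(a, b):
        a, b = find(a), find(b)
        if a is b:
            return
        if a.count > b.count:
            pri, sec = a, b
        elif b.count > a.count:
            pri, sec = b, a
        elif a.contour_id < b.contour_id:   # tie: keep the lower contour_id
            pri, sec = a, b
        else:
            pri, sec = b, a
        pri.count += sec.count
        sec.count = 0
        sec.parent = pri
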
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/utilities/lib/ragged_arrays.pyx
--- /dev/null
+++ b/yt/utilities/lib/ragged_arrays.pyx
@@ -0,0 +1,97 @@
+"""
+Some simple operations for operating on ragged arrays
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef fused numpy_dt:
+    np.float32_t
+    np.float64_t
+    np.int32_t
+    np.int64_t
+
+cdef numpy_dt r_min(numpy_dt a, numpy_dt b):
+    if a < b: return a
+    return b
+
+cdef numpy_dt r_max(numpy_dt a, numpy_dt b):
+    if a > b: return a
+    return b
+
+cdef numpy_dt r_add(numpy_dt a, numpy_dt b):
+    return a + b
+
+cdef numpy_dt r_subtract(numpy_dt a, numpy_dt b):
+    return a - b
+
+cdef numpy_dt r_multiply(numpy_dt a, numpy_dt b):
+    return a * b
+
+@cython.cdivision(True)
+cdef numpy_dt r_divide(numpy_dt a, numpy_dt b):
+    return a / b
+
+def index_unop(np.ndarray[numpy_dt, ndim=1] values,
+              np.ndarray[np.int64_t, ndim=1] indices,
+              np.ndarray[np.int64_t, ndim=1] sizes,
+              operation):
+    cdef numpy_dt mi, ma
+    if numpy_dt == np.float32_t:
+        dt = "float32"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.float64_t:
+        dt = "float64"
+        mi = np.finfo(dt).min
+        ma = np.finfo(dt).max
+    elif numpy_dt == np.int32_t:
+        dt = "int32"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    elif numpy_dt == np.int64_t:
+        dt = "int64"
+        mi = np.iinfo(dt).min
+        ma = np.iinfo(dt).max
+    cdef np.ndarray[numpy_dt] out_values = np.zeros(sizes.size, dtype=dt)
+    cdef numpy_dt (*func)(numpy_dt a, numpy_dt b)
+    # Now we figure out our function.  At present, we only allow addition and
+    # multiplication, because they are commutative and easy to bootstrap.
+    cdef numpy_dt ival, val
+    if operation == "sum":
+        ival = 0
+        func = r_add
+    elif operation == "prod":
+        ival = 1
+        func = r_multiply
+    elif operation == "max":
+        ival = mi
+        func = r_max
+    elif operation == "min":
+        ival = ma
+        func = r_min
+    else:
+        raise NotImplementedError
+    cdef np.int64_t i, j, ind_ind, ind_arr
+    ind_ind = 0
+    for i in range(sizes.size):
+        # Each entry in sizes is the size of the array
+        val = ival
+        for j in range(sizes[i]):
+            ind_arr = indices[ind_ind]
+            val = func(val, values[ind_arr])
+            ind_ind += 1
+        out_values[i] = val
+    return out_values

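For what index_unop computes, here is a pure-NumPy reference loop (illustrative only, and much slower than the Cython version): each entry of sizes selects the next block of indices, and the chosen reduction is applied to the values gathered through those indices.

    import numpy as np

    def index_unop_reference(values, indices, sizes, op):
        # op is a NumPy reduction such as np.sum, np.prod, np.max or np.min,
        # matching the "sum"/"prod"/"max"/"min" strings accepted above.
        out = np.empty(sizes.size, dtype=values.dtype)
        start = 0
        for i, n in enumerate(sizes):
            out[i] = op(values[indices[start:start + n]])
            start += n
        return out
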
diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -139,6 +139,8 @@
           )
     config.add_extension("write_array",
                          ["yt/utilities/lib/write_array.pyx"])
+    config.add_extension("ragged_arrays",
+                         ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/utilities/lib/tests/test_ragged_arrays.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_ragged_arrays.py
@@ -0,0 +1,36 @@
+from yt.testing import *
+import numpy as np
+from yt.utilities.lib.ragged_arrays import index_unop
+
+operations = ((np.sum, "sum"),
+              (np.prod, "prod"),
+              (np.max, "max"),
+              (np.min, "min"))
+dtypes = ((-1e8, 1e8, "float32"),
+          (-1e8, 1e8, "float64"),
+          (-10000, 10000, "int32"),
+          (-100000000, 100000000, "int64"))
+
+def test_index_unop():
+    np.random.seed(0x4d3d3d3)
+    indices = np.arange(1000)
+    np.random.shuffle(indices)
+    sizes = np.array([
+        200, 50, 50, 100, 32, 32, 32, 32, 32, 64, 376], dtype="int64")
+    for mi, ma, dtype in dtypes:
+        for op, operation in operations:
+            # Create a random set of values
+            values = np.random.random(1000)
+            if operation != "prod":
+                values = values * ma + (ma - mi)
+            if operation == "prod" and dtype.startswith("int"):
+                values = values.astype(dtype)
+                values[values != 0] = 1
+                values[values == 0] = -1
+            values = values.astype(dtype)
+            out_values = index_unop(values, indices, sizes, operation)
+            i = 0
+            for j, v in enumerate(sizes):
+                arr = values[indices[i:i+v]]
+                yield assert_equal, op(arr), out_values[j]
+                i += v

diff -r 3499697af5933ca592074b251f3ef028343dad12 -r 9a0248df765f2b10998fc54bfa2e970cd37558ad yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -1,3 +1,17 @@
+"""
+A base class for "image" plots with colorbars.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 import __builtin__
 import base64
 import numpy as np

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/31ddfe299354/
Changeset:   31ddfe299354
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-22 18:37:54
Summary:     Bugfix in get_keyv, wasn't using spread_bitsv.
Affected #:  2 files

diff -r 9a0248df765f2b10998fc54bfa2e970cd37558ad -r 31ddfe2993541dd72293427ed77d803981274d06 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -491,8 +491,8 @@
             level = self.level
         i1, i2, i3 = iarr
         return np.bitwise_or(
-            np.bitwise_or(self.spread_bits(i1, level) , self.spread_bits(i2, level) << 1 ),
-            self.spread_bits(i3, level) << 2)
+            np.bitwise_or(self.spread_bitsv(i1, level) , self.spread_bitsv(i2, level) << 1 ),
+            self.spread_bitsv(i3, level) << 2)
 
     def get_key_slow(self, iarr, level=None):
         if level is None:

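For context on this fix: get_keyv builds a Morton-style key by spreading the bits of each index so the three coordinates can be interleaved. A slow, illustrative sketch of that bit spreading (not the vectorized spread_bitsv itself):

    import numpy as np

    def spread_bits(i, level=10):
        # Move bit k of i to position 3*k, leaving room for two more coordinates.
        i = np.asarray(i, dtype=np.int64)
        out = np.zeros_like(i)
        for k in range(level):
            out |= ((i >> k) & 1) << (3 * k)
        return out

    def get_key(i1, i2, i3, level=10):
        return spread_bits(i1, level) | (spread_bits(i2, level) << 1) | \
               (spread_bits(i3, level) << 2)

    # get_key(1, 0, 0) == 1, get_key(0, 1, 0) == 2, get_key(1, 1, 1) == 7
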
diff -r 9a0248df765f2b10998fc54bfa2e970cd37558ad -r 31ddfe2993541dd72293427ed77d803981274d06 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -643,6 +643,7 @@
         _ensure_code(dobj.right_edge)
         _ensure_code(dobj.left_edge)
         DW = _ensure_code(dobj.pf.domain_width.copy())
+        DW = DW.view(np.ndarray)
 
         for i in range(3):
             region_width = dobj.right_edge[i] - dobj.left_edge[i]
@@ -671,7 +672,7 @@
             self.left_edge[i] = dobj.left_edge[i]
             self.right_edge[i] = dobj.right_edge[i]
             self.right_edge_shift[i] = \
-                (dobj.right_edge).to_ndarray()[i] - domain_width.to_ndarray()
+                (dobj.right_edge).to_ndarray()[i] - domain_width
 
     @cython.boundscheck(False)
     @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/20bdd5b4b451/
Changeset:   20bdd5b4b451
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-22 22:43:59
Summary:     Merging from yt tip
Affected #:  32 files

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -36,20 +36,20 @@
 .. code:: python
 
     from yt.mods import *
-    from yt.analysis_modules.api import *
+    from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
-    pf = load("MHDSloshing/virgo_low_res.0054.vtk", 
-              parameters={"TimeUnits":3.1557e13,
-                          "LengthUnits":3.0856e24,
-                          "DensityUnits":6.770424595218825e-27})
+    pf = load("MHDSloshing/virgo_low_res.0054.vtk",
+              parameters={"time_unit":(1.0,"Myr"),
+                          "length_unit":(1.0,"Mpc"),
+                          "mass_unit":(1.0e14,"Msun")}) 
 
 First, to get a sense of what the resulting image will look like, let's
-make a new ``yt`` field called ``"DensitySquared"``, since the X-ray
+make a new ``yt`` field called ``"density_squared"``, since the X-ray
 emission is proportional to :math:`\rho^2`, and a weak function of
 temperature and metallicity.
 
@@ -57,14 +57,14 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("DensitySquared", function=_density_squared)
+    add_field("density_squared", function=_density_squared)
 
 Then we'll project this field along the z-axis.
 
 .. code:: python
 
-    prj = ProjectionPlot(pf, "z", ["DensitySquared"], width=(500., "kpc"))
-    prj.set_cmap("DensitySquared", "gray_r")
+    prj = ProjectionPlot(ds, "z", ["density_squared"], width=(500., "kpc"))
+    prj.set_cmap("density_squared", "gray_r")
     prj.show()
 
 .. image:: _images/dsquared.png
@@ -89,7 +89,7 @@
 
 .. code:: python
 
-    sp = pf.sphere("c", (250., "kpc"))
+    sp = ds.sphere("c", (250., "kpc"))
 
 This will serve as our ``data_source`` that we will use later. Next, we
 need to create the ``SpectralModel`` instance that will determine how
@@ -258,11 +258,6 @@
     events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
                                      sky_center=(187.5,12.333), responses=[ARF,RMF])
 
-.. parsed-literal::
-
-    WARNING:yt:This routine has not been tested to work with all RMFs. YMMV.
-
-
 Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
 deviation to scatter the photon sky positions around with, providing a
 crude representation of a PSF.
@@ -282,17 +277,17 @@
 
 .. code:: python
 
-    {'eobs': array([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
-             9.73534237,  10.21614256]), 
-     'xsky': array([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
-            187.49897546,  187.47307048]), 
-     'ysky': array([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
-            12.33327653,  12.32955225]), 
-     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]), 
+    {'eobs': YTArray([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
+             9.73534237,  10.21614256]) keV, 
+     'xsky': YTArray([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
+            187.49897546,  187.47307048]) degree, 
+     'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
+            12.33327653,  12.32955225]) degree, 
+     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]) (dimensionless), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633])}
+     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633]) (dimensionless)}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -436,7 +431,7 @@
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
-   pf = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
+   ds = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
 
 where for simplicity we have set the velocities to zero, though we
 could have created a realistic velocity field as well. Now, we
@@ -445,7 +440,7 @@
 
 .. code:: python
 
-   sphere = pf.sphere(pf.domain_center, 1.0/pf["mpc"])
+   sphere = ds.sphere(pf.domain_center, (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -236,7 +236,7 @@
 -------------------------------
 
 Data objects can be cut by their field values using the ``cut_region`` 
-method.  For example, this could be used to compute the total mass within 
+method.  For example, this could be used to compute the total gas mass within
 a certain temperature range, as in the following example.
 
 .. notebook-cell::
@@ -244,11 +244,11 @@
    from yt.mods import *
    ds = load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()
-   total_mass = ad.quantities.total_mass()
+   total_mass = ad.quantities.total_quantity('cell_mass')
    # now select only gas with 1e5 K < T < 1e7 K.
    new_region = ad.cut_region(['obj["temperature"] > 1e5',
                                'obj["temperature"] < 1e7'])
-   cut_mass = new_region.quantities.total_mass()
+   cut_mass = new_region.quantities.total_quantity('cell_mass')
    print "The fraction of mass in this temperature range is %f." % \
      (cut_mass / total_mass)
 

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
+  "signature": "sha256:40add63976fd633e0542cf7674b166507985aa14685be6b4e4e53bd9a23befc2"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -23,7 +23,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-velocity\" cubes of radio data. "
+      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-spectrum\" cubes of radio data. "
      ]
     },
     {
@@ -82,7 +82,7 @@
      "input": [
       "from yt.frontends.fits.misc import PlotWindowWCS\n",
       "wcs_slc = PlotWindowWCS(slc)\n",
-      "wcs_slc.show()"
+      "wcs_slc[\"intensity\"]"
      ],
      "language": "python",
      "metadata": {},
@@ -109,14 +109,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. To pick specific velocity values for slices, we will need to use the dataset's `spec2pixel` method to determine which pixels to slice on:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print ds.domain_left_edge[2], ds.domain_center[2], ds.domain_right_edge[2]"
+      "import yt.units as u\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-250000.*u.m/u.s)"
      ],
      "language": "python",
      "metadata": {},
@@ -126,15 +128,32 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Now, we'll choose a few values for the velocity within this range:"
+      "Now we can use this new center to create a new slice:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -250000.\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do this a few more times for different values of the velocity:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center[2] = ds.spec2pixel(-100000.*u.m/u.s)\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -146,21 +165,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -100000.\n",
-      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -150000.\n",
+      "new_center[2] = ds.spec2pixel(-150000.*u.m/u.s)\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -179,14 +184,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also make a projection of all the emission along the line of sight:"
+      "We can also make a projection of all the emission along the line of sight. Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], proj_style=\"sum\", origin=\"native\")\n",
       "prj.show()"
      ],
      "language": "python",
@@ -197,13 +202,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },
@@ -211,8 +209,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", window_size=(8,8))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -223,8 +220,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", window_size=(8,8))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -235,7 +231,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "In these cases, we needed to set `aspect=\"auto\"` and explicitly declare a square `window_size` to get a figure that looks good. "
+      "In these cases, we needed to explicitly declare a square `window_size` to get a figure that looks good. "
      ]
     },
     {

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -111,6 +111,8 @@
 out with them.  In :ref:`code-style-guide` there is a list of handy tips for
 how to structure and write your code.
 
+.. _mercurial-with-yt:
+
 How to Use Mercurial with yt
 ++++++++++++++++++++++++++++
 
@@ -135,6 +137,8 @@
   * If you run into any troubles, stop by IRC (see :ref:`irc`) or the mailing
     list.
 
+.. _building-yt:
+
 Building yt
 +++++++++++
 
@@ -148,19 +152,31 @@
 
 .. code-block:: bash
 
-   python2.7 setup.py develop
+  $ python2.7 setup.py develop
 
 If you have previously "installed" via ``setup.py install`` you have to
 re-install:
 
 .. code-block:: bash
 
-   python2.7 setup.py install
+  $ python2.7 setup.py install
 
-Only one of these two options is needed.  yt may require you to specify the
-location to libpng and hdf5.  This can be done through files named ``png.cfg``
-and ``hdf5.cfg``.  If you are using the installation script, these will already
-exist.
+Only one of these two options is needed.
+
+If you plan to develop yt on Windows, we recommend using the `MinGW <http://www.mingw.org/>`_ gcc
+compiler that can be installed using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the syntax for the
+setup command is slightly different; you must type:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 develop
+
+or
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 install
 
 Making and Sharing Changes
 ++++++++++++++++++++++++++

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -676,8 +676,14 @@
 Additional Options
 ++++++++++++++++++
 
+The following are additional options that may be passed to the ``load`` command when analyzing
+FITS data:
+
+``nan_mask``
+~~~~~~~~~~~~
+
 FITS image data may include ``NaNs``. If you wish to mask this data out,
-you may supply a ``nan_mask`` parameter to ``load``, which may either be a
+you may supply a ``nan_mask`` parameter, which may either be a
 single floating-point number (applies to all fields) or a Python dictionary
 containing different mask values for different fields:
 
@@ -689,9 +695,27 @@
    # passing a dict
    ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
 
+``suppress_astropy_warnings``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 Generally, AstroPy may generate a lot of warnings about individual FITS
 files, many of which you may want to ignore. If you want to see these
-warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
+warnings, set ``suppress_astropy_warnings = False``.
+
+``z_axis_decomp``
+~~~~~~~~~~~~~~~~~
+
+For some applications, decomposing 3D FITS data into grids that span the x-y plane with short
+strides along the z-axis may result in a significant improvement in I/O speed. To enable this feature, set ``z_axis_decomp=True``.
+
+``spectral_factor``
+~~~~~~~~~~~~~~~~~~~
+
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
+scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
+or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
+axis, set ``spectral_factor`` equal to a constant which gives the desired scaling,
+or set it to ``"auto"`` to make the width the same as the largest axis in the sky plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -703,7 +727,6 @@
 
   from yt.frontends.fits.misc import setup_counts_fields, PlotWindowWCS, ds9_region
 
-
 ``setup_counts_fields``
 ~~~~~~~~~~~~~~~~~~~~~~~
 

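Putting the loader options documented above together, a call might look like the following (illustrative; m33_hi.fits is the same sample file used in the nan_mask example):

    from yt.mods import *

    ds = load("m33_hi.fits", nan_mask=0.0, z_axis_decomp=True,
              spectral_factor="auto")
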
diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -14,8 +14,7 @@
 be time-consuming, yt provides an installation script which downloads and builds
 a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
-other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
-supported.
+other Unix-like systems (XSEDE resources, clusters, etc.).
 
 Since the install is fully-isolated, if you get tired of having yt on your 
 system, you can just delete its directory, and yt and all of its dependencies
@@ -83,14 +82,73 @@
 will also need to set ``LD_LIBRARY_PATH`` and ``PYTHONPATH`` to contain 
 ``$YT_DEST/lib`` and ``$YT_DEST/python2.7/site-packages``, respectively.
 
+.. _testing-installation:
+
+Testing Your Installation
+-------------------------
+
+To test to make sure everything is installed properly, try running yt at
+the command line:
+
+.. code-block:: bash
+
+  $ yt --help
+
+If this works, you should get a list of the various command-line options for
+yt, which means you have successfully installed yt.  Congratulations!
+
+If you get an error, follow the instructions it gives you to debug the problem.
+Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you
+figure it out.
+
+If you like, this might be a good time :ref:`to run the test suite <testing>`.
+
+.. _updating-yt:
+
+Updating yt and its dependencies
+--------------------------------
+
+With many active developers, code development sometimes occurs at a furious
+pace in yt.  To make sure you're using the latest version of the code, run
+this command at a command-line:
+
+.. code-block:: bash
+
+  $ yt update
+
+Additionally, if you want to make sure you have the latest dependencies
+associated with yt and update the codebase simultaneously, type this:
+
+.. code-block:: bash
+
+  $ yt update --all
+
+.. _removing-yt:
+
+Removing yt and its dependencies
+--------------------------------
+
+Because yt and its dependencies are installed in an isolated directory when
+you use the script installer, you can easily remove yt and all of its
+dependencies cleanly.  Simply remove the install directory and its
+subdirectories and you're done.  If you *really* had problems with the
+code, this is a last defense for solving: remove and then fully
+:ref:`re-install <installing-yt>` from the install script again.
+
+.. _alternative-installation:
+
 Alternative Installation Methods
 --------------------------------
 
+.. _pip-installation:
+
+Installing yt Using pip or from Source
+++++++++++++++++++++++++++++++++++++++
+
 If you want to forego the use of the install script, you need to make sure you
 have yt's dependencies installed on your system.  These include: a C compiler,
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
-install yt as:
+``HDF5``, ``python``, ``cython``, ``NumPy``, ``matplotlib``, and ``h5py``. From here,
+you can use ``pip`` (which comes with ``Python``) to install yt as:
 
 .. code-block:: bash
 
@@ -110,67 +168,46 @@
 It will install yt into ``$HOME/.local/lib64/python2.7/site-packages``. 
 Please refer to ``setuptools`` documentation for the additional options.
 
-Provided that the required dependencies are in a predictable location, yt should
-be able to find them automatically. However, you can manually specify prefix used
-for installation of ``HDF5``, ``Freetype`` and ``libpng`` by using ``hdf5.cfg``,
-``freetype.cfg``, ``png.cfg`` or setting ``HDF5_DIR``, ``FTYPE_DIR``, ``PNG_DIR``
-environmental variables respectively, e.g.
+If you choose this installation method, you do not need to run the activation
+script as it is unnecessary.
+
+.. _anaconda-installation:
+
+Installing yt Using Anaconda
+++++++++++++++++++++++++++++
+
+Perhaps the quickest way to get yt up and running is to install it using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_, which will provide you with an
+easy-to-use environment for installing Python packages. To install a bare-bones Python
+installation with yt, first visit http://repo.continuum.io/miniconda/ and download a recent
+version of the ``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
+system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  $ echo '/usr/local' > hdf5.cfg
-  $ export FTYPE_DIR=/opt/freetype
+  $ bash Miniconda-3.3.0-Linux-x86_64.sh
 
-If you choose this installation method, you do not need to run the activation
-script as it is unnecessary.
-
-.. _testing-installation:
-
-Testing Your Installation
--------------------------
-
-To test to make sure everything is installed properly, try running yt at 
-the command line:
+Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  $ yt --help
+  $ conda install yt
 
-If this works, you should get a list of the various command-line options for
-yt, which means you have successfully installed yt.  Congratulations!  
+which will install yt along with all of its dependencies.
 
-If you get an error, follow the instructions it gives you to debug the problem.  
-Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you 
-figure it out.
+.. _windows-installation:
 
-.. _updating-yt:
+Installing yt on Windows
+++++++++++++++++++++++++
 
-Updating yt and its dependencies
---------------------------------
+Installation on Microsoft Windows is only supported for Windows XP Service Pack 3 and
+higher (both 32-bit and 64-bit) using Anaconda.
 
-With many active developers, code development sometimes occurs at a furious 
-pace in yt.  To make sure you're using the latest version of the code, run
-this command at a command-line:
+Keeping yt Updated via Mercurial
+++++++++++++++++++++++++++++++++
 
-.. code-block:: bash
+If you want to maintain your yt installation via updates straight from the Bitbucket repository,
+or if you want to do some development on your own, we suggest you check out some of the
+:ref:`development docs <contributing-code>`, especially the sections on :ref:`Mercurial
+<mercurial-with-yt>` and :ref:`building yt from source <building-yt>`.
 
-  $ yt update
-
-Additionally, if you want to make sure you have the latest dependencies 
-associated with yt and update the codebase simultaneously, type this:
-
-.. code-block:: bash
-
-  $ yt update --all
-
-.. _removing-yt:
-
-Removing yt and its dependencies
---------------------------------
-
-Because yt and its dependencies are installed in an isolated directory when
-you use the script installer, you can easily remove yt and all of its 
-dependencies cleanly.  Simply remove the install directory and its 
-subdirectories and you're done.  If you *really* had problems with the
-code, this is a last defense for solving: remove and then fully
-:ref:`re-install <installing-yt>` from the install script again.

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -24,7 +24,10 @@
     YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
 from yt.data_objects.derived_quantities import \
     DerivedQuantityCollection
-from yt.utilities.exceptions import YTSphereTooSmall
+from yt.utilities.exceptions import \
+    YTSphereTooSmall, \
+    YTIllDefinedCutRegion, \
+    YTMixedCutRegion
 from yt.utilities.linear_interpolators import TrilinearFieldInterpolator
 from yt.utilities.minimal_representation import \
     MinimalSliceData
@@ -683,6 +686,9 @@
         self.base_object.get_data(fields)
         ind = self._cond_ind
         for field in fields:
+            f = self.base_object[field]
+            if f.shape != ind.shape:
+                raise YTMixedCutRegion(self.conditionals, field)
             self.field_data[field] = self.base_object[field][ind]
 
     @property
@@ -693,6 +699,8 @@
             for cond in self.conditionals:
                 res = eval(cond)
                 if ind is None: ind = res
+                if ind.shape != res.shape:
+                    raise YTIllDefinedCutRegion(self.conditionals)
                 np.logical_and(res, ind, ind)
         return ind
 

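A hedged illustration of what the two new exceptions guard against (the dataset name is borrowed from the docs above; the failing access is left commented out):

    from yt.mods import *

    ds = load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()

    # Fine: the conditional and the requested field are both cell-based.
    hot = ad.cut_region(['obj["temperature"] > 1e5'])
    print hot.quantities.total_quantity('cell_mass')

    # Conditionals whose boolean masks have different shapes now raise
    # YTIllDefinedCutRegion, and asking a mesh-defined cut region for a
    # field of a different shape (e.g. a particle field) is expected to
    # raise YTMixedCutRegion instead of a confusing indexing error.
    # hot["particle_mass"]   # -> YTMixedCutRegion
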
diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,8 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
-from yt.geometry.ppv_coordinates import \
-    PPVCoordinateHandler
+from yt.geometry.spec_cube_coordinates import \
+    SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -361,8 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
-        elif self.geometry == "ppv":
-            self.coordinates = PPVCoordinateHandler(self)
+        elif self.geometry == "spectral_cube":
+            self.coordinates = SpectralCubeCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 
@@ -519,16 +519,28 @@
 
     def find_max(self, field):
         """
-        Returns (value, center) of location of maximum for a given field.
+        Returns (value, location) of the maximum of a given field.
         """
         mylog.debug("Searching for maximum value of %s", field)
         source = self.all_data()
         max_val, maxi, mx, my, mz = \
-            source.quantities["MaxLocation"](field)
+            source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
         return max_val, np.array([mx, my, mz], dtype="float64")
 
+    def find_min(self, field):
+        """
+        Returns (value, location) for the minimum of a given field.
+        """
+        mylog.debug("Searching for minimum value of %s", field)
+        source = self.all_data()
+        min_val, maxi, mx, my, mz = \
+            source.quantities.min_location(field)
+        mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
+              min_val, mx, my, mz)
+        return min_val, np.array([mx, my, mz], dtype="float64")
+
     # Now all the object related stuff
     def all_data(self, find_max=False):
         if find_max: c = self.find_max("density")[1]

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -44,6 +44,7 @@
     rho_crit_g_cm3_h2, cm_per_mpc
 from yt.utilities.io_handler import io_registry
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.pyparselibconfig import libconfig
 
 from .fields import \
     EnzoFieldInfo
@@ -277,7 +278,15 @@
         si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
         pbar = get_pbar("Parsing Hierarchy ", self.num_grids)
-        if self.parameter_file.parameters["VersionNumber"] > 2.0:
+        version = self.parameter_file.parameters.get("VersionNumber", None)
+        params = self.parameter_file.parameters
+        if version is None and "Internal" in params:
+            version = float(params["Internal"]["Provenance"]["VersionNumber"])
+        if version >= 3.0:
+            active_particles = True
+            nap = dict((ap_type, []) for ap_type in 
+                params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
+        elif version > 2.0:
             active_particles = True
             nap = {}
             for type in self.parameters.get("AppendActiveParticleType", []):
@@ -731,10 +740,53 @@
         # Let's read the file
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        data_labels = {}
-        data_label_factors = {}
-        conversion_factors = {}
-        for line in (l.strip() for l in open(self.parameter_filename)):
+        with open(self.parameter_filename, "r") as f:
+            line = f.readline().strip() 
+            f.seek(0)
+            if line == "Internal:":
+                self._parse_enzo3_parameter_file(f)
+            else:
+                self._parse_enzo2_parameter_file(f)
+
+    def _parse_enzo3_parameter_file(self, f):
+        self.parameters = p = libconfig(f)
+        sim = p["SimulationControl"]
+        internal = p["Internal"]
+        phys = p["Physics"]
+        self.refine_by = sim["AMR"]["RefineBy"]
+        self.periodicity = tuple(a == 3 for a in
+                            sim["Domain"]["LeftFaceBoundaryCondition"])
+        self.dimensionality = sim["Domain"]["TopGridRank"]
+        self.domain_dimensions = np.array(sim["Domain"]["TopGridDimensions"],
+                                          dtype="int64")
+        self.domain_left_edge = np.array(sim["Domain"]["DomainLeftEdge"],
+                                         dtype="float64")
+        self.domain_right_edge = np.array(sim["Domain"]["DomainRightEdge"],
+                                          dtype="float64")
+        self.gamma = phys["Hydro"]["Gamma"]
+        self.unique_identifier = internal["Provenance"]["CurrentTimeIdentifier"]
+        self.current_time = internal["InitialTime"]
+        self.cosmological_simulation = phys["Cosmology"]["ComovingCoordinates"]
+        if self.cosmological_simulation == 1:
+            cosmo = phys["Cosmology"]
+            self.current_redshift = internal["CosmologyCurrentRedshift"]
+            self.omega_lambda = cosmo["OmegaLambdaNow"]
+            self.omega_matter = cosmo["OmegaMatterNow"]
+            self.hubble_constant = cosmo["HubbleConstantNow"]
+        else:
+            self.current_redshift = self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+        self.particle_types = ["DarkMatter"] + \
+            phys["ActiveParticles"]["ActiveParticlesEnabled"]
+        self.particle_types = tuple(self.particle_types)
+        self.particle_types_raw = self.particle_types
+        if self.dimensionality == 1:
+            self._setup_1d()
+        elif self.dimensionality == 2:
+            self._setup_2d()
+
+    def _parse_enzo2_parameter_file(self, f):
+        for line in (l.strip() for l in f):
             if len(line) < 2: continue
             param, vals = (i.strip() for i in line.split("=",1))
             # First we try to decipher what type of value it is.
@@ -835,8 +887,11 @@
         if self.cosmological_simulation:
             k = self.cosmology_get_units()
             # Now some CGS values
-            self.length_unit = \
-                self.quan(self.parameters["CosmologyComovingBoxSize"], "Mpccm/h")
+            box_size = self.parameters.get("CosmologyComovingBoxSize", None)
+            if box_size is None:
+                box_size = self.parameters["Physics"]["Cosmology"]\
+                    ["CosmologyComovingBoxSize"]
+            self.length_unit = self.quan(box_size, "Mpccm/h")
             self.mass_unit = \
                 self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3
             self.time_unit = self.quan(k['utim'], 's')
@@ -846,6 +901,11 @@
                 length_unit = self.parameters["LengthUnits"]
                 mass_unit = self.parameters["DensityUnits"] * length_unit**3
                 time_unit = self.parameters["TimeUnits"]
+            elif "SimulationControl" in self.parameters:
+                units = self.parameters["SimulationControl"]["Units"]
+                length_unit = units["Length"]
+                mass_unit = units["Density"] * length_unit**3
+                time_unit = units["Time"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
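
For orientation, the Enzo 3 branch above reads a libconfig-style file whose first line is "Internal:"; once parsed, the parameters behave like nested dictionaries. Roughly (made-up values, only sections the new code actually reads):

    p = {"Internal": {"Provenance": {"VersionNumber": 3.0},
                      "InitialTime": 0.0},
         "SimulationControl": {"AMR": {"RefineBy": 2}},
         "Physics": {"Hydro": {"Gamma": 1.6667}}}
    refine_by = p["SimulationControl"]["AMR"]["RefineBy"]   # -> 2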

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -96,7 +96,10 @@
     )
 
     def __init__(self, pf, field_list):
-        if pf.parameters["HydroMethod"] == 2:
+        hydro_method = pf.parameters.get("HydroMethod", None)
+        if hydro_method is None:
+            hydro_method = pf.parameters["Physics"]["Hydro"]["HydroMethod"]
+        if hydro_method == 2:
             sl_left = slice(None,-2,None)
             sl_right = slice(1,-1,None)
             div_fac = 1.0
@@ -142,7 +145,11 @@
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.
-        if self.pf.parameters["MultiSpecies"] > 0:
+        params = self.pf.parameters
+        multi_species = params.get("MultiSpecies", None)
+        if multi_species is None:
+            multi_species = params["Physics"]["AtomicPhysics"]["MultiSpecies"]
+        if multi_species > 0:
             self.setup_species_fields()
         self.setup_energy_field()
 
@@ -150,6 +157,16 @@
         # We check which type of field we need, and then we add it.
         ge_name = None
         te_name = None
+        params = self.pf.parameters
+        multi_species = params.get("MultiSpecies", None)
+        if multi_species is None:
+            multi_species = params["Physics"]["AtomicPhysics"]["MultiSpecies"]
+        hydro_method = params.get("HydroMethod", None)
+        if hydro_method is None:
+            hydro_method = params["Physics"]["Hydro"]["HydroMethod"]
+        dual_energy = params.get("DualEnergyFormalism", None)
+        if dual_energy is None:
+            dual_energy = params["Physics"]["Hydro"]["DualEnergyFormalism"]
         if ("enzo", "Gas_Energy") in self.field_list:
             ge_name = "Gas_Energy"
         elif ("enzo", "GasEnergy") in self.field_list:
@@ -159,12 +176,12 @@
         elif ("enzo", "TotalEnergy") in self.field_list:
             te_name = "TotalEnergy"
 
-        if self.pf.parameters["HydroMethod"] == 2:
+        if hydro_method == 2:
             self.add_output_field(("enzo", te_name),
                 units="code_velocity**2")
             self.alias(("gas", "thermal_energy"), ("enzo", te_name))
 
-        elif self.pf.parameters["DualEnergyFormalism"] == 1:
+        elif dual_energy == 1:
             self.add_output_field(
                 ("enzo", ge_name),
                 units="code_velocity**2")
@@ -172,7 +189,7 @@
                 ("gas", "thermal_energy"),
                 ("enzo", ge_name),
                 units = "erg/g")
-        elif self.pf.parameters["HydroMethod"] in (4, 6):
+        elif hydro_method in (4, 6):
             self.add_output_field(
                 ("enzo", te_name),
                 units="code_velocity**2")
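
The lookups above repeat the same flat-key-then-nested-section fallback for HydroMethod, MultiSpecies, and DualEnergyFormalism; a small helper expressing that pattern (hypothetical, not part of the changeset):

    def get_param(params, flat_key, nested_path):
        # Enzo 2.x stores a flat key; Enzo 3.x nests it under libconfig sections.
        if flat_key in params:
            return params[flat_key]
        node = params
        for key in nested_path:
            node = node[key]
        return node

    # e.g. get_param(pf.parameters, "HydroMethod", ("Physics", "Hydro", "HydroMethod"))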

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -44,11 +44,15 @@
 
 lon_prefixes = ["X","RA","GLON"]
 lat_prefixes = ["Y","DEC","GLAT"]
-vel_prefixes = ["V","ENER","FREQ","WAV"]
 delimiters = ["*", "/", "-", "^"]
 delimiters += [str(i) for i in xrange(10)]
 regex_pattern = '|'.join(re.escape(_) for _ in delimiters)
 
+spec_names = {"V":"Velocity",
+              "FREQ":"Frequency",
+              "ENER":"Energy",
+              "WAV":"Wavelength"}
+
 field_from_unit = {"Jy":"intensity",
                    "K":"temperature"}
 
@@ -136,6 +140,7 @@
         self._file_map = {}
         self._ext_map = {}
         self._scale_map = {}
+        dup_field_index = {}
         # Since FITS header keywords are case-insensitive, we only pick a subset of
         # prefixes, ones that we expect to end up in headers.
         known_units = dict([(unit.lower(),unit) for unit in self.pf.unit_registry.lut])
@@ -162,13 +167,19 @@
                         if fname is None: fname = "image_%d" % (j)
                     if self.pf.num_files > 1 and fname.startswith("image"):
                         fname += "_file_%d" % (i)
+                    if ("fits", fname) in self.field_list:
+                        if fname in dup_field_index:
+                            dup_field_index[fname] += 1
+                        else:
+                            dup_field_index[fname] = 1
+                        mylog.warning("This field has the same name as a previously loaded " +
+                                      "field. Changing the name from %s to %s_%d. To avoid " %
+                                      (fname, fname, dup_field_index[fname]) +
+                                      " this, change one of the BTYPE header keywords.")
+                        fname += "_%d" % (dup_field_index[fname])
                     for k in xrange(naxis4):
                         if naxis4 > 1:
                             fname += "_%s_%d" % (hdu.header["CTYPE4"], k+1)
-                        if fname in self.field_list:
-                            mylog.error("You have two fields with the same name. Change one of " +
-                                        "the names in the BTYPE header keyword to distinguish " +
-                                        "them.")
                         self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
                         self._ext_map[fname] = j
@@ -210,7 +221,7 @@
         # If nprocs > 1, decompose the domain into virtual grids
         if self.num_grids > 1:
             if self.pf.z_axis_decomp:
-                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                dz = pf.quan(1.0, "code_length")*pf.spectral_factor
                 self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
                                                             self.num_grids).astype("int")
                 self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
@@ -227,7 +238,7 @@
                 dims = np.array(pf.domain_dimensions)
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    dims[pf.vel_axis] = 1
+                    dims[pf.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.pf.arr(gle, "code_length")
@@ -235,9 +246,9 @@
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
+                    self.grid_left_edge[:,pf.spec_axis] = pf.domain_left_edge[pf.spec_axis]
+                    self.grid_right_edge[:,pf.spec_axis] = pf.domain_right_edge[pf.spec_axis]
+                    self.grid_dimensions[:,pf.spec_axis] = pf.domain_dimensions[pf.spec_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
@@ -303,6 +314,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 spectral_factor = 1.0,
                  z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
@@ -315,10 +327,13 @@
         self.specified_parameters = parameters
 
         self.z_axis_decomp = z_axis_decomp
+        self.spectral_factor = spectral_factor
 
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
+            mylog.info("For line folding, spectral_factor = 1.0")
+            self.spectral_factor = 1.0
         else:
             self.line_width = None
 
@@ -356,8 +371,8 @@
                 else:
                     fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
                 f = _astropy.pyfits.open(fn, memmap=True,
-                                   do_not_scale_image_data=True,
-                                   ignore_blank=True)
+                                         do_not_scale_image_data=True,
+                                         ignore_blank=True)
                 self._fits_files.append(f)
 
         if len(self._handle) > 1 and self._handle[1].name == "EVENTS":
@@ -399,13 +414,23 @@
             self.events_data = False
             self.first_image = 0
             self.primary_header = self._handle[self.first_image].header
-            self.wcs = _astropy.pywcs.WCS(header=self.primary_header)
             self.naxis = self.primary_header["naxis"]
             self.axis_names = [self.primary_header["ctype%d" % (i+1)]
                                for i in xrange(self.naxis)]
             self.dims = [self.primary_header["naxis%d" % (i+1)]
                          for i in xrange(self.naxis)]
 
+            wcs = _astropy.pywcs.WCS(header=self.primary_header)
+            if self.naxis == 4:
+                self.wcs = _astropy.pywcs.WCS(naxis=3)
+                self.wcs.wcs.crpix = wcs.wcs.crpix[:3]
+                self.wcs.wcs.cdelt = wcs.wcs.cdelt[:3]
+                self.wcs.wcs.crval = wcs.wcs.crval[:3]
+                self.wcs.wcs.cunit = [str(unit) for unit in wcs.wcs.cunit][:3]
+                self.wcs.wcs.ctype = [type for type in wcs.wcs.ctype][:3]
+            else:
+                self.wcs = wcs
+
         self.refine_by = 2
 
         Dataset.__init__(self, fn, dataset_type)
@@ -441,11 +466,12 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         if "beam_size" in self.specified_parameters:
+            beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value
         else:
             beam_size = 1.0
         self.unit_registry.add("beam",beam_size,dimensions=dimensions.solid_angle)
-        if self.ppv_data:
+        if self.spec_cube:
             units = self.wcs_2d.wcs.cunit[0]
             if units == "deg": units = "degree"
             if units == "rad": units = "radian"
@@ -520,17 +546,17 @@
         self.reversed = False
 
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
-        self.ppv_data = False
+        self.spec_cube = False
         x = 0
-        for p in lon_prefixes+lat_prefixes+vel_prefixes:
+        for p in lon_prefixes+lat_prefixes+spec_names.keys():
             y = np_char.startswith(self.axis_names[:self.dimensionality], p)
             x += y.sum()
-        if x == self.dimensionality: self._setup_ppv()
+        if x == self.dimensionality: self._setup_spec_cube()
 
-    def _setup_ppv(self):
+    def _setup_spec_cube(self):
 
-        self.ppv_data = True
-        self.geometry = "ppv"
+        self.spec_cube = True
+        self.geometry = "spectral_cube"
 
         end = min(self.dimensionality+1,4)
         if self.events_data:
@@ -556,11 +582,11 @@
 
         if self.wcs.naxis > 2:
 
-            self.vel_axis = np.zeros((end-1), dtype="bool")
-            for p in vel_prefixes:
-                self.vel_axis += np_char.startswith(ctypes, p)
-            self.vel_axis = np.where(self.vel_axis)[0][0]
-            self.vel_name = ctypes[self.vel_axis].split("-")[0].lower()
+            self.spec_axis = np.zeros((end-1), dtype="bool")
+            for p in spec_names.keys():
+                self.spec_axis += np_char.startswith(ctypes, p)
+            self.spec_axis = np.where(self.spec_axis)[0][0]
+            self.spec_name = spec_names[ctypes[self.spec_axis].split("-")[0][0]]
 
             self.wcs_2d = _astropy.pywcs.WCS(naxis=2)
             self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.lon_axis, self.lat_axis]]
@@ -571,41 +597,60 @@
             self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.lon_axis],
                                      self.wcs.wcs.ctype[self.lat_axis]]
 
-            x0 = self.wcs.wcs.crpix[self.vel_axis]
-            dz = self.wcs.wcs.cdelt[self.vel_axis]
-            z0 = self.wcs.wcs.crval[self.vel_axis]
-            self.vel_unit = str(self.wcs.wcs.cunit[self.vel_axis])
-
-            if dz < 0.0:
-                self.reversed = True
-                le = self.dims[self.vel_axis]+0.5
-                re = 0.5
-            else:
-                le = 0.5
-                re = self.dims[self.vel_axis]+0.5
-            self.domain_left_edge[self.vel_axis] = (le-x0)*dz + z0
-            self.domain_right_edge[self.vel_axis] = (re-x0)*dz + z0
-            if self.reversed: dz *= -1
+            self._p0 = self.wcs.wcs.crpix[self.spec_axis]
+            self._dz = self.wcs.wcs.cdelt[self.spec_axis]
+            self._z0 = self.wcs.wcs.crval[self.spec_axis]
+            self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
             if self.line_width is not None:
-                self.line_width = self.line_width.in_units(self.vel_unit)
-                self.freq_begin = self.domain_left_edge[self.vel_axis]
-                nz = np.rint(self.line_width.value/dz).astype("int")
-                self.line_width = dz*nz
-                self.domain_left_edge[self.vel_axis] = -self.line_width/2.
-                self.domain_right_edge[self.vel_axis] = self.line_width/2.
-                self.domain_dimensions[self.vel_axis] = nz
-
+                if self._dz < 0.0:
+                    self.reversed = True
+                    le = self.dims[self.spec_axis]+0.5
+                else:
+                    le = 0.5
+                self.line_width = self.line_width.in_units(self.spec_unit)
+                self.freq_begin = (le-self._p0)*self._dz + self._z0
+                # We now reset these so that they are consistent
+                # with the new setup
+                self._dz = np.abs(self._dz)
+                self._p0 = 0.0
+                self._z0 = 0.0
+                nz = np.rint(self.line_width.value/self._dz).astype("int")
+                self.line_width = self._dz*nz
+                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
+                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
+                self.domain_dimensions[self.spec_axis] = nz
+            else:
+                if self.spectral_factor == "auto":
+                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                             self.lat_axis]]))
+                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+                                                        self.spectral_factor*Dz
+                self._dz /= self.spectral_factor
+                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
         else:
 
             self.wcs_2d = self.wcs
-            self.vel_axis = 2
-            self.vel_name = "z"
-            self.vel_unit = "code length"
+            self.spec_axis = 2
+            self.spec_name = "z"
+            self.spec_unit = "code length"
+
+    def spec2pixel(self, spec_value):
+        sv = self.arr(spec_value).in_units(self.spec_unit)
+        return self.arr((sv.v-self._z0)/self._dz+self._p0,
+                        "code_length")
+
+    def pixel2spec(self, pixel_value):
+        pv = self.arr(pixel_value, "code_length")
+        return self.arr((pv.v-self._p0)*self._dz+self._z0,
+                        self.spec_unit)
 
     def __del__(self):
-        for file in self._fits_files:
-            file.close()
+        for f in self._fits_files:
+            f.close()
             del file
         self._handle.close()
         del self._handle
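
spec2pixel and pixel2spec are inverse linear maps built from the WCS reference pixel (_p0), reference value (_z0), and increment (_dz); a toy numeric check with plain floats, ignoring the unit handling:

    _p0, _z0, _dz = 1.0, 1.0e9, 1.0e6     # reference pixel 1.0 sits at 1e9 Hz, 1e6 Hz per pixel
    pixel = (1.005e9 - _z0) / _dz + _p0   # spec2pixel -> 6.0 in code_length
    spec = (pixel - _p0) * _dz + _z0      # pixel2spec -> back to 1.005e9 Hz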

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -23,7 +23,7 @@
         for field in pf.field_list:
             if field[0] == "fits": self[field].take_log = False
 
-    def _setup_ppv_fields(self):
+    def _setup_spec_cube_fields(self):
 
         def _get_2d_wcs(data, axis):
             w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
@@ -42,17 +42,18 @@
             self.add_field(("fits",name), function=world_f(axis, unit), units=unit)
 
         if self.pf.dimensionality == 3:
-            def _vel_los(field, data):
-                axis = "xyz"[data.pf.vel_axis]
-                return data.pf.arr(data[axis].ndarray_view(),data.pf.vel_unit)
-            self.add_field(("fits",self.pf.vel_name), function=_vel_los,
-                           units=self.pf.vel_unit)
+            def _spec(field, data):
+                axis = "xyz"[data.pf.spec_axis]
+                sp = (data[axis].ndarray_view()-self.pf._p0)*self.pf._dz + self.pf._z0
+                return data.pf.arr(sp, data.pf.spec_unit)
+            self.add_field(("fits","spectral"), function=_spec,
+                           units=self.pf.spec_unit, display_name=self.pf.spec_name)
 
     def setup_fluid_fields(self):
 
-        if self.pf.ppv_data:
+        if self.pf.spec_cube:
             def _pixel(field, data):
                 return data.pf.arr(data["ones"], "pixel")
             self.add_field(("fits","pixel"), function=_pixel, units="pixel")
-            self._setup_ppv_fields()
+            self._setup_spec_cube_fields()
             return

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -26,8 +26,10 @@
         self._handle = pf._handle
         if self.pf.line_width is not None:
             self.line_db = self.pf.line_database
+            self.dz = self.pf.line_width/self.domain_dimensions[self.pf.spec_axis]
         else:
             self.line_db = None
+            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -90,19 +92,19 @@
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
+                        my_off = self.line_db.get(fname).in_units(self.pf.spec_unit).value
                         my_off = my_off - 0.5*self.pf.line_width
-                        my_off = int((my_off-self.pf.freq_begin)/dx[self.pf.vel_axis].value)
+                        my_off = int((my_off-self.pf.freq_begin)/self.dz)
                         my_off = max(my_off, 0)
-                        my_off = min(my_off, self.pf.dims[self.pf.vel_axis]-1)
-                        start[self.pf.vel_axis] += my_off
-                        end[self.pf.vel_axis] += my_off
+                        my_off = min(my_off, self.pf.dims[self.pf.spec_axis]-1)
+                        start[self.pf.spec_axis] += my_off
+                        end[self.pf.spec_axis] += my_off
                         mylog.debug("Reading from " + str(start) + str(end))
                     slices = [slice(start[i],end[i]) for i in xrange(3)]
                     if self.pf.reversed:
-                        new_start = self.pf.dims[self.pf.vel_axis]-1-start[self.pf.vel_axis]
-                        new_end = max(self.pf.dims[self.pf.vel_axis]-1-end[self.pf.vel_axis],0)
-                        slices[self.pf.vel_axis] = slice(new_start,new_end,-1)
+                        new_start = self.pf.dims[self.pf.spec_axis]-1-start[self.pf.spec_axis]
+                        new_end = max(self.pf.dims[self.pf.spec_axis]-1-end[self.pf.spec_axis],0)
+                        slices[self.pf.spec_axis] = slice(new_start,new_end,-1)
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -114,8 +116,8 @@
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
                     if self.line_db is not None:
-                        nz1 = data.shape[self.pf.vel_axis]
-                        nz2 = g.ActiveDimensions[self.pf.vel_axis]
+                        nz1 = data.shape[self.pf.spec_axis]
+                        nz2 = g.ActiveDimensions[self.pf.spec_axis]
                         if nz1 != nz2:
                             old_data = data.copy()
                             data = np.zeros(g.ActiveDimensions)

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -15,6 +15,7 @@
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
+
 import os
 
 def _make_counts(emin, emax):
@@ -130,26 +131,17 @@
             raise NotImplementedError("WCS axes are not implemented for oblique plots.")
         if not hasattr(pw.pf, "wcs_2d"):
             raise NotImplementedError("WCS axes are not implemented for this dataset.")
-        if pw.data_source.axis != pw.pf.vel_axis:
+        if pw.data_source.axis != pw.pf.spec_axis:
             raise NotImplementedError("WCS axes are not implemented for this axis.")
-        self.pf = pw.pf
+        self.plots = {}
         self.pw = pw
-        self.plots = {}
-        self.wcs_axes = []
         for f in pw.plots:
             rect = pw.plots[f]._get_best_layout()[1]
             fig = pw.plots[f].figure
-            ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
-            fig.add_axes(ax)
-            self.wcs_axes.append(ax)
-        self._setup_plots()
-
-    def _setup_plots(self):
-        pw = self.pw
-        for f, ax in zip(pw.plots, self.wcs_axes):
-            wcs = ax.wcs.wcs
-            pw.plots[f].axes.get_xaxis().set_visible(False)
-            pw.plots[f].axes.get_yaxis().set_visible(False)
+            ax = fig.axes[0]
+            wcs_ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
+            fig.add_axes(wcs_ax)
+            wcs = pw.pf.wcs_2d.wcs
             xax = pw.pf.coordinates.x_axis[pw.data_source.axis]
             yax = pw.pf.coordinates.y_axis[pw.data_source.axis]
             xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0],
@@ -157,18 +149,18 @@
             ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0],
                                   wcs.cunit[yax])
             fp = pw._font_properties
-            ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
-            ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
-            ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
-            ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
-            ax.coords[0].ticklabels.set_fontproperties(fp)
-            ax.coords[1].ticklabels.set_fontproperties(fp)
-            self.plots[f] = pw.plots[f]
-        self.pw = pw
-        self.pf = self.pw.pf
-
-    def refresh(self):
-        self._setup_plots(self)
+            wcs_ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
+            wcs_ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
+            wcs_ax.coords[0].ticklabels.set_fontproperties(fp)
+            wcs_ax.coords[1].ticklabels.set_fontproperties(fp)
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            wcs_ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
+            wcs_ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
+            wcs_ax.coords.frame._update_cache = []
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            self.plots[f] = fig
 
     def keys(self):
         return self.plots.keys()
@@ -187,8 +179,8 @@
     def show(self):
         from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
-            canvas = FigureCanvasAgg(v.figure)
-            display(v.figure)
+            canvas = FigureCanvasAgg(v)
+            display(v)
 
     def save(self, name=None, mpl_kwargs=None):
         if mpl_kwargs is None:

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -206,6 +206,7 @@
         else:
             self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
+        self.parameters.update(sp)
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
         self.domain_dimensions = sp["domain_dimensions"][:]

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -62,6 +62,7 @@
         ("Temperature", ("K", ["temperature"], None)),
         ("Epsilon", ("code_length", [], None)),
         ("Metals", ("code_metallicity", ["metallicity"], None)),
+        ("Metallicity", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
         # These are metallicity fields that get discovered for FIRE simulations

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -153,24 +153,29 @@
 
     def _initialize_index(self, data_file, regions):
         # self.fields[g.id][fname] is the pattern here
-        pos = np.column_stack(self.fields[data_file.filename][
-                              ("io", "particle_position_%s" % ax)]
-                              for ax in 'xyz')
-        if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
-           np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
-            raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
-                                   data_file.pf.domain_left_edge,
-                                   data_file.pf.domain_right_edge)
-        regions.add_data_file(pos, data_file.file_id)
-        morton = compute_morton(
-                pos[:,0], pos[:,1], pos[:,2],
-                data_file.pf.domain_left_edge,
-                data_file.pf.domain_right_edge)
-        return morton
+        morton = []
+        for ptype in self.pf.particle_types_raw:
+            pos = np.column_stack(self.fields[data_file.filename][
+                                  (ptype, "particle_position_%s" % ax)]
+                                  for ax in 'xyz')
+            if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                       data_file.pf.domain_left_edge,
+                                       data_file.pf.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton.append(compute_morton(
+                    pos[:,0], pos[:,1], pos[:,2],
+                    data_file.pf.domain_left_edge,
+                    data_file.pf.domain_right_edge))
+        return np.concatenate(morton)
 
     def _count_particles(self, data_file):
-        npart = self.fields[data_file.filename]["io", "particle_position_x"].size
-        return {'io': npart}
+        pcount = {}
+        for ptype in self.pf.particle_types_raw:
+            d = self.fields[data_file.filename]
+            pcount[ptype] = d[ptype, "particle_position_x"].size
+        return pcount
 
     def _identify_fields(self, data_file):
         return self.fields[data_file.filename].keys(), {}
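
Downstream, particle counts now come back keyed by particle type instead of being lumped under "io"; a toy version of the new bookkeeping (made-up types and sizes):

    import numpy as np
    # stand-in for self.fields[data_file.filename] in a stream dataset
    fields = {("dm", "particle_position_x"): np.zeros(1000),
              ("star", "particle_position_x"): np.zeros(100)}
    pcount = dict((ptype, fields[ptype, "particle_position_x"].size)
                  for ptype in ("dm", "star"))
    # pcount == {'dm': 1000, 'star': 100}; previously everything was {'io': 1100}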

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -622,6 +622,22 @@
 
 @contextlib.contextmanager
 def parallel_profile(prefix):
+    r"""A context manager for profiling parallel code execution using cProfile
+
+    This is a simple context manager that automatically profiles the execution
+    of a snippet of code.
+
+    Parameters
+    ----------
+    prefix : string
+        A string name to prefix outputs with.
+
+    Examples
+    --------
+
+    >>> with parallel_profile('my_profile'):
+    ...     yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')
+    """
     import cProfile
     from yt.config import ytcfg
     fn = "%s_%04i_%04i.cprof" % (prefix,

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Cartesian fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from .cartesian_coordinates import \
-    CartesianCoordinateHandler
-
-class PPVCoordinateHandler(CartesianCoordinateHandler):
-
-    def __init__(self, pf):
-        super(PPVCoordinateHandler, self).__init__(pf)
-
-        self.axis_name = {}
-        self.axis_id = {}
-
-        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
-                                   ["Image\ x", "Image\ y", pf.vel_name]):
-            lower_ax = "xyz"[axis]
-            upper_ax = lower_ax.upper()
-
-            self.axis_name[axis] = axis_name
-            self.axis_name[lower_ax] = axis_name
-            self.axis_name[upper_ax] = axis_name
-            self.axis_name[axis_name] = axis_name
-
-            self.axis_id[lower_ax] = axis
-            self.axis_id[axis] = axis
-            self.axis_id[axis_name] = axis
-
-        self.default_unit_label = {}
-        self.default_unit_label[pf.lon_axis] = "pixel"
-        self.default_unit_label[pf.lat_axis] = "pixel"
-        self.default_unit_label[pf.vel_axis] = pf.vel_unit
-
-    def convert_to_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_cylindrical(self, coord):
-        raise NotImplementedError
-
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
-
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -650,8 +650,9 @@
             domain_width = DW[i]
 
             if region_width <= 0:
-                print "Error: region right edge < left edge", region_width
-                raise RuntimeError
+                raise RuntimeError(
+                    "Region right edge < left edge: width = %s" % region_width
+                    )
 
             if dobj.pf.periodicity[i]:
                 # shift so left_edge guaranteed in domain
@@ -664,10 +665,13 @@
             else:
                 if dobj.left_edge[i] < dobj.pf.domain_left_edge[i] or \
                    dobj.right_edge[i] > dobj.pf.domain_right_edge[i]:
-                    print "Error: bad Region in non-periodic domain:", dobj.left_edge[i], \
-                        dobj.pf.domain_left_edge[i], dobj.right_edge[i], dobj.pf.domain_right_edge[i]
-                    raise RuntimeError
-                
+                    raise RuntimeError(
+                        "Error: bad Region in non-periodic domain along dimension %s. "
+                        "Region left edge = %s, Region right edge = %s"
+                        "Dataset left edge = %s, Dataset right edge = %s" % \
+                        (i, dobj.left_edge[i], dobj.right_edge[i],
+                         dobj.pf.domain_left_edge[i], dobj.pf.domain_right_edge[i])
+                    )
             # Already ensured in code
             self.left_edge[i] = dobj.left_edge[i]
             self.right_edge[i] = dobj.right_edge[i]

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/geometry/spec_cube_coordinates.py
--- /dev/null
+++ b/yt/geometry/spec_cube_coordinates.py
@@ -0,0 +1,65 @@
+"""
+Cartesian fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(SpectralCubeCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.spec_axis],
+                                   ["Image\ x", "Image\ y", pf.spec_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.spec_axis] = pf.spec_unit
+
+        def _spec_axis(ax, x, y):
+            p = (x,y)[ax]
+            return [self.pf.pixel2spec(pp).v for pp in p]
+
+        self.axis_field = {}
+        self.axis_field[self.pf.spec_axis] = _spec_axis
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1029,9 +1029,9 @@
 def get_binary_op_return_class(cls1, cls2):
     if cls1 is cls2:
         return cls1
-    if cls1 is np.ndarray or issubclass(cls1, numeric_type):
+    if cls1 is np.ndarray or issubclass(cls1, (numeric_type, np.number)):
         return cls2
-    if cls2 is np.ndarray or issubclass(cls2, numeric_type):
+    if cls2 is np.ndarray or issubclass(cls2, (numeric_type, np.number)):
         return cls1
     if issubclass(cls1, YTQuantity):
         return cls2
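
The practical effect is that NumPy scalar types get the same treatment as plain Python numbers in mixed operations; a minimal check (assuming a working yt install):

    import numpy as np
    from yt.units.yt_array import YTArray
    a = YTArray([1.0, 2.0], "g")
    b = np.float64(3.0) * a   # should now resolve to a YTArray in grams, not a bare ndarray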

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -377,3 +377,26 @@
         r = """Position arrays must be length and shape (N,3).
                But this one has %s and %s.""" % (self.dimensions, self.shape)
         return r
+
+class YTIllDefinedCutRegion(Exception):
+    def __init__(self, conditions):
+        self.conditions = conditions
+
+    def __str__(self):
+        r = """Can't mix particle/discrete and fluid/mesh conditions or
+               quantities.  Conditions specified:
+            """
+        r += "\n".join([c for c in self.conditions])
+        return r
+
+class YTMixedCutRegion(Exception):
+    def __init__(self, conditions, field):
+        self.conditions = conditions
+        self.field = field
+
+    def __str__(self):
+        r = """Can't mix particle/discrete and fluid/mesh conditions or
+               quantities.  Field: %s and Conditions specified:
+            """ % (self.field,)
+        r += "\n".join([c for c in self.conditions])
+        return r

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -20,7 +20,12 @@
 pyfits = _astropy.pyfits
 pywcs = _astropy.pywcs
 
-class FITSImageBuffer(pyfits.HDUList):
+if pyfits is None:
+    HDUList = object
+else:
+    HDUList = pyfits.HDUList
+
+class FITSImageBuffer(HDUList):
 
     def __init__(self, data, fields=None, units="cm",
                  center=None, scale=None, wcs=None):

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/utilities/pyparselibconfig/__init__.py
--- /dev/null
+++ b/yt/utilities/pyparselibconfig/__init__.py
@@ -0,0 +1,2 @@
+from api import libconfig
+

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/utilities/pyparselibconfig/api.py
--- /dev/null
+++ b/yt/utilities/pyparselibconfig/api.py
@@ -0,0 +1,1 @@
+from libconfig import libconfig

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/utilities/pyparselibconfig/libconfig.py
--- /dev/null
+++ b/yt/utilities/pyparselibconfig/libconfig.py
@@ -0,0 +1,102 @@
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, Samuel Skillman 
+#
+# Distributed under the terms of the MIT License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+class libconfig(dict):
+    def __init__(self, config=None):
+        if config is not None:
+            self.read(config)
+
+    def read(self, config):
+        if not hasattr(config, "read"):
+            cfile = open(config, 'r')
+        else:
+            cfile = config
+
+        # Strip out spaces and blanks
+        lines = [line.strip() for line in cfile if len(line) > 0]
+
+        # Strip out comments
+        lines = [line for line in lines if not (line.startswith('#') or
+                                                line.startswith('/'))]
+
+        # Concatenate
+        oneline = ''
+        for line in lines:
+            oneline += line
+
+        statements = oneline.split(';')
+
+        self.parse_statements(self, statements)
+
+    def parse_statements(self, this_dict, statements):
+        while len(statements) > 0:
+            statement = statements.pop(0)
+            if len(statement) == 0:
+                continue
+            if ':' in statement:
+                # DICTIONARY
+                new_level_lines = []
+                k, v = statement.split(':', 1)
+                k = k.strip(' :')
+                v = v.strip(' {')
+                level = 1 + v.count(':')
+                this_dict[k] = {}
+                new_level_lines.append(v)
+
+                while(level > 0 and len(statements) > 0):
+                    nextline = statements.pop(0).strip()
+                    level += nextline.count('{')
+                    if nextline == '}' and level == 1:
+                        level = 0
+                        break
+                    new_level_lines.append(nextline)
+                    level -= nextline.count('}')
+                self.parse_statements(this_dict[k], new_level_lines)
+            else:
+                k, v = statement.split('=')
+                k = k.strip()
+                v = v.strip()
+                this_dict[k] = self.correct_type(v)
+
+    def correct_type(self, v):
+        if v == "true":
+            v = "True"
+        elif v == "false":
+            v = "False"
+        # ...are we really evaling this?  We should work around that somehow.
+        return eval(v)
+
+    def write(self, filename):
+        f = file(filename, 'w')
+
+        self.write_dict(f, self, 0)
+
+    def write_dict(self, f, this_dict, level):
+        tab = ' '*4
+
+        dict_dict = {}
+        for k, v in this_dict.iteritems():
+            if type(v) == dict:
+                dict_dict[k] = v
+            else:
+                if type(v) == str:
+                    v = '"%s"' % v
+                f.writelines(tab*level + '%s = %s;\n' % (k, v))
+
+        for k, v in dict_dict.iteritems():
+            f.writelines('\n')
+            f.writelines(tab*level + '%s :\n' % k)
+            f.writelines(tab*level+'{\n')
+            self.write_dict(f, v, level+1)
+            f.writelines(tab*level+'};\n')
+
+if __name__ == '__main__':
+    cfg = libconfig()
+    cfg.read('test_config.cfg')
+    print cfg
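
To illustrate what the parser produces, a small example with an assumed config fragment written to a temporary file (values are made up):

    import tempfile
    from yt.utilities.pyparselibconfig import libconfig

    sample = """Internal:
    {
        Provenance:
        {
            VersionNumber = 3.0;
        };
    };
    """
    with tempfile.NamedTemporaryFile(mode='w', suffix='.cfg', delete=False) as tmp:
        tmp.write(sample)
    cfg = libconfig(tmp.name)
    cfg["Internal"]["Provenance"]["VersionNumber"]   # -> 3.0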

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -163,6 +163,7 @@
                          "yt/utilities/data_point_utilities.c",
                          libraries=["m"])
     config.add_subpackage("tests")
+    config.add_subpackage("pyparselibconfig")
     config.make_config_py()  # installs __config__.py
     # config.make_svn_version_py()
     return config

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -163,7 +163,7 @@
     return center
 
 def get_window_parameters(axis, center, width, pf):
-    if pf.geometry == "cartesian" or pf.geometry == "ppv":
+    if pf.geometry == "cartesian" or pf.geometry == "spectral_cube":
         width = get_sanitized_width(axis, width, None, pf)
         center = get_sanitized_center(center, pf)
     elif pf.geometry in ("polar", "cylindrical"):
@@ -742,7 +742,7 @@
             else:
                 (unit_x, unit_y) = self._axes_unit_names
 
-            # For some plots we may set aspect by hand, such as for PPV data.
+            # For some plots we may set aspect by hand, such as for spectral cube data.
             # This will likely be replaced at some point by the coordinate handler
             # setting plot aspect.
             if self.aspect is None:
@@ -761,7 +761,7 @@
 
             image = self.frb[f]
 
-            if image.max() == image.min():
+            if image.max() == image.min() and zlim == (None, None):
                 if self._field_transform[f] == log_transform:
                     mylog.warning("Plot image for field %s has zero dynamic "
                                   "range. Min = Max = %d." % (f, image.max()))
@@ -832,12 +832,27 @@
                 axis_names = self.pf.coordinates.axis_name
                 xax = self.pf.coordinates.x_axis[axis_index]
                 yax = self.pf.coordinates.y_axis[axis_index]
+
                 if hasattr(self.pf.coordinates, "axis_default_unit_label"):
                     axes_unit_labels = [self.pf.coordinates.axis_default_unit_name[xax],
                                         self.pf.coordinates.axis_default_unit_name[yax]]
                 labels = [r'$\rm{'+axis_names[xax]+axes_unit_labels[0] + r'}$',
                           r'$\rm{'+axis_names[yax]+axes_unit_labels[1] + r'}$']
 
+                if hasattr(self.pf.coordinates, "axis_field"):
+                    if xax in self.pf.coordinates.axis_field:
+                        xmin, xmax = self.pf.coordinates.axis_field[xax](0,
+                                                                         self.xlim, self.ylim)
+                    else:
+                        xmin, xmax = [float(x) for x in extentx]
+                    if yax in self.pf.coordinates.axis_field:
+                        ymin, ymax = self.pf.coordinates.axis_field[yax](1,
+                                                                         self.xlim, self.ylim)
+                    else:
+                        ymin, ymax = [float(y) for y in extenty]
+                    self.plots[f].image.set_extent((xmin,xmax,ymin,ymax))
+                    self.plots[f].axes.set_aspect("auto")
+
             self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -750,6 +750,8 @@
             self._setup_plots()
         if mpl_kwargs is None:
             mpl_kwargs = {}
+        if name is None:
+            name = str(self.profile.pf)
         xfn = self.profile.x_field
         yfn = self.profile.y_field
         if isinstance(xfn, types.TupleType):
@@ -761,18 +763,19 @@
             if isinstance(f, types.TupleType):
                 _f = _f[1]
             middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
-            if name is None:
-                prefix = self.profile.pf
-            if name[-1] == os.sep and not os.path.isdir(name):
-                os.mkdir(name)
-            if os.path.isdir(name) and name != str(self.pf):
-                prefix = name + (os.sep if name[-1] != os.sep else '') + str(self.pf)
+            splitname = os.path.split(name)
+            if splitname[0] != '' and not os.path.isdir(splitname[0]):
+                os.makedirs(splitname[0])
+            if os.path.isdir(name) and name != str(self.profile.pf):
+                prefix = name + (os.sep if name[-1] != os.sep else '')
+                prefix += str(self.profile.pf)
+            else:
+                prefix = name
             suffix = get_image_suffix(name)
             if suffix != '':
                 for k, v in self.plots.iteritems():
                     names.append(v.save(name, mpl_kwargs))
                 return names
-
             fn = "%s_%s%s" % (prefix, middle, suffix)
             names.append(fn)
             self.plots[f].save(fn, mpl_kwargs)

diff -r 31ddfe2993541dd72293427ed77d803981274d06 -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -558,6 +558,7 @@
         # Set TF limits based on what is visible
         visible = np.argwhere(self.alpha.y > 1.0e-3*self.alpha.y.max())
 
+
         # Display colobar values
         xticks = np.arange(np.ceil(self.alpha.x[0]), np.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
         xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
@@ -569,7 +570,12 @@
         xticks = np.append(visible[-1], xticks)
         ax.yaxis.set_ticks(xticks)
         def x_format(x, pos):
-            return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
+            val = x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0]
+            if abs(val) < 1.e-3 or abs(val) > 1.e4:
+                e = np.floor(np.log10(abs(val)))
+                return r"${:.2f}\times 10^{:d}$".format(val/10.0**e, int(e))
+            else:
+                return "%.1g" % (val)
         ax.yaxis.set_major_formatter(FuncFormatter(x_format))
 
         yticks = np.linspace(0,1,2,endpoint=True) * max_alpha
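
Pulled out of the class for illustration, the new tick formatter branch emits LaTeX-style scientific notation only for very small or very large values (toy inputs):

    import numpy as np

    def fmt(val):
        if abs(val) < 1.e-3 or abs(val) > 1.e4:
            e = np.floor(np.log10(abs(val)))
            return r"${:.2f}\times 10^{:d}$".format(val / 10.0**e, int(e))
        return "%.1g" % val

    fmt(2.5e-6)   # -> '$2.50\times 10^-6$'
    fmt(3.0)      # -> '3'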


https://bitbucket.org/yt_analysis/yt/commits/2226c74a9358/
Changeset:   2226c74a9358
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-23 01:14:59
Summary:     Adding a filtered bbox call that is really only used for r* halo catalogs.
Affected #:  1 file

diff -r 20bdd5b4b451ca157dadeda92ead9c8f811b8c35 -r 2226c74a935855d9f89bc4b1aceceefe80ff804b yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -19,6 +19,7 @@
 import numpy as np
 from yt.funcs import *
 from yt.utilities.exceptions import *
+from yt.units.yt_array import YTArray
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -745,6 +746,45 @@
 
             yield filtered
 
+    def iter_filtered_bbox_fields(self, left, right, data,
+                                  pos_fields, fields):
+        """
+        This function should be destroyed, as it will only work with units.
+        """
+
+        kpcuq = left.in_units('kpccm').uq
+        mpcuq = left.in_units('Mpc').uq
+        DW = (self.true_domain_width * kpcuq).in_units('Mpc')
+        if pos_fields is None:
+            pos_fields = 'x','y','z'
+        xf, yf, zf = pos_fields
+        print pos_fields
+
+        mask = np.zeros_like(data, dtype='bool')
+        # I'm sorry.
+        pos = mpcuq * np.array([data[xf].in_units('Mpc'), data[yf].in_units('Mpc'), data[zf].in_units('Mpc')]).T
+
+        # This hurts, but is useful for periodicity. Probably should check first
+        # if it is even needed for a given left/right
+        for i in range(3):
+            pos[:,i] = np.mod(pos[:, i] - left[i], DW[i]) + left[i]
+
+        print left, right, pos.min(axis=0), pos.max(axis=0)
+        # Now get all particles that are within the bbox
+        mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
+
+        mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
+
+        if np.any(mask):
+            for i,f in enumerate(pos_fields):
+                yield f, pos[:, i][mask]
+
+            for f in fields:
+                if f in pos_fields:
+                    continue
+                print 'yielding nonpos field', f
+                yield f, data[f][mask]
+
     def iter_bbox_data(self, left, right, fields):
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
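
The np.mod shift above is what lets a requested bbox straddle the periodic boundary; a 1-d toy version with plain floats (no units, made-up numbers):

    import numpy as np
    DW, left = 100.0, 90.0                   # domain width, bbox left edge
    x = np.array([5.0, 95.0, 50.0])          # raw positions in [0, DW)
    shifted = np.mod(x - left, DW) + left    # -> [105., 95., 150.]
    # The particle at 5.0 is re-imaged to 105.0, so a selection such as
    # [90, 110) catches it across the boundary; 50.0 maps outside and is dropped.
    mask = (shifted >= 90.0) & (shifted < 110.0)   # -> [True, True, False]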


https://bitbucket.org/yt_analysis/yt/commits/f8cd907c73a7/
Changeset:   f8cd907c73a7
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-23 15:05:45
Summary:     get_ibbox fix: pad the mgrid stop values so the inclusive right edge survives the trailing slice, and use Mpc/h units in the filtered bbox iterator
Affected #:  1 file

diff -r 2226c74a935855d9f89bc4b1aceceefe80ff804b -r f8cd907c73a7c1d0b17b78b861c5cdb47952d96d yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -560,9 +560,10 @@
         ix, iy, iz = (iright-ileft)*1j
         #print 'IBBOX:', ileft, iright, ix, iy, iz
 
-        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1,
-                           ileft[1]:iright[1]+1,
-                           ileft[0]:iright[0]+1]
+        # plus 1 that is sliced, plus a bit since mgrid is not inclusive
+        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1.01,
+                           ileft[1]:iright[1]+1.01,
+                           ileft[0]:iright[0]+1.01]
 
         mask = slice(0, -1, None)
         X = X[mask, mask, mask].astype('int32').ravel()
@@ -753,8 +754,8 @@
         """
 
         kpcuq = left.in_units('kpccm').uq
-        mpcuq = left.in_units('Mpc').uq
-        DW = (self.true_domain_width * kpcuq).in_units('Mpc')
+        mpcuq = left.in_units('Mpc/h').uq
+        DW = (self.true_domain_width * kpcuq).in_units('Mpc/h')
         if pos_fields is None:
             pos_fields = 'x','y','z'
         xf, yf, zf = pos_fields
@@ -762,7 +763,7 @@
 
         mask = np.zeros_like(data, dtype='bool')
         # I'm sorry.
-        pos = mpcuq * np.array([data[xf].in_units('Mpc'), data[yf].in_units('Mpc'), data[zf].in_units('Mpc')]).T
+        pos = mpcuq * np.array([data[xf].in_units('Mpc/h'), data[yf].in_units('Mpc/h'), data[zf].in_units('Mpc/h')]).T
 
         # This hurts, but is useful for periodicity. Probably should check first
         # if it is even needed for a given left/right
@@ -782,7 +783,7 @@
             for f in fields:
                 if f in pos_fields:
                     continue
-                print 'yielding nonpos field', f
+                # print 'yielding nonpos field', f
                 yield f, data[f][mask]
 
     def iter_bbox_data(self, left, right, fields):
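
The "+1.01" above works around the fact that np.mgrid, like arange, excludes
the stop value when given a real step; the padded stop keeps the upper index in
the grid, and the slice(0, -1) mask that follows trims it back to the inclusive
range ileft..iright. A quick illustration of two ways to get an inclusive grid:

    import numpy as np

    print(np.mgrid[0:3])       # [0 1 2]        -- stop value excluded
    print(np.mgrid[0:3.01])    # [0. 1. 2. 3.]  -- padded stop keeps the endpoint
    print(np.mgrid[0:3:4j])    # [0. 1. 2. 3.]  -- complex step = point count, inclusive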


https://bitbucket.org/yt_analysis/yt/commits/da797e7980ac/
Changeset:   da797e7980ac
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-24 03:06:49
Summary:     Implementing a first pass at HTTP SDF loading. Data access is still not quite working due to an incompatibility between ndarray and the RedirectArray.
Affected #:  2 files

diff -r f8cd907c73a7c1d0b17b78b861c5cdb47952d96d -r da797e7980acea0ae507bd8a6d47fc06c245d5d9 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -38,7 +38,9 @@
 from .io import \
     IOHandlerSDF, \
     SDFRead,\
-    SDFIndex
+    SDFIndex,\
+    HTTPSDFRead
+
 
 # currently specified by units_2HOT == 2 in header
 # in future will read directly from file
@@ -84,14 +86,23 @@
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
-        self.sdf_container = SDFRead(self.parameter_filename,
-                                     header=self.sdf_header)
+        if 'http' in self.parameter_filename:
+            self.sdf_container = HTTPSDFRead(self.parameter_filename,
+                                             header=self.sdf_header)
+        else:
+            self.sdf_container = SDFRead(self.parameter_filename,
+                                         header=self.sdf_header)
+
         # Reference
         self.parameters = self.sdf_container.parameters
         self.dimensionality = 3
         self.refine_by = 2
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        try:
+            self.unique_identifier = \
+                int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        except:
+            self.unique_identifier = time.time()
+
 
         if None in (self.domain_left_edge, self.domain_right_edge):
             R0 = self.parameters['R0']

diff -r f8cd907c73a7c1d0b17b78b861c5cdb47952d96d -r da797e7980acea0ae507bd8a6d47fc06c245d5d9 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -20,6 +20,9 @@
 from yt.funcs import *
 from yt.utilities.exceptions import *
 from yt.units.yt_array import YTArray
+from httpmmap import HTTPArray
+from arbitrary_page import PageCacheURL 
+import cStringIO
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -107,7 +110,7 @@
         return morton
 
     def _count_particles(self, data_file):
-        return {'dark_matter': self._handle['x'].size}
+        return {'dark_matter': self._handle['x'].http_array.shape}
 
     def _identify_fields(self, data_file):
         fields = [("dark_matter", v) for v in self._handle.keys()]
@@ -277,6 +280,51 @@
         for k in self.dtype.names:
             self.data[k] = self.handle[k]
 
+
+class RedirectArray(object):
+    """docstring for RedirectArray"""
+    def __init__(self, http_array, key):
+        self.http_array = http_array
+        self.key = key
+        self.size = http_array.shape
+        self.dtype = http_array.dtype[key]
+
+    def __getitem__(self, sl):
+        if isinstance(sl, int):
+            sl = slice(sl, sl+1)
+            return self.http_array[sl][self.key][0]
+        return self.http_array[sl][self.key]
+
+class HTTPDataStruct(DataStruct):
+    """docstring for HTTPDataStruct"""
+
+    def __init__(self, *args, **kwargs):
+        super(HTTPDataStruct, self).__init__(*args, **kwargs)
+        self.pcu = PageCacheURL(self.filename)
+
+    def set_offset(self, offset):
+        self._offset = offset
+        if self.size == -1:
+            # Read small piece:
+            file_size = self.pcu.total_size
+            file_size -= offset
+            self.size = float(file_size) / self.itemsize
+            assert(int(self.size) == self.size)
+
+    def build_redirect_func(self, key):
+        def redirect(sl):
+            return self.handle[sl][key]
+        return redirect
+
+    def build_memmap(self):
+        assert(self.size != -1)
+        print 'Building memmap with offset: %i' % self._offset 
+        self.handle = HTTPArray(self.filename, dtype=self.dtype,
+                        shape=self.size, offset=self._offset)
+        for k in self.dtype.names:
+            self.data[k] = RedirectArray(self.handle, k)
+
+
 class SDFRead(dict):
 
     """docstring for SDFRead"""
@@ -376,6 +424,65 @@
             self.update(struct.data)
 
 
+class HTTPSDFRead(SDFRead):
+
+    """docstring for SDFRead"""
+
+    _eof = 'SDF-EOH'
+
+    def __init__(self, filename, header=None):
+        self.filename = filename
+        if header is None:
+            header = filename
+        self.header = header
+        self.parameters = {}
+        self.structs = []
+        self.comments = []
+        self.parse_header()
+        self.set_offsets()
+        self.load_memmaps()
+
+    def parse_header(self):
+        """docstring for parse_header"""
+        # Pre-process
+        ascfile = HTTPArray(self.header)
+        max_header_size = 1024*1024
+        lines = cStringIO.StringIO(ascfile[:max_header_size].data[:])
+        while True:
+            l = lines.readline()
+            if self._eof in l: break
+
+            self.parse_line(l, lines)
+
+        hoff = lines.tell()
+        if self.header != self.filename:
+            hoff = 0
+        self.parameters['header_offset'] = hoff
+
+    def parse_struct(self, line, ascfile):
+        assert 'struct' in line
+
+        str_types = []
+        comments = []
+        str_lines = []
+        l = ascfile.readline()
+        while "}" not in l:
+            vtype, vnames = get_struct_vars(l)
+            for v in vnames:
+                str_types.append((v, vtype))
+            l = ascfile.readline()
+        num = l.strip("}[]")
+        num = num.strip("\;\\\n]")
+        if len(num) == 0:
+            # We need to compute the number of records.  The DataStruct will
+            # handle this.
+            num = '-1'
+        num = int(num)
+        struct = HTTPDataStruct(str_types, num, self.filename)
+        self.structs.append(struct)
+        return
+
+
 class SDFIndex(object):
 
     """docstring for SDFIndex

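The RedirectArray added here exposes a single field of the remote HTTPArray
handle and only touches the underlying array when it is indexed. A local sketch
of that pattern, using an ordinary NumPy structured array in place of the
remote handle (the ColumnView name and the sample data are illustrative only):

    import numpy as np

    class ColumnView(object):
        # Expose one field of a structured array, reading lazily on indexing.
        def __init__(self, handle, key):
            self.handle = handle
            self.key = key
            self.size = handle.shape[0]
            self.dtype = handle.dtype[key]

        def __getitem__(self, sl):
            if isinstance(sl, int):
                # Return a scalar for integer indexing, as an ndarray column would.
                return self.handle[slice(sl, sl + 1)][self.key][0]
            return self.handle[sl][self.key]

    dt = np.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
    handle = np.zeros(8, dtype=dt)
    handle['x'] = np.arange(8.0)

    x = ColumnView(handle, 'x')
    print(x[2], x[2:5])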

https://bitbucket.org/yt_analysis/yt/commits/8feaa0de2ed7/
Changeset:   8feaa0de2ed7
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-28 20:07:29
Summary:     First pass at HTTP loading.
Affected #:  2 files

diff -r da797e7980acea0ae507bd8a6d47fc06c245d5d9 -r 8feaa0de2ed74a0526a18d2ebac475f7bfeb2142 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -83,6 +83,8 @@
         self.idx_level = idx_level
         if self.idx_filename is not None:
             dataset_type = 'sindex_sdf_particles'
+        if 'http' in filename:
+            dataset_type = 'http_sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):

diff -r da797e7980acea0ae507bd8a6d47fc06c245d5d9 -r 8feaa0de2ed74a0526a18d2ebac475f7bfeb2142 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -31,7 +31,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
-from particle_filters import bbox_filter, sphere_filter
+from .particle_filters import bbox_filter, sphere_filter
 CHUNKSIZE = 32**3
 
 class IOHandlerSDF(BaseIOHandler):
@@ -117,6 +117,58 @@
         fields.append(("dark_matter", "mass"))
         return fields, {}
 
+class IOHandlerHTTPSDF(IOHandlerSDF):
+    _dataset_type = "http_sdf_particles"
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'][:pcount], self._handle['y'][:pcount], self._handle['z'][:pcount])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x'][:pcount]
+                y = self._handle['y'][:pcount]
+                z = self._handle['z'][:pcount]
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        if 'particle_mass' in self.parameters:
+                            data = np.ones(mask.sum(), dtype="float64")
+                            data *= self.pf.parameters["particle_mass"]
+                        elif 'm200b' in self._handle.keys():
+                            data = self._handle[field]['m200b'][mask]
+                        else:
+                            raise KeyError
+                    else:
+                        data = self._handle[field][mask]
+                    yield (ptype, field), data
+
+    def _count_particles(self, data_file):
+        return {'dark_matter': self._handle['x'].http_array.shape}
+
+
 class IOHandlerSIndexSDF(IOHandlerSDF):
     _dataset_type = "sindex_sdf_particles"
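
The new HTTP IO handler reads whole coordinate columns, builds a selection
mask, and then falls back from a global particle_mass parameter to a
per-particle m200b column when asked for the "mass" field. A toy sketch of that
fallback, with dictionaries and a crude cut standing in for the dataset
parameters, the HTTPArray handle, and yt's geometry selector:

    import numpy as np

    rng = np.random.RandomState(0)
    handle = {ax: rng.uniform(0.0, 1.0, 100) for ax in 'xyz'}
    handle['m200b'] = rng.uniform(1e10, 1e12, 100)
    parameters = {}          # no global 'particle_mass' in this example

    # Stand-in for selector.select_points(x, y, z, 0.0).
    mask = (handle['x'] < 0.5) & (handle['y'] < 0.5) & (handle['z'] < 0.5)

    if 'particle_mass' in parameters:
        mass = np.ones(mask.sum(), dtype='float64') * parameters['particle_mass']
    elif 'm200b' in handle:
        mass = handle['m200b'][mask]
    else:
        raise KeyError("no particle mass information available")
    print(mass.size)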
 


https://bitbucket.org/yt_analysis/yt/commits/af71f6a207fc/
Changeset:   af71f6a207fc
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-28 21:02:11
Summary:     Moving the SDF I/O machinery into yt/utilities/.
Affected #:  4 files

diff -r 8feaa0de2ed74a0526a18d2ebac475f7bfeb2142 -r af71f6a207fc7667db9eaeec8062e56662edbd49 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -36,7 +36,8 @@
 from .fields import \
     SDFFieldInfo
 from .io import \
-    IOHandlerSDF, \
+    IOHandlerSDF
+from yt.utilities.sdf import \
     SDFRead,\
     SDFIndex,\
     HTTPSDFRead

diff -r 8feaa0de2ed74a0526a18d2ebac475f7bfeb2142 -r af71f6a207fc7667db9eaeec8062e56662edbd49 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -14,15 +14,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
-import h5py
 import numpy as np
 from yt.funcs import *
 from yt.utilities.exceptions import *
 from yt.units.yt_array import YTArray
-from httpmmap import HTTPArray
-from arbitrary_page import PageCacheURL 
-import cStringIO
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -31,7 +26,6 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
-from .particle_filters import bbox_filter, sphere_filter
 CHUNKSIZE = 32**3
 
 class IOHandlerSDF(BaseIOHandler):
@@ -261,949 +255,4 @@
         return fields, {}
 
 
-import re
-import os
 
-_types = {
-    'int': 'int32',
-    'int64_t': 'int64',
-    'float': 'float32',
-    'double': 'float64',
-    'unsigned int': 'I',
-    'unsigned char': 'B',
-}
-
-def get_type(vtype, len=None):
-    try:
-        t = _types[vtype]
-        if len is not None:
-            t = np.dtype((t, len))
-        else:
-            t = np.dtype(t)
-    except KeyError:
-        t = eval("np."+vtype)
-    return t
-
-def lstrip(text_list):
-    return [t.strip() for t in text_list]
-
-def get_struct_vars(line):
-    spl = lstrip(line.split(";"))
-    multiv = lstrip(spl[0].split(","))
-    ret = lstrip(multiv[0].split())
-    ctype = ret[0]
-    vnames = [ret[-1]] + multiv[1:]
-    vnames = [v.strip() for v in vnames]
-    for vtype in ret[1:-1]:
-        ctype += ' ' + vtype
-    num = None
-    if len(vnames) == 1:
-        if '[' in vnames[0]:
-            num = int(vnames[0].split('[')[-1].strip(']'))
-            #num = int(re.sub("\D", "", vnames[0]))
-    ctype = get_type(ctype, len=num)
-    return ctype, vnames
-
-class DataStruct(object):
-    """docstring for DataStruct"""
-
-    _offset = 0
-
-    def __init__(self, dtypes, num, filename):
-        self.filename = filename
-        self.dtype = np.dtype(dtypes)
-        self.size = num
-        self.itemsize = self.dtype.itemsize
-        self.data = {}
-        self.handle = None
-
-    def set_offset(self, offset):
-        self._offset = offset
-        if self.size == -1:
-            file_size = os.path.getsize(self.filename)
-            file_size -= offset
-            self.size = float(file_size) / self.itemsize
-            assert(int(self.size) == self.size)
-
-    def build_memmap(self):
-        assert(self.size != -1)
-        self.handle = np.memmap(self.filename, dtype=self.dtype,
-                        mode='r', shape=self.size, offset=self._offset)
-        for k in self.dtype.names:
-            self.data[k] = self.handle[k]
-
-
-class RedirectArray(object):
-    """docstring for RedirectArray"""
-    def __init__(self, http_array, key):
-        self.http_array = http_array
-        self.key = key
-        self.size = http_array.shape
-        self.dtype = http_array.dtype[key]
-
-    def __getitem__(self, sl):
-        if isinstance(sl, int):
-            sl = slice(sl, sl+1)
-            return self.http_array[sl][self.key][0]
-        return self.http_array[sl][self.key]
-
-class HTTPDataStruct(DataStruct):
-    """docstring for HTTPDataStruct"""
-
-    def __init__(self, *args, **kwargs):
-        super(HTTPDataStruct, self).__init__(*args, **kwargs)
-        self.pcu = PageCacheURL(self.filename)
-
-    def set_offset(self, offset):
-        self._offset = offset
-        if self.size == -1:
-            # Read small piece:
-            file_size = self.pcu.total_size
-            file_size -= offset
-            self.size = float(file_size) / self.itemsize
-            assert(int(self.size) == self.size)
-
-    def build_redirect_func(self, key):
-        def redirect(sl):
-            return self.handle[sl][key]
-        return redirect
-
-    def build_memmap(self):
-        assert(self.size != -1)
-        print 'Building memmap with offset: %i' % self._offset 
-        self.handle = HTTPArray(self.filename, dtype=self.dtype,
-                        shape=self.size, offset=self._offset)
-        for k in self.dtype.names:
-            self.data[k] = RedirectArray(self.handle, k)
-
-
-class SDFRead(dict):
-
-    """docstring for SDFRead"""
-
-    _eof = 'SDF-EOH'
-
-    def __init__(self, filename, header=None):
-        self.filename = filename
-        if header is None:
-            header = filename
-        self.header = header
-        self.parameters = {}
-        self.structs = []
-        self.comments = []
-        self.parse_header()
-        self.set_offsets()
-        self.load_memmaps()
-
-    def parse_header(self):
-        """docstring for parse_header"""
-        # Pre-process
-        ascfile = open(self.header, 'r')
-        while True:
-            l = ascfile.readline()
-            if self._eof in l: break
-
-            self.parse_line(l, ascfile)
-
-        hoff = ascfile.tell()
-        ascfile.close()
-        if self.header != self.filename:
-            hoff = 0
-        self.parameters['header_offset'] = hoff
-
-    def parse_line(self, line, ascfile):
-        """Parse a line of sdf"""
-
-
-        if 'struct' in line:
-            self.parse_struct(line, ascfile)
-            return
-
-        if "#" in line:
-            self.comments.append(line)
-            return
-
-        spl = lstrip(line.split("="))
-        vtype, vname = lstrip(spl[0].split())
-        vname = vname.strip("[]")
-        vval = spl[-1].strip(";")
-        if vtype == 'parameter':
-            self.parameters[vname] = vval
-            return
-        elif vtype == "char":
-            vtype = "str"
-
-        try:
-            vval = eval("np."+vtype+"(%s)" % vval)
-        except AttributeError:
-            vval = eval("np."+_types[vtype]+"(%s)" % vval)
-
-        self.parameters[vname] = vval
-
-    def parse_struct(self, line, ascfile):
-        assert 'struct' in line
-
-        str_types = []
-        comments = []
-        str_lines = []
-        l = ascfile.readline()
-        while "}" not in l:
-            vtype, vnames = get_struct_vars(l)
-            for v in vnames:
-                str_types.append((v, vtype))
-            l = ascfile.readline()
-        num = l.strip("}[]")
-        num = num.strip("\;\\\n]")
-        if len(num) == 0:
-            # We need to compute the number of records.  The DataStruct will
-            # handle this.
-            num = '-1'
-        num = int(num)
-        struct = DataStruct(str_types, num, self.filename)
-        self.structs.append(struct)
-        return
-
-    def set_offsets(self):
-        running_off = self.parameters['header_offset']
-        for struct in self.structs:
-            struct.set_offset(running_off)
-            running_off += struct.size * struct.itemsize
-        return
-
-    def load_memmaps(self):
-        for struct in self.structs:
-            struct.build_memmap()
-            self.update(struct.data)
-
-
-class HTTPSDFRead(SDFRead):
-
-    """docstring for SDFRead"""
-
-    _eof = 'SDF-EOH'
-
-    def __init__(self, filename, header=None):
-        self.filename = filename
-        if header is None:
-            header = filename
-        self.header = header
-        self.parameters = {}
-        self.structs = []
-        self.comments = []
-        self.parse_header()
-        self.set_offsets()
-        self.load_memmaps()
-
-    def parse_header(self):
-        """docstring for parse_header"""
-        # Pre-process
-        ascfile = HTTPArray(self.header)
-        max_header_size = 1024*1024
-        lines = cStringIO.StringIO(ascfile[:max_header_size].data[:])
-        while True:
-            l = lines.readline()
-            if self._eof in l: break
-
-            self.parse_line(l, lines)
-
-        hoff = lines.tell()
-        if self.header != self.filename:
-            hoff = 0
-        self.parameters['header_offset'] = hoff
-
-    def parse_struct(self, line, ascfile):
-        assert 'struct' in line
-
-        str_types = []
-        comments = []
-        str_lines = []
-        l = ascfile.readline()
-        while "}" not in l:
-            vtype, vnames = get_struct_vars(l)
-            for v in vnames:
-                str_types.append((v, vtype))
-            l = ascfile.readline()
-        num = l.strip("}[]")
-        num = num.strip("\;\\\n]")
-        if len(num) == 0:
-            # We need to compute the number of records.  The DataStruct will
-            # handle this.
-            num = '-1'
-        num = int(num)
-        struct = HTTPDataStruct(str_types, num, self.filename)
-        self.structs.append(struct)
-        return
-
-
-class SDFIndex(object):
-
-    """docstring for SDFIndex
-
-    This provides an index mechanism into the full SDF Dataset.
-
-    Most useful class methods:
-        get_cell_data(level, cell_iarr, fields)
-        iter_bbox_data(left, right, fields)
-        iter_bbox_data(left, right, fields)
-
-    """
-    def __init__(self, sdfdata, indexdata, level=9):
-        super(SDFIndex, self).__init__()
-        self.sdfdata = sdfdata
-        self.indexdata = indexdata
-        self.level = level
-        self.rmin = None
-        self.rmax = None
-        self.domain_width = None
-        self.domain_buffer = 0
-        self.domain_dims = 0
-        self.domain_active_dims = 0
-        self.wandering_particles = True
-        self.valid_indexdata = True
-        self.masks = {
-            "p" : int("011"*level, 2),
-            "t" : int("101"*level, 2),
-            "r" : int("110"*level, 2),
-            "z" : int("011"*level, 2),
-            "y" : int("101"*level, 2),
-            "x" : int("110"*level, 2),
-            2 : int("011"*level, 2),
-            1 : int("101"*level, 2),
-            0 : int("110"*level, 2),
-        }
-        self.dim_slices = {
-            "p" : slice(0, None, 3),
-            "t" : slice(1, None, 3),
-            "r" : slice(2, None, 3),
-            "z" : slice(0, None, 3),
-            "y" : slice(1, None, 3),
-            "x" : slice(2, None, 3),
-            2 : slice(0, None, 3),
-            1 : slice(1, None, 3),
-            0 : slice(2, None, 3),
-        }
-        self.set_bounds()
-
-    def set_bounds(self):
-        r_0 = self.sdfdata.parameters['R0']
-        DW = 2.0 * r_0
-
-        self.rmin = np.zeros(3)
-        self.rmax = np.zeros(3)
-        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
-        if sorted_rtp:
-            self.rmin[:] = [0.0, 0.0, -np.pi]
-            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
-        else:
-            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
-            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
-            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
-            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
-            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
-            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
-
-        self.rmin *= self.sdfdata.parameters.get("a", 1.0)
-        self.rmax *= self.sdfdata.parameters.get("a", 1.0)
-
-        #/* expand root for non-power-of-two */
-        expand_root = 0.0
-        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
-        if ic_Nmesh != 0:
-            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
-            if (f2 != ic_Nmesh):
-                expand_root = 1.0*f2/ic_Nmesh - 1.0;
-            mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
-        self.true_domain_left = self.rmin.copy()
-        self.true_domain_right = self.rmax.copy()
-        self.true_domain_width = self.rmax - self.rmin
-        self.rmin *= 1.0 + expand_root
-        self.rmax *= 1.0 + expand_root
-        self.domain_width = self.rmax - self.rmin
-        self.domain_dims = 1 << self.level
-        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
-        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
-        mylog.debug("SINDEX: %s, %s, %s " % (self.domain_width, self.domain_dims, self.domain_active_dims))
-
-    def spread_bits(self, ival, level=None):
-        if level is None:
-            level = self.level
-        res = 0
-        for i in range(level):
-            res |= ((ival>>i)&1)<<(i*3);
-        return res
-
-    def get_key(self, iarr, level=None):
-        if level is None:
-            level = self.level
-        i1, i2, i3 = iarr
-        return self.spread_bits(i1, level) | self.spread_bits(i2, level) << 1 | self.spread_bits(i3, level) << 2
-
-    def spread_bitsv(self, ival, level=None):
-        if level is None:
-            level = self.level
-        res = np.zeros_like(ival, dtype='int64')
-        for i in range(level):
-            res |= np.bitwise_and((ival>>i), 1)<<(i*3);
-        return res
-
-    def get_keyv(self, iarr, level=None):
-        if level is None:
-            level = self.level
-        i1, i2, i3 = iarr
-        return np.bitwise_or(
-            np.bitwise_or(self.spread_bitsv(i1, level) , self.spread_bitsv(i2, level) << 1 ),
-            self.spread_bitsv(i3, level) << 2)
-
-    def get_key_slow(self, iarr, level=None):
-        if level is None:
-            level = self.level
-        i1, i2, i3 = iarr
-        rep1 = np.binary_repr(i1, width=self.level)
-        rep2 = np.binary_repr(i2, width=self.level)
-        rep3 = np.binary_repr(i3, width=self.level)
-        inter = np.zeros(self.level*3, dtype='c')
-        inter[self.dim_slices[0]] = rep1
-        inter[self.dim_slices[1]] = rep2
-        inter[self.dim_slices[2]] = rep3
-        return int(inter.tostring(), 2)
-
-    def get_key_ijk(self, i1, i2, i3, level=None):
-        return self.get_key(np.array([i1, i2, i3]), level=level)
-
-    def get_slice_key(self, ind, dim='r'):
-        slb = np.binary_repr(ind, width=self.level)
-        expanded = np.array([0]*self.level*3, dtype='c')
-        expanded[self.dim_slices[dim]] = slb
-        return int(expanded.tostring(), 2)
-
-    def get_ind_from_key(self, key, dim='r'):
-        ind = [0,0,0]
-        br = np.binary_repr(key, width=self.level*3)
-        for dim in range(3):
-            ind[dim] = int(br[self.dim_slices[dim]],2)
-        return ind
-
-    def get_slice_chunks(self, slice_dim, slice_index):
-        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
-        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
-        offsets = self.indexdata['base'][mask]
-        lengths = self.indexdata['len'][mask]
-        return mask, offsets, lengths
-
-    def get_ibbox_slow(self, ileft, iright):
-        """
-        Given left and right indicies, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-        ileft = np.array(ileft)
-        iright = np.array(iright)
-        for i in range(3):
-            left_key = self.get_slice_key(ileft[i], dim=i)
-            right_key= self.get_slice_key(iright[i], dim=i)
-            dim_inds = (self.indexdata['index'] & ~self.masks[i])
-            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
-            del dim_inds
-
-        offsets = self.indexdata['base'][mask]
-        lengths = self.indexdata['len'][mask]
-        return mask, offsets, lengths
-
-    def get_ibbox(self, ileft, iright, wandering_particles=True):
-        """
-        Given left and right indicies, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        #print 'Getting data from ileft to iright:',  ileft, iright
-
-        ix, iy, iz = (iright-ileft)*1j
-        #print 'IBBOX:', ileft, iright, ix, iy, iz
-
-        # plus 1 that is sliced, plus a bit since mgrid is not inclusive
-        Z, Y, X = np.mgrid[ileft[2]:iright[2]+1.01,
-                           ileft[1]:iright[1]+1.01,
-                           ileft[0]:iright[0]+1.01]
-
-        mask = slice(0, -1, None)
-        X = X[mask, mask, mask].astype('int32').ravel()
-        Y = Y[mask, mask, mask].astype('int32').ravel()
-        Z = Z[mask, mask, mask].astype('int32').ravel()
-
-        if self.wandering_particles:
-            # Need to get padded bbox around the border to catch
-            # wandering particles.
-            dmask = X < self.domain_buffer
-            dmask += Y < self.domain_buffer
-            dmask += Z < self.domain_buffer
-            dmask += X >= self.domain_dims
-            dmask += Y >= self.domain_dims
-            dmask += Z >= self.domain_dims
-            dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
-            dinds = dinds[dinds < self.indexdata['index'][-1]]
-            dinds = dinds[self.indexdata['len'][dinds] > 0]
-            #print 'Getting boundary layers for wanderers, cells: %i' % dinds.size
-
-        # Correct For periodicity
-        X[X < self.domain_buffer] += self.domain_active_dims
-        Y[Y < self.domain_buffer] += self.domain_active_dims
-        Z[Z < self.domain_buffer] += self.domain_active_dims
-        X[X >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
-        Y[Y >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
-        Z[Z >= self.domain_buffer + self.domain_active_dims] -= self.domain_active_dims
-
-        #print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
-
-        indices = self.get_keyv([X, Y, Z])
-        # Only mask out if we are actually getting data rather than getting indices into
-        # a space.
-        if self.valid_indexdata:
-            indices = indices[indices < self.indexdata['index'][-1]]
-            indices = indices[self.indexdata['len'][indices] > 0]
-
-        #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
-        # Here we sort the indices to batch consecutive reads together.
-        if self.wandering_particles:
-            indices = np.sort(np.append(indices, dinds))
-        else:
-            indices = np.sort(indices)
-        return indices
-
-    def get_bbox(self, left, right):
-        """
-        Given left and right indicies, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
-        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-        iright[iright <= ileft+1] += 1
-
-        return self.get_ibbox(ileft, iright)
-
-    def get_nparticles_bbox(self, left, right):
-        """
-        Given left and right edges, return total
-        number of particles present.
-        """
-        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
-        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-        indices = self.get_ibbox(ileft, iright)
-        npart = 0
-        for ind in indices:
-            npart += self.indexdata['len'][ind]
-        return npart
-
-    def get_data(self, chunk, fields):
-        data = {}
-        for field in fields:
-            data[field] = self.sdfdata[field][chunk]
-        return data
-
-    def get_next_nonzero_chunk(self, key, stop=None):
-        # These next two while loops are to squeeze the keys if they are empty. Would be better
-        # to go through and set base equal to the last non-zero base, i think.
-        if stop is None:
-            stop = self.indexdata['index'][-1]
-        while key < stop:
-            if self.indexdata['index'][key] == 0:
-                #print 'Squeezing keys, incrementing'
-                key += 1
-            else:
-                break
-        return key
-
-    def get_previous_nonzero_chunk(self, key, stop=None):
-        # These next two while loops are to squeeze the keys if they are empty. Would be better
-        # to go through and set base equal to the last non-zero base, i think.
-        if stop is None:
-            stop = self.indexdata['index'][0]
-        while key > stop:
-            #self.indexdata['index'][-1]:
-            if self.indexdata['index'][key] == 0:
-                #print 'Squeezing keys, decrementing'
-                key -= 1
-            else:
-                break
-        return key
-
-    def iter_data(self, inds, fields):
-        num_inds = len(inds)
-        num_reads = 0
-        mylog.debug('SINDEX Reading %i chunks' % num_inds)
-        i = 0
-        while (i < num_inds):
-            ind = inds[i]
-            base = self.indexdata['base'][ind]
-            length = self.indexdata['len'][ind]
-            # Concatenate aligned reads
-            nexti = i+1
-            combined = 0
-            while nexti < num_inds:
-                nextind = inds[nexti]
-                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
-                if combined < 1024 and base + length == self.indexdata['base'][nextind]:
-                    length += self.indexdata['len'][nextind]
-                    i += 1
-                    nexti += 1
-                    combined += 1
-                else:
-                    break
-
-            chunk = slice(base, base+length)
-            mylog.debug('Reading chunk %i of length %i after catting %i starting at %i' % (i, length, combined, ind))
-            num_reads += 1
-            if length > 0:
-                data = self.get_data(chunk, fields)
-                yield data
-                del data
-            i += 1
-        mylog.debug('Read %i chunks, batched into %i reads' % (num_inds, num_reads))
-
-    def filter_particles(self, myiter, myfilter):
-        for data in myiter:
-            mask = myfilter(data)
-
-            if mask.sum() == 0:
-                continue
-            filtered = {}
-            for f in data.keys():
-                filtered[f] = data[f][mask]
-
-            yield filtered
-
-    def filter_bbox(self, left, right, myiter):
-        """
-        Filter data by masking out data outside of a bbox defined
-        by left/right. Account for periodicity of data, allowing left/right
-        to be outside of the domain.
-        """
-        for data in myiter:
-            mask = np.zeros_like(data, dtype='bool')
-            pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T
-
-
-            # This hurts, but is useful for periodicity. Probably should check first
-            # if it is even needed for a given left/right
-            for i in range(3):
-                pos[:,i] = np.mod(pos[:,i] - left[i], self.true_domain_width[i]) + left[i]
-
-            # Now get all particles that are within the bbox
-            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
-
-            mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
-
-            if not np.any(mask):
-                continue
-
-            filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
-            for f in data.keys():
-                if f in 'xyz': continue
-                filtered[f] = data[f][mask]
-
-            #for i, ax in enumerate('xyz'):
-            #    print left, right
-            #    assert np.all(filtered[ax] >= left[i])
-            #    assert np.all(filtered[ax] < right[i])
-
-            yield filtered
-
-    def iter_filtered_bbox_fields(self, left, right, data,
-                                  pos_fields, fields):
-        """
-        This function should be destroyed, as it will only work with units.
-        """
-
-        kpcuq = left.in_units('kpccm').uq
-        mpcuq = left.in_units('Mpc/h').uq
-        DW = (self.true_domain_width * kpcuq).in_units('Mpc/h')
-        if pos_fields is None:
-            pos_fields = 'x','y','z'
-        xf, yf, zf = pos_fields
-        print pos_fields
-
-        mask = np.zeros_like(data, dtype='bool')
-        # I'm sorry.
-        pos = mpcuq * np.array([data[xf].in_units('Mpc/h'), data[yf].in_units('Mpc/h'), data[zf].in_units('Mpc/h')]).T
-
-        # This hurts, but is useful for periodicity. Probably should check first
-        # if it is even needed for a given left/right
-        for i in range(3):
-            pos[:,i] = np.mod(pos[:, i] - left[i], DW[i]) + left[i]
-
-        print left, right, pos.min(axis=0), pos.max(axis=0)
-        # Now get all particles that are within the bbox
-        mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
-
-        mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
-
-        if np.any(mask):
-            for i,f in enumerate(pos_fields):
-                yield f, pos[:, i][mask]
-
-            for f in fields:
-                if f in pos_fields:
-                    continue
-                # print 'yielding nonpos field', f
-                yield f, data[f][mask]
-
-    def iter_bbox_data(self, left, right, fields):
-        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
-        inds = self.get_bbox(left, right)
-
-        my_filter = bbox_filter(left, right, self.true_domain_width)
-
-        for dd in self.filter_particles(
-            self.iter_data(inds, fields),
-            my_filter):
-            yield dd
-
-    def iter_sphere_data(self, center, radius, fields):
-        mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
-        inds = self.get_bbox(center-radius, center+radius)
-
-        my_filter = sphere_filter(center, radius, self.true_domain_width)
-
-        for dd in self.filter_particles(
-            self.iter_data(inds, fields),
-            my_filter):
-            yield dd
-
-    def iter_ibbox_data(self, left, right, fields):
-        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
-        inds = self.get_ibbox(left, right)
-        return self.iter_data(inds, fields)
-
-    def get_contiguous_chunk(self, left_key, right_key, fields):
-        liarr = self.get_ind_from_key(left_key)
-        riarr = self.get_ind_from_key(right_key)
-
-        lbase=0
-        llen = 0
-        max_key = self.indexdata['index'][-1]
-        if left_key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        right_key = min(right_key, max_key)
-
-        left_key = self.get_next_nonzero_chunk(left_key)
-        right_key = self.get_previous_nonzero_chunk(right_key, left_key)
-
-        lbase = self.indexdata['base'][left_key]
-        llen = self.indexdata['len'][left_key]
-
-        rbase = self.indexdata['base'][right_key]
-        rlen = self.indexdata['len'][right_key]
-
-        length = rbase + rlen - lbase
-        if length > 0:
-            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, lbase))
-        return self.get_data(slice(lbase, lbase + length), fields)
-
-    def get_key_data(self, key, fields):
-        max_key = self.indexdata['index'][-1]
-        if key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, max_key))
-        base = self.indexdata['base'][key]
-        length = self.indexdata['len'][key] - base
-        if length > 0:
-            mylog.debug('Getting contiguous chunk of size %i starting at %i' % (length, base))
-        return self.get_data(slice(base, base + length), fields)
-
-    def iter_slice_data(self, slice_dim, slice_index, fields):
-        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
-        for off, l in zip(offsets, lengths):
-            data = {}
-            chunk = slice(off, off+l)
-            for field in fields:
-                data[field] = self.sdfdata[field][chunk]
-            yield data
-            del data
-
-    def get_key_bounds(self, level, cell_iarr):
-        """
-        Get index keys for index file supplied.
-
-        level: int
-            Requested level
-        cell_iarr: array-like, length 3
-            Requested cell from given level.
-
-        Returns:
-            lmax_lk, lmax_rk
-        """
-        shift = self.level-level
-        level_buff = 0
-        level_lk = self.get_key(cell_iarr + level_buff)
-        level_rk = self.get_key(cell_iarr + level_buff) + 1
-        lmax_lk = (level_lk << shift*3)
-        lmax_rk = (((level_rk) << shift*3) -1)
-        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
-        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
-        return lmax_lk, lmax_rk
-
-    def get_cell_data(self, level, cell_iarr, fields):
-        """
-        Get data from requested cell
-
-        This uses the raw cell index, and doesn't account for periodicity or
-        an expanded domain (non-power of 2).
-
-        level: int
-            Requested level
-        cell_iarr: array-like, length 3
-            Requested cell from given level.         fields: list
-            Requested fields
-
-        Returns:
-            cell_data: dict
-                Dictionary of field_name, field_data
-        """
-        cell_iarr = np.array(cell_iarr)
-        lk, rk =self.get_key_bounds(level, cell_iarr)
-        return self.get_contiguous_chunk(lk, rk, fields)
-
-    def get_cell_bbox(self, level, cell_iarr):
-        """Get floating point bounding box for a given sindex cell
-
-        Returns:
-            bbox: array-like, shape (3,2)
-
-        """
-        cell_iarr = np.array(cell_iarr)
-        cell_width = self.get_cell_width(level)
-        le = self.rmin + cell_iarr*cell_width
-        re = le+cell_width
-        bbox = np.array([le, re]).T
-        assert bbox.shape == (3, 2)
-        return bbox
-
-    def get_padded_bbox_data(self, level, cell_iarr, pad, fields):
-        """Get floating point bounding box for a given sindex cell
-
-        Returns:
-            bbox: array-like, shape (3,2)
-
-        """
-        bbox = self.get_cell_bbox(level, cell_iarr)
-        filter_left = bbox[:, 0] - pad
-        filter_right = bbox[:, 1] + pad
-
-        data = []
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            [self.get_cell_data(level, cell_iarr, fields)]):
-            data.append(dd)
-        #for dd in self.iter_bbox_data(bbox[:,0], bbox[:,1], fields):
-        #    data.append(dd)
-        #assert data[0]['x'].shape[0] > 0
-
-        # Bottom & Top
-        pbox = bbox.copy()
-        pbox[0, 0] -= pad[0]
-        pbox[0, 1] += pad[0]
-        pbox[1, 0] -= pad[1]
-        pbox[1, 1] += pad[1]
-        pbox[2, 0] -= pad[2]
-        pbox[2, 1] = bbox[2, 0]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
-        pbox[2, 0] = bbox[2, 1]
-        pbox[2, 1] = pbox[2, 0] + pad[2]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
-
-        # Front & Back
-        pbox = bbox.copy()
-        pbox[0, 0] -= pad[0]
-        pbox[0, 1] += pad[0]
-        pbox[1, 0] -= pad[1]
-        pbox[1, 1] = bbox[1, 0]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
-        pbox[1, 0] = bbox[1, 1]
-        pbox[1, 1] = pbox[1, 0] + pad[1]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
-
-        # Left & Right
-        pbox = bbox.copy()
-        pbox[0, 0] -= pad[0]
-        pbox[0, 1] = bbox[0, 0]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
-        pbox[0, 0] = bbox[0, 1]
-        pbox[0, 1] = pbox[0, 0] + pad[0]
-        for dd in self.filter_bbox(
-            filter_left, filter_right,
-            self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
-
-        return data
-
-    def get_cell_width(self, level):
-        return self.domain_width / 2**level
-
-    def iter_padded_bbox_keys(self, level, cell_iarr, pad):
-        """
-
-        Returns:
-            bbox: array-like, shape (3,2)
-
-        """
-        bbox = self.get_cell_bbox(level, cell_iarr)
-        filter_left = bbox[:, 0] - pad
-        filter_right = bbox[:, 1] + pad
-
-        # Need to get all of these
-        low_key, high_key = self.get_key_bounds(level, cell_iarr)
-        for k in xrange(low_key, high_key):
-            yield k
-
-        # Bottom & Top
-        pbox = bbox.copy()
-        pbox[0, 0] -= pad[0]
-        pbox[0, 1] += pad[0]
-        pbox[1, 0] -= pad[1]
-        pbox[1, 1] += pad[1]
-        pbox[2, 0] -= pad[2]
-        pbox[2, 1] = bbox[2, 0]
-        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
-            yield k
-
-        pbox[2, 0] = bbox[2, 1]
-        pbox[2, 1] = pbox[2, 0] + pad[2]
-        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
-            yield k
-
-        # Front & Back
-        pbox = bbox.copy()
-        pbox[0, 0] -= pad[0]
-        pbox[0, 1] += pad[0]
-        pbox[1, 0] -= pad[1]
-        pbox[1, 1] = bbox[1, 0]
-        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
-            yield k
-        pbox[1, 0] = bbox[1, 1]
-        pbox[1, 1] = pbox[1, 0] + pad[1]
-        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
-            yield k
-
-        # Left & Right
-        pbox = bbox.copy()
-        pbox[0, 0] -= pad[0]
-        pbox[0, 1] = bbox[0, 0]
-        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
-            yield k
-        pbox[0, 0] = bbox[0, 1]
-        pbox[0, 1] = pbox[0, 0] + pad[0]
-        for k in self.get_bbox(pbox[:,0], pbox[:,1]):
-            yield k
-

diff -r 8feaa0de2ed74a0526a18d2ebac475f7bfeb2142 -r af71f6a207fc7667db9eaeec8062e56662edbd49 yt/frontends/sdf/particle_filters.py
--- a/yt/frontends/sdf/particle_filters.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import numpy as np
-
-
-def bbox_filter(left, right, domain_width):
-
-    def myfilter(chunk, mask=None):
-        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
-
-        # This hurts, but is useful for periodicity. Probably should check first
-        # if it is even needed for a given left/right
-        for i in range(3):
-            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
-
-        # Now get all particles that are within the bbox
-        if mask is None:
-            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
-        else:
-            np.multiply(mask, np.all(pos >= left, axis=1), mask)
-            np.multiply(mask, np.all(pos < right, axis=1), mask)
-        return mask
-
-    return myfilter
-
-def sphere_filter(center, radius, domain_width):
-
-    def myfilter(chunk, mask=None):
-        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
-
-        # This hurts, but is useful for periodicity. Probably should check first
-        # if it is even needed for a given left/right
-        for i in range(3):
-            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
-
-        # Now get all particles that are within the radius
-        if mask is None:
-            mask = ((pos-center)**2).sum(axis=1)**0.5 < radius
-        else:
-            np.multiply(mask, np.linalg.norm(pos - center, 2) < radius, mask)
-        return mask
-
-    return myfilter

This diff is so big that we needed to truncate the remainder.
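
Most of the machinery moved into yt/utilities/sdf.py revolves around Morton
keys: each (i, j, k) cell index is turned into a single integer by spreading
its bits and interleaving the three coordinates, which is what spread_bitsv and
get_keyv do. A condensed sketch modelled on that key construction (the function
signatures are simplified and the sample indices are made up):

    import numpy as np

    def spread_bitsv(ival, level):
        # Insert two zero bits between successive bits of each integer.
        res = np.zeros_like(ival, dtype='int64')
        for i in range(level):
            res |= np.bitwise_and(ival >> i, 1) << (i * 3)
        return res

    def get_keyv(i1, i2, i3, level):
        # Interleave the three coordinate indices into one Morton key.
        return (spread_bitsv(i1, level)
                | spread_bitsv(i2, level) << 1
                | spread_bitsv(i3, level) << 2)

    ix = np.array([0, 1, 2, 3])
    iy = np.array([0, 0, 1, 1])
    iz = np.array([0, 1, 0, 1])
    print(get_keyv(ix, iy, iz, level=2))   # [ 0  5 10 15]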

https://bitbucket.org/yt_analysis/yt/commits/6069eef1b1b4/
Changeset:   6069eef1b1b4
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-25 06:38:05
Summary:     Periodic boundary condition fixes for halo-finding bboxes.
Affected #:  1 file

diff -r f8cd907c73a7c1d0b17b78b861c5cdb47952d96d -r 6069eef1b1b4b28c6e9f0eeea6d97a89b9355938 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -616,7 +616,6 @@
         """
         ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
         iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-        iright[iright <= ileft+1] += 1
 
         return self.get_ibbox(ileft, iright)
 
@@ -761,17 +760,19 @@
         xf, yf, zf = pos_fields
         print pos_fields
 
-        mask = np.zeros_like(data, dtype='bool')
         # I'm sorry.
         pos = mpcuq * np.array([data[xf].in_units('Mpc/h'), data[yf].in_units('Mpc/h'), data[zf].in_units('Mpc/h')]).T
 
-        # This hurts, but is useful for periodicity. Probably should check first
-        # if it is even needed for a given left/right
+        mask = np.zeros_like(data, dtype='bool')
         for i in range(3):
-            pos[:,i] = np.mod(pos[:, i] - left[i], DW[i]) + left[i]
+            mask = pos[:,i] >= DW[i] + left[i]
+            pos[mask, i] -= DW[i]
+            mask = pos[:,i] < right[i] - DW[i]
+            pos[mask, i] += DW[i]
 
         print left, right, pos.min(axis=0), pos.max(axis=0)
         # Now get all particles that are within the bbox
+        mask = np.zeros_like(data, dtype='bool')
         mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
 
         mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
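
The revised wrapping only shifts particles that land outside the window implied
by the bbox, rather than remapping every position with np.mod. A standalone
sketch of the new adjustment, with plain arrays and invented numbers in place
of the unit-carrying quantities (here the bbox crosses the periodic boundary
along x):

    import numpy as np

    DW = np.array([100.0, 100.0, 100.0])
    left = np.array([-10.0, 20.0, 20.0])
    right = np.array([10.0, 40.0, 40.0])

    pos = np.random.RandomState(1).uniform(0.0, 100.0, size=(1000, 3))

    for i in range(3):
        high = pos[:, i] >= DW[i] + left[i]
        pos[high, i] -= DW[i]
        low = pos[:, i] < right[i] - DW[i]
        pos[low, i] += DW[i]

    mask = np.all(pos >= left, axis=1) & np.all(pos < right, axis=1)
    print("kept %i of %i particles" % (mask.sum(), mask.shape[0]))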


https://bitbucket.org/yt_analysis/yt/commits/5c63e0252c4b/
Changeset:   5c63e0252c4b
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-27 07:01:29
Summary:     Set global_particles to NULL when we are done to signal
R* not to write particles in halo .bin files.
Affected #:  1 file

diff -r 6069eef1b1b4b28c6e9f0eeea6d97a89b9355938 -r 5c63e0252c4b97f2356d4f6b92aa538bce8d4e41 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -371,4 +371,5 @@
                 # Now we reset
                 fof_obj.num_p = j = 0
         free(fof_obj.particles)
+        global_particles = NULL
         return pcounts


https://bitbucket.org/yt_analysis/yt/commits/5f39525ca7f6/
Changeset:   5f39525ca7f6
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-28 01:33:53
Summary:     Read the offset_center flag to determine whether the SDF origin is in the lower corner.
Affected #:  1 file

diff -r 5c63e0252c4b97f2356d4f6b92aa538bce8d4e41 -r 5f39525ca7f6e0eb14a8d3dc2dd1496f6bdebc76 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -95,12 +95,17 @@
 
         if None in (self.domain_left_edge, self.domain_right_edge):
             R0 = self.parameters['R0']
-            self.domain_left_edge = np.array([
-              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
-            self.domain_right_edge = np.array([
-              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
-            self.domain_left_edge *= self.parameters.get("a", 1.0)
-            self.domain_right_edge *= self.parameters.get("a", 1.0)
+            if 'offset_center' in self.parameters and self.parameters['offset_center']:
+                self.domain_left_edge = np.array([0, 0, 0])
+                self.domain_right_edge = np.array([
+                 2.0 * self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            else:
+                self.domain_left_edge = np.array([
+                    -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                self.domain_right_edge = np.array([
+                    +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                self.domain_left_edge *= self.parameters.get("a", 1.0)
+                self.domain_right_edge *= self.parameters.get("a", 1.0)
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz


https://bitbucket.org/yt_analysis/yt/commits/7e3e76580556/
Changeset:   7e3e76580556
Branch:      yt-3.0
User:        mswarren
Date:        2014-05-28 01:38:42
Summary:     Fix indent.
Affected #:  1 file

diff -r 5f39525ca7f6e0eb14a8d3dc2dd1496f6bdebc76 -r 7e3e7658055671e785a35769e3c43785fbb9c619 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -104,8 +104,8 @@
                     -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
                 self.domain_right_edge = np.array([
                     +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
-                self.domain_left_edge *= self.parameters.get("a", 1.0)
-                self.domain_right_edge *= self.parameters.get("a", 1.0)
+            self.domain_left_edge *= self.parameters.get("a", 1.0)
+            self.domain_right_edge *= self.parameters.get("a", 1.0)
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
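
With the indentation fixed, the scale factor "a" now applies in both branches
of the offset_center logic introduced in the previous commit. A small sketch of
the resulting domain-edge computation, using an invented parameter dictionary
in place of the parsed SDF header:

    import numpy as np

    def domain_edges(parameters):
        R0 = parameters['R0']
        if parameters.get('offset_center'):
            # Origin in the lower corner: each axis runs from 0 to 2*R.
            left = np.zeros(3)
            right = np.array([2.0 * parameters.get("R%s" % ax, R0) for ax in 'xyz'])
        else:
            # Origin at the center: each axis runs from -R to +R.
            left = np.array([-parameters.get("R%s" % ax, R0) for ax in 'xyz'])
            right = np.array([+parameters.get("R%s" % ax, R0) for ax in 'xyz'])
        a = parameters.get("a", 1.0)
        return left * a, right * a

    print(domain_edges({'R0': 50.0, 'a': 1.0}))
    print(domain_edges({'R0': 50.0, 'offset_center': 1}))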


https://bitbucket.org/yt_analysis/yt/commits/6e94663adf86/
Changeset:   6e94663adf86
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-28 21:28:20
Summary:     Merging in fixes to periodicity and updates to R*.
Affected #:  4 files

diff -r af71f6a207fc7667db9eaeec8062e56662edbd49 -r 6e94663adf8609e31cba565fd07152cf9fe8751c yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -371,4 +371,5 @@
                 # Now we reset
                 fof_obj.num_p = j = 0
         free(fof_obj.particles)
+        global_particles = NULL
         return pcounts

diff -r af71f6a207fc7667db9eaeec8062e56662edbd49 -r 6e94663adf8609e31cba565fd07152cf9fe8751c yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -109,10 +109,15 @@
 
         if None in (self.domain_left_edge, self.domain_right_edge):
             R0 = self.parameters['R0']
-            self.domain_left_edge = np.array([
-              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
-            self.domain_right_edge = np.array([
-              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            if 'offset_center' in self.parameters and self.parameters['offset_center']:
+                self.domain_left_edge = np.array([0, 0, 0])
+                self.domain_right_edge = np.array([
+                 2.0 * self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            else:
+                self.domain_left_edge = np.array([
+                    -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                self.domain_right_edge = np.array([
+                    +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
             self.domain_left_edge *= self.parameters.get("a", 1.0)
             self.domain_right_edge *= self.parameters.get("a", 1.0)
 

diff -r af71f6a207fc7667db9eaeec8062e56662edbd49 -r 6e94663adf8609e31cba565fd07152cf9fe8751c yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -253,6 +253,3 @@
         fields = [("dark_matter", v) for v in self._handle.keys()]
         fields.append(("dark_matter", "mass"))
         return fields, {}
-
-
-

diff -r af71f6a207fc7667db9eaeec8062e56662edbd49 -r 6e94663adf8609e31cba565fd07152cf9fe8751c yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -556,7 +556,6 @@
         """
         ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
         iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-        iright[iright <= ileft+1] += 1
 
         return self.get_ibbox(ileft, iright)
 
@@ -701,14 +700,16 @@
         xf, yf, zf = pos_fields
         print pos_fields
 
-        mask = np.zeros_like(data, dtype='bool')
         # I'm sorry.
         pos = mpcuq * np.array([data[xf].in_units('Mpc/h'), data[yf].in_units('Mpc/h'), data[zf].in_units('Mpc/h')]).T
 
         # This hurts, but is useful for periodicity. Probably should check first
         # if it is even needed for a given left/right
         for i in range(3):
-            pos[:,i] = np.mod(pos[:, i] - left[i], DW[i]) + left[i]
+            mask = pos[:,i] >= DW[i] + left[i]
+            pos[mask, i] -= DW[i]
+            mask = pos[:,i] < right[i] - DW[i]
+            pos[mask, i] += DW[i]
 
         print left, right, pos.min(axis=0), pos.max(axis=0)
         # Now get all particles that are within the bbox
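For reference, a minimal NumPy sketch of the mask-based periodic wrap used above in place of np.mod; the function name and the unit-box values are purely illustrative:

    import numpy as np

    def wrap_periodic(pos, left, right, DW):
        # Shift particles that sit one domain width outside the requested
        # bbox back in, instead of taking a modulus of every coordinate.
        pos = pos.copy()
        for i in range(3):
            too_high = pos[:, i] >= DW[i] + left[i]
            pos[too_high, i] -= DW[i]
            too_low = pos[:, i] < right[i] - DW[i]
            pos[too_low, i] += DW[i]
        return pos

    # Illustrative check on a unit box shifted by a quarter domain width:
    pos = np.random.random((100, 3))
    left, right, DW = np.full(3, -0.25), np.full(3, 0.75), np.ones(3)
    wrapped = wrap_periodic(pos, left, right, DW)
    assert (wrapped >= left).all() and (wrapped < right).all()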


https://bitbucket.org/yt_analysis/yt/commits/eb17bb935bf5/
Changeset:   eb17bb935bf5
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-30 23:32:46
Summary:     Allowing for HTTP SDF, header, and index files.
Affected #:  5 files

diff -r 6e94663adf8609e31cba565fd07152cf9fe8751c -r eb17bb935bf558e8a2c410cb0db818f2d6bccdd9 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -110,7 +110,7 @@
                 data[field][ub] /= weight_data[field][ub]
                 std_data[field][ub] /= weight_data[field][ub]
             self[field] = data[field]
-            #self["%s_std" % field] = np.sqrt(std_data[field])
+            self["%s_std" % field] = np.sqrt(std_data[field])
         self["UsedBins"] = used
 
         if fractional:
@@ -841,7 +841,7 @@
         if self.weight_field is not None:
             weight_data = chunk[self.weight_field]
         else:
-            weight_data = np.ones(chunk.ires.size, dtype="float64")
+            weight_data = np.ones(filter.size, dtype="float64")
         weight_data = weight_data[filter]
         # So that we can pass these into
         return arr, weight_data, bin_fields

diff -r 6e94663adf8609e31cba565fd07152cf9fe8751c -r eb17bb935bf558e8a2c410cb0db818f2d6bccdd9 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -67,7 +67,8 @@
                  sdf_header = None,
                  idx_filename = None,
                  idx_header = None,
-                 idx_level = 9):
+                 idx_level = 9,
+                 field_map = None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
@@ -82,10 +83,15 @@
         self.idx_filename = idx_filename
         self.idx_header = idx_header
         self.idx_level = idx_level
+        if field_map is None:
+            field_map = {}
+        self._field_map = field_map
+        prefix = ''
         if self.idx_filename is not None:
-            dataset_type = 'sindex_sdf_particles'
+            prefix += 'sindex_'
         if 'http' in filename:
-            dataset_type = 'http_sdf_particles'
+            prefix += 'http_'
+        dataset_type = prefix + 'sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
@@ -148,18 +154,24 @@
     def sindex(self):
         if self._sindex is None:
             if self.idx_filename is not None:
-                indexdata = SDFRead(self.idx_filename,
-                                    header=self.idx_header)
-                self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
+
+                if 'http' in self.idx_filename:
+                    indexdata = HTTPSDFRead(self.idx_filename,
+                                            header=self.idx_header)
+                else:
+                    indexdata = SDFRead(self.idx_filename,
+                                        header=self.idx_header)
+                self._sindex = SDFIndex(self.sdf_container, indexdata,
+                                        level=self.idx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
         return self._sindex
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "kpc")
-        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
-        self.time_unit = self.quan(1.0, "Gyr")
-        self.mass_unit = self.quan(1e10, "Msun")
+        self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
+        self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
+        self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
+        self.mass_unit = self.quan(1.0, self.parameters.get("mass_unit", 'Msun'))
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r 6e94663adf8609e31cba565fd07152cf9fe8751c -r eb17bb935bf558e8a2c410cb0db818f2d6bccdd9 yt/frontends/sdf/fields.py
--- a/yt/frontends/sdf/fields.py
+++ b/yt/frontends/sdf/fields.py
@@ -35,13 +35,40 @@
 class SDFFieldInfo(FieldInfoContainer):
     known_other_fields = ()
 
-    known_particle_fields = (
-        ("mass", ("code_mass", ["particle_mass"], None)),
-        ("x", ("code_length", ["particle_position_x"], None)),
-        ("y", ("code_length", ["particle_position_y"], None)),
-        ("z", ("code_length", ["particle_position_z"], None)),
-        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
-        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
-        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
-        ("ident", ("", ["particle_index"], None)),
-    )
+    known_particle_fields = ()
+    _mass_field = None
+
+    def __init__(self, pf, field_list):
+
+        if 'mass' in field_list:
+            self.known_particle_fields.append(("mass", "code_mass",
+                                               ["particle_mass"], None))
+        possible_masses = ['mass', 'm200b', 'mvir']
+        mnf = 'mass'
+        for mn in possible_masses:
+            if mn in pf.sdf_container.keys():
+                mnf = mn
+                self._mass_field=mn
+                break
+
+        idf = pf._field_map.get("particle_index", 'ident')
+        xf = pf._field_map.get("particle_position_x", 'x')
+        yf = pf._field_map.get("particle_position_y", 'y')
+        zf = pf._field_map.get("particle_position_z", 'z')
+        vxf = pf._field_map.get("particle_velocity_x", 'vx')
+        vyf = pf._field_map.get("particle_velocity_z", 'vy')
+        vzf = pf._field_map.get("particle_velocity_z", 'vz')
+
+        self.known_particle_fields = (
+            (idf, ('dimensionless', ['particle_index'], None)),
+            (xf,  ('code_length', ['particle_position_x'], None)),
+            (yf,  ('code_length', ['particle_position_y'], None)),
+            (zf,  ('code_length', ['particle_position_z'], None)),
+            (vxf, ('code_velocity', ['particle_velocity_x'], None)),
+            (vyf, ('code_velocity', ['particle_velocity_y'], None)),
+            (vzf, ('code_velocity', ['particle_velocity_z'], None)),
+            (mnf, ('code_mass', ['particle_mass'], None)),
+        )
+        super(SDFFieldInfo, self).__init__(pf, field_list)
+
+
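A sketch of how the new field_map keyword is intended to be used when the on-disk SDF column names differ from the defaults ('x', 'vx', 'ident', ...). The import path assumes the usual frontend layout, and the file name and column names below are hypothetical:

    from yt.frontends.sdf.api import SDFDataset

    # Map yt's canonical particle field names to the columns actually
    # present in the file; anything not listed falls back to the defaults.
    field_map = {
        "particle_index": "id",
        "particle_position_x": "px",
        "particle_position_y": "py",
        "particle_position_z": "pz",
    }
    ds = SDFDataset("halos.sdf", field_map=field_map)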

diff -r 6e94663adf8609e31cba565fd07152cf9fe8751c -r eb17bb935bf558e8a2c410cb0db818f2d6bccdd9 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -103,9 +103,6 @@
             ind += CHUNKSIZE
         return morton
 
-    def _count_particles(self, data_file):
-        return {'dark_matter': self._handle['x'].http_array.shape}
-
     def _identify_fields(self, data_file):
         fields = [("dark_matter", v) for v in self._handle.keys()]
         fields.append(("dark_matter", "mass"))
@@ -148,13 +145,15 @@
                 if mask is None: continue
                 for field in field_list:
                     if field == "mass":
-                        if 'particle_mass' in self.parameters:
-                            data = np.ones(mask.sum(), dtype="float64")
-                            data *= self.pf.parameters["particle_mass"]
-                        elif 'm200b' in self._handle.keys():
-                            data = self._handle[field]['m200b'][mask]
+                        if self.pf.field_info._mass_field is None:
+                            pm = 1.0
+                            if 'particle_mass' in self.pf.parameters:
+                                pm = self.pf.parameters['particle_mass']
+                            else:
+                                raise RuntimeError
+                            data = pm * np.ones(mask.sum(), dtype="float64")
                         else:
-                            raise KeyError
+                            data = self._handle[self.pf.field_info._mass_field][mask]
                     else:
                         data = self._handle[field][mask]
                     yield (ptype, field), data
@@ -253,3 +252,8 @@
         fields = [("dark_matter", v) for v in self._handle.keys()]
         fields.append(("dark_matter", "mass"))
         return fields, {}
+
+
+class IOHandlerSIndexHTTPSDF(IOHandlerSIndexSDF):
+    _dataset_type = "sindex_http_sdf_particles"
+

diff -r 6e94663adf8609e31cba565fd07152cf9fe8751c -r eb17bb935bf558e8a2c410cb0db818f2d6bccdd9 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -4,6 +4,7 @@
 import numpy as np
 from httpmmap import HTTPArray
 from arbitrary_page import PageCacheURL
+from yt.funcs import mylog
 
 _types = {
     'int': 'int32',
@@ -144,14 +145,9 @@
             self.size = float(file_size) / self.itemsize
             assert(int(self.size) == self.size)
 
-    def build_redirect_func(self, key):
-        def redirect(sl):
-            return self.handle[sl][key]
-        return redirect
-
     def build_memmap(self):
         assert(self.size != -1)
-        print 'Building memmap with offset: %i' % self._offset 
+        print 'Building memmap with offset: %i' % self._offset
         self.handle = HTTPArray(self.filename, dtype=self.dtype,
                         shape=self.size, offset=self._offset)
         for k in self.dtype.names:
@@ -163,6 +159,7 @@
     """docstring for SDFRead"""
 
     _eof = 'SDF-EOH'
+    _data_struct = DataStruct
 
     def __init__(self, filename, header=None):
         self.filename = filename
@@ -240,7 +237,7 @@
             # handle this.
             num = '-1'
         num = int(num)
-        struct = DataStruct(str_types, num, self.filename)
+        struct = self._data_struct(str_types, num, self.filename)
         self.structs.append(struct)
         return
 
@@ -261,20 +258,9 @@
 
     """docstring for SDFRead"""
 
+    _data_struct = HTTPDataStruct
     _eof = 'SDF-EOH'
 
-    def __init__(self, filename, header=None):
-        self.filename = filename
-        if header is None:
-            header = filename
-        self.header = header
-        self.parameters = {}
-        self.structs = []
-        self.comments = []
-        self.parse_header()
-        self.set_offsets()
-        self.load_memmaps()
-
     def parse_header(self):
         """docstring for parse_header"""
         # Pre-process
@@ -292,29 +278,6 @@
             hoff = 0
         self.parameters['header_offset'] = hoff
 
-    def parse_struct(self, line, ascfile):
-        assert 'struct' in line
-
-        str_types = []
-        comments = []
-        str_lines = []
-        l = ascfile.readline()
-        while "}" not in l:
-            vtype, vnames = get_struct_vars(l)
-            for v in vnames:
-                str_types.append((v, vtype))
-            l = ascfile.readline()
-        num = l.strip("}[]")
-        num = num.strip("\;\\\n]")
-        if len(num) == 0:
-            # We need to compute the number of records.  The DataStruct will
-            # handle this.
-            num = '-1'
-        num = int(num)
-        struct = HTTPDataStruct(str_types, num, self.filename)
-        self.structs.append(struct)
-        return
-
 
 class SDFIndex(object):
 
@@ -490,7 +453,7 @@
         lengths = self.indexdata['len'][mask]
         return mask, offsets, lengths
 
-    def get_ibbox(self, ileft, iright, wandering_particles=True):
+    def get_ibbox(self, ileft, iright):
         """
         Given left and right indicies, return a mask and
         set of offsets+lengths into the sdf data.


https://bitbucket.org/yt_analysis/yt/commits/abee32fb6348/
Changeset:   abee32fb6348
Branch:      yt-3.0
User:        samskillman
Date:        2014-05-31 21:31:55
Summary:     Bugfix for L0 != R0, and add __getitem__ to the data struct
Affected #:  1 file

diff -r eb17bb935bf558e8a2c410cb0db818f2d6bccdd9 -r abee32fb6348ebc45386739123c3688b1c8ac832 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -114,6 +114,32 @@
         for k in self.dtype.names:
             self.data[k] = self.handle[k]
 
+    def __getitem__(self, key):
+        mask = None
+        kt = type(key)
+        if kt == int or kt == np.int64 or kt == np.int32 or kt == np.int:
+            if key == -1:
+                key = slice(-1, None)
+            else:
+                key = slice(key, key+1)
+        elif type(key) == np.ndarray:
+            mask = key
+            key = slice(None, None)
+        if not isinstance(key, slice):
+            raise NotImplementedError
+        if key.start is None:
+            key = slice(0, key.stop)
+        if key.stop is None:
+            key = slice(key.start, self.shape)
+        if key.start < 0:
+            key = slice(self.size + key.start, key.stop)
+        if key.stop < 0:
+            key = slice(key.start, self.size + key.stop)
+        arr = self.handle[key.start:key.stop]
+        if mask is None:
+            return arr
+        else:
+            return arr[mask]
 
 class RedirectArray(object):
     """docstring for RedirectArray"""
@@ -125,7 +151,6 @@
 
     def __getitem__(self, sl):
         if isinstance(sl, int):
-            sl = slice(sl, sl+1)
             return self.http_array[sl][self.key][0]
         return self.http_array[sl][self.key]
 
@@ -330,6 +355,10 @@
 
     def set_bounds(self):
         r_0 = self.sdfdata.parameters['R0']
+        try:
+            r_0 = self.sdfdata.parameters['L0']
+        except:
+            pass
         DW = 2.0 * r_0
 
         self.rmin = np.zeros(3)
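The new __getitem__ is meant to accept plain integers, slices (including open-ended and negative ones), and boolean masks. A standalone sketch of the normalization it performs, so the behavior can be checked without an actual SDF file; this mirrors the diff rather than reusing it:

    import numpy as np

    def normalize_key(key, size):
        # ints become length-1 slices, boolean arrays become masks applied
        # after a full read, and open or negative slice ends are resolved.
        mask = None
        if isinstance(key, (int, np.integer)):
            key = slice(-1, None) if key == -1 else slice(key, key + 1)
        elif isinstance(key, np.ndarray):
            mask, key = key, slice(None, None)
        start = key.start or 0
        stop = size if key.stop is None else key.stop
        if start < 0:
            start += size
        if stop < 0:
            stop += size
        return slice(start, stop), mask

    assert normalize_key(-1, 10) == (slice(9, 10), None)
    assert normalize_key(slice(None, -2), 10) == (slice(0, 8), None)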


https://bitbucket.org/yt_analysis/yt/commits/85f4e395c4b5/
Changeset:   85f4e395c4b5
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-07 06:16:18
Summary:     Don't cache SDFDatasets
Affected #:  1 file

diff -r abee32fb6348ebc45386739123c3688b1c8ac832 -r 85f4e395c4b5dde3225775b3ee93c12271f32a51 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -60,6 +60,8 @@
     _particle_coordinates_name = None
     _particle_velocity_name = None
     _sindex = None
+    _skip_cache = True
+
 
     def __init__(self, filename, dataset_type = "sdf_particles",
                  n_ref = 64, over_refine_factor = 1,


https://bitbucket.org/yt_analysis/yt/commits/609a8cdf3a3f/
Changeset:   609a8cdf3a3f
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-07 06:33:44
Summary:     Merging with tip
Affected #:  2 files

diff -r 85f4e395c4b5dde3225775b3ee93c12271f32a51 -r 609a8cdf3a3f82b58457a9f7a20c18d7caff20fe yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -548,6 +548,7 @@
         """
         ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
         iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+        iright[iright <= ileft+1] += 1
 
         return self.get_ibbox(ileft, iright)
 


https://bitbucket.org/yt_analysis/yt/commits/ae29ecd9e8fc/
Changeset:   ae29ecd9e8fc
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-09 19:11:15
Summary:     A few updates to the sindex rmin/rmax calculation, adding some useful bits
for particle plotting.
Affected #:  3 files

diff -r 609a8cdf3a3f82b58457a9f7a20c18d7caff20fe -r ae29ecd9e8fc7f6499337a2d0f3b004d1e1c5a4a yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -147,7 +147,7 @@
             # not correct, but most codes can't handle Omega0_r
             self.omega_matter += self.parameters["Omega0_r"]
         self.hubble_constant = self.parameters["h_100"]
-        self.current_time = units_2HOT_v2_time * self.parameters["tpos"]
+        self.current_time = units_2HOT_v2_time * self.parameters.get("tpos", 0.0)
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
         self.filename_template = self.parameter_filename
         self.file_count = 1
@@ -173,7 +173,13 @@
         self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
         self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
         self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
-        self.mass_unit = self.quan(1.0, self.parameters.get("mass_unit", 'Msun'))
+        mass_unit = self.parameters.get("mass_unit", 'Msun')
+        if ' ' in mass_unit:
+            factor, unit = self.parameters.get("mass_unit", 'Msun').split(' ')
+        else:
+            factor = 1.0
+            unit = mass_unit
+        self.mass_unit = self.quan(float(factor), unit)
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
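For context, a small sketch of the mass_unit handling introduced above: the SDF parameter may be either a bare unit name or a "<factor> <unit>" pair, and the factor is folded into the quantity. The values below are illustrative:

    def parse_mass_unit(mass_unit):
        # "1e10 Msun" -> (1e10, "Msun");  "Msun" -> (1.0, "Msun")
        if ' ' in mass_unit:
            factor, unit = mass_unit.split(' ')
            return float(factor), unit
        return 1.0, mass_unit

    assert parse_mass_unit("1e10 Msun") == (1e10, "Msun")
    assert parse_mass_unit("Msun") == (1.0, "Msun")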

diff -r 609a8cdf3a3f82b58457a9f7a20c18d7caff20fe -r ae29ecd9e8fc7f6499337a2d0f3b004d1e1c5a4a yt/utilities/lib/image_utilities.pyx
--- a/yt/utilities/lib/image_utilities.pyx
+++ b/yt/utilities/lib/image_utilities.pyx
@@ -37,3 +37,24 @@
     #        for k in range(3):
     #            v = buffer[i, j, k]
     #            buffer[i, j, k] = iclip(v, 0, 255)
+
+def add_rgba_points_to_image(
+        np.ndarray[np.float64_t, ndim=3] buffer,
+        np.ndarray[np.float64_t, ndim=1] px, 
+        np.ndarray[np.float64_t, ndim=1] py, 
+        np.ndarray[np.float64_t, ndim=2] rgba,
+        ):  
+    cdef int i, j, k, pi
+    cdef int npart = px.shape[0]
+    cdef int xs = buffer.shape[0]
+    cdef int ys = buffer.shape[1]
+    cdef int v
+    #iv = iclip(<int>(pv * 255), 0, 255)
+    for pi in range(npart):
+        j = <int> (xs * px[pi])
+        i = <int> (ys * py[pi])
+        if i < 0 or j < 0 or i >= xs or j >= ys: 
+            continue
+        for k in range(4):
+            buffer[i, j, k] += rgba[pi, k]
+    return
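A pure-NumPy restatement of what the new add_rgba_points_to_image kernel does, accumulating (not overwriting) RGBA contributions at pixel locations; np.add.at stands in for the Cython loop and the function below is only an illustration:

    import numpy as np

    def splat_rgba_points(buffer, px, py, rgba):
        # buffer: (xs, ys, 4) float64 image; px, py in [0, 1); rgba: (npart, 4)
        xs, ys = buffer.shape[0], buffer.shape[1]
        j = (xs * px).astype(int)
        i = (ys * py).astype(int)
        keep = (i >= 0) & (j >= 0) & (i < xs) & (j < ys)
        # Unbuffered add so that points landing on the same pixel accumulate.
        np.add.at(buffer, (i[keep], j[keep]), rgba[keep])
        return buffer

    img = np.zeros((256, 256, 4))
    px, py = np.random.random(1000), np.random.random(1000)
    rgba = np.random.random((1000, 4))
    splat_rgba_points(img, px, py, rgba)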

diff -r 609a8cdf3a3f82b58457a9f7a20c18d7caff20fe -r ae29ecd9e8fc7f6499337a2d0f3b004d1e1c5a4a yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -321,6 +321,11 @@
         self.sdfdata = sdfdata
         self.indexdata = indexdata
         self.level = level
+        idlevel = self.indexdata.parameters.get('level',None)
+        if idlevel and idlevel != level:
+            mylog.warning("Overriding index level to %i" % idlevel)
+            self.level = idlevel
+        
         self.rmin = None
         self.rmax = None
         self.domain_width = None
@@ -353,44 +358,88 @@
         }
         self.set_bounds()
 
+
+    def _fix_rexact(self, rmin, rmax):
+
+        center = 0.5*(rmax+rmin)
+        mysize = (rmax-rmin)
+        mysize *= (1.0 + 0.0*np.finfo(np.float32).eps)
+        self.rmin = center - 0.5*mysize
+        self.rmax = center + 0.5*mysize
+
     def set_bounds(self):
+        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
+        sorted_xyz = self.sdfdata.parameters.get("sorted_xyz", False)
+        morton_xyz = self.sdfdata.parameters.get("morton_xyz", False)
+        
+        self.rmin = np.zeros(3)
+        self.rmax = np.zeros(3)
         r_0 = self.sdfdata.parameters['R0']
         try:
             r_0 = self.sdfdata.parameters['L0']
         except:
             pass
-        DW = 2.0 * r_0
 
-        self.rmin = np.zeros(3)
-        self.rmax = np.zeros(3)
-        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
         if sorted_rtp:
-            self.rmin[:] = [0.0, 0.0, -np.pi]
-            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
+            mylog.debug("Setting up Sorted RTP data")
+            rtp_min = np.array([0.0, 0.0, -np.pi])
+            rtp_max = np.array([r_0*1.01, 2*np.pi, np.pi])
+            self._fix_rexact(rtp_min, rtp_max)
+            self.true_domain_left = self.rmin.copy()
+            self.true_domain_right = self.rmax.copy()
+            self.true_domain_width = self.rmax - self.rmin
+        elif sorted_xyz:
+            mylog.debug("Setting up Sorted XYZ data")
+            offset_center = self.sdfdata.parameters.get("offset_center", False)
+            if offset_center:
+                rmin = np.zeros(3)
+                rmax = np.array([2.0*r_0]*3)
+            else:
+                rmin = -1.01*np.array([r_0]*3)
+                rmax = 1.01*np.array([r_0]*3)
+            self._fix_rexact(rmin, rmax)
+            self.true_domain_left = self.rmin.copy()
+            self.true_domain_right = self.rmax.copy()
+            self.true_domain_width = self.rmax - self.rmin
         else:
-            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
-            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
-            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
-            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
-            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
-            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
+            mylog.debug("Setting up regular data")
+            rx = self.sdfdata.parameters.get('Rx')
+            ry = self.sdfdata.parameters.get('Ry')
+            rz = self.sdfdata.parameters.get('Rz')
+            a =  self.sdfdata.parameters.get("a", 1.0)
+            r = np.array([rx, ry, rz])
+            rmin = -a*r
+            rmax = a*r
+            print rmin, rmax
 
-        self.rmin *= self.sdfdata.parameters.get("a", 1.0)
-        self.rmax *= self.sdfdata.parameters.get("a", 1.0)
+            #/* expand root for non-power-of-two */
+            expand_root = 0.0
+            ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
+            if ic_Nmesh != 0:
+                f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
+                if (f2 != ic_Nmesh):
+                    expand_root = 1.0*f2/ic_Nmesh - 1.0;
+                mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
+            #self._fix_rexact(rmin, rmax)
+            self.true_domain_left = rmin.copy()
+            self.true_domain_right = rmax.copy()
+            self.true_domain_width = rmax - rmin
+            rmin *= (1.0 + expand_root)
+            rmax *= (1.0 + expand_root)
+            self._fix_rexact(rmin, rmax)
+        print self.rmin, self.rmax
 
-        #/* expand root for non-power-of-two */
-        expand_root = 0.0
-        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
-        if ic_Nmesh != 0:
-            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
-            if (f2 != ic_Nmesh):
-                expand_root = 1.0*f2/ic_Nmesh - 1.0;
-            mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
-        self.true_domain_left = self.rmin.copy()
-        self.true_domain_right = self.rmax.copy()
-        self.true_domain_width = self.rmax - self.rmin
-        self.rmin *= 1.0 + expand_root
-        self.rmax *= 1.0 + expand_root
+        if self.indexdata.parameters.get("midx_version", 0) == 1.0:
+            rmin = np.zeros(3)
+            rmax = np.zeros(3)
+            rmin[0] = self.indexdata.parameters['x_min']
+            rmin[1] = self.indexdata.parameters['y_min']
+            rmin[2] = self.indexdata.parameters['z_min']
+            rmax[0] = self.indexdata.parameters['x_max']
+            rmax[1] = self.indexdata.parameters['y_max']
+            rmax[2] = self.indexdata.parameters['z_max']
+            self._fix_rexact(rmin, rmax)
+
         self.domain_width = self.rmax - self.rmin
         self.domain_dims = 1 << self.level
         self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
@@ -577,7 +626,7 @@
         if stop is None:
             stop = self.indexdata['index'][-1]
         while key < stop:
-            if self.indexdata['index'][key] == 0:
+            if self.indexdata['len'][key] == 0:
                 #print 'Squeezing keys, incrementing'
                 key += 1
             else:
@@ -591,7 +640,7 @@
             stop = self.indexdata['index'][0]
         while key > stop:
             #self.indexdata['index'][-1]:
-            if self.indexdata['index'][key] == 0:
+            if self.indexdata['len'][key] == 0:
                 #print 'Squeezing keys, decrementing'
                 key -= 1
             else:
@@ -649,31 +698,47 @@
         by left/right. Account for periodicity of data, allowing left/right
         to be outside of the domain.
         """
+
         for data in myiter:
-            mask = np.zeros_like(data, dtype='bool')
+            #mask = np.zeros_like(data, dtype='bool')
             pos = np.array([data['x'].copy(), data['y'].copy(), data['z'].copy()]).T
 
-
+            DW = self.true_domain_width
             # This hurts, but is useful for periodicity. Probably should check first
             # if it is even needed for a given left/right
             for i in range(3):
-                pos[:,i] = np.mod(pos[:,i] - left[i], self.true_domain_width[i]) + left[i]
+                #pos[:,i] = np.mod(pos[:,i] - left[i],
+                #                  self.true_domain_width[i]) + left[i]
+                mask = pos[:,i] >= left[i] + DW[i]
+                pos[mask, i] -= DW[i]
+                mask = pos[:,i] < right[i] - DW[i] 
+                pos[mask, i] += DW[i]
+                #del mask
 
             # Now get all particles that are within the bbox
             mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
+            #print 'Mask shape, sum:', mask.shape, mask.sum()
 
             mylog.debug("Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]))
 
             if not np.any(mask):
                 continue
 
+            #filtered = {}
+            #for i,ax in enumerate('xyz'):
+            #    print "Setting field %s" % ax
+            #    filtered[ax] = pos[:, i][mask]
+
             filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
+            #print filtered.keys(), data.keys()
             for f in data.keys():
-                if f in 'xyz': continue
+                if f in 'xyz': 
+                    continue
+                #print "Setting field %s" % f
                 filtered[f] = data[f][mask]
 
             #for i, ax in enumerate('xyz'):
-            #    print left, right
+            #    #print left, right
             #    assert np.all(filtered[ax] >= left[i])
             #    assert np.all(filtered[ax] < right[i])
 
@@ -723,13 +788,21 @@
     def iter_bbox_data(self, left, right, fields):
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
+        # Need to put left/right in float32 to avoid fp roundoff errors
+        # in the bbox later.
+        left = left.astype('float32')
+        right = right.astype('float32')
 
-        my_filter = bbox_filter(left, right, self.true_domain_width)
-
-        for dd in self.filter_particles(
-            self.iter_data(inds, fields),
-            my_filter):
+        #my_filter = bbox_filter(left, right, self.true_domain_width)
+        data = []
+        for dd in self.filter_bbox(
+            left, right,
+            self.iter_data(inds, fields)):
             yield dd
+        #for dd in self.filter_particles(
+        #    self.iter_data(inds, fields),
+        #    my_filter):
+        #    yield dd
 
     def iter_sphere_data(self, center, radius, fields):
         mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
@@ -833,6 +906,7 @@
         """
         cell_iarr = np.array(cell_iarr)
         lk, rk =self.get_key_bounds(level, cell_iarr)
+        print 'Reading from ', lk, rk
         return self.get_contiguous_chunk(lk, rk, fields)
 
     def get_cell_bbox(self, level, cell_iarr):


https://bitbucket.org/yt_analysis/yt/commits/f1c37e84387a/
Changeset:   f1c37e84387a
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-11 02:46:52
Summary:     Have to force bbox in the yt bounded box frontend to be float32 at the moment.
Otherwise roundoff in filter_bbox doesn't mask out all the particles depending
on the particular precision, and a runtime error in the yt octree
construction is thrown.  Also try to match rmin/rmax logic with midx code.
Affected #:  2 files

diff -r ae29ecd9e8fc7f6499337a2d0f3b004d1e1c5a4a -r f1c37e84387a54338c14dc395a2f41cc8e15b76f yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -74,7 +74,7 @@
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
-            bbox = np.array(bounding_box, dtype="float64")
+            bbox = np.array(bounding_box, dtype="float32")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
             self.domain_left_edge = bbox[:,0]

diff -r ae29ecd9e8fc7f6499337a2d0f3b004d1e1c5a4a -r f1c37e84387a54338c14dc395a2f41cc8e15b76f yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -363,7 +363,7 @@
 
         center = 0.5*(rmax+rmin)
         mysize = (rmax-rmin)
-        mysize *= (1.0 + 0.0*np.finfo(np.float32).eps)
+        mysize *= (1.0 + 4.0*np.finfo(np.float32).eps)
         self.rmin = center - 0.5*mysize
         self.rmax = center + 0.5*mysize
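A small illustration of the kind of roundoff this guards against: a float32 particle position that is one ulp above the nominal edge falls outside float64 bounds computed from the same numbers, but stays inside once the bounds are widened by a few float32 epsilons. The numbers are illustrative only:

    import numpy as np

    rmin, rmax = np.float64(-1.0), np.float64(1.0)
    edge = np.float32(1.0) * (np.float32(1.0) + np.finfo(np.float32).eps)

    assert not (edge < rmax)                    # dropped by the unpadded bound
    center, size = 0.5 * (rmax + rmin), rmax - rmin
    size = size * (1.0 + 4.0 * np.finfo(np.float32).eps)
    assert edge < center + 0.5 * size           # kept by the padded bound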
 


https://bitbucket.org/yt_analysis/yt/commits/9706ac0647fc/
Changeset:   9706ac0647fc
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-11 20:44:41
Summary:     Fixing up thingking imports
Affected #:  1 file

diff -r f1c37e84387a54338c14dc395a2f41cc8e15b76f -r 9706ac0647fcb7d358e4f96f925e2f421d350df5 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -2,8 +2,8 @@
 import re
 import os
 import numpy as np
-from httpmmap import HTTPArray
-from arbitrary_page import PageCacheURL
+from thingking.httpmmap import HTTPArray
+from thingking.arbitrary_page import PageCacheURL
 from yt.funcs import mylog
 
 _types = {
@@ -724,17 +724,10 @@
             if not np.any(mask):
                 continue
 
-            #filtered = {}
-            #for i,ax in enumerate('xyz'):
-            #    print "Setting field %s" % ax
-            #    filtered[ax] = pos[:, i][mask]
-
             filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
-            #print filtered.keys(), data.keys()
             for f in data.keys():
                 if f in 'xyz': 
                     continue
-                #print "Setting field %s" % f
                 filtered[f] = data[f][mask]
 
             #for i, ax in enumerate('xyz'):


https://bitbucket.org/yt_analysis/yt/commits/0ebc43bbe2cc/
Changeset:   0ebc43bbe2cc
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-11 21:55:56
Summary:     New midx fills out the indexdata with 0's, so we need a different way to find the max key.
Affected #:  1 file

diff -r 9706ac0647fcb7d358e4f96f925e2f421d350df5 -r 0ebc43bbe2ccb0a5ac603947a41c61cf5ca0522f yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -325,7 +325,7 @@
         if idlevel and idlevel != level:
             mylog.warning("Overriding index level to %i" % idlevel)
             self.level = idlevel
-        
+
         self.rmin = None
         self.rmax = None
         self.domain_width = None
@@ -371,7 +371,7 @@
         sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
         sorted_xyz = self.sdfdata.parameters.get("sorted_xyz", False)
         morton_xyz = self.sdfdata.parameters.get("morton_xyz", False)
-        
+
         self.rmin = np.zeros(3)
         self.rmax = np.zeros(3)
         r_0 = self.sdfdata.parameters['R0']
@@ -429,7 +429,8 @@
             self._fix_rexact(rmin, rmax)
         print self.rmin, self.rmax
 
-        if self.indexdata.parameters.get("midx_version", 0) == 1.0:
+        self._midx_version = self.indexdata.parameters.get("midx_version", 0)
+        if self._midx_version == 1.0:
             rmin = np.zeros(3)
             rmax = np.zeros(3)
             rmin[0] = self.indexdata.parameters['x_min']
@@ -819,7 +820,10 @@
 
         lbase=0
         llen = 0
-        max_key = self.indexdata['index'][-1]
+        if self._midx_version >= 1.0:
+            max_key = self.get_key(np.array([2**self.level - 1]*3))
+        else:
+            max_key = self.indexdata['index'][-1]
         if left_key > max_key:
             raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
         right_key = min(right_key, max_key)
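On the max-key point: for a midx file padded out with zero-length entries, the last index record no longer gives the largest valid key, so it is computed from the level instead. Assuming a plain 3-D Morton (bit-interleaved) key with no domain buffer, the maximum key at level L is the interleave of (2**L - 1,)*3, i.e. 2**(3*L) - 1. A toy check, with a simplified bit ordering:

    def spread_bits_toy(i, level):
        # Place bit b of i at position 3*b (one dimension of a Morton key).
        out = 0
        for b in range(level):
            out |= ((i >> b) & 1) << (3 * b)
        return out

    def morton_key_toy(ix, iy, iz, level):
        return (spread_bits_toy(ix, level) << 2 |
                spread_bits_toy(iy, level) << 1 |
                spread_bits_toy(iz, level))

    level = 6
    m = 2**level - 1
    assert morton_key_toy(m, m, m, level) == 2**(3 * level) - 1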


https://bitbucket.org/yt_analysis/yt/commits/cabc874792f3/
Changeset:   cabc874792f3
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-11 23:24:43
Summary:     Fixing more max key issues.
Affected #:  1 file

diff -r 0ebc43bbe2ccb0a5ac603947a41c61cf5ca0522f -r cabc874792f34d147df9f12d13a1c9a7b7f07491 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -357,6 +357,11 @@
             0 : slice(2, None, 3),
         }
         self.set_bounds()
+        if self._midx_version >= 1.0:
+            max_key = self.get_key(np.array([2**self.level - 1]*3))
+        else:
+            max_key = self.indexdata['index'][-1]
+        self._max_key = max_key
 
 
     def _fix_rexact(self, rmin, rmax):
@@ -562,7 +567,7 @@
             dmask += Y >= self.domain_dims
             dmask += Z >= self.domain_dims
             dinds = self.get_keyv([X[dmask], Y[dmask], Z[dmask]])
-            dinds = dinds[dinds < self.indexdata['index'][-1]]
+            dinds = dinds[dinds < self._max_key]
             dinds = dinds[self.indexdata['len'][dinds] > 0]
             #print 'Getting boundary layers for wanderers, cells: %i' % dinds.size
 
@@ -580,7 +585,7 @@
         # Only mask out if we are actually getting data rather than getting indices into
         # a space.
         if self.valid_indexdata:
-            indices = indices[indices < self.indexdata['index'][-1]]
+            indices = indices[indices < self._max_key]
             indices = indices[self.indexdata['len'][indices] > 0]
 
         #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
@@ -598,7 +603,7 @@
         """
         ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
         iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-        iright[iright <= ileft+1] += 1
+        #iright[iright <= ileft+1] += 1
 
         return self.get_ibbox(ileft, iright)
 
@@ -625,7 +630,7 @@
         # These next two while loops are to squeeze the keys if they are empty. Would be better
         # to go through and set base equal to the last non-zero base, i think.
         if stop is None:
-            stop = self.indexdata['index'][-1]
+            stop = self._max_key
         while key < stop:
             if self.indexdata['len'][key] == 0:
                 #print 'Squeezing keys, incrementing'
@@ -640,7 +645,6 @@
         if stop is None:
             stop = self.indexdata['index'][0]
         while key > stop:
-            #self.indexdata['index'][-1]:
             if self.indexdata['len'][key] == 0:
                 #print 'Squeezing keys, decrementing'
                 key -= 1
@@ -784,8 +788,8 @@
         inds = self.get_bbox(left, right)
         # Need to put left/right in float32 to avoid fp roundoff errors
         # in the bbox later.
-        left = left.astype('float32')
-        right = right.astype('float32')
+        #left = left.astype('float32')
+        #right = right.astype('float32')
 
         #my_filter = bbox_filter(left, right, self.true_domain_width)
         data = []
@@ -820,13 +824,10 @@
 
         lbase=0
         llen = 0
-        if self._midx_version >= 1.0:
-            max_key = self.get_key(np.array([2**self.level - 1]*3))
-        else:
-            max_key = self.indexdata['index'][-1]
-        if left_key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        right_key = min(right_key, max_key)
+        if left_key > self._max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % \
+                               (left_key, self._max_key))
+        right_key = min(right_key, self._max_key)
 
         left_key = self.get_next_nonzero_chunk(left_key)
         right_key = self.get_previous_nonzero_chunk(right_key, left_key)
@@ -843,9 +844,8 @@
         return self.get_data(slice(lbase, lbase + length), fields)
 
     def get_key_data(self, key, fields):
-        max_key = self.indexdata['index'][-1]
-        if key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, max_key))
+        if key > self._max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (key, self._max_key))
         base = self.indexdata['base'][key]
         length = self.indexdata['len'][key] - base
         if length > 0:


https://bitbucket.org/yt_analysis/yt/commits/1f21e03f2bae/
Changeset:   1f21e03f2bae
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-12 01:16:21
Summary:     Put back padding to maintain compatibility with native binary files
Affected #:  2 files

diff -r cabc874792f34d147df9f12d13a1c9a7b7f07491 -r 1f21e03f2bae4e413ea626bcf12f2b8395b4a916 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -32,7 +32,7 @@
         float pos[6]
         float corevel[3]
         float bulkvel[3]
-        float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+        float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
         float J[3]
         float energy, spin
         float alt_m[4]
@@ -42,9 +42,9 @@
         float A2[3]
         float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-        float min_pos_err, min_vel_err, min_bulkvel_err
+        float min_pos_err, min_vel_err, min_bulkvel_err, _pad
 
-ctypedef packed struct haloflat:
+ctypedef struct haloflat:
     np.int64_t id
     float pos_x, pos_y, pos_z, vel_x, vel_y, vel_z
     float corevel_x, corevel_y, corevel_z
@@ -58,7 +58,7 @@
     float b_to_a2, c_to_a2, A2x, A2y, A2z
     float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
     np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-    float min_pos_err, min_vel_err, min_bulkvel_err
+    float min_pos_err, min_vel_err, min_bulkvel_err, _pad
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results

diff -r cabc874792f34d147df9f12d13a1c9a7b7f07491 -r 1f21e03f2bae4e413ea626bcf12f2b8395b4a916 yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -93,6 +93,7 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
+    ('padding2', np.float32),
 ])
 
 particle_dt = np.dtype([


https://bitbucket.org/yt_analysis/yt/commits/5bb97d018ef6/
Changeset:   5bb97d018ef6
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-12 01:28:05
Summary:     Get index level from midx file unless specified.
Make wandering_particles default to False
Affected #:  2 files

diff -r 1f21e03f2bae4e413ea626bcf12f2b8395b4a916 -r 5bb97d018ef643df946e7a531473a8bd67e1fa37 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -69,7 +69,7 @@
                  sdf_header = None,
                  idx_filename = None,
                  idx_header = None,
-                 idx_level = 9,
+                 idx_level = None,
                  field_map = None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor

diff -r 1f21e03f2bae4e413ea626bcf12f2b8395b4a916 -r 5bb97d018ef643df946e7a531473a8bd67e1fa37 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -316,15 +316,13 @@
         iter_bbox_data(left, right, fields)
 
     """
-    def __init__(self, sdfdata, indexdata, level=9):
+    def __init__(self, sdfdata, indexdata, level=None):
         super(SDFIndex, self).__init__()
         self.sdfdata = sdfdata
         self.indexdata = indexdata
+        if level is None:
+            level = self.indexdata.parameters.get('level',None)
         self.level = level
-        idlevel = self.indexdata.parameters.get('level',None)
-        if idlevel and idlevel != level:
-            mylog.warning("Overriding index level to %i" % idlevel)
-            self.level = idlevel
 
         self.rmin = None
         self.rmax = None
@@ -332,7 +330,7 @@
         self.domain_buffer = 0
         self.domain_dims = 0
         self.domain_active_dims = 0
-        self.wandering_particles = True
+        self.wandering_particles = False
         self.valid_indexdata = True
         self.masks = {
             "p" : int("011"*level, 2),


https://bitbucket.org/yt_analysis/yt/commits/9f46f6288088/
Changeset:   9f46f6288088
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-12 02:47:50
Summary:     Implementing iter_padded_bbox_data, which is now simply wrapped by get_padded_bbox_data. A few whitespace auto-fixes.
Affected #:  1 file

diff -r 1f21e03f2bae4e413ea626bcf12f2b8395b4a916 -r 9f46f628808852c1f7e6f116ab868996961bcbd1 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -716,7 +716,7 @@
                 #                  self.true_domain_width[i]) + left[i]
                 mask = pos[:,i] >= left[i] + DW[i]
                 pos[mask, i] -= DW[i]
-                mask = pos[:,i] < right[i] - DW[i] 
+                mask = pos[:,i] < right[i] - DW[i]
                 pos[mask, i] += DW[i]
                 #del mask
 
@@ -731,7 +731,7 @@
 
             filtered = {ax: pos[:, i][mask] for i, ax in enumerate('xyz')}
             for f in data.keys():
-                if f in 'xyz': 
+                if f in 'xyz':
                     continue
                 filtered[f] = data[f][mask]
 
@@ -921,25 +921,34 @@
         assert bbox.shape == (3, 2)
         return bbox
 
-    def get_padded_bbox_data(self, level, cell_iarr, pad, fields):
-        """Get floating point bounding box for a given sindex cell
 
-        Returns:
-            bbox: array-like, shape (3,2)
+    def iter_padded_bbox_data(self, level, cell_iarr, pad, fields):
+        """
+        Yields data chunks for a cell on the given level
+        plus a padding around the cell, for a list of fields.
+
+        Yields:
+            dd: A dictionaries of data.
+
+        Example:
+
+        for chunk in sindex.iter_padded_bbox_data(
+            6, np.array([128]*3), 8.0, ['x','y','z','ident']):
+
+            print chunk['x'].max()
 
         """
+
         bbox = self.get_cell_bbox(level, cell_iarr)
         filter_left = bbox[:, 0] - pad
         filter_right = bbox[:, 1] + pad
 
-        data = []
+        # Center cell
         for dd in self.filter_bbox(
             filter_left, filter_right,
             [self.get_cell_data(level, cell_iarr, fields)]):
-            data.append(dd)
-        #for dd in self.iter_bbox_data(bbox[:,0], bbox[:,1], fields):
-        #    data.append(dd)
-        #assert data[0]['x'].shape[0] > 0
+            yield dd
+            del dd
 
         # Bottom & Top
         pbox = bbox.copy()
@@ -952,13 +961,16 @@
         for dd in self.filter_bbox(
             filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
+            yield dd
+            del dd
+
         pbox[2, 0] = bbox[2, 1]
         pbox[2, 1] = pbox[2, 0] + pad[2]
         for dd in self.filter_bbox(
             filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
+            yield dd
+            del dd
 
         # Front & Back
         pbox = bbox.copy()
@@ -969,13 +981,16 @@
         for dd in self.filter_bbox(
             filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
+            yield dd
+            del dd
+
         pbox[1, 0] = bbox[1, 1]
         pbox[1, 1] = pbox[1, 0] + pad[1]
         for dd in self.filter_bbox(
             filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
+            yield dd
+            del dd
 
         # Left & Right
         pbox = bbox.copy()
@@ -984,14 +999,36 @@
         for dd in self.filter_bbox(
             filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
-            data.append(dd)
+            yield dd
+            del dd
+
         pbox[0, 0] = bbox[0, 1]
         pbox[0, 1] = pbox[0, 0] + pad[0]
         for dd in self.filter_bbox(
             filter_left, filter_right,
             self.iter_bbox_data(pbox[:,0], pbox[:,1], fields)):
+            yield dd
+            del dd
+
+    def get_padded_bbox_data(self, level, cell_iarr, pad, fields):
+        """
+        Return list of data chunks for a cell on the given level
+        plus a padding around the cell, for a list of fields.
+
+        Returns:
+            data: A list of dictionaries of data.
+
+        chunks = sindex.get_padded_bbox_data(6, np.array([128]*3),
+                                             8.0, ['x','y','z','ident'])
+
+        """
+        bbox = self.get_cell_bbox(level, cell_iarr)
+        filter_left = bbox[:, 0] - pad
+        filter_right = bbox[:, 1] + pad
+
+        data = []
+        for dd in self.iter_padded_bbox_data(self, level, cell_iarr, pad, fields):
             data.append(dd)
-
         return data
 
     def get_cell_width(self, level):
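The generator form lets callers stream the padded-cell data chunk by chunk instead of accumulating every chunk in memory, with get_padded_bbox_data kept as a thin list-building wrapper. A hedged usage sketch, assuming an already-constructed SDFIndex instance named sindex as in the docstring above (the level, cell, and pad values are illustrative):

    import numpy as np

    pad = np.array([8.0, 8.0, 8.0])
    n_particles = 0
    for chunk in sindex.iter_padded_bbox_data(6, np.array([128] * 3),
                                              pad, ['x', 'y', 'z', 'ident']):
        n_particles += chunk['x'].size   # consume each chunk, then drop it
    print(n_particles)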


https://bitbucket.org/yt_analysis/yt/commits/50c5d1417251/
Changeset:   50c5d1417251
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-12 02:48:40
Summary:     Merging with level fixes, wandering particles
Affected #:  2 files

diff -r 9f46f628808852c1f7e6f116ab868996961bcbd1 -r 50c5d14172514dabfffd32bea698048ebee1f7f4 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -69,7 +69,7 @@
                  sdf_header = None,
                  idx_filename = None,
                  idx_header = None,
-                 idx_level = 9,
+                 idx_level = None,
                  field_map = None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor

diff -r 9f46f628808852c1f7e6f116ab868996961bcbd1 -r 50c5d14172514dabfffd32bea698048ebee1f7f4 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -316,15 +316,13 @@
         iter_bbox_data(left, right, fields)
 
     """
-    def __init__(self, sdfdata, indexdata, level=9):
+    def __init__(self, sdfdata, indexdata, level=None):
         super(SDFIndex, self).__init__()
         self.sdfdata = sdfdata
         self.indexdata = indexdata
+        if level is None:
+            level = self.indexdata.parameters.get('level',None)
         self.level = level
-        idlevel = self.indexdata.parameters.get('level',None)
-        if idlevel and idlevel != level:
-            mylog.warning("Overriding index level to %i" % idlevel)
-            self.level = idlevel
 
         self.rmin = None
         self.rmax = None
@@ -332,7 +330,7 @@
         self.domain_buffer = 0
         self.domain_dims = 0
         self.domain_active_dims = 0
-        self.wandering_particles = True
+        self.wandering_particles = False
         self.valid_indexdata = True
         self.masks = {
             "p" : int("011"*level, 2),


https://bitbucket.org/yt_analysis/yt/commits/35b3c5eeb96b/
Changeset:   35b3c5eeb96b
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-12 04:17:47
Summary:     Add routine to calculate strict spherical overdensity masses.
Affected #:  1 file

diff -r 50c5d14172514dabfffd32bea698048ebee1f7f4 -r 35b3c5eeb96b0d54996bf8a79c35b1004b02e2c8 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -269,6 +269,34 @@
                                       particle_thresh_dens[4]],
                                      dtype=np.float64)
         return d
+
+    def assign_masses(self, h, np.ndarray[np.float32_t, ndim=1] r, float force_res, \
+                      double pmass, np.ndarray[np.float64_t, ndim=1] dens_thresh):
+        """Assign spherical overdensity masses to halos.  r must be sorted"""
+        cdef double total_mass = 0.0
+        cdef double m = 0.0
+        cdef double alt_m1 = 0.0
+        cdef double alt_m2 = 0.0
+        cdef double alt_m3 = 0.0
+        cdef double alt_m4 = 0.0
+        cdef double rr
+        cdef double cur_dens
+        for rr in r:
+            if rr < force_res: rr = force_res
+            total_mass += pmass
+            cur_dens = total_mass/(rr*rr*rr)
+            if cur_dens > dens_thresh[0]: m = total_mass
+            if cur_dens > dens_thresh[1]: alt_m1 = total_mass
+            if cur_dens > dens_thresh[2]: alt_m2 = total_mass
+            if cur_dens > dens_thresh[3]: alt_m3 = total_mass
+            if cur_dens > dens_thresh[4]: alt_m4 = total_mass
+            if cur_dens <= dens_thresh[1]:
+                h['m'] = m
+                h['alt_m1'] = alt_m1
+                h['alt_m2'] = alt_m2
+                h['alt_m3'] = alt_m3
+                h['alt_m4'] = alt_m4
+                return
         
     def max_halo_radius(self, int i):
         return max_halo_radius(&halos[i])
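A rough NumPy restatement of what assign_masses does, for orientation: walk outward through the pre-sorted particle radii, accumulate mass, and record the enclosed mass at the largest radius where the enclosed value total_mass / r**3 still exceeds each threshold. It assumes dens_thresh is pre-scaled so that this comparison is the intended overdensity test; all names below are illustrative:

    import numpy as np

    def so_masses(r_sorted, pmass, force_res, dens_thresh):
        masses = np.zeros(len(dens_thresh))
        total_mass = 0.0
        for rr in r_sorted:
            rr = max(rr, force_res)            # soften below the force resolution
            total_mass += pmass
            cur_dens = total_mass / rr**3
            for k, thresh in enumerate(dens_thresh):
                if cur_dens > thresh:
                    masses[k] = total_mass
            if cur_dens <= dens_thresh[1]:     # early exit, as in the Cython loop
                break
        return masses

    r = np.sort(np.random.random(2000))
    print(so_masses(r, pmass=1.0, force_res=1e-3,
                    dens_thresh=np.array([500., 200., 100., 50., 25.])))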


https://bitbucket.org/yt_analysis/yt/commits/07b28c995b9c/
Changeset:   07b28c995b9c
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-12 04:24:37
Summary:     Turn off progress messages for halo finding.
stderr is line buffered, and parallel jobs can thrash the file system.
Affected #:  2 files

diff -r 35b3c5eeb96b0d54996bf8a79c35b1004b02e2c8 -r 07b28c995b9c27905df3f99fc67ca352fcec9c54 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -330,6 +330,7 @@
                                 np.ndarray[anyfloat, ndim=2] pos,
                                 np.ndarray[anyfloat, ndim=2] vel):
 
+        verbose = False
         # Define fof object
 
         # Find number of particles
@@ -388,7 +389,7 @@
                 pcounts[ndone] = fof_obj.num_p
                 counter += 1
                 ndone += 1
-                if counter == frac:
+                if verbose and counter == frac:
                     print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
                         (100.0 * ndone)/pcounts.size,
                         fof_obj.particles[0].pos[2],

diff -r 35b3c5eeb96b0d54996bf8a79c35b1004b02e2c8 -r 07b28c995b9c27905df3f99fc67ca352fcec9c54 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -674,6 +674,7 @@
         cdef np.int64_t moff = octree.get_domain_offset(domain_id + domain_offset)
         cdef np.int64_t i, j, k, n, nneighbors, pind0, offset
         cdef int counter = 0
+        verbose = False
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
@@ -711,10 +712,10 @@
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
         counter = 0
         cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
-        print >> sys.stderr, "Will be outputting every", frac
         cdef int inside, skip_early
+        if verbose: print >> sys.stderr, "Will be outputting every", frac
         for i in range(doff.shape[0]):
-            if counter >= frac:
+            if verbose and counter >= frac:
                 counter = 0
                 print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             counter += 1


https://bitbucket.org/yt_analysis/yt/commits/2a589a6679e3/
Changeset:   2a589a6679e3
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-12 15:50:26
Summary:     Use align=True instead of explicit padding.
Affected #:  1 file

diff -r 07b28c995b9c27905df3f99fc67ca352fcec9c54 -r 2a589a6679e3c986473a13ae67ffdb383b475867 yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -93,8 +93,7 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-    ('padding2', np.float32),
-])
+], align=True)
 
 particle_dt = np.dtype([
     ('particle_identifier', np.int64),
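A quick check of what align=True buys here: NumPy pads the structured dtype to the same layout a C compiler would give the struct, which is what the explicit padding field was emulating by hand. The toy dtype below is not the full halo_dt:

    import numpy as np

    fields = [('id', np.int64),
              ('a', np.float32), ('b', np.float32), ('c', np.float32)]

    packed  = np.dtype(fields)               # tightly packed, itemsize 20
    aligned = np.dtype(fields, align=True)   # padded to 8-byte alignment, itemsize 24
    print(packed.itemsize, aligned.itemsize)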


https://bitbucket.org/yt_analysis/yt/commits/02f0c8c17e5a/
Changeset:   02f0c8c17e5a
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-12 18:41:29
Summary:     Simplify setting SDF bounds.
Affected #:  1 file

diff -r 2a589a6679e3c986473a13ae67ffdb383b475867 -r 02f0c8c17e5af76a3967cbab0838bed551816754 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -355,13 +355,13 @@
             0 : slice(2, None, 3),
         }
         self.set_bounds()
+        self._midx_version = self.indexdata.parameters.get('midx_version', 0)
         if self._midx_version >= 1.0:
             max_key = self.get_key(np.array([2**self.level - 1]*3))
         else:
             max_key = self.indexdata['index'][-1]
         self._max_key = max_key
 
-
     def _fix_rexact(self, rmin, rmax):
 
         center = 0.5*(rmax+rmin)
@@ -371,83 +371,64 @@
         self.rmax = center + 0.5*mysize
 
     def set_bounds(self):
-        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
-        sorted_xyz = self.sdfdata.parameters.get("sorted_xyz", False)
-        morton_xyz = self.sdfdata.parameters.get("morton_xyz", False)
-
-        self.rmin = np.zeros(3)
-        self.rmax = np.zeros(3)
-        r_0 = self.sdfdata.parameters['R0']
-        try:
-            r_0 = self.sdfdata.parameters['L0']
-        except:
-            pass
-
-        if sorted_rtp:
-            mylog.debug("Setting up Sorted RTP data")
-            rtp_min = np.array([0.0, 0.0, -np.pi])
-            rtp_max = np.array([r_0*1.01, 2*np.pi, np.pi])
-            self._fix_rexact(rtp_min, rtp_max)
-            self.true_domain_left = self.rmin.copy()
-            self.true_domain_right = self.rmax.copy()
-            self.true_domain_width = self.rmax - self.rmin
-        elif sorted_xyz:
-            mylog.debug("Setting up Sorted XYZ data")
-            offset_center = self.sdfdata.parameters.get("offset_center", False)
-            if offset_center:
-                rmin = np.zeros(3)
-                rmax = np.array([2.0*r_0]*3)
-            else:
-                rmin = -1.01*np.array([r_0]*3)
-                rmax = 1.01*np.array([r_0]*3)
+        if ('x_min' in self.sdfdata.parameters and 'x_max' in self.sdfdata.parameters) or \
+           ('theta_min' in self.sdfdata.parameters and 'theta_max' in self.sdfdata.parameters):
+            if 'x_min' in self.sdfdata.parameters:
+                rmin = np.array([self.sdfdata.parameters['x_min'],
+                                 self.sdfdata.parameters['y_min'],
+                                 self.sdfdata.parameters['z_min']])
+                rmax = np.array([self.sdfdata.parameters['x_max'],
+                                 self.sdfdata.parameters['y_max'],
+                                 self.sdfdata.parameters['z_max']])
+            elif 'theta_min' in self.sdfdata.parameters:
+                rmin = np.array([self.sdfdata.parameters['r_min'],
+                                 self.sdfdata.parameters['theta_min'],
+                                 self.sdfdata.parameters['phi_min']])
+                rmax = np.array([self.sdfdata.parameters['r_max'],
+                                 self.sdfdata.parameters['theta_max'],
+                                 self.sdfdata.parameters['phi_max']])
             self._fix_rexact(rmin, rmax)
             self.true_domain_left = self.rmin.copy()
             self.true_domain_right = self.rmax.copy()
             self.true_domain_width = self.rmax - self.rmin
+            self.domain_width = self.rmax - self.rmin
+            self.domain_dims = 1 << self.level
+            self.domain_buffer = 0
+            self.domain_active_dims = self.domain_dims
         else:
-            mylog.debug("Setting up regular data")
+            mylog.debug("Setting up older data")
             rx = self.sdfdata.parameters.get('Rx')
             ry = self.sdfdata.parameters.get('Ry')
             rz = self.sdfdata.parameters.get('Rz')
             a =  self.sdfdata.parameters.get("a", 1.0)
-            r = np.array([rx, ry, rz])
-            rmin = -a*r
-            rmax = a*r
+            rmin = -a * np.array([rx, ry, rz])
+            rmax = a * np.array([rx, ry, rz])
             print rmin, rmax
-
-            #/* expand root for non-power-of-two */
-            expand_root = 0.0
-            ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
-            if ic_Nmesh != 0:
-                f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
-                if (f2 != ic_Nmesh):
-                    expand_root = 1.0*f2/ic_Nmesh - 1.0;
-                mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
-            #self._fix_rexact(rmin, rmax)
             self.true_domain_left = rmin.copy()
             self.true_domain_right = rmax.copy()
             self.true_domain_width = rmax - rmin
-            rmin *= (1.0 + expand_root)
-            rmax *= (1.0 + expand_root)
+
+            expand_root = 0.0
+            morton_xyz = self.sdfdata.parameters.get("morton_xyz", False)
+            if not morton_xyz:
+                self.wandering_particles = True
+                ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
+                # Expand root for non power-of-2
+                if ic_Nmesh != 0:
+                    f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
+                    if (f2 != ic_Nmesh):
+                        expand_root = 1.0*f2/ic_Nmesh - 1.0;
+                        mylog.debug("Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root))
+                        rmin *= (1.0 + expand_root)
+                        rmax *= (1.0 + expand_root)
+
             self._fix_rexact(rmin, rmax)
+            self.domain_width = self.rmax - self.rmin
+            self.domain_dims = 1 << self.level
+            self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
+            self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
+
         print self.rmin, self.rmax
-
-        self._midx_version = self.indexdata.parameters.get("midx_version", 0)
-        if self._midx_version == 1.0:
-            rmin = np.zeros(3)
-            rmax = np.zeros(3)
-            rmin[0] = self.indexdata.parameters['x_min']
-            rmin[1] = self.indexdata.parameters['y_min']
-            rmin[2] = self.indexdata.parameters['z_min']
-            rmax[0] = self.indexdata.parameters['x_max']
-            rmax[1] = self.indexdata.parameters['y_max']
-            rmax[2] = self.indexdata.parameters['z_max']
-            self._fix_rexact(rmin, rmax)
-
-        self.domain_width = self.rmax - self.rmin
-        self.domain_dims = 1 << self.level
-        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
-        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
         mylog.debug("SINDEX: %s, %s, %s " % (self.domain_width, self.domain_dims, self.domain_active_dims))
 
     def spread_bits(self, ival, level=None):
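
For reference, the non-power-of-two root expansion performed by set_bounds() in the hunk above can be read in isolation as follows; this is a standalone sketch with an illustrative function name and sample values, not code from the changeset:

    import numpy as np

    def expand_for_non_power_of_two(rmin, rmax, ic_Nmesh, level):
        expand_root = 0.0
        if ic_Nmesh != 0:
            f2 = 1 << int(np.log2(ic_Nmesh - 1) + 1)   # next power of two >= ic_Nmesh
            if f2 != ic_Nmesh:
                expand_root = 1.0 * f2 / ic_Nmesh - 1.0
        rmin = rmin * (1.0 + expand_root)
        rmax = rmax * (1.0 + expand_root)
        domain_dims = 1 << level
        domain_buffer = (domain_dims - int(domain_dims / (1.0 + expand_root))) // 2
        return rmin, rmax, domain_buffer

    # e.g. a 768^3 IC mesh at level 10: the box is stretched by 4/3 so it spans
    # 1024 cells, leaving a 128-cell buffer on each side of the active region.
    print(expand_for_non_power_of_two(np.array([-1.0]*3), np.array([1.0]*3), 768, 10))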


https://bitbucket.org/yt_analysis/yt/commits/38ab44e522ba/
Changeset:   38ab44e522ba
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-13 00:31:03
Summary:     Specify a stopping criterion, or the search runs until the first nonzero chunk no matter what. Important for sparse midx files.
Affected #:  1 file

diff -r 02f0c8c17e5af76a3967cbab0838bed551816754 -r 38ab44e522ba3d7573b82912a6616c514ab1c185 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -808,7 +808,7 @@
                                (left_key, self._max_key))
         right_key = min(right_key, self._max_key)
 
-        left_key = self.get_next_nonzero_chunk(left_key)
+        left_key = self.get_next_nonzero_chunk(left_key, right_key-1)
         right_key = self.get_previous_nonzero_chunk(right_key, left_key)
 
         lbase = self.indexdata['base'][left_key]
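
To see why the extra argument matters: get_next_nonzero_chunk scans the midx index for the next chunk with a nonzero length, and without an upper bound that scan keeps walking until it finds one anywhere to the right of the key.  A rough sketch of the bounded search (hypothetical standalone helper, not the method's actual body):

    import numpy as np

    def next_nonzero_chunk(lengths, key, stop):
        # Advance key until a chunk with nonzero length is found, but never past stop.
        while key < stop and lengths[key] == 0:
            key += 1
        return key

    lengths = np.zeros(16, dtype=int)
    lengths[12] = 5
    print(next_nonzero_chunk(lengths, 3, 8))   # returns 8; without the bound it would walk to 12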


https://bitbucket.org/yt_analysis/yt/commits/154b6f000acb/
Changeset:   154b6f000acb
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-13 17:44:32
Summary:     Use Mpccm for iter_filtered_bbox_fields
Affected #:  1 file

diff -r 38ab44e522ba3d7573b82912a6616c514ab1c185 -r 154b6f000acb3fad7c91580ba54b1c15c4c93c92 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -728,7 +728,7 @@
         """
 
         kpcuq = left.in_units('kpccm').uq
-        mpcuq = left.in_units('Mpc/h').uq
+        mpcuq = left.in_units('Mpccm/h').uq
         DW = (self.true_domain_width * kpcuq).in_units('Mpc/h')
         if pos_fields is None:
             pos_fields = 'x','y','z'
@@ -736,7 +736,7 @@
         print pos_fields
 
         # I'm sorry.
-        pos = mpcuq * np.array([data[xf].in_units('Mpc/h'), data[yf].in_units('Mpc/h'), data[zf].in_units('Mpc/h')]).T
+        pos = mpcuq * np.array([data[xf].in_units('Mpccm/h'), data[yf].in_units('Mpccm/h'), data[zf].in_units('Mpccm/h')]).T
 
         # This hurts, but is useful for periodicity. Probably should check first
         # if it is even needed for a given left/right
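
The distinction being fixed here is proper versus comoving megaparsecs: 'Mpc/h' is a physical length while 'Mpccm/h' is comoving, and for particle positions stored in comoving coordinates the two differ by the scale factor.  A hedged one-liner showing the conversion, assuming a cosmological dataset ds whose unit registry defines the comoving units:

    pos_x = ds.arr([1.0, 2.0], 'Mpccm/h')
    print(pos_x.in_units('Mpc/h'))   # multiplies by the snapshot's scale factor a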


https://bitbucket.org/yt_analysis/yt/commits/3ba4d54247ad/
Changeset:   3ba4d54247ad
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-13 17:47:29
Summary:     Call init_cosmology (needed if light_cone == True)
Affected #:  1 file

diff -r 154b6f000acb3fad7c91580ba54b1c15c4c93c92 -r 3ba4d54247ada6e9e274a4bff8ab4fede6e9bb8a yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -73,7 +73,6 @@
     void free_halos() nogil
     float max_halo_radius(halo *h) nogil
 
-
 # global in groupies.c
 cdef extern double particle_thresh_dens[5]
 
@@ -88,6 +87,9 @@
     void setup_config() nogil
     void output_config(char *fn) nogil
 
+cdef import from "distance.h":
+    void init_cosmology() nogil
+
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
     # definition of their vaiables.
@@ -237,7 +239,6 @@
             #workaround is to make a new directory
             OUTBASE = outbase 
 
-
         PARTICLE_MASS = particle_mass.in_units('Msun/h')
         PERIODIC = periodic
         BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
@@ -257,6 +258,7 @@
 
         # Needs to be called so rockstar can use the particle mass parameter
         # to calculate virial quantities properly
+        init_cosmology()
         calc_mass_definition()
 
         if write_config: output_config(NULL)


https://bitbucket.org/yt_analysis/yt/commits/56fe3ed45fdf/
Changeset:   56fe3ed45fdf
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-16 23:00:55
Summary:     Warn if m200b reaches the edge of a FOF group
Affected #:  1 file

diff -r 3ba4d54247ada6e9e274a4bff8ab4fede6e9bb8a -r 56fe3ed45fdfee8689169e312c690e7c3f1d40e4 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -293,12 +293,15 @@
             if cur_dens > dens_thresh[3]: alt_m3 = total_mass
             if cur_dens > dens_thresh[4]: alt_m4 = total_mass
             if cur_dens <= dens_thresh[1]:
-                h['m'] = m
-                h['alt_m1'] = alt_m1
-                h['alt_m2'] = alt_m2
-                h['alt_m3'] = alt_m3
-                h['alt_m4'] = alt_m4
-                return
+                break
+        h['m'] = m
+        h['alt_m1'] = alt_m1
+        h['alt_m2'] = alt_m2
+        h['alt_m3'] = alt_m3
+        h['alt_m4'] = alt_m4
+        if cur_dens > dens_thresh[1]:
+            print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!"
+            print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1])
         
     def max_halo_radius(self, int i):
         return max_halo_radius(&halos[i])
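
A loose Python paraphrase of the new control flow (the real code is the Cython above; names here are simplified): accumulate mass outward in radius, stop once the local density drops below the m200b threshold, and warn when the FOF particle list runs out first.

    def assign_m200b(masses, densities, thresh):
        m = 0.0
        cur_dens = 0.0
        for dm, cur_dens in zip(masses, densities):
            m += dm
            if cur_dens <= thresh:
                break
        if cur_dens > thresh:
            print("r too small in assign_masses, m200b will be wrong!")
        return m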


https://bitbucket.org/yt_analysis/yt/commits/622a7d0115e4/
Changeset:   622a7d0115e4
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-17 13:52:10
Summary:     revert eb17bb9 change of default mass unit
Affected #:  2 files

diff -r 56fe3ed45fdfee8689169e312c690e7c3f1d40e4 -r 622a7d0115e4e8678acd2634c88dadc2ffea52e0 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -299,10 +299,11 @@
         h['alt_m2'] = alt_m2
         h['alt_m3'] = alt_m3
         h['alt_m4'] = alt_m4
-        if cur_dens > dens_thresh[1]:
-            print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!"
-            print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1])
-        
+        # if cur_dens > dens_thresh[1]:
+            # This is usually a subhalo problem, and we don't know who is a subhalo
+            # print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!"
+            # print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1])
+
     def max_halo_radius(self, int i):
         return max_halo_radius(&halos[i])
 

diff -r 56fe3ed45fdfee8689169e312c690e7c3f1d40e4 -r 622a7d0115e4e8678acd2634c88dadc2ffea52e0 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -173,7 +173,7 @@
         self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
         self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
         self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
-        mass_unit = self.parameters.get("mass_unit", 'Msun')
+        mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
         if ' ' in mass_unit:
             factor, unit = self.parameters.get("mass_unit", 'Msun').split(' ')
         else:


https://bitbucket.org/yt_analysis/yt/commits/9de86ea05d78/
Changeset:   9de86ea05d78
Branch:      yt-3.0
User:        mswarren
Date:        2014-06-17 15:00:34
Summary:     Complete default mass unit fix
Affected #:  1 file

diff -r 622a7d0115e4e8678acd2634c88dadc2ffea52e0 -r 9de86ea05d7828cde7231e5168c83872593715e5 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -175,7 +175,7 @@
         self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
         mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
         if ' ' in mass_unit:
-            factor, unit = self.parameters.get("mass_unit", 'Msun').split(' ')
+            factor, unit = mass_unit.split(' ')
         else:
             factor = 1.0
             unit = mass_unit
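
Taken together, the two mass-unit changesets above parse a mass_unit parameter that may be either a bare unit or a "<factor> <unit>" string.  A small standalone sketch of that logic (hypothetical function name):

    def parse_mass_unit(mass_unit='1e10 Msun'):
        # "1e10 Msun" -> (1e10, "Msun");  "Msun" -> (1.0, "Msun")
        if ' ' in mass_unit:
            factor, unit = mass_unit.split(' ')
        else:
            factor, unit = 1.0, mass_unit
        return float(factor), unit

    print(parse_mass_unit('1e10 Msun'))   # (1e10, 'Msun')
    print(parse_mass_unit('Msun'))        # (1.0, 'Msun')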


https://bitbucket.org/yt_analysis/yt/commits/c0411c147925/
Changeset:   c0411c147925
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-17 15:10:24
Summary:     Merging
Affected #:  73 files

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -41,6 +41,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -16,6 +16,18 @@
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
@@ -44,30 +56,40 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll set up the grid and the parameters of the profiles:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "nx,ny,nz = (256,256,256) # domain dimensions\n",
+      "R = 10. # outer radius of disk, kpc\n",
+      "r_0 = 3. # scale radius, kpc\n",
+      "beta = 1.4 # for the tangential velocity profile\n",
+      "alpha = -1. # for the radial density profile\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Second, we'll construct the data arrays for the density and the velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {}\n",
-      "nx,ny,nz = (256,256,256)\n",
-      "R = 10. # kpc\n",
-      "r_0 = 3. # kpc\n",
-      "beta = 1.4\n",
-      "alpha = -1.\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates\n",
       "dens = np.zeros((nx,ny,nz))\n",
       "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
       "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
@@ -75,11 +97,31 @@
       "vely = np.zeros((nx,ny,nz))\n",
       "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
       "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "dens[r > R] = 0.0\n",
+      "velx[r > R] = 0.0\n",
+      "vely[r > R] = 0.0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
       "data[\"density\"] = (dens,\"g/cm**3\")\n",
       "data[\"velocity_x\"] = (velx, \"km/s\")\n",
       "data[\"velocity_y\"] = (vely, \"km/s\")\n",
       "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
       "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
      ],
      "language": "python",
@@ -146,7 +188,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-1.5,1.5,\"km/s\"))"
      ],
      "language": "python",
      "metadata": {},
@@ -180,8 +222,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"cube.fits\")\n",
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "pf = load(\"cube.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Specifying no center gives us the center slice\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -192,19 +244,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# To figure out what the domain center and width is in pixel (code length) units:\n",
-      "print ds.domain_center\n",
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "import yt.units as u\n",
+      "# Picking different velocities for the slices\n",
+      "new_center = pf.domain_center\n",
+      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -215,7 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -225,7 +270,31 @@
     {
      "cell_type": "code",
      "collapsed": false,
-     "input": [],
+     "input": [
+      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj.set_log(\"density\", True)\n",
+      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+      "prj.show()"
+     ],
      "language": "python",
      "metadata": {},
      "outputs": []

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
+  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +18,7 @@
       "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
       "contributions, and for large frequencies and high temperatures\n",
       "relativistic effects are important. For computing the full S-Z signal\n",
-      "incorporating all of these effects, Jens Chluba has written a library:\n",
+      "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
       "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
@@ -93,10 +93,10 @@
       "from yt.mods import *\n",
       "from yt.analysis_modules.api import SZProjection\n",
       "\n",
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",
       "freqs = [90.,180.,240.]\n",
-      "szprj = SZProjection(pf, freqs)"
+      "szprj = SZProjection(ds, freqs)"
      ],
      "language": "python",
      "metadata": {},
@@ -108,8 +108,8 @@
      "source": [
       "`freqs` is a list or array of frequencies in GHz at which the signal\n",
       "is to be computed. The `SZProjection` constructor also accepts the\n",
-      "optional keywords, **mue** (mean molecular weight for computing the\n",
-      "electron number density, 1.143 is the default) and **high_order** (set\n",
+      "optional keywords, `mue` (mean molecular weight for computing the\n",
+      "electron number density, 1.143 is the default) and `high_order` (set\n",
       "to True to compute terms in the S-Z signal expansion up to\n",
       "second-order in $T_{e,SZ}$ and $\\beta$). "
      ]
@@ -127,7 +127,7 @@
      "collapsed": false,
      "input": [
       "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
-      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"mpc\"), nx=400)"
+      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
      ],
      "language": "python",
      "metadata": {},
@@ -144,7 +144,7 @@
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
       "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
-      "keywords for projections such as **center**, **width**, and **source**. The image buffer size can be controlled by setting **nx**.  \n"
+      "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },
     {
@@ -216,8 +216,16 @@
      "source": [
       "which would write all of the projections to a single FITS file,\n",
       "including coordinate information in kpc. The optional keyword\n",
-      "**clobber** allows a previous file to be overwritten. \n"
+      "`clobber` allows a previous file to be overwritten. \n"
      ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -226,4 +226,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -23,7 +23,8 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from matplotlib import pylab"
+      "from matplotlib import pylab\n",
+      "from yt.analysis_modules.halo_finding.api import HaloFinder"
      ],
      "language": "python",
      "metadata": {},

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/cookbook/amrkdtree_to_uniformgrid.py
--- /dev/null
+++ b/doc/source/cookbook/amrkdtree_to_uniformgrid.py
@@ -0,0 +1,33 @@
+import numpy as np
+import yt
+
+#This is an example of how to map an amr data set
+#to a uniform grid. In this case the highest
+#level of refinement is mapped into a 1024x1024x1024 cube
+
+#first the amr data is loaded
+ds = yt.load("~/pfs/galaxy/new_tests/feedback_8bz/DD0021/DD0021")
+
+#next we get the maximum refinement level
+lmax = ds.parameters['MaximumRefinementLevel']
+
+#calculate the center of the domain
+domain_center = (ds.domain_right_edge + ds.domain_left_edge)/2
+
+#determine the cellsize in the highest refinement level
+cell_size = ds.domain_width/(ds.domain_dimensions*2**lmax)
+
+#calculate the left edge of the new grid
+left_edge = domain_center - 512*cell_size
+
+#the number of cells per side of the new grid
+ncells = 1024
+
+#ask yt for the specified covering grid
+cgrid = ds.covering_grid(lmax, left_edge, np.array([ncells,]*3))
+
+#get a map of the density into the new grid
+density_map = cgrid["density"].astype(dtype="float32")
+
+#save the file as a numpy array for convenient future processing
+np.save("/pfs/goldbaum/galaxy/new_tests/feedback_8bz/gas_density_DD0021_log_densities.npy", density_map)

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/cookbook/ffmpeg_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/ffmpeg_volume_rendering.py
@@ -0,0 +1,99 @@
+#This is an example of how to make videos of 
+#uniform grid data using Theia and ffmpeg
+
+#The Scene object to hold the ray caster and view camera
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+
+#GPU based raycasting algorithm to use 
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+
+#These will be used to define how to color the data
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+#This will be used to launch ffmpeg
+import subprocess as sp
+
+#Of course we need numpy for math magic
+import numpy as np
+
+#Opacity scaling function
+def scale_func(v, mi, ma):
+      return  np.minimum(1.0, (v-mi)/(ma-mi) + 0.0)
+
+#load the uniform grid from a numpy array file
+bolshoi = "/home/bogert/log_densities_1024.npy"
+density_grid = np.load(bolshoi)
+
+#Set the TheiaScene to use the density_grid and 
+#setup the raycaster for a resulting 1080p image
+ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (1920,1080) ))
+
+#the min and max values in the data to color
+mi, ma = 0.0, 3.6
+
+#setup colortransferfunction
+bins = 5000
+tf = ColorTransferFunction( (mi, ma), bins)
+tf.map_to_colormap(0.5, ma, colormap="spring", scale_func = scale_func)
+
+#pass the transfer function to the ray caster
+ts.source.raycaster.set_transfer(tf)
+
+#Initial configuration for start of video
+#set initial opacity and brightness values
+#then zoom into the center of the data 30%
+ts.source.raycaster.set_opacity(0.03)
+ts.source.raycaster.set_brightness(2.3)
+ts.camera.zoom(30.0)
+
+#path to ffmpeg executable
+FFMPEG_BIN = "/usr/local/bin/ffmpeg"
+
+pipe = sp.Popen([ FFMPEG_BIN,
+        '-y', # (optional) overwrite the output file if it already exists
+	#This must be set to rawvideo because the image is an array
+        '-f', 'rawvideo', 
+	#This must be set to rawvideo because the image is an array
+        '-vcodec','rawvideo',
+	#The size of the image array and resulting video
+        '-s', '1920x1080', 
+	#This must be rgba to match array format (uint32)
+        '-pix_fmt', 'rgba',
+	#frame rate of video
+        '-r', '29.97', 
+        #Indicate that the input to ffmpeg comes from a pipe
+        '-i', '-', 
+        # Tells FFMPEG not to expect any audio
+        '-an', 
+        #Setup video encoder
+	#Use any encoder you like that is available from ffmpeg
+        '-vcodec', 'libx264', '-preset', 'ultrafast', '-qp', '0',
+        '-pix_fmt', 'yuv420p',
+        #Name of the output
+        'bolshoiplanck2.mkv' ],
+        stdin=sp.PIPE,stdout=sp.PIPE)
+		
+		
+#Now we loop and produce 500 frames
+for k in range (0,500) :
+    #update the scene resulting in a new image
+    ts.update()
+
+    #get the image array from the ray caster
+    array = ts.source.get_results()
+
+    #send the image array to ffmpeg
+    array.tofile(pipe.stdin)
+
+    #rotate the scene by 0.01 rads in x,y & z
+    ts.camera.rotateX(0.01)
+    ts.camera.rotateZ(0.01)
+    ts.camera.rotateY(0.01)
+
+    #zoom in 0.01% for a total of a 5% zoom
+    ts.camera.zoom(0.01)
+
+
+#Close the pipe to ffmpeg
+pipe.terminate()

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/cookbook/opengl_stereo_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_stereo_volume_rendering.py
@@ -0,0 +1,370 @@
+from OpenGL.GL import *
+from OpenGL.GLUT import *
+from OpenGL.GLU import *
+from OpenGL.GL.ARB.vertex_buffer_object import *
+
+import sys, time
+import numpy as np
+import pycuda.driver as cuda_driver
+import pycuda.gl as cuda_gl
+
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+import numexpr as ne
+
+window = None     # Number of the glut window.
+rot_enabled = True
+
+#Theia Scene
+ts = None
+
+#RAY CASTING values
+c_tbrightness = 1.0
+c_tdensity = 0.05
+
+output_texture = None # pointer to offscreen render target
+
+leftButton = False
+middleButton = False
+rightButton = False
+
+#Screen width and height
+width = 1920
+height = 1080
+
+eyesep = 0.1
+
+(pbo, pycuda_pbo) = [None]*2
+(rpbo, rpycuda_pbo) = [None]*2
+
+#create 2 PBO for stereo scopic rendering
+def create_PBO(w, h):
+    global pbo, pycuda_pbo, rpbo, rpycuda_pbo
+    num_texels = w*h
+    array = np.zeros((num_texels, 3),np.float32)
+
+    pbo = glGenBuffers(1)
+    glBindBuffer(GL_ARRAY_BUFFER, pbo)
+    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pycuda_pbo = cuda_gl.RegisteredBuffer(long(pbo))
+
+    rpbo = glGenBuffers(1)
+    glBindBuffer(GL_ARRAY_BUFFER, rpbo)
+    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    rpycuda_pbo = cuda_gl.RegisteredBuffer(long(rpbo))
+
+def destroy_PBO():
+    global pbo, pycuda_pbo, rpbo, rpycuda_pbo
+    glBindBuffer(GL_ARRAY_BUFFER, long(pbo))
+    glDeleteBuffers(1, long(pbo));
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pbo,pycuda_pbo = [None]*2
+
+    glBindBuffer(GL_ARRAY_BUFFER, long(rpbo))
+    glDeleteBuffers(1, long(rpbo));
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    rpbo,rpycuda_pbo = [None]*2
+
+#consistent with C initPixelBuffer()
+def create_texture(w,h):
+    global output_texture
+    output_texture = glGenTextures(1)
+    glBindTexture(GL_TEXTURE_2D, output_texture)
+    # set basic parameters
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
+    # buffer data
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 w, h, 0, GL_RGB, GL_FLOAT, None)
+
+#consistent with C initPixelBuffer()
+def destroy_texture():
+    global output_texture
+    glDeleteTextures(output_texture);
+    output_texture = None
+
+def init_gl(w = 512 , h = 512):
+    Width, Height = (w, h)
+
+    glClearColor(0.1, 0.1, 0.5, 1.0)
+    glDisable(GL_DEPTH_TEST)
+
+    #matrix functions
+    glViewport(0, 0, Width, Height)
+    glMatrixMode(GL_PROJECTION);
+    glLoadIdentity();
+
+    #matrix functions
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
+
+def resize(Width, Height):
+    global width, height
+    (width, height) = Width, Height
+    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
+    glMatrixMode(GL_PROJECTION)
+    glLoadIdentity()
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+
+
+def do_tick():
+    global time_of_last_titleupdate, frame_counter, frames_per_second
+    if ((time.clock () * 1000.0) - time_of_last_titleupdate >= 1000.):
+        frames_per_second = frame_counter                   # Save The FPS
+        frame_counter = 0  # Reset The FPS Counter
+        szTitle = "%d FPS" % (frames_per_second )
+        glutSetWindowTitle ( szTitle )
+        time_of_last_titleupdate = time.clock () * 1000.0
+    frame_counter += 1
+
+oldMousePos = [ 0, 0 ]
+def mouseButton( button, mode, x, y ):
+	"""Callback function (mouse button pressed or released).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively"""
+
+	global leftButton, middleButton, rightButton, oldMousePos
+
+        if button == GLUT_LEFT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        leftButton = True
+            else:
+		leftButton = False
+
+        if button == GLUT_MIDDLE_BUTTON:
+	    if mode == GLUT_DOWN:
+	        middleButton = True
+            else:
+		middleButton = False
+
+        if button == GLUT_RIGHT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        rightButton = True
+            else:
+		rightButton = False
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def mouseMotion( x, y ):
+	"""Callback function (mouse moved while button is pressed).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively.
+	The global translation vector is updated according to
+	the movement of the mouse pointer."""
+
+	global ts, leftButton, middleButton, rightButton, oldMousePos
+	deltaX = x - oldMousePos[ 0 ]
+	deltaY = y - oldMousePos[ 1 ]
+
+	factor = 0.001
+
+	if leftButton == True:
+            ts.camera.rotateX( - deltaY * factor)
+            ts.camera.rotateY( - deltaX * factor)
+	if middleButton == True:
+	    ts.camera.translateX( deltaX* 2.0 * factor)
+	    ts.camera.translateY( - deltaY* 2.0 * factor)
+	if rightButton == True:
+	    ts.camera.scale += deltaY * factor
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def keyPressed(*args):
+    global c_tbrightness, c_tdensity, eyesep
+    # If escape is pressed, kill everything.
+    if args[0] == '\033':
+        print 'Closing..'
+        destroy_PBO()
+        destroy_texture()
+        exit()
+
+    #change the brightness of the scene
+    elif args[0] == ']':
+        c_tbrightness += 0.025
+    elif args[0] == '[':
+        c_tbrightness -= 0.025
+
+    #change the density scale
+    elif args[0] == ';':
+        c_tdensity -= 0.001
+    elif args[0] == '\'':
+        c_tdensity += 0.001 
+
+    #change the transfer scale
+    elif args[0] == '-':
+        eyesep -= 0.01
+    elif args[0] == '=':
+        eyesep += 0.01 
+
+def idle():
+    glutPostRedisplay()
+
+def display():
+    try:
+        #process left eye
+        process_image()
+        display_image()
+
+        #process right eye
+        process_image(eye = False)
+        display_image(eye = False)
+
+
+        glutSwapBuffers()
+
+    except:
+        from traceback import print_exc
+        print_exc()
+        from os import _exit
+        _exit(0)
+
+def process(eye = True):
+    global ts, pycuda_pbo, rpycuda_pbo, eyesep, c_tbrightness, c_tdensity
+    """ Use PyCuda """
+
+    ts.get_raycaster().set_opacity(c_tdensity)
+    ts.get_raycaster().set_brightness(c_tbrightness)
+
+    if (eye) :
+        ts.camera.translateX(-eyesep)
+        dest_mapping = pycuda_pbo.map()
+        (dev_ptr, size) = dest_mapping.device_ptr_and_size()
+        ts.get_raycaster().surface.device_ptr = dev_ptr
+        ts.update()
+        dest_mapping.unmap()
+        ts.camera.translateX(eyesep)
+    else :
+        ts.camera.translateX(eyesep)
+        dest_mapping = rpycuda_pbo.map()
+        (dev_ptr, size) = dest_mapping.device_ptr_and_size()
+        ts.get_raycaster().surface.device_ptr = dev_ptr
+        ts.update()
+        dest_mapping.unmap()
+        ts.camera.translateX(-eyesep)
+
+
+def process_image(eye =  True):
+    global output_texture, pbo, rpbo, width, height
+    """ copy image and process using CUDA """
+    # run the Cuda kernel
+    process(eye)
+    # download texture from PBO
+    if (eye) : 
+        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(pbo))
+        glBindTexture(GL_TEXTURE_2D, output_texture)
+
+        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 width, height, 0,
+                 GL_RGB, GL_FLOAT, None)
+    else :
+        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(rpbo))
+        glBindTexture(GL_TEXTURE_2D, output_texture)
+
+        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 width, height, 0,
+                 GL_RGB, GL_FLOAT, None)
+
+def display_image(eye = True):
+    global width, height
+    """ render a screen sized quad """
+    glDisable(GL_DEPTH_TEST)
+    glDisable(GL_LIGHTING)
+    glEnable(GL_TEXTURE_2D)
+    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
+
+    #matix functions should be moved
+    glMatrixMode(GL_PROJECTION)
+    glPushMatrix()
+    glLoadIdentity()
+    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
+    glMatrixMode( GL_MODELVIEW)
+    glLoadIdentity()
+    glViewport(0, 0, width, height)
+
+    if (eye) :
+        glDrawBuffer(GL_BACK_LEFT)
+    else :
+        glDrawBuffer(GL_BACK_RIGHT)
+
+    glBegin(GL_QUADS)
+    glTexCoord2f(0.0, 0.0)
+    glVertex3f(-1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 0.0)
+    glVertex3f(1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 1.0)
+    glVertex3f(1.0, 1.0, 0.5)
+    glTexCoord2f(0.0, 1.0)
+    glVertex3f(-1.0, 1.0, 0.5)
+    glEnd()
+
+    glMatrixMode(GL_PROJECTION)
+    glPopMatrix()
+
+    glDisable(GL_TEXTURE_2D)
+    glBindTexture(GL_TEXTURE_2D, 0)
+    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0)
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
+
+
+#note we may need to init cuda_gl here and pass it to camera
+def main():
+    global window, ts, width, height
+    (width, height) = (1920, 1080)
+
+    glutInit(sys.argv)
+    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH | GLUT_STEREO)
+    glutInitWindowSize(width, height)
+    glutInitWindowPosition(0, 0)
+    window = glutCreateWindow("Stereo Volume Rendering")
+
+
+    glutDisplayFunc(display)
+    glutIdleFunc(idle)
+    glutReshapeFunc(resize)
+    glutMouseFunc( mouseButton )
+    glutMotionFunc( mouseMotion )
+    glutKeyboardFunc(keyPressed)
+    init_gl(width, height)
+
+    # create texture for blitting to screen
+    create_texture(width, height)
+
+    import pycuda.gl.autoinit
+    import pycuda.gl
+    cuda_gl = pycuda.gl
+
+    create_PBO(width, height)
+    # ----- Load and Set Volume Data -----
+
+    density_grid = np.load("/home/bogert/dd150_log_densities.npy")
+
+    mi, ma= 21.5, 24.5
+    bins = 5000
+    tf = ColorTransferFunction( (mi, ma), bins)
+    tf.map_to_colormap(mi, ma, colormap="algae", scale_func = scale_func)
+
+    ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (width, height), tf = tf))
+
+    ts.get_raycaster().set_sample_size(0.01)
+    ts.get_raycaster().set_max_samples(5000)
+
+    glutMainLoop()
+
+def scale_func(v, mi, ma):
+    return  np.minimum(1.0, np.abs((v)-ma)/np.abs(mi-ma) + 0.0)
+
+# Print message to console, and kick off the main to get it rolling.
+if __name__ == "__main__":
+    print "Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda"
+    main()

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/cookbook/opengl_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_volume_rendering.py
@@ -0,0 +1,322 @@
+from OpenGL.GL import *
+from OpenGL.GLUT import *
+from OpenGL.GLU import *
+from OpenGL.GL.ARB.vertex_buffer_object import *
+
+import sys, time
+import numpy as np
+import pycuda.driver as cuda_driver
+import pycuda.gl as cuda_gl
+
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+import numexpr as ne
+
+window = None     # Number of the glut window.
+rot_enabled = True
+
+#Theia Scene
+ts = None
+
+#RAY CASTING values
+c_tbrightness = 1.0
+c_tdensity = 0.05
+
+output_texture = None # pointer to offscreen render target
+
+leftButton = False
+middleButton = False
+rightButton = False
+
+#Screen width and height
+width = 1024
+height = 1024
+
+eyesep = 0.1
+
+(pbo, pycuda_pbo) = [None]*2
+
+def create_PBO(w, h):
+    global pbo, pycuda_pbo
+    num_texels = w*h
+    array = np.zeros((w,h,3),np.uint32)
+
+    pbo = glGenBuffers(1)
+    glBindBuffer(GL_ARRAY_BUFFER, pbo)
+    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pycuda_pbo = cuda_gl.RegisteredBuffer(long(pbo))
+
+def destroy_PBO():
+    global pbo, pycuda_pbo
+    glBindBuffer(GL_ARRAY_BUFFER, long(pbo))
+    glDeleteBuffers(1, long(pbo));
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pbo,pycuda_pbo = [None]*2
+
+#consistent with C initPixelBuffer()
+def create_texture(w,h):
+    global output_texture
+    output_texture = glGenTextures(1)
+    glBindTexture(GL_TEXTURE_2D, output_texture)
+    # set basic parameters
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
+    # buffer data
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
+                 w, h, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, None)
+
+#consistent with C initPixelBuffer()
+def destroy_texture():
+    global output_texture
+    glDeleteTextures(output_texture);
+    output_texture = None
+
+def init_gl(w = 512 , h = 512):
+    Width, Height = (w, h)
+
+    glClearColor(0.1, 0.1, 0.5, 1.0)
+    glDisable(GL_DEPTH_TEST)
+
+    #matrix functions
+    glViewport(0, 0, Width, Height)
+    glMatrixMode(GL_PROJECTION);
+    glLoadIdentity();
+
+    #matrix functions
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
+
+def resize(Width, Height):
+    global width, height
+    (width, height) = Width, Height
+    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
+    glMatrixMode(GL_PROJECTION)
+    glLoadIdentity()
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+
+
+def do_tick():
+    global time_of_last_titleupdate, frame_counter, frames_per_second
+    if ((time.clock () * 1000.0) - time_of_last_titleupdate >= 1000.):
+        frames_per_second = frame_counter                   # Save The FPS
+        frame_counter = 0  # Reset The FPS Counter
+        szTitle = "%d FPS" % (frames_per_second )
+        glutSetWindowTitle ( szTitle )
+        time_of_last_titleupdate = time.clock () * 1000.0
+    frame_counter += 1
+
+oldMousePos = [ 0, 0 ]
+def mouseButton( button, mode, x, y ):
+	"""Callback function (mouse button pressed or released).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively"""
+
+	global leftButton, middleButton, rightButton, oldMousePos
+
+        if button == GLUT_LEFT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        leftButton = True
+            else:
+		leftButton = False
+
+        if button == GLUT_MIDDLE_BUTTON:
+	    if mode == GLUT_DOWN:
+	        middleButton = True
+            else:
+		middleButton = False
+
+        if button == GLUT_RIGHT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        rightButton = True
+            else:
+		rightButton = False
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def mouseMotion( x, y ):
+	"""Callback function (mouse moved while button is pressed).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively.
+	The global translation vector is updated according to
+	the movement of the mouse pointer."""
+
+	global ts, leftButton, middleButton, rightButton, oldMousePos
+	deltaX = x - oldMousePos[ 0 ]
+	deltaY = y - oldMousePos[ 1 ]
+
+	factor = 0.001
+
+	if leftButton == True:
+             ts.camera.rotateX( - deltaY * factor)
+             ts.camera.rotateY( - deltaX * factor)
+	if middleButton == True:
+	     ts.camera.translateX( deltaX* 2.0 * factor)
+	     ts.camera.translateY( - deltaY* 2.0 * factor)
+	if rightButton == True:
+	     ts.camera.scale += deltaY * factor
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def keyPressed(*args):
+    global c_tbrightness, c_tdensity
+    # If escape is pressed, kill everything.
+    if args[0] == '\033':
+        print 'Closing..'
+        destroy_PBO()
+        destroy_texture()
+        exit()
+
+    #change the brightness of the scene
+    elif args[0] == ']':
+        c_tbrightness += 0.025
+    elif args[0] == '[':
+        c_tbrightness -= 0.025
+
+    #change the density scale
+    elif args[0] == ';':
+        c_tdensity -= 0.001
+    elif args[0] == '\'':
+        c_tdensity += 0.001 
+
+def idle():
+    glutPostRedisplay()
+
+def display():
+    try:
+        #process left eye
+        process_image()
+        display_image()
+
+        glutSwapBuffers()
+
+    except:
+        from traceback import print_exc
+        print_exc()
+        from os import _exit
+        _exit(0)
+
+def process(eye = True):
+    global ts, pycuda_pbo, eyesep, c_tbrightness, c_tdensity
+
+    ts.get_raycaster().set_opacity(c_tdensity)
+    ts.get_raycaster().set_brightness(c_tbrightness)
+
+    dest_mapping = pycuda_pbo.map()
+    (dev_ptr, size) = dest_mapping.device_ptr_and_size()
+    ts.get_raycaster().surface.device_ptr = dev_ptr
+    ts.update()
+   # ts.get_raycaster().cast()
+    dest_mapping.unmap()
+
+
+def process_image():
+    global output_texture, pbo, width, height
+    """ copy image and process using CUDA """
+    # run the Cuda kernel
+    process()
+    # download texture from PBO
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(pbo))
+    glBindTexture(GL_TEXTURE_2D, output_texture)
+
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
+                 width, height, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, None)
+
+def display_image(eye = True):
+    global width, height
+    """ render a screen sized quad """
+    glDisable(GL_DEPTH_TEST)
+    glDisable(GL_LIGHTING)
+    glEnable(GL_TEXTURE_2D)
+    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
+
+    #matix functions should be moved
+    glMatrixMode(GL_PROJECTION)
+    glPushMatrix()
+    glLoadIdentity()
+    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
+    glMatrixMode( GL_MODELVIEW)
+    glLoadIdentity()
+    glViewport(0, 0, width, height)
+
+    glBegin(GL_QUADS)
+    glTexCoord2f(0.0, 0.0)
+    glVertex3f(-1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 0.0)
+    glVertex3f(1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 1.0)
+    glVertex3f(1.0, 1.0, 0.5)
+    glTexCoord2f(0.0, 1.0)
+    glVertex3f(-1.0, 1.0, 0.5)
+    glEnd()
+
+    glMatrixMode(GL_PROJECTION)
+    glPopMatrix()
+
+    glDisable(GL_TEXTURE_2D)
+    glBindTexture(GL_TEXTURE_2D, 0)
+    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0)
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
+
+
+#note we may need to init cuda_gl here and pass it to camera
+def main():
+    global window, ts, width, height
+    (width, height) = (1024, 1024)
+
+    glutInit(sys.argv)
+    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH )
+    glutInitWindowSize(width, height)
+    glutInitWindowPosition(0, 0)
+    window = glutCreateWindow("Volume Rendering")
+
+
+    glutDisplayFunc(display)
+    glutIdleFunc(idle)
+    glutReshapeFunc(resize)
+    glutMouseFunc( mouseButton )
+    glutMotionFunc( mouseMotion )
+    glutKeyboardFunc(keyPressed)
+    init_gl(width, height)
+
+    # create texture for blitting to screen
+    create_texture(width, height)
+
+    import pycuda.gl.autoinit
+    import pycuda.gl
+    cuda_gl = pycuda.gl
+
+    create_PBO(width, height)
+    # ----- Load and Set Volume Data -----
+
+    density_grid = np.load("/home/bogert/dd150_log_densities.npy")
+
+    mi, ma= 21.5, 24.5
+    bins = 5000
+    tf = ColorTransferFunction( (mi, ma), bins)
+    tf.map_to_colormap(mi, ma, colormap="algae", scale_func = scale_func)
+
+    ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (width, height), tf = tf))
+
+    ts.get_raycaster().set_sample_size(0.01)
+    ts.get_raycaster().set_max_samples(5000)
+    ts.update()
+
+    glutMainLoop()
+
+def scale_func(v, mi, ma):
+    return  np.minimum(1.0, np.abs((v)-ma)/np.abs(mi-ma) + 0.0)
+
+# Print message to console, and kick off the main to get it rolling.
+if __name__ == "__main__":
+    print "Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda"
+    main()

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -74,6 +74,8 @@
 this manner, but still want to contribute, please consider creating an external
 package, which we'll happily link to.
 
+.. _requirements-for-code-submission:
+
 Requirements for Code Submission
 ++++++++++++++++++++++++++++++++
 
@@ -88,22 +90,22 @@
   * New Features
 
     * New unit tests (possibly new answer tests) (See :ref:`testing`)
-    * Docstrings for public API
-    * Addition of new feature to the narrative documentation
-    * Addition of cookbook recipe
+    * Docstrings in the source code for the public API
+    * Addition of new feature to the narrative documentation (See :ref:`writing_documentation`)
+    * Addition of cookbook recipe (See :ref:`writing_documentation`) 
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Extension or Breakage of API in Existing Features
 
-    * Update existing narrative docs and docstrings
-    * Update existing cookbook recipes
+    * Update existing narrative docs and docstrings (See :ref:`writing_documentation`) 
+    * Update existing cookbook recipes (See :ref:`writing_documentation`) 
     * Modify of create new unit tests (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Bug fixes
 
     * Unit test is encouraged, to ensure breakage does not happen again in the
-      future.
+      future. (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
 When submitting, you will be asked to make sure that your changes meet all of
@@ -178,14 +180,14 @@
 
   $ python2.7 setup.py build --compiler=mingw32 install
 
+.. _sharing-changes:
+
 Making and Sharing Changes
 ++++++++++++++++++++++++++
 
 The simplest way to submit changes to yt is to commit changes in your
 ``$YT_DEST/src/yt-hg`` directory, fork the repository on BitBucket,  push the
-changesets to your fork, and then issue a pull request.  If you will be
-developing much more in-depth features for yt, you will also
-likely want to edit the paths in your 
+changesets to your fork, and then issue a pull request.  
 
 Here's a more detailed flowchart of how to submit changes.
 
@@ -224,25 +226,72 @@
 
         hg push https://bitbucket.org/YourUsername/yt/
 
-  #. Update your pull request by visiting
-     https://bitbucket.org/YourUsername/yt/pull-request/new
+  #. Your pull request will be automatically updated.
 
 .. _writing_documentation:
 
 How to Write Documentation
-++++++++++++++++++++++++++
+--------------------------
 
-The process for writing documentation is identical to the above, except that
-you're modifying source files in the doc directory (i.e. ``$YT_DEST/src/yt-hg/doc``) 
-instead of the src directory (i.e. ``$YT_DEST/src/yt-hg/yt``) of the yt repository.
+Writing documentation is one of the most important but often overlooked tasks
+for increasing yt's impact in the community.  It is the way in which the 
+world will understand how to use our code, so it needs to be done concisely
+and understandably.  Typically, when a developer submits some piece of code 
+with new functionality, she should also include documentation on how to use 
+that functionality (as per :ref:`requirements-for-code-submission`).  
+Depending on the nature of the code addition, this could be a new narrative 
+docs section describing how the new code works and how to use it, it could 
+include a recipe in the cookbook section, or it could simply be adding a note 
+in the relevant docs text somewhere.
+
+The documentation exists in the main mercurial code repository for yt in the 
+``doc`` directory (i.e. ``$YT_DEST/src/yt-hg/doc/source`` on systems installed 
+using the installer script).  It is organized hierarchically into the main 
+categories of:
+
+ * Visualizing
+ * Analyzing
+ * Examining
+ * Cookbook
+ * Bootcamp
+ * Developing
+ * Reference
+ * Help
+
+You will have to figure out where your new/modified doc fits into this, but 
+browsing through the pre-built documentation is a good way to sort that out.
+
 All the source for the documentation is written in 
-`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.
+`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.  ReST is very
+straightforward to markup in a text editor, and if you are new to it, we
+recommend just using other .rst files in the existing yt documentation as 
+templates or checking out the 
+`ReST reference documentation <http://sphinx-doc.org/rest.html>`_.
 
-Cookbook recipes go in ``source/cookbook/`` and must be added to one of the
-``.rst`` files in that directory.  
+New cookbook recipes (see :ref:`cookbook`) are very helpful for the community 
+as they provide simple annotated recipes on how to use specific functionality.  
+To add one, create a concise python script which demonstrates some 
+functionality and pare it down to its minimum.  Add some comment lines to 
+describe what it is that you're doing along the way.  Place this ``.py`` file 
+in the ``source/cookbook/`` directory, and then link to it explicitly in one 
+of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``, 
+``cosmological_analysis.rst``, etc.), and add some description of what the script 
+actually does.  We recommend that you use one of the 
+`sample data sets <http://yt-project.org/data>`_ in your recipe.  When the full
+docs are built, each of the cookbook recipes are executed dynamically on 
+a system which has access to all of the sample datasets.  Any output images 
+generated by your script will then be attached inline in the built documentation 
+directly following your script.
 
-For more information on how to build the documentation to make sure it looks
-the way you expect it to after modifying it, see :ref:`docs_build`.
+After you have made your modifications to the docs, you will want to make sure
+that they render the way you expect them to render.  For more information on
+this, see the section on :ref:`docs_build`.  Unless you're contributing cookbook
+recipes or notebooks which require a dynamical build, you can probably get 
+away with just doing a 'quick' docs build.
+
+When you have completed your documentation additions, commit your changes 
+to your repository and make a pull request in the same way you would contribute 
+a change to the codebase, as described in the section on :ref:`sharing-changes`.
 
 How To Get The Source Code For Editing
 --------------------------------------
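
To make the cookbook guidance above concrete, a recipe is nothing more than a short, commented script run against one of the sample data sets; a hypothetical minimal example (file name, dataset, and field chosen arbitrarily) that would be dropped into source/cookbook/ and linked from one of the .rst files there:

    # simple_density_slice.py -- hypothetical minimal cookbook recipe
    import yt

    # Load one of the sample datasets from http://yt-project.org/data
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # Slice through the domain center; when the docs are built, this script is
    # executed and the saved image is attached inline after the recipe.
    slc = yt.SlicePlot(ds, "z", "density")
    slc.save()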

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -0,0 +1,156 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:6da8ec00f414307f27544fbdbc6b4fa476e5e96809003426279b2a1c898b4546"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This example creates a fake in-memory particle dataset and then loads it as a yt dataset using the `load_particles` function.\n",
+      "\n",
+      "Our \"fake\" dataset will be numpy arrays filled with normally distributed randoml particle positions and uniform particle masses.  Since real data is often scaled, I arbitrarily multiply by 1e6 to show how to deal with scaled data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "\n",
+      "n_particles = 5e6\n",
+      "\n",
+      "ppx, ppy, ppz = 1e6*np.random.normal(size=[3, n_particles])\n",
+      "\n",
+      "ppm = np.ones(n_particles)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function accepts a dictionary populated with particle data fields loaded in memory as numpy arrays or python lists:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {'particle_position_x': ppx,\n",
+      "        'particle_position_y': ppy,\n",
+      "        'particle_position_z': ppz,\n",
+      "        'particle_mass': ppm}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To hook up with yt's internal field system, the dictionary keys must be 'particle_position_x', 'particle_position_y', 'particle_position_z', and 'particle_mass', as well as any other particle field provided by one of the particle frontends."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function transforms the `data` dictionary into an in-memory yt `Dataset` object, providing an interface for further analysis with `yt`. The example below illustrates how to load the data dictionary we created above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "from yt.units import parsec, Msun\n",
+      "\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppy), max(ppy)]])\n",
+      "\n",
+      "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `length_unit` and `mass_unit` are the conversion from the units used in the `data` dictionary to CGS.  I've arbitrarily chosen one parsec and 10^8 Msun for this example. \n",
+      "\n",
+      "The `n_ref` parameter controls how many particle it takes to accumulate in an oct-tree cell to trigger refinement.  Larger `n_ref` will decrease poisson noise at the cost of resolution in the octree.  \n",
+      "\n",
+      "Finally, the `bbox` parameter is a bounding box in the units of the dataset that contains all of the particles.  This is used to set the size of the base octree block."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This new dataset acts like any other `yt` `Dataset` object, and can be used to create data objects and query for yt fields.  This example shows how to access \"deposit\" fields:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()\n",
+      "\n",
+      "# This is generated with \"cloud-in-cell\" interpolation.\n",
+      "cic_density = ad[\"deposit\", \"all_cic\"]\n",
+      "\n",
+      "# These three are based on nearest-neighbor cell deposition\n",
+      "nn_density = ad[\"deposit\", \"all_density\"]\n",
+      "nn_deposited_mass = ad[\"deposit\", \"all_mass\"]\n",
+      "particle_count_per_cell = ad[\"deposit\", \"all_count\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+      "slc.set_width((8, 'Mpc'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
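
For readers who want the notebook above as a single script, here is a condensed sketch of the same workflow (identical arbitrary scaling, units, and parameters; nothing beyond what the notebook itself uses):

    import numpy as np
    import yt
    from yt.units import parsec, Msun

    # Fake data: normally distributed positions (scaled by 1e6) and uniform masses.
    n_particles = int(5e6)
    ppx, ppy, ppz = 1e6*np.random.normal(size=[3, n_particles])
    ppm = np.ones(n_particles)

    data = {'particle_position_x': ppx,
            'particle_position_y': ppy,
            'particle_position_z': ppz,
            'particle_mass': ppm}

    # Bounding box containing all of the particles, padded by 10%.
    bbox = 1.1*np.array([[min(ppx), max(ppx)],
                         [min(ppy), max(ppy)],
                         [min(ppz), max(ppz)]])

    ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun,
                           n_ref=256, bbox=bbox)

    # The in-memory dataset behaves like any other yt Dataset.
    ad = ds.all_data()
    print ad["deposit", "all_cic"]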

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -898,3 +898,4 @@
 Generic Particle Data
 ---------------------
 
+.. notebook:: Loading_Generic_Particle_Data.ipynb

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -348,7 +348,7 @@
    there
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
- * Adaptive HEALPix support (see :ref:`adaptive_healpix_volume_rendering`)
+ * Adaptive HEALPix support
  * Column density calculations (see :ref:`radial-column-density`)
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -93,5 +93,5 @@
   uploading AMRSurface objects.
 * ``suppressStreamLogging`` (default: ``'False'``): If true, execution mode will be
   quiet.
-* ``stdoutStreamLogging`` (default: ``'False'``): If three, logging is directed
+* ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,7 +120,7 @@
 .. python-script::
    
    from yt.mods import *
-   data_pf = load('Enzo_64/RD0006/RD0006')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 
    hc = HaloCatalog(halos_pf=halos_pf)

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -16,7 +16,7 @@
 packages.
 
 Note that the index object associated with your snapshot file contains a
-list of plots you've made in ``pf.h.plots``.
+list of plots you've made in ``ds.plots``.
 
 .. _fixed-resolution-buffers:
 

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -466,3 +466,90 @@
 your homogenized volume to then be passed in to the camera. A sample usage is shown
 in :ref:`cookbook-amrkdtree_downsampling`.
 
+Hardware Volume Rendering on NVidia Graphics cards
+--------------------------------------------------
+.. versionadded:: 3.0
+
+Theia is a hardware volume renderer that takes advantage of NVidia's CUDA
+platform to perform ray casting on GPUs instead of the CPU.
+
+Only unigrid rendering is supported, but yt provides a grid mapping function
+to get unigrid data from AMR or SPH formats; see
+:ref:`cookbook-amrkdtree_to_uniformgrid`.
+
+System Requirements
+-------------------
+.. versionadded:: 3.0
+
+* An NVidia graphics card.  The memory limit of the graphics card sets the
+  limit on the size of the data source.
+
+* CUDA 5 or later.
+
+* The environment variable ``CUDA_SAMPLES`` must be set to point at the
+  ``common/inc`` samples shipped with CUDA.  The following shows an example
+  in bash with CUDA 5.5 installed in ``/usr/local``::
+
+      export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
+
+* PyCUDA must also be installed to use Theia.  PyCUDA can be installed
+  following these instructions::
+
+      git clone --recursive http://git.tiker.net/trees/pycuda.git
+      cd pycuda
+      python configure.py
+      python setup.py install
+
+
+Tutorial
+--------
+.. versionadded:: 3.0
+
+Currently rendering only works on uniform grids.  Here is an example
+on a 1024^3 cube of float32 scalars.
+
+.. code-block:: python
+
+   from yt.visualization.volume_rendering.theia.scene import TheiaScene
+   from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+   import numpy as np
+
+   # load a 3D numpy array of float32
+   volume = np.load("/home/bogert/log_densities_1024.npy")
+
+   scene = TheiaScene(volume=volume, raycaster=FrontToBackRaycaster())
+
+   scene.camera.rotateX(1.0)
+   scene.update()
+
+   surface = scene.get_results()
+   # surface now contains an image array of int32 rgba values
+
+.. _the-theiascene-interface:
+
+The TheiaScene Interface
+------------------------
+.. versionadded:: 3.0
+
+A TheiaScene object has been created to provide a high-level entry point for
+controlling the raycaster's view onto the data.  The class
+:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates
+a Camera object and a TheiaSource that in turn encapsulates
+a volume.  The :class:`~yt.visualization.volume_rendering.theia.Camera`
+provides controls for rotating, translating, and zooming into the volume.
+Using the :class:`~yt.visualization.volume_rendering.theia.TheiaSource`
+automatically transfers the volume to the graphics card's texture memory.
+
+Example Cookbooks
+-----------------
+
+OpenGL Example for interactive volume rendering:
+:ref:`cookbook-opengl_volume_rendering`.
+
+OpenGL Stereoscopic Example:
+:ref:`cookbook-opengl_stereo_volume_rendering`.
+
+.. warning::  Frame rate will suffer significantly from stereoscopic rendering,
+              roughly 2x slower since the volume must be rendered twice.
+
+Pseudo-Realtime video rendering with ffmpeg:
+:ref:`cookbook-ffmpeg_volume_rendering`.
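
As a small supplement to the tutorial above, the following sketch exercises the camera controls that the TheiaScene interface exposes; the rotate/translate/zoom calls mirror those used in the cookbook scripts later in this changeset, and the .npy path is a placeholder:

    import numpy as np

    from yt.visualization.volume_rendering.theia.scene import TheiaScene
    from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster

    # Placeholder path: any 3D float32 numpy array will do here.
    volume = np.load("log_densities_1024.npy")

    scene = TheiaScene(volume=volume, raycaster=FrontToBackRaycaster())

    # Rotate, translate, and zoom the camera, then re-render the volume.
    scene.camera.rotateX(0.5)
    scene.camera.rotateY(0.25)
    scene.camera.translateX(0.1)
    scene.camera.zoom(10.0)
    scene.update()

    surface = scene.get_results()   # int32 rgba image array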

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -81,13 +81,13 @@
     dpf = halo.halo_catalog.data_pf
     hpf = halo.halo_catalog.halos_pf
     center = dpf.arr([halo.quantities["particle_position_%s" % axis] \
-                      for axis in "xyz"]) / dpf.length_unit
-    radius = factor * halo.quantities[radius_field] / dpf.length_unit
+                      for axis in "xyz"])
+    radius = factor * halo.quantities[radius_field]
     if radius <= 0.0:
         halo.data_object = None
         return
     try:
-        sphere = dpf.sphere(center, (radius, "code_length"))
+        sphere = dpf.sphere(center, radius)
     except YTSphereTooSmall:
         halo.data_object = None
         return

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -43,6 +43,10 @@
         collection is instantiated.
         Default : None (will default to the fields 'particle_position_x',
         'particle_position_y', 'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series.
+        Default : False
 
     Examples
     ________
@@ -59,7 +63,7 @@
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
     """
-    def __init__(self, outputs, indices, fields=None) :
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
 
         indices.sort() # Just in case the caller wasn't careful
         self.field_data = YTFieldData()
@@ -74,6 +78,7 @@
         self.num_indices = len(indices)
         self.num_steps = len(outputs)
         self.times = []
+        self.suppress_logging = suppress_logging
 
         # Default fields 
         
@@ -83,8 +88,9 @@
         fields.append("particle_position_z")
         fields = list(OrderedDict.fromkeys(fields))
 
-        old_level = int(ytcfg.get("yt","loglevel"))
-        mylog.setLevel(40)
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
         my_storage = {}
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
@@ -101,7 +107,8 @@
             pbar.update(i)
         pbar.finish()
 
-        mylog.setLevel(old_level)
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
 
         times = []
         for fn, time in sorted(my_storage.items()):
@@ -191,14 +198,16 @@
         with shape (num_indices, num_steps)
         """
         if not self.field_data.has_key(field):
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
+            if self.suppress_logging:
+                old_level = int(ytcfg.get("yt","loglevel"))
+                mylog.setLevel(40)
             dd_first = self.data_series[0].all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:
                     self.particle_fields.append(field)
-            particles = np.empty((self.num_indices,self.num_steps)) * np.nan
+            particles = np.empty((self.num_indices,self.num_steps))
+            particles[:] = np.nan
             step = int(0)
             pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
             my_storage={}
@@ -232,7 +241,8 @@
             for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
                 particles[indices,i] = pfield
             self.field_data[field] = array_like_field(dd_first, particles, fd)
-            mylog.setLevel(old_level)
+            if self.suppress_logging:
+                mylog.setLevel(old_level)
         return self.field_data[field]
 
     def trajectory_from_index(self, index):
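
A brief usage sketch for the new keyword; the import path and the use of a DatasetSeries here are assumptions based on the surrounding module, and the filenames and particle indices are placeholders:

    import glob
    import yt
    from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    # Placeholder time series over the simulation outputs and a few particle ids.
    ts = yt.DatasetSeries(sorted(glob.glob("DD????/DD????")))
    indices = [1234, 5678, 91011]

    # suppress_logging=True quiets yt's logger while the trajectories are
    # constructed, restoring the previous log level afterwards.
    trajs = ParticleTrajectories(ts, indices, suppress_logging=True)

    for t in trajs:
        print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()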

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -17,14 +17,6 @@
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.funcs import get_pbar
 
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = (data["v_los"] >= vmin) & (data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
 def create_vlos(z_hat):
     def _v_los(field, data):
         vz = data["velocity_x"]*z_hat[0] + \
@@ -90,9 +82,11 @@
             self.v_bnd = -vmax, vmax
         else:
             self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+                          ds.quan(velocity_bounds[1], velocity_bounds[2]))
 
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1])
+        self.dv = (self.v_bnd[1]-self.v_bnd[0])/self.nv
 
         _vlos = create_vlos(orient.unit_vectors[2])
         ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
@@ -100,11 +94,8 @@
         self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
         pbar = get_pbar("Generating cube.", self.nv)
         for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
+            _intensity = self._create_intensity(i)
+            ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
             prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                       (self.nx, self.ny), "intensity")
             self.data[:,:,i] = prj[:,:]
@@ -145,7 +136,7 @@
 
         dx = length_unit[0]/self.nx
         dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units("m/s").value/self.nv
+        dv = self.dv.in_units("m/s").value
 
         if length_unit[1] == "deg":
             dx *= -1.
@@ -162,3 +153,11 @@
         fib[0].header["btype"] = self.field
 
         fib.writeto(filename, clobber=clobber)
+
+    def _create_intensity(self, i):
+        def _intensity(field, data):
+            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            w = 1.-w
+            w[w < 0.0] = 0.0
+            return data[self.field]*w
+        return _intensity
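
The new _create_intensity closure weights each cell's contribution to velocity channel i by 1 - |v_los - vmid[i]|/dv, clipped at zero (a triangular kernel centered on the channel).  A standalone numpy sketch of that kernel with made-up numbers, for readers following the change:

    import numpy as np

    # Channel setup mirroring the new self.vbins / self.vmid / self.dv attributes.
    v_bnd = (-1.0, 1.0)   # velocity bounds (arbitrary units)
    nv = 4                # number of velocity channels
    vbins = np.linspace(v_bnd[0], v_bnd[1], num=nv+1)
    vmid = 0.5*(vbins[1:] + vbins[:-1])
    dv = (v_bnd[1] - v_bnd[0])/nv

    # Fake line-of-sight velocities and field values for a handful of cells.
    v_los = np.array([-0.9, -0.2, 0.1, 0.6])
    field = np.array([1.0, 2.0, 3.0, 4.0])

    for i in range(nv):
        # Full weight at the channel center, falling to zero one channel away.
        w = 1.0 - np.abs(v_los - vmid[i])/dv
        w[w < 0.0] = 0.0
        print i, field*w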

diff -r 9de86ea05d7828cde7231e5168c83872593715e5 -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -36,35 +36,30 @@
     pass
 
 vlist = "xyz"
-def setup_sunyaev_zeldovich_fields(registry, ftype = "gas", slice_info = None):
+def setup_sunyaev_zeldovich_fields(ds):
     def _t_squared(field, data):
         return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
-    registry.add_field(("gas", "t_squared"),
-                       function = _t_squared,
-                       units="g*keV**2/cm**3")
+    ds.add_field(("gas", "t_squared"), function = _t_squared,
+                 units="g*keV**2/cm**3")
     def _beta_perp_squared(field, data):
         return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
-    registry.add_field(("gas","beta_perp_squared"), 
-                       function = _beta_perp_squared,
-                       units="g/cm**3")
+    ds.add_field(("gas","beta_perp_squared"), function = _beta_perp_squared,
+                 units="g/cm**3")
 
     def _beta_par_squared(field, data):
         return data["gas","beta_par"]**2/data["gas","density"]
-    registry.add_field(("gas","beta_par_squared"),
-                       function = _beta_par_squared,
-                       units="g/cm**3")
+    ds.add_field(("gas","beta_par_squared"), function = _beta_par_squared,
+                 units="g/cm**3")
 
     def _t_beta_par(field, data):
         return data["gas","kT"]*data["gas","beta_par"]
-    registry.add_field(("gas","t_beta_par"),
-                       function = _t_beta_par,
-                       units="keV*g/cm**3")
+    ds.add_field(("gas","t_beta_par"), function = _t_beta_par,
+                 units="keV*g/cm**3")
 
     def _t_sz(field, data):
         return data["gas","density"]*data["gas","kT"]
-    registry.add_field(("gas","t_sz"),
-                       function = _t_sz,
-                       units="keV*g/cm**3")
+    ds.add_field(("gas","t_sz"), function = _t_sz,
+                 units="keV*g/cm**3")
 
 def generate_beta_par(L):
     def _beta_par(field, data):
@@ -79,8 +74,8 @@
 
     Parameters
     ----------
-    pf : parameter_file
-        The parameter file.
+    ds : Dataset
+        The dataset
     freqs : array_like
         The frequencies (in GHz) at which to compute the SZ spectral distortion.
     mue : float, optional
@@ -91,15 +86,15 @@
     Examples
     --------
     >>> freqs = [90., 180., 240.]
-    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    >>> szprj = SZProjection(ds, freqs, high_order=True)
     """
-    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+    def __init__(self, ds, freqs, mue=1.143, high_order=False):
 
-        self.pf = pf
-        pf.field_info.load_plugin(setup_sunyaev_zeldovich_fields)
+        self.ds = ds
+        setup_sunyaev_zeldovich_fields(self.ds)
         self.num_freqs = len(freqs)
         self.high_order = high_order
-        self.freqs = pf.arr(freqs, "GHz")
+        self.freqs = ds.arr(freqs, "GHz")
         self.mueinv = 1./mue
         self.xinit = hcgs*self.freqs.in_units("Hz")/(kboltz*Tcmb)
         self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
@@ -132,12 +127,12 @@
         --------
         >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
         """
-        axis = fix_axis(axis, self.pf)
+        axis = fix_axis(axis, self.ds)
 
         if center == "c":
-            ctr = self.pf.domain_center
+            ctr = self.ds.domain_center
         elif center == "max":
-            v, ctr = self.pf.h.find_max("density")
+            v, ctr = self.ds.h.find_max("density")
         else:
             ctr = center
 
@@ -145,8 +140,8 @@
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
-        proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
+        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        proj = self.ds.proj("density", axis, center=ctr, data_source=source)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -171,7 +166,7 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
-        self.pf.field_info.pop(("gas","beta_par"))
+        self.ds.field_info.pop(("gas","beta_par"))
 
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
@@ -196,15 +191,15 @@
         >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
         """
         if iterable(width):
-            w = self.pf.quan(width[0], width[1]).in_units("code_length").value
+            w = self.ds.quan(width[0], width[1]).in_units("code_length").value
         elif isinstance(width, YTQuantity):
             w = width.in_units("code_length").value
         else:
             w = width
         if center == "c":
-            ctr = self.pf.domain_center
+            ctr = self.ds.domain_center
         elif center == "max":
-            v, ctr = self.pf.h.find_max("density")
+            v, ctr = self.ds.h.find_max("density")
         else:
             ctr = center
 
@@ -213,18 +208,18 @@
             raise NotImplementedError
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
 
-        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "density")
-        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "t_sz")/dens
-        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "beta_par")/dens
-        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "t_squared")/dens
+        dens    = off_axis_projection(self.ds, ctr, L, w, nx, "density")
+        Te      = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")/dens
+        bpar    = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par")/dens
+        omega1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")/dens
         omega1  = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "beta_perp_squared")/dens
-            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "t_beta_par")/dens
+            bperp2  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")/dens
+            sigma1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")/dens
             sigma1  = sigma1/Te - bpar
-            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "beta_par_squared")/dens
+            kappa1  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")/dens
             kappa1 -= bpar
         else:
             bperp2 = np.zeros((nx,nx))
@@ -241,7 +236,7 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
-        self.pf.field_info.pop(("gas","beta_par"))
+        self.ds.field_info.pop(("gas","beta_par"))
 
     def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
@@ -278,8 +273,8 @@
 
         for i, field in enumerate(self.freq_fields):
             self.data[field] = I0*self.xinit[i]**3*signal[i,:,:]
-        self.data["Tau"] = self.pf.arr(tau, "dimensionless")
-        self.data["TeSZ"] = self.pf.arr(Te, "keV")
+        self.data["Tau"] = self.ds.arr(tau, "dimensionless")
+        self.data["TeSZ"] = self.ds.arr(Te, "keV")
 
     @parallel_root_only
     def write_fits(self, filename, units="kpc", sky_center=None, sky_scale=None,
@@ -327,7 +322,7 @@
         fib = FITSImageBuffer(self.data, fields=self.data.keys(),
                               center=center, units=units,
                               scale=deltas)
-        fib.update_all_headers("Time", float(self.pf.current_time.in_units(time_units).value))
+        fib.update_all_headers("Time", float(self.ds.current_time.in_units(time_units).value))
         fib.writeto(filename, clobber=clobber)
         
     @parallel_root_only
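
For readers tracking the pf-to-ds rename in this module, a minimal usage sketch of the updated interface; the dataset path is a placeholder and the call pattern follows the docstrings above and the SZ_projections notebook elsewhere in this changeset:

    from yt.mods import *
    from yt.analysis_modules.api import SZProjection

    # Placeholder dataset; anything with gas density and temperature fields works.
    ds = load("enzo_tiny_cosmology/DD0046/DD0046")

    freqs = [90., 180., 240.]   # GHz
    szprj = SZProjection(ds, freqs, high_order=True)

    # On-axis projection along z, centered on the gas density maximum.
    szprj.on_axis("z", center="max", width=(10.0, "Mpc"), nx=400)

    # Projections are accessed dict-like and can be written out to FITS.
    print szprj["90_GHz"]
    szprj.write_fits("szproj.fits", clobber=True)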

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/82a49cc6ef5b/
Changeset:   82a49cc6ef5b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-17 15:10:45
Summary:     Reverting RAMSES changes
Affected #:  2 files

diff -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 -r 82a49cc6ef5be4a1b7a4fe88e68d792003aa4081 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -486,7 +486,6 @@
         # Note that unit_l *already* converts to proper!
         # Also note that unit_l must be multiplied by the boxlen parameter to
         # ensure we are correctly set up for the current domain.
-        import yt.units.dimensions as dimensions
         length_unit = self.parameters['unit_l'] * self.parameters['boxlen']
         rho_u = self.parameters['unit_d']
         # We're not multiplying by the boxlength here.
@@ -500,11 +499,6 @@
         self.mass_unit = self.quan(mass_unit, "g")
         self.time_unit = self.quan(time_unit, "s")
         self.velocity_unit = self.length_unit / self.time_unit
-        self.unit_registry.add('code_density', 1.0,
-            dimensions.mass / dimensions.length**3)
-        self.density_unit = self.quan(rho_u * self.parameters["boxlen"]**3,
-                                      "g/cm**3")
-        self.unit_registry.modify("code_density", self.density_unit)
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r c0411c147925ef3beae06c5b85d6f618ec1cdd52 -r 82a49cc6ef5be4a1b7a4fe88e68d792003aa4081 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -32,7 +32,7 @@
 
 b_units = "code_magnetic"
 ra_units = "code_length / code_time**2"
-rho_units = "code_density"
+rho_units = "code_mass / code_length**3"
 vel_units = "code_length / code_time"
 
 known_species_masses = dict(


https://bitbucket.org/yt_analysis/yt/commits/d116c37222b2/
Changeset:   d116c37222b2
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-19 19:51:09
Summary:     Remove need to ask for xyz explicitly for bounding box iters. Will still return xyz data in any call
Affected #:  2 files

diff -r 3ba4d54247ada6e9e274a4bff8ab4fede6e9bb8a -r d116c37222b2d3da203f9c5c791cd8d4a617c03f yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -81,6 +81,7 @@
     def _initialize_index(self, data_file, regions):
         x, y, z = (self._handle[ax] for ax in 'xyz')
         pcount = x.size
+
         morton = np.empty(pcount, dtype='uint64')
         ind = 0
         while ind < pcount:
@@ -108,6 +109,14 @@
         fields.append(("dark_matter", "mass"))
         return fields, {}
 
+    def _count_particles(self, data_file):
+        pcount = self._handle['x'].size
+        if (pcount > 1e9):
+            mylog.warn("About to load %i particles into memory. " % (pcount) +
+                       "You may want to consider a midx-enabled load")
+        return {'dark_matter': pcount}
+
+
 class IOHandlerHTTPSDF(IOHandlerSDF):
     _dataset_type = "http_sdf_particles"
 
@@ -178,7 +187,7 @@
     def _read_particle_fields(self, chunks, ptf, selector):
         dle = self.pf.domain_left_edge.in_units("code_length").d
         dre = self.pf.domain_right_edge.in_units("code_length").d
-        required_fields = ['x','y','z']
+        required_fields = []
         for ptype, field_list in sorted(ptf.items()):
             for field in field_list:
                 if field == "mass": continue
@@ -209,7 +218,7 @@
         pcount = 0
         for dd in self.pf.sindex.iter_bbox_data(
             dle, dre,
-            ['x','y','z']):
+            ['x']):
             pcount += dd['x'].size
 
         morton = np.empty(pcount, dtype='uint64')
@@ -241,10 +250,15 @@
     def _count_particles(self, data_file):
         dle = self.pf.domain_left_edge.in_units("code_length").d
         dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount_estimate = self.pf.sindex.get_nparticles_bbox(dle, dre)
+        if pcount_estimate > 1e9:
+            mylog.warning("Filtering %i particles to find total."
+                          % pcount_estimate + \
+                          " You may want to reconsider your bounding box.")
         pcount = 0
         for dd in self.pf.sindex.iter_bbox_data(
             dle, dre,
-            ['x','y','z']):
+            ['x']):
             pcount += dd['x'].size
         return {'dark_matter': pcount}
 

diff -r 3ba4d54247ada6e9e274a4bff8ab4fede6e9bb8a -r d116c37222b2d3da203f9c5c791cd8d4a617c03f yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -85,6 +85,10 @@
 
     return myfilter
 
+def ensure_xzy_fields(fields):
+    for f in 'xyz':
+        if f not in fields:
+            fields.append(f)
 
 class DataStruct(object):
     """docstring for DataStruct"""
@@ -763,6 +767,7 @@
                 yield f, data[f][mask]
 
     def iter_bbox_data(self, left, right, fields):
+        ensure_xzy_fields(fields)
         mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
         # Need to put left/right in float32 to avoid fp roundoff errors
@@ -782,6 +787,7 @@
         #    yield dd
 
     def iter_sphere_data(self, center, radius, fields):
+        ensure_xzy_fields(fields)
         mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
         inds = self.get_bbox(center-radius, center+radius)
 
@@ -918,6 +924,7 @@
 
         """
 
+        ensure_xzy_fields(fields)
         bbox = self.get_cell_bbox(level, cell_iarr)
         filter_left = bbox[:, 0] - pad
         filter_right = bbox[:, 1] + pad
@@ -1001,6 +1008,7 @@
                                              8.0, ['x','y','z','ident'])
 
         """
+        ensure_xzy_fields(fields)
         bbox = self.get_cell_bbox(level, cell_iarr)
         filter_left = bbox[:, 0] - pad
         filter_right = bbox[:, 1] + pad
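
The ensure_xzy_fields helper added above simply guarantees that the coordinate fields are part of every bounding-box or cell request, so callers no longer need to ask for them explicitly.  A tiny standalone sketch of that behaviour:

    def ensure_xzy_fields(fields):
        # Append 'x', 'y', 'z' if they are not already requested, so position
        # data is always returned alongside the other fields.
        for f in 'xyz':
            if f not in fields:
                fields.append(f)

    fields = ['mass', 'x']
    ensure_xzy_fields(fields)
    print fields   # ['mass', 'x', 'y', 'z']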


https://bitbucket.org/yt_analysis/yt/commits/eb96049ecbee/
Changeset:   eb96049ecbee
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-19 19:52:48
Summary:     Merging
Affected #:  77 files

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -41,6 +41,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -16,6 +16,18 @@
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
@@ -44,30 +56,40 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll set up the grid and the parameters of the profiles:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "nx,ny,nz = (256,256,256) # domain dimensions\n",
+      "R = 10. # outer radius of disk, kpc\n",
+      "r_0 = 3. # scale radius, kpc\n",
+      "beta = 1.4 # for the tangential velocity profile\n",
+      "alpha = -1. # for the radial density profile\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Second, we'll construct the data arrays for the density and the velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {}\n",
-      "nx,ny,nz = (256,256,256)\n",
-      "R = 10. # kpc\n",
-      "r_0 = 3. # kpc\n",
-      "beta = 1.4\n",
-      "alpha = -1.\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates\n",
       "dens = np.zeros((nx,ny,nz))\n",
       "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
       "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
@@ -75,11 +97,31 @@
       "vely = np.zeros((nx,ny,nz))\n",
       "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
       "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "dens[r > R] = 0.0\n",
+      "velx[r > R] = 0.0\n",
+      "vely[r > R] = 0.0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
       "data[\"density\"] = (dens,\"g/cm**3\")\n",
       "data[\"velocity_x\"] = (velx, \"km/s\")\n",
       "data[\"velocity_y\"] = (vely, \"km/s\")\n",
       "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
       "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
      ],
      "language": "python",
@@ -146,7 +188,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-1.5,1.5,\"km/s\"))"
      ],
      "language": "python",
      "metadata": {},
@@ -180,8 +222,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"cube.fits\")\n",
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "pf = load(\"cube.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Specifying no center gives us the center slice\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -192,19 +244,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# To figure out what the domain center and width is in pixel (code length) units:\n",
-      "print ds.domain_center\n",
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "import yt.units as u\n",
+      "# Picking different velocities for the slices\n",
+      "new_center = pf.domain_center\n",
+      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -215,7 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -225,7 +270,31 @@
     {
      "cell_type": "code",
      "collapsed": false,
-     "input": [],
+     "input": [
+      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj.set_log(\"density\", True)\n",
+      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+      "prj.show()"
+     ],
      "language": "python",
      "metadata": {},
      "outputs": []

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
+  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +18,7 @@
       "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
       "contributions, and for large frequencies and high temperatures\n",
       "relativistic effects are important. For computing the full S-Z signal\n",
-      "incorporating all of these effects, Jens Chluba has written a library:\n",
+      "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
       "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
@@ -93,10 +93,10 @@
       "from yt.mods import *\n",
       "from yt.analysis_modules.api import SZProjection\n",
       "\n",
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",
       "freqs = [90.,180.,240.]\n",
-      "szprj = SZProjection(pf, freqs)"
+      "szprj = SZProjection(ds, freqs)"
      ],
      "language": "python",
      "metadata": {},
@@ -108,8 +108,8 @@
      "source": [
       "`freqs` is a list or array of frequencies in GHz at which the signal\n",
       "is to be computed. The `SZProjection` constructor also accepts the\n",
-      "optional keywords, **mue** (mean molecular weight for computing the\n",
-      "electron number density, 1.143 is the default) and **high_order** (set\n",
+      "optional keywords, `mue` (mean molecular weight for computing the\n",
+      "electron number density, 1.143 is the default) and `high_order` (set\n",
       "to True to compute terms in the S-Z signal expansion up to\n",
       "second-order in $T_{e,SZ}$ and $\\beta$). "
      ]
@@ -127,7 +127,7 @@
      "collapsed": false,
      "input": [
       "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
-      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"mpc\"), nx=400)"
+      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
      ],
      "language": "python",
      "metadata": {},
@@ -144,7 +144,7 @@
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
       "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
-      "keywords for projections such as **center**, **width**, and **source**. The image buffer size can be controlled by setting **nx**.  \n"
+      "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },
     {
@@ -216,8 +216,16 @@
      "source": [
       "which would write all of the projections to a single FITS file,\n",
       "including coordinate information in kpc. The optional keyword\n",
-      "**clobber** allows a previous file to be overwritten. \n"
+      "`clobber` allows a previous file to be overwritten. \n"
      ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -226,4 +226,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -23,7 +23,8 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from matplotlib import pylab"
+      "from matplotlib import pylab\n",
+      "from yt.analysis_modules.halo_finding.api import HaloFinder"
      ],
      "language": "python",
      "metadata": {},

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/cookbook/amrkdtree_to_uniformgrid.py
--- /dev/null
+++ b/doc/source/cookbook/amrkdtree_to_uniformgrid.py
@@ -0,0 +1,33 @@
+import numpy as np
+import yt
+
+#This is an example of how to map an amr data set
+#to a uniform grid. In this case the highest
+#level of refinement is mapped into a 1024x1024x1024 cube
+
+#first the amr data is loaded
+ds = yt.load("~/pfs/galaxy/new_tests/feedback_8bz/DD0021/DD0021")
+
+#next we get the maximum refinement level
+lmax = ds.parameters['MaximumRefinementLevel']
+
+#calculate the center of the domain
+domain_center = (ds.domain_left_edge + ds.domain_right_edge)/2
+
+#determine the cellsize in the highest refinement level
+cell_size = ds.domain_width/(ds.domain_dimensions*2**lmax)
+
+#calculate the left edge of the new grid
+left_edge = domain_center - 512*cell_size
+
+#the number of cells per side of the new grid
+ncells = 1024
+
+#ask yt for the specified covering grid
+cgrid = ds.h.covering_grid(lmax, left_edge, np.array([ncells,]*3))
+
+#get a map of the density into the new grid
+density_map = cgrid["density"].astype(dtype="float32")
+
+#save the file as a numpy array for convenient future processing
+np.save("/pfs/goldbaum/galaxy/new_tests/feedback_8bz/gas_density_DD0021_log_densities.npy", density_map)

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/cookbook/ffmpeg_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/ffmpeg_volume_rendering.py
@@ -0,0 +1,99 @@
+#This is an example of how to make videos of 
+#uniform grid data using Theia and ffmpeg
+
+#The Scene object to hold the ray caster and view camera
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+
+#GPU based raycasting algorithm to use 
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+
+#These will be used to define how to color the data
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+#This will be used to launch ffmpeg
+import subprocess as sp
+
+#Of course we need numpy for math magic
+import numpy as np
+
+#Opacity scaling function
+def scale_func(v, mi, ma):
+      return  np.minimum(1.0, (v-mi)/(ma-mi) + 0.0)
+
+#load the uniform grid from a numpy array file
+bolshoi = "/home/bogert/log_densities_1024.npy"
+density_grid = np.load(bolshoi)
+
+#Set the TheiaScene to use the density_grid and 
+#setup the raycaster for a resulting 1080p image
+ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (1920,1080) ))
+
+#the min and max values in the data to color
+mi, ma = 0.0, 3.6
+
+#setup colortransferfunction
+bins = 5000
+tf = ColorTransferFunction( (mi, ma), bins)
+tf.map_to_colormap(0.5, ma, colormap="spring", scale_func = scale_func)
+
+#pass the transfer function to the ray caster
+ts.source.raycaster.set_transfer(tf)
+
+#Initial configuration for start of video
+#set initial opacity and brightness values
+#then zoom into the center of the data 30%
+ts.source.raycaster.set_opacity(0.03)
+ts.source.raycaster.set_brightness(2.3)
+ts.camera.zoom(30.0)
+
+#path to ffmpeg executable
+FFMPEG_BIN = "/usr/local/bin/ffmpeg"
+
+pipe = sp.Popen([ FFMPEG_BIN,
+        '-y', # (optional) overwrite the output file if it already exists
+	#This must be set to rawvideo because the image is an array
+        '-f', 'rawvideo', 
+	#The video codec must also be rawvideo for the same reason
+        '-vcodec','rawvideo',
+	#The size of the image array and resulting video
+        '-s', '1920x1080', 
+	#This must be rgba to match array format (uint32)
+        '-pix_fmt', 'rgba',
+	#frame rate of video
+        '-r', '29.97', 
+        #Indicate that the input to ffmpeg comes from a pipe
+        '-i', '-', 
+        # Tells FFMPEG not to expect any audio
+        '-an', 
+        #Setup video encoder
+	#Use any encoder you like that is available from ffmpeg
+        '-vcodec', 'libx264', '-preset', 'ultrafast', '-qp', '0',
+        '-pix_fmt', 'yuv420p',
+        #Name of the output
+        'bolshoiplanck2.mkv' ],
+        stdin=sp.PIPE,stdout=sp.PIPE)
+		
+		
+#Now we loop and produce 500 frames
+for k in range (0,500) :
+    #update the scene resulting in a new image
+    ts.update()
+
+    #get the image array from the ray caster
+    array = ts.source.get_results()
+
+    #send the image array to ffmpeg
+    array.tofile(pipe.stdin)
+
+    #rotate the scene by 0.01 rads in x,y & z
+    ts.camera.rotateX(0.01)
+    ts.camera.rotateZ(0.01)
+    ts.camera.rotateY(0.01)
+
+    #zoom in 0.01% for a total of a 5% zoom
+    ts.camera.zoom(0.01)
+
+
+#Close the pipe to ffmpeg
+pipe.terminate()

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/cookbook/opengl_stereo_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_stereo_volume_rendering.py
@@ -0,0 +1,370 @@
+from OpenGL.GL import *
+from OpenGL.GLUT import *
+from OpenGL.GLU import *
+from OpenGL.GL.ARB.vertex_buffer_object import *
+
+import sys, time
+import numpy as np
+import pycuda.driver as cuda_driver
+import pycuda.gl as cuda_gl
+
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+import numexpr as ne
+
+window = None     # Number of the glut window.
+rot_enabled = True
+
+#Theia Scene
+ts = None
+
+#RAY CASTING values
+c_tbrightness = 1.0
+c_tdensity = 0.05
+
+output_texture = None # pointer to offscreen render target
+
+leftButton = False
+middleButton = False
+rightButton = False
+
+#Screen width and height
+width = 1920
+height = 1080
+
+eyesep = 0.1
+
+(pbo, pycuda_pbo) = [None]*2
+(rpbo, rpycuda_pbo) = [None]*2
+
+#create 2 PBO for stereo scopic rendering
+def create_PBO(w, h):
+    global pbo, pycuda_pbo, rpbo, rpycuda_pbo
+    num_texels = w*h
+    array = np.zeros((num_texels, 3),np.float32)
+
+    pbo = glGenBuffers(1)
+    glBindBuffer(GL_ARRAY_BUFFER, pbo)
+    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pycuda_pbo = cuda_gl.RegisteredBuffer(long(pbo))
+
+    rpbo = glGenBuffers(1)
+    glBindBuffer(GL_ARRAY_BUFFER, rpbo)
+    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    rpycuda_pbo = cuda_gl.RegisteredBuffer(long(rpbo))
+
+def destroy_PBO():
+    global pbo, pycuda_pbo, rpbo, rpycuda_pbo
+    glBindBuffer(GL_ARRAY_BUFFER, long(pbo))
+    glDeleteBuffers(1, long(pbo));
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pbo,pycuda_pbo = [None]*2
+
+    glBindBuffer(GL_ARRAY_BUFFER, long(rpbo))
+    glDeleteBuffers(1, long(rpbo));
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    rpbo,rpycuda_pbo = [None]*2
+
+#consistent with C initPixelBuffer()
+def create_texture(w,h):
+    global output_texture
+    output_texture = glGenTextures(1)
+    glBindTexture(GL_TEXTURE_2D, output_texture)
+    # set basic parameters
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
+    # buffer data
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 w, h, 0, GL_RGB, GL_FLOAT, None)
+
+#consistent with C initPixelBuffer()
+def destroy_texture():
+    global output_texture
+    glDeleteTextures(output_texture);
+    output_texture = None
+
+def init_gl(w = 512 , h = 512):
+    Width, Height = (w, h)
+
+    glClearColor(0.1, 0.1, 0.5, 1.0)
+    glDisable(GL_DEPTH_TEST)
+
+    #matrix functions
+    glViewport(0, 0, Width, Height)
+    glMatrixMode(GL_PROJECTION);
+    glLoadIdentity();
+
+    #matrix functions
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
+
+def resize(Width, Height):
+    global width, height
+    (width, height) = Width, Height
+    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
+    glMatrixMode(GL_PROJECTION)
+    glLoadIdentity()
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+
+
+def do_tick():
+    global time_of_last_titleupdate, frame_counter, frames_per_second
+    if ((time.clock () * 1000.0) - time_of_last_titleupdate >= 1000.):
+        frames_per_second = frame_counter                   # Save The FPS
+        frame_counter = 0  # Reset The FPS Counter
+        szTitle = "%d FPS" % (frames_per_second )
+        glutSetWindowTitle ( szTitle )
+        time_of_last_titleupdate = time.clock () * 1000.0
+    frame_counter += 1
+
+oldMousePos = [ 0, 0 ]
+def mouseButton( button, mode, x, y ):
+	"""Callback function (mouse button pressed or released).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively"""
+
+	global leftButton, middleButton, rightButton, oldMousePos
+
+        if button == GLUT_LEFT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        leftButton = True
+            else:
+		leftButton = False
+
+        if button == GLUT_MIDDLE_BUTTON:
+	    if mode == GLUT_DOWN:
+	        middleButton = True
+            else:
+		middleButton = False
+
+        if button == GLUT_RIGHT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        rightButton = True
+            else:
+		rightButton = False
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def mouseMotion( x, y ):
+	"""Callback function (mouse moved while button is pressed).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively.
+	The global translation vector is updated according to
+	the movement of the mouse pointer."""
+
+	global ts, leftButton, middleButton, rightButton, oldMousePos
+	deltaX = x - oldMousePos[ 0 ]
+	deltaY = y - oldMousePos[ 1 ]
+
+	factor = 0.001
+
+	if leftButton == True:
+            ts.camera.rotateX( - deltaY * factor)
+            ts.camera.rotateY( - deltaX * factor)
+	if middleButton == True:
+	    ts.camera.translateX( deltaX* 2.0 * factor)
+	    ts.camera.translateY( - deltaY* 2.0 * factor)
+	if rightButton == True:
+	    ts.camera.scale += deltaY * factor
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def keyPressed(*args):
+    global c_tbrightness, c_tdensity, eyesep
+    # If escape is pressed, kill everything.
+    if args[0] == '\033':
+        print 'Closing..'
+        destroy_PBO()
+        destroy_texture()
+        exit()
+
+    #change the brightness of the scene
+    elif args[0] == ']':
+        c_tbrightness += 0.025
+    elif args[0] == '[':
+        c_tbrightness -= 0.025
+
+    #change the density scale
+    elif args[0] == ';':
+        c_tdensity -= 0.001
+    elif args[0] == '\'':
+        c_tdensity += 0.001 
+
+    #change the transfer scale
+    elif args[0] == '-':
+        eyesep -= 0.01
+    elif args[0] == '=':
+        eyesep += 0.01 
+
+def idle():
+    glutPostRedisplay()
+
+def display():
+    try:
+        #process left eye
+        process_image()
+        display_image()
+
+        #process right eye
+        process_image(eye = False)
+        display_image(eye = False)
+
+
+        glutSwapBuffers()
+
+    except:
+        from traceback import print_exc
+        print_exc()
+        from os import _exit
+        _exit(0)
+
+def process(eye = True):
+    global ts, pycuda_pbo, rpycuda_pbo, eyesep, c_tbrightness, c_tdensity
+    """ Use PyCuda """
+
+    ts.get_raycaster().set_opacity(c_tdensity)
+    ts.get_raycaster().set_brightness(c_tbrightness)
+
+    if (eye) :
+        ts.camera.translateX(-eyesep)
+        dest_mapping = pycuda_pbo.map()
+        (dev_ptr, size) = dest_mapping.device_ptr_and_size()
+        ts.get_raycaster().surface.device_ptr = dev_ptr
+        ts.update()
+        dest_mapping.unmap()
+        ts.camera.translateX(eyesep)
+    else :
+        ts.camera.translateX(eyesep)
+        dest_mapping = rpycuda_pbo.map()
+        (dev_ptr, size) = dest_mapping.device_ptr_and_size()
+        ts.get_raycaster().surface.device_ptr = dev_ptr
+        ts.update()
+        dest_mapping.unmap()
+        ts.camera.translateX(-eyesep)
+
+
+def process_image(eye =  True):
+    global output_texture, pbo, rpbo, width, height
+    """ copy image and process using CUDA """
+    # run the Cuda kernel
+    process(eye)
+    # download texture from PBO
+    if (eye) : 
+        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(pbo))
+        glBindTexture(GL_TEXTURE_2D, output_texture)
+
+        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 width, height, 0,
+                 GL_RGB, GL_FLOAT, None)
+    else :
+        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(rpbo))
+        glBindTexture(GL_TEXTURE_2D, output_texture)
+
+        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
+                 width, height, 0,
+                 GL_RGB, GL_FLOAT, None)
+
+def display_image(eye = True):
+    global width, height
+    """ render a screen sized quad """
+    glDisable(GL_DEPTH_TEST)
+    glDisable(GL_LIGHTING)
+    glEnable(GL_TEXTURE_2D)
+    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
+
+    #matrix functions should be moved
+    glMatrixMode(GL_PROJECTION)
+    glPushMatrix()
+    glLoadIdentity()
+    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
+    glMatrixMode( GL_MODELVIEW)
+    glLoadIdentity()
+    glViewport(0, 0, width, height)
+
+    if (eye) :
+        glDrawBuffer(GL_BACK_LEFT)
+    else :
+        glDrawBuffer(GL_BACK_RIGHT)
+
+    glBegin(GL_QUADS)
+    glTexCoord2f(0.0, 0.0)
+    glVertex3f(-1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 0.0)
+    glVertex3f(1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 1.0)
+    glVertex3f(1.0, 1.0, 0.5)
+    glTexCoord2f(0.0, 1.0)
+    glVertex3f(-1.0, 1.0, 0.5)
+    glEnd()
+
+    glMatrixMode(GL_PROJECTION)
+    glPopMatrix()
+
+    glDisable(GL_TEXTURE_2D)
+    glBindTexture(GL_TEXTURE_2D, 0)
+    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0)
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
+
+
+#note we may need to init cuda_gl here and pass it to camera
+def main():
+    global window, ts, width, height
+    (width, height) = (1920, 1080)
+
+    glutInit(sys.argv)
+    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH | GLUT_STEREO)
+    glutInitWindowSize(width, height)
+    glutInitWindowPosition(0, 0)
+    window = glutCreateWindow("Stereo Volume Rendering")
+
+
+    glutDisplayFunc(display)
+    glutIdleFunc(idle)
+    glutReshapeFunc(resize)
+    glutMouseFunc( mouseButton )
+    glutMotionFunc( mouseMotion )
+    glutKeyboardFunc(keyPressed)
+    init_gl(width, height)
+
+    # create texture for blitting to screen
+    create_texture(width, height)
+
+    import pycuda.gl.autoinit
+    import pycuda.gl
+    cuda_gl = pycuda.gl
+
+    create_PBO(width, height)
+    # ----- Load and Set Volume Data -----
+
+    density_grid = np.load("/home/bogert/dd150_log_densities.npy")
+
+    mi, ma= 21.5, 24.5
+    bins = 5000
+    tf = ColorTransferFunction( (mi, ma), bins)
+    tf.map_to_colormap(mi, ma, colormap="algae", scale_func = scale_func)
+
+    ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (width, height), tf = tf))
+
+    ts.get_raycaster().set_sample_size(0.01)
+    ts.get_raycaster().set_max_samples(5000)
+
+    glutMainLoop()
+
+def scale_func(v, mi, ma):
+    return  np.minimum(1.0, np.abs((v)-ma)/np.abs(mi-ma) + 0.0)
+
+# Print message to console, and kick off the main to get it rolling.
+if __name__ == "__main__":
+    print "Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda"
+    main()

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/cookbook/opengl_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/opengl_volume_rendering.py
@@ -0,0 +1,322 @@
+from OpenGL.GL import *
+from OpenGL.GLUT import *
+from OpenGL.GLU import *
+from OpenGL.GL.ARB.vertex_buffer_object import *
+
+import sys, time
+import numpy as np
+import pycuda.driver as cuda_driver
+import pycuda.gl as cuda_gl
+
+from yt.visualization.volume_rendering.theia.scene import TheiaScene
+from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
+from yt.visualization.color_maps import *
+
+import numexpr as ne
+
+window = None     # Number of the glut window.
+rot_enabled = True
+
+#Theia Scene
+ts = None
+
+#RAY CASTING values
+c_tbrightness = 1.0
+c_tdensity = 0.05
+
+output_texture = None # pointer to offscreen render target
+
+leftButton = False
+middleButton = False
+rightButton = False
+
+#Screen width and height
+width = 1024
+height = 1024
+
+eyesep = 0.1
+
+(pbo, pycuda_pbo) = [None]*2
+
+def create_PBO(w, h):
+    global pbo, pycuda_pbo
+    num_texels = w*h
+    array = np.zeros((w,h,3),np.uint32)
+
+    pbo = glGenBuffers(1)
+    glBindBuffer(GL_ARRAY_BUFFER, pbo)
+    glBufferData(GL_ARRAY_BUFFER, array, GL_DYNAMIC_DRAW)
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pycuda_pbo = cuda_gl.RegisteredBuffer(long(pbo))
+
+def destroy_PBO():
+    global pbo, pycuda_pbo
+    glBindBuffer(GL_ARRAY_BUFFER, long(pbo))
+    glDeleteBuffers(1, long(pbo));
+    glBindBuffer(GL_ARRAY_BUFFER, 0)
+    pbo,pycuda_pbo = [None]*2
+
+#consistent with C initPixelBuffer()
+def create_texture(w,h):
+    global output_texture
+    output_texture = glGenTextures(1)
+    glBindTexture(GL_TEXTURE_2D, output_texture)
+    # set basic parameters
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
+    # buffer data
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
+                 w, h, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, None)
+
+#consistent with C initPixelBuffer()
+def destroy_texture():
+    global output_texture
+    glDeleteTextures(output_texture);
+    output_texture = None
+
+def init_gl(w = 512 , h = 512):
+    Width, Height = (w, h)
+
+    glClearColor(0.1, 0.1, 0.5, 1.0)
+    glDisable(GL_DEPTH_TEST)
+
+    #matrix functions
+    glViewport(0, 0, Width, Height)
+    glMatrixMode(GL_PROJECTION);
+    glLoadIdentity();
+
+    #matrix functions
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
+
+def resize(Width, Height):
+    global width, height
+    (width, height) = Width, Height
+    glViewport(0, 0, Width, Height)        # Reset The Current Viewport And Perspective Transformation
+    glMatrixMode(GL_PROJECTION)
+    glLoadIdentity()
+    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
+
+
+def do_tick():
+    global time_of_last_titleupdate, frame_counter, frames_per_second
+    if ((time.clock () * 1000.0) - time_of_last_titleupdate >= 1000.):
+        frames_per_second = frame_counter                   # Save The FPS
+        frame_counter = 0  # Reset The FPS Counter
+        szTitle = "%d FPS" % (frames_per_second )
+        glutSetWindowTitle ( szTitle )
+        time_of_last_titleupdate = time.clock () * 1000.0
+    frame_counter += 1
+
+oldMousePos = [ 0, 0 ]
+def mouseButton( button, mode, x, y ):
+	"""Callback function (mouse button pressed or released).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively"""
+
+	global leftButton, middleButton, rightButton, oldMousePos
+
+        if button == GLUT_LEFT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        leftButton = True
+            else:
+		leftButton = False
+
+        if button == GLUT_MIDDLE_BUTTON:
+	    if mode == GLUT_DOWN:
+	        middleButton = True
+            else:
+		middleButton = False
+
+        if button == GLUT_RIGHT_BUTTON:
+	    if mode == GLUT_DOWN:
+	        rightButton = True
+            else:
+		rightButton = False
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def mouseMotion( x, y ):
+	"""Callback function (mouse moved while button is pressed).
+
+	The current and old mouse positions are stored in
+	a	global renderParam and a global list respectively.
+	The global translation vector is updated according to
+	the movement of the mouse pointer."""
+
+	global ts, leftButton, middleButton, rightButton, oldMousePos
+	deltaX = x - oldMousePos[ 0 ]
+	deltaY = y - oldMousePos[ 1 ]
+
+	factor = 0.001
+
+	if leftButton == True:
+             ts.camera.rotateX( - deltaY * factor)
+             ts.camera.rotateY( - deltaX * factor)
+	if middleButton == True:
+	     ts.camera.translateX( deltaX* 2.0 * factor)
+	     ts.camera.translateY( - deltaY* 2.0 * factor)
+	if rightButton == True:
+	     ts.camera.scale += deltaY * factor
+
+	oldMousePos[0], oldMousePos[1] = x, y
+	glutPostRedisplay( )
+
+def keyPressed(*args):
+    global c_tbrightness, c_tdensity
+    # If escape is pressed, kill everything.
+    if args[0] == '\033':
+        print 'Closing..'
+        destroy_PBO()
+        destroy_texture()
+        exit()
+
+    #change the brightness of the scene
+    elif args[0] == ']':
+        c_tbrightness += 0.025
+    elif args[0] == '[':
+        c_tbrightness -= 0.025
+
+    #change the density scale
+    elif args[0] == ';':
+        c_tdensity -= 0.001
+    elif args[0] == '\'':
+        c_tdensity += 0.001 
+
+def idle():
+    glutPostRedisplay()
+
+def display():
+    try:
+        #process left eye
+        process_image()
+        display_image()
+
+        glutSwapBuffers()
+
+    except:
+        from traceback import print_exc
+        print_exc()
+        from os import _exit
+        _exit(0)
+
+def process(eye = True):
+    global ts, pycuda_pbo, eyesep, c_tbrightness, c_tdensity
+
+    ts.get_raycaster().set_opacity(c_tdensity)
+    ts.get_raycaster().set_brightness(c_tbrightness)
+
+    dest_mapping = pycuda_pbo.map()
+    (dev_ptr, size) = dest_mapping.device_ptr_and_size()
+    ts.get_raycaster().surface.device_ptr = dev_ptr
+    ts.update()
+   # ts.get_raycaster().cast()
+    dest_mapping.unmap()
+
+
+def process_image():
+    global output_texture, pbo, width, height
+    """ copy image and process using CUDA """
+    # run the Cuda kernel
+    process()
+    # download texture from PBO
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, np.uint64(pbo))
+    glBindTexture(GL_TEXTURE_2D, output_texture)
+
+    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
+                 width, height, 0, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8_REV, None)
+
+def display_image(eye = True):
+    global width, height
+    """ render a screen sized quad """
+    glDisable(GL_DEPTH_TEST)
+    glDisable(GL_LIGHTING)
+    glEnable(GL_TEXTURE_2D)
+    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
+
+    #matrix functions should be moved
+    glMatrixMode(GL_PROJECTION)
+    glPushMatrix()
+    glLoadIdentity()
+    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
+    glMatrixMode( GL_MODELVIEW)
+    glLoadIdentity()
+    glViewport(0, 0, width, height)
+
+    glBegin(GL_QUADS)
+    glTexCoord2f(0.0, 0.0)
+    glVertex3f(-1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 0.0)
+    glVertex3f(1.0, -1.0, 0.5)
+    glTexCoord2f(1.0, 1.0)
+    glVertex3f(1.0, 1.0, 0.5)
+    glTexCoord2f(0.0, 1.0)
+    glVertex3f(-1.0, 1.0, 0.5)
+    glEnd()
+
+    glMatrixMode(GL_PROJECTION)
+    glPopMatrix()
+
+    glDisable(GL_TEXTURE_2D)
+    glBindTexture(GL_TEXTURE_2D, 0)
+    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0)
+    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0)
+
+
+#note we may need to init cuda_gl here and pass it to camera
+def main():
+    global window, ts, width, height
+    (width, height) = (1024, 1024)
+
+    glutInit(sys.argv)
+    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH )
+    glutInitWindowSize(width, height)
+    glutInitWindowPosition(0, 0)
+    window = glutCreateWindow("Stereo Volume Rendering")
+
+
+    glutDisplayFunc(display)
+    glutIdleFunc(idle)
+    glutReshapeFunc(resize)
+    glutMouseFunc( mouseButton )
+    glutMotionFunc( mouseMotion )
+    glutKeyboardFunc(keyPressed)
+    init_gl(width, height)
+
+    # create texture for blitting to screen
+    create_texture(width, height)
+
+    import pycuda.gl.autoinit
+    import pycuda.gl
+    cuda_gl = pycuda.gl
+
+    create_PBO(width, height)
+    # ----- Load and Set Volume Data -----
+
+    density_grid = np.load("/home/bogert/dd150_log_densities.npy")
+
+    mi, ma= 21.5, 24.5
+    bins = 5000
+    tf = ColorTransferFunction( (mi, ma), bins)
+    tf.map_to_colormap(mi, ma, colormap="algae", scale_func = scale_func)
+
+    ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (width, height), tf = tf))
+
+    ts.get_raycaster().set_sample_size(0.01)
+    ts.get_raycaster().set_max_samples(5000)
+    ts.update()
+
+    glutMainLoop()
+
+def scale_func(v, mi, ma):
+    return  np.minimum(1.0, np.abs((v)-ma)/np.abs(mi-ma) + 0.0)
+
+# Print message to console, and kick off the main to get it rolling.
+if __name__ == "__main__":
+    print "Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda"
+    main()

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -74,6 +74,8 @@
 this manner, but still want to contribute, please consider creating an external
 package, which we'll happily link to.
 
+.. _requirements-for-code-submission:
+
 Requirements for Code Submission
 ++++++++++++++++++++++++++++++++
 
@@ -88,22 +90,22 @@
   * New Features
 
     * New unit tests (possibly new answer tests) (See :ref:`testing`)
-    * Docstrings for public API
-    * Addition of new feature to the narrative documentation
-    * Addition of cookbook recipe
+    * Docstrings in the source code for the public API
+    * Addition of new feature to the narrative documentation (See :ref:`writing_documentation`)
+    * Addition of cookbook recipe (See :ref:`writing_documentation`) 
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Extension or Breakage of API in Existing Features
 
-    * Update existing narrative docs and docstrings
-    * Update existing cookbook recipes
+    * Update existing narrative docs and docstrings (See :ref:`writing_documentation`) 
+    * Update existing cookbook recipes (See :ref:`writing_documentation`) 
     * Modify or create new unit tests (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Bug fixes
 
     * Unit test is encouraged, to ensure breakage does not happen again in the
-      future.
+      future. (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
 When submitting, you will be asked to make sure that your changes meet all of
@@ -178,14 +180,14 @@
 
   $ python2.7 setup.py build --compiler=mingw32 install
 
+.. _sharing-changes:
+
 Making and Sharing Changes
 ++++++++++++++++++++++++++
 
 The simplest way to submit changes to yt is to commit changes in your
 ``$YT_DEST/src/yt-hg`` directory, fork the repository on BitBucket,  push the
-changesets to your fork, and then issue a pull request.  If you will be
-developing much more in-depth features for yt, you will also
-likely want to edit the paths in your 
+changesets to your fork, and then issue a pull request.  
 
 Here's a more detailed flowchart of how to submit changes.
 
@@ -224,25 +226,72 @@
 
         hg push https://bitbucket.org/YourUsername/yt/
 
-  #. Update your pull request by visiting
-     https://bitbucket.org/YourUsername/yt/pull-request/new
+  #. Your pull request will be automatically updated.
 
 .. _writing_documentation:
 
 How to Write Documentation
-++++++++++++++++++++++++++
+--------------------------
 
-The process for writing documentation is identical to the above, except that
-you're modifying source files in the doc directory (i.e. ``$YT_DEST/src/yt-hg/doc``) 
-instead of the src directory (i.e. ``$YT_DEST/src/yt-hg/yt``) of the yt repository.
+Writing documentation is one of the most important but often overlooked tasks
+for increasing yt's impact in the community.  It is the way in which the 
+world will understand how to use our code, so it needs to be done concisely
+and understandably.  Typically, when a developer submits some piece of code 
+with new functionality, she should also include documentation on how to use 
+that functionality (as per :ref:`requirements-for-code-submission`).  
+Depending on the nature of the code addition, this could be a new narrative 
+docs section describing how the new code works and how to use it, it could 
+include a recipe in the cookbook section, or it could simply be adding a note 
+in the relevant docs text somewhere.
+
+The documentation exists in the main mercurial code repository for yt in the 
+``doc`` directory (i.e. ``$YT_DEST/src/yt-hg/doc/source`` on systems installed 
+using the installer script).  It is organized hierarchically into the main 
+categories of:
+
+ * Visualizing
+ * Analyzing
+ * Examining
+ * Cookbook
+ * Bootcamp
+ * Developing
+ * Reference
+ * Help
+
+You will have to figure out where your new/modified doc fits into this, but 
+browsing through the pre-built documentation is a good way to sort that out.
+
 All the source for the documentation is written in 
-`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.
+`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.  ReST is very
+straightforward to mark up in a text editor, and if you are new to it, we
+recommend just using other .rst files in the existing yt documentation as 
+templates or checking out the 
+`ReST reference documentation <http://sphinx-doc.org/rest.html>`_.
 
-Cookbook recipes go in ``source/cookbook/`` and must be added to one of the
-``.rst`` files in that directory.  
+New cookbook recipes (see :ref:`cookbook`) are very helpful for the community 
+as they provide simple annotated recipes on how to use specific functionality.  
+To add one, create a concise python script which demonstrates some 
+functionality and pare it down to its minimum.  Add some comment lines to 
+describe what it is that you're doing along the way.  Place this ``.py`` file 
+in the ``source/cookbook/`` directory, and then link to it explicitly in one 
+of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``, 
+``cosmological_analysis.rst``, etc.), and add some description of what the script 
+actually does.  We recommend that you use one of the 
+`sample data sets <http://yt-project.org/data>`_ in your recipe.  When the full
+docs are built, each of the cookbook recipes is executed dynamically on
+a system which has access to all of the sample datasets.  Any output images 
+generated by your script will then be attached inline in the built documentation 
+directly following your script.
 
-For more information on how to build the documentation to make sure it looks
-the way you expect it to after modifying it, see :ref:`docs_build`.
+After you have made your modifications to the docs, you will want to make sure
+that they render the way you expect them to render.  For more information on
+this, see the section on :ref:`docs_build`.  Unless you're contributing cookbook
+recipes or notebooks which require a dynamical build, you can probably get 
+away with just doing a 'quick' docs build.
+
+When you have completed your documentation additions, commit your changes 
+to your repository and make a pull request in the same way you would contribute 
+a change to the codebase, as described in the section on :ref:`sharing-changes`.
 
 How To Get The Source Code For Editing
 --------------------------------------
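
For readers following the cookbook-recipe workflow described in the documentation
changes above, a minimal sketch of what such a recipe script could look like is
shown below; the filename, sample dataset, and field choice are illustrative only
and are not part of this changeset:

    # doc/source/cookbook/simple_density_slice.py  (hypothetical filename)
    import yt

    # Load one of the public sample datasets from http://yt-project.org/data
    # (this particular dataset name is only an example).
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # Make a slice through the midplane and save it; when the full docs are
    # built, the resulting image is attached inline after the script.
    slc = yt.SlicePlot(ds, "z", "density")
    slc.save()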

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -0,0 +1,156 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:6da8ec00f414307f27544fbdbc6b4fa476e5e96809003426279b2a1c898b4546"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This example creates a fake in-memory particle dataset and then loads it as a yt dataset using the `load_particles` function.\n",
+      "\n",
+      "Our \"fake\" dataset will be numpy arrays filled with normally distributed randoml particle positions and uniform particle masses.  Since real data is often scaled, I arbitrarily multiply by 1e6 to show how to deal with scaled data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "\n",
+      "n_particles = 5e6\n",
+      "\n",
+      "ppx, ppy, ppz = 1e6*np.random.normal(size=[3, n_particles])\n",
+      "\n",
+      "ppm = np.ones(n_particles)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function accepts a dictionary populated with particle data fields loaded in memory as numpy arrays or python lists:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {'particle_position_x': ppx,\n",
+      "        'particle_position_y': ppy,\n",
+      "        'particle_position_z': ppz,\n",
+      "        'particle_mass': ppm}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To hook up with yt's internal field system, the dictionary keys must be 'particle_position_x', 'particle_position_y', 'particle_position_z', and 'particle_mass', as well as any other particle field provided by one of the particle frontends."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function transforms the `data` dictionary into an in-memory yt `Dataset` object, providing an interface for further analysis with `yt`. The example below illustrates how to load the data dictionary we created above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "from yt.units import parsec, Msun\n",
+      "\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppy), max(ppy)]])\n",
+      "\n",
+      "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `length_unit` and `mass_unit` are the conversion from the units used in the `data` dictionary to CGS.  I've arbitrarily chosen one parsec and 10^8 Msun for this example. \n",
+      "\n",
+      "The `n_ref` parameter controls how many particle it takes to accumulate in an oct-tree cell to trigger refinement.  Larger `n_ref` will decrease poisson noise at the cost of resolution in the octree.  \n",
+      "\n",
+      "Finally, the `bbox` parameter is a bounding box in the units of the dataset that contains all of the particles.  This is used to set the size of the base octree block."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This new dataset acts like any other `yt` `Dataset` object, and can be used to create data objects and query for yt fields.  This example shows how to access \"deposit\" fields:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()\n",
+      "\n",
+      "# This is generated with \"cloud-in-cell\" interpolation.\n",
+      "cic_density = ad[\"deposit\", \"all_cic\"]\n",
+      "\n",
+      "# These three are based on nearest-neighbor cell deposition\n",
+      "nn_density = ad[\"deposit\", \"all_density\"]\n",
+      "nn_deposited_mass = ad[\"deposit\", \"all_mass\"]\n",
+      "particle_count_per_cell = ad[\"deposit\", \"all_count\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+      "slc.set_width((8, 'Mpc'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
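
Read straight through, the notebook cells above amount to roughly the following
plain-Python sketch (the 10% bounding-box padding and the parsec/Msun unit choices
simply mirror the notebook; nothing new is introduced):

    import numpy as np
    import yt
    from yt.units import parsec, Msun

    n_particles = int(5e6)

    # Normally distributed positions (scaled by 1e6 to mimic "real" scaled data)
    # and uniform masses.
    ppx, ppy, ppz = 1e6 * np.random.normal(size=[3, n_particles])
    ppm = np.ones(n_particles)

    data = {'particle_position_x': ppx,
            'particle_position_y': ppy,
            'particle_position_z': ppz,
            'particle_mass': ppm}

    # Bounding box in data units, padded by 10%.
    bbox = 1.1 * np.array([[ppx.min(), ppx.max()],
                           [ppy.min(), ppy.max()],
                           [ppz.min(), ppz.max()]])

    ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8 * Msun,
                           n_ref=256, bbox=bbox)

    # Deposited fields then behave like any other yt field.
    ad = ds.all_data()
    cic_density = ad["deposit", "all_cic"]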

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -898,3 +898,4 @@
 Generic Particle Data
 ---------------------
 
+.. notebook:: Loading_Generic_Particle_Data.ipynb

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -348,7 +348,7 @@
    there
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
- * Adaptive HEALPix support (see :ref:`adaptive_healpix_volume_rendering`)
+ * Adaptive HEALPix support
  * Column density calculations (see :ref:`radial-column-density`)
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -93,5 +93,5 @@
   uploading AMRSurface objects.
 * ``suppressStreamLogging`` (default: ``'False'``): If true, execution mode will be
   quiet.
-* ``stdoutStreamLogging`` (default: ``'False'``): If three, logging is directed
+* ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr
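
As a quick illustration of where these options live, a user's configuration file
(typically ``~/.yt/config``; the exact path is an assumption here, not part of this
diff) might contain:

    [yt]
    suppressStreamLogging = False
    stdoutStreamLogging = True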

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,7 +120,7 @@
 .. python-script::
    
    from yt.mods import *
-   data_pf = load('Enzo_64/RD0006/RD0006')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 
    hc = HaloCatalog(halos_pf=halos_pf)

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -16,7 +16,7 @@
 packages.
 
 Note that the index object associated with your snapshot file contains a
-list of plots you've made in ``pf.h.plots``.
+list of plots you've made in ``ds.plots``.
 
 .. _fixed-resolution-buffers:
 

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -466,3 +466,90 @@
 your homogenized volume to then be passed in to the camera. A sample usage is shown
 in :ref:`cookbook-amrkdtree_downsampling`.
 
+Hardware Volume Rendering on NVidia Graphics cards
+--------------------------------------------------
+.. versionadded:: 3.0
+
+Theia is a hardware volume renderer that takes advantage of NVidia's CUDA language
+to perform ray casting with GPUs instead of the CPU.
+
+Only unigrid rendering is supported, but yt provides a grid mapping function
+to get unigrid data from AMR or SPH formats:
+:ref:`cookbook-amrkdtree_to_uniformgrid`.
+
+System Requirements
+-------------------
+.. versionadded:: 3.0
+
+Nvidia graphics card - The memory limit of the graphics card sets the limit
+                       on the size of the data source.
+
+CUDA 5 or later.
+
+The environment variable CUDA_SAMPLES must be set pointing to
+the common/inc samples shipped with CUDA. The following shows an example
+in bash with CUDA 5.5 installed in /usr/local::
+
+    export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
+
+PyCUDA must also be installed to use Theia. 
+
+PyCUDA can be installed following these instructions::
+
+    git clone --recursive http://git.tiker.net/trees/pycuda.git
+
+    python configure.py
+    python setup.py install
+
+
+Tutorial
+--------
+.. versionadded:: 3.0
+
+Currently rendering only works on uniform grids. Here is an example
+on a 1024 cube of float32 scalars.
+
+.. code-block:: python
+
+   from yt.visualization.volume_rendering.theia.scene import TheiaScene
+   from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
+   import numpy as np
+
+   #load 3D numpy array of float32
+   volume = np.load("/home/bogert/log_densities_1024.npy")
+
+   scene = TheiaScene( volume = volume, raycaster = FrontToBackRaycaster() )
+
+   scene.camera.rotateX(1.0)
+   scene.update()
+
+   surface = scene.get_results()
+   #surface now contains a 2D image array of int32 rgba values
+
+.. _the-theiascene-interface:
+
+The TheiaScene Interface
+------------------------
+.. versionadded:: 3.0
+
+A TheiaScene object has been created to provide a high level entry point for
+controlling the raycaster's view onto the data. The class  
+:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates
+a Camera object and a TheiaSource that in turn encapsulates
+a volume. The :class:`~yt.visualization.volume_rendering.theia.Camera`
+provides controls for rotating, translating, and zooming into the volume.
+Using the :class:`~yt.visualization.volume_rendering.theia.TheiaSource`
+automatically transfers the volume to the graphics card's texture memory.
+
+Example Cookbooks
+-----------------
+
+OpenGL Example for interactive volume rendering:
+:ref:`cookbook-opengl_volume_rendering`.
+
+OpenGL Stereoscopic Example:
+:ref:`cookbook-opengl_stereo_volume_rendering`.
+
+.. warning::  Frame rate will suffer significantly from stereoscopic rendering.
+              ~2x slower since the volume must be rendered twice.
+
+Pseudo-Realtime video rendering with ffmpeg :
+:ref:`cookbook-ffmpeg_volume_rendering`.
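
A short sketch of the camera controls described in "The TheiaScene Interface"
section above, using only the Camera methods exercised elsewhere in this changeset
(rotateX/rotateY, translateX, and the scale attribute); the rotation angles and
offsets are arbitrary:

    from yt.visualization.volume_rendering.theia.scene import TheiaScene
    from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
    import numpy as np

    # Same float32 cube as in the tutorial above.
    volume = np.load("/home/bogert/log_densities_1024.npy")
    scene = TheiaScene(volume=volume, raycaster=FrontToBackRaycaster())

    # Rotate about two axes, nudge the view sideways, then zoom by scaling.
    scene.camera.rotateX(0.5)
    scene.camera.rotateY(0.25)
    scene.camera.translateX(0.1)
    scene.camera.scale += 0.05

    scene.update()
    surface = scene.get_results()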

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -81,13 +81,13 @@
     dpf = halo.halo_catalog.data_pf
     hpf = halo.halo_catalog.halos_pf
     center = dpf.arr([halo.quantities["particle_position_%s" % axis] \
-                      for axis in "xyz"]) / dpf.length_unit
-    radius = factor * halo.quantities[radius_field] / dpf.length_unit
+                      for axis in "xyz"])
+    radius = factor * halo.quantities[radius_field]
     if radius <= 0.0:
         halo.data_object = None
         return
     try:
-        sphere = dpf.sphere(center, (radius, "code_length"))
+        sphere = dpf.sphere(center, radius)
     except YTSphereTooSmall:
         halo.data_object = None
         return

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -293,13 +293,17 @@
             if cur_dens > dens_thresh[3]: alt_m3 = total_mass
             if cur_dens > dens_thresh[4]: alt_m4 = total_mass
             if cur_dens <= dens_thresh[1]:
-                h['m'] = m
-                h['alt_m1'] = alt_m1
-                h['alt_m2'] = alt_m2
-                h['alt_m3'] = alt_m3
-                h['alt_m4'] = alt_m4
-                return
-        
+                break
+        h['m'] = m
+        h['alt_m1'] = alt_m1
+        h['alt_m2'] = alt_m2
+        h['alt_m3'] = alt_m3
+        h['alt_m4'] = alt_m4
+        # if cur_dens > dens_thresh[1]:
+            # This is usually a subhalo problem, and we don't know who is a subhalo
+            # print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!"
+            # print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1])
+
     def max_halo_radius(self, int i):
         return max_halo_radius(&halos[i])
 

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -43,6 +43,10 @@
         collection is instantiated.
         Default : None (will default to the fields 'particle_position_x',
         'particle_position_y', 'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series.
+        Default : False
 
     Examples
     ________
@@ -59,7 +63,7 @@
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
     """
-    def __init__(self, outputs, indices, fields=None) :
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
 
         indices.sort() # Just in case the caller wasn't careful
         self.field_data = YTFieldData()
@@ -74,6 +78,7 @@
         self.num_indices = len(indices)
         self.num_steps = len(outputs)
         self.times = []
+        self.suppress_logging = suppress_logging
 
         # Default fields 
         
@@ -83,8 +88,9 @@
         fields.append("particle_position_z")
         fields = list(OrderedDict.fromkeys(fields))
 
-        old_level = int(ytcfg.get("yt","loglevel"))
-        mylog.setLevel(40)
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
         my_storage = {}
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
@@ -101,7 +107,8 @@
             pbar.update(i)
         pbar.finish()
 
-        mylog.setLevel(old_level)
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
 
         times = []
         for fn, time in sorted(my_storage.items()):
@@ -191,14 +198,16 @@
         with shape (num_indices, num_steps)
         """
         if not self.field_data.has_key(field):
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
+            if self.suppress_logging:
+                old_level = int(ytcfg.get("yt","loglevel"))
+                mylog.setLevel(40)
             dd_first = self.data_series[0].all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:
                     self.particle_fields.append(field)
-            particles = np.empty((self.num_indices,self.num_steps)) * np.nan
+            particles = np.empty((self.num_indices,self.num_steps))
+            particles[:] = np.nan
             step = int(0)
             pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
             my_storage={}
@@ -232,7 +241,8 @@
             for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
                 particles[indices,i] = pfield
             self.field_data[field] = array_like_field(dd_first, particles, fd)
-            mylog.setLevel(old_level)
+            if self.suppress_logging:
+                mylog.setLevel(old_level)
         return self.field_data[field]
 
     def trajectory_from_index(self, index):
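
A sketch of how the new ``suppress_logging`` keyword might be used, following the
docstring example above; the import path, output filenames, and particle indices
are assumptions for illustration only:

    import glob
    from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    # Hypothetical time-series outputs and particle indices to follow.
    my_fns = glob.glob("DD*/output_*")
    my_fns.sort()
    indices = [1, 2, 3, 4]

    # Quiet yt's per-dataset log messages while the trajectories are assembled.
    trajs = ParticleTrajectories(my_fns, indices,
                                 fields=["particle_velocity_x"],
                                 suppress_logging=True)
    for t in trajs:
        print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()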

diff -r d116c37222b2d3da203f9c5c791cd8d4a617c03f -r eb96049ecbee42eda2e93e3e40b1427a1918b675 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -17,14 +17,6 @@
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.funcs import get_pbar
 
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = (data["v_los"] >= vmin) & (data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
 def create_vlos(z_hat):
     def _v_los(field, data):
         vz = data["velocity_x"]*z_hat[0] + \
@@ -90,9 +82,11 @@
             self.v_bnd = -vmax, vmax
         else:
             self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+                          ds.quan(velocity_bounds[1], velocity_bounds[2]))
 
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1])
+        self.dv = (self.v_bnd[1]-self.v_bnd[0])/self.nv
 
         _vlos = create_vlos(orient.unit_vectors[2])
         ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
@@ -100,11 +94,8 @@
         self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
         pbar = get_pbar("Generating cube.", self.nv)
         for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
+            _intensity = self._create_intensity(i)
+            ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
             prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                       (self.nx, self.ny), "intensity")
             self.data[:,:,i] = prj[:,:]
@@ -145,7 +136,7 @@
 
         dx = length_unit[0]/self.nx
         dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units("m/s").value/self.nv
+        dv = self.dv.in_units("m/s").value
 
         if length_unit[1] == "deg":
             dx *= -1.
@@ -162,3 +153,11 @@
         fib[0].header["btype"] = self.field
 
         fib.writeto(filename, clobber=clobber)
+
+    def _create_intensity(self, i):
+        def _intensity(field, data):
+            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            w = 1.-w
+            w[w < 0.0] = 0.0
+            return data[self.field]*w
+        return _intensity
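
A minimal numerical illustration of the triangular channel weighting the refactored
``_create_intensity`` applies: the weight falls linearly from 1 at the channel
center ``vmid[i]`` to 0 one channel width ``dv`` away.  Plain floats are assumed
here rather than yt quantities:

    import numpy as np

    def channel_weight(v_los, vmid, dv):
        w = 1.0 - np.abs(v_los - vmid) / dv
        w[w < 0.0] = 0.0
        return w

    v_los = np.linspace(-3.0, 3.0, 7)              # hypothetical line-of-sight velocities
    print channel_weight(v_los, vmid=0.0, dv=1.0)  # -> [ 0.  0.  0.  1.  0.  0.  0.]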

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/e0f12179cd2a/
Changeset:   e0f12179cd2a
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-19 21:48:37
Summary:     Adding find functions, putting prints in debug statements
Affected #:  1 file

diff -r eb96049ecbee42eda2e93e3e40b1427a1918b675 -r e0f12179cd2a465f338d9b6fa998730b9bf90b35 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -176,7 +176,7 @@
 
     def build_memmap(self):
         assert(self.size != -1)
-        print 'Building memmap with offset: %i' % self._offset
+        mylog.info('Building memmap with offset: %i and size %i' % (self._offset, self.size))
         self.handle = HTTPArray(self.filename, dtype=self.dtype,
                         shape=self.size, offset=self._offset)
         for k in self.dtype.names:
@@ -407,7 +407,6 @@
             a =  self.sdfdata.parameters.get("a", 1.0)
             rmin = -a * np.array([rx, ry, rz])
             rmax = a * np.array([rx, ry, rz])
-            print rmin, rmax
             self.true_domain_left = rmin.copy()
             self.true_domain_right = rmax.copy()
             self.true_domain_width = rmax - rmin
@@ -415,6 +414,7 @@
             expand_root = 0.0
             morton_xyz = self.sdfdata.parameters.get("morton_xyz", False)
             if not morton_xyz:
+                mylog.debug("Accounting for wandering particles")
                 self.wandering_particles = True
                 ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
                 # Expand root for non power-of-2
@@ -432,8 +432,9 @@
             self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
             self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
 
-        print self.rmin, self.rmax
-        mylog.debug("SINDEX: %s, %s, %s " % (self.domain_width, self.domain_dims, self.domain_active_dims))
+        mylog.debug("SINDEX rmin: %s, rmax: %s" % (self.rmin, self.rmax))
+        mylog.debug("SINDEX: domain_width: %s, domain_dims: %s, domain_active_dims: %s " %
+                    (self.domain_width, self.domain_dims, self.domain_active_dims))
 
     def spread_bits(self, ival, level=None):
         if level is None:
@@ -528,7 +529,7 @@
         #print 'Getting data from ileft to iright:',  ileft, iright
 
         ix, iy, iz = (iright-ileft)*1j
-        #print 'IBBOX:', ileft, iright, ix, iy, iz
+        mylog.debug('SINDEX IBBOX: %s %s %s %s %s' % (ileft, iright, ix, iy, iz))
 
         # plus 1 that is sliced, plus a bit since mgrid is not inclusive
         Z, Y, X = np.mgrid[ileft[2]:iright[2]+1.01,
@@ -737,7 +738,7 @@
         if pos_fields is None:
             pos_fields = 'x','y','z'
         xf, yf, zf = pos_fields
-        print pos_fields
+        mylog.debug("Using position fields: %s" % pos_fields)
 
         # I'm sorry.
         pos = mpcuq * np.array([data[xf].in_units('Mpccm/h'), data[yf].in_units('Mpccm/h'), data[zf].in_units('Mpccm/h')]).T
@@ -750,7 +751,7 @@
             mask = pos[:,i] < right[i] - DW[i]
             pos[mask, i] += DW[i]
 
-        print left, right, pos.min(axis=0), pos.max(axis=0)
+        mylog.debug("Periodic filtering, %s %s %s %s" % (left, right, pos.min(axis=0), pos.max(axis=0)))
         # Now get all particles that are within the bbox
         mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
 
@@ -869,6 +870,18 @@
         #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
         return lmax_lk, lmax_rk
 
+    def find_max_cell(self):
+        max_cell = np.argmax(self.indexdata['len'])
+        return max_cell
+
+    def find_max_cell_center(self):
+        max_cell = self.find_max_cell()
+        cell_ijk = np.array(
+            self.get_ind_from_key(self.indexdata['index'][max_cell]))
+        position = (cell_ijk + 0.5) * (self.domain_width / self.domain_dims) +\
+                self.rmin
+        return position
+
     def get_cell_data(self, level, cell_iarr, fields):
         """
         Get data from requested cell
@@ -888,7 +901,7 @@
         """
         cell_iarr = np.array(cell_iarr)
         lk, rk =self.get_key_bounds(level, cell_iarr)
-        print 'Reading from ', lk, rk
+        mylog.debug("Reading contiguous chunk from %i to %i" % (lk, rk))
         return self.get_contiguous_chunk(lk, rk, fields)
 
     def get_cell_bbox(self, level, cell_iarr):


https://bitbucket.org/yt_analysis/yt/commits/136e03fe55dc/
Changeset:   136e03fe55dc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-20 14:48:16
Summary:     Switching to a better data center method for PW.
Affected #:  1 file

diff -r e0f12179cd2a465f338d9b6fa998730b9bf90b35 -r 136e03fe55dc1fd9a1f483d82d5cb96085b40aae yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -296,9 +296,11 @@
         self._set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in
-                      range(len(self.data_source.center))
-                      if i != self.data_source.axis]
+            ax = self.data_source.axis
+            xax = self.pf.coordinates.x_axis[ax]
+            yax = self.pf.coordinates.y_axis[ax]
+            center = [self.data_source.center[xax],
+                      self.data_source.center[yax]]
             self.set_center(center)
         for field in self.frb.data.keys():
             finfo = self.data_source.pf._get_field_info(*field)
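
The change above picks the in-plane center components via the coordinate handler
rather than by deleting the slice-axis component.  A small sketch of the lookup it
relies on; the concrete index values in the comments assume the usual cartesian
handler, where the image plane of a y-axis slice is z-x:

    # ds is an already-loaded dataset; axis 1 means a slice along y.
    ax = 1
    xax = ds.coordinates.x_axis[ax]   # image-plane x axis (2, i.e. z, for cartesian data)
    yax = ds.coordinates.y_axis[ax]   # image-plane y axis (0, i.e. x, for cartesian data)
    center = [data_source.center[xax], data_source.center[yax]]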


https://bitbucket.org/yt_analysis/yt/commits/6071fc3978c9/
Changeset:   6071fc3978c9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-20 15:02:30
Summary:     Enable _is_valid for SDF over HTTP
Affected #:  2 files

diff -r 136e03fe55dc1fd9a1f483d82d5cb96085b40aae -r 6071fc3978c9da1e8cd4b100bead5fb2c982b218 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -91,13 +91,13 @@
         prefix = ''
         if self.idx_filename is not None:
             prefix += 'sindex_'
-        if 'http' in filename:
+        if filename.startswith("http"):
             prefix += 'http_'
         dataset_type = prefix + 'sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
-        if 'http' in self.parameter_filename:
+        if self.parameter_filename.startswith("http"):
             self.sdf_container = HTTPSDFRead(self.parameter_filename,
                                              header=self.sdf_header)
         else:
@@ -183,6 +183,8 @@
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
+        if args[0].startswith("http") and "idx_filename" in kwargs:
+            return True
         if not os.path.isfile(args[0]): return False
         with open(args[0], "r") as f:
             line = f.readline().strip()

diff -r 136e03fe55dc1fd9a1f483d82d5cb96085b40aae -r 6071fc3978c9da1e8cd4b100bead5fb2c982b218 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -640,6 +640,9 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if args[0].startswith("http://"):
+        if not args[0].startswith("http://"):
+            return False
+        hreq = requests.get(args[0] + "/yt_index.json")
+        if hreq.status_code == 200:
             return True
         return False


https://bitbucket.org/yt_analysis/yt/commits/343c4de42e93/
Changeset:   343c4de42e93
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-20 15:16:11
Summary:     Check for SDF header, and only read 10 bytes.
Affected #:  1 file

diff -r 6071fc3978c9da1e8cd4b100bead5fb2c982b218 -r 343c4de42e932149e737429a10f63284ceaa5277 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -42,6 +42,12 @@
     SDFIndex,\
     HTTPSDFRead
 
+try:
+    import requests
+except ImportError:
+    requests = None
+
+
 
 # currently specified by units_2HOT == 2 in header
 # in future will read directly from file
@@ -183,9 +189,14 @@
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        if args[0].startswith("http") and "idx_filename" in kwargs:
-            return True
-        if not os.path.isfile(args[0]): return False
-        with open(args[0], "r") as f:
-            line = f.readline().strip()
-            return line[:5] == "# SDF"
+        if args[0].startswith("http"):
+            if requests is None: return False
+            hreq = requests.get(args[0], stream=True)
+            if hreq.status_code != 200: return False
+            line = hreq.iter_content(10).next()
+        elif os.path.isfile(args[0]): 
+            with open(args[0], "r") as f:
+                line = f.read(10).strip()
+        else:
+            return False
+        return line.startswith("# SDF")
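
The pattern above (stream the remote file and inspect only its first bytes rather
than downloading the whole thing) can be illustrated on its own as follows; the
URL is hypothetical and the 10-byte read mirrors the local-file branch:

    import requests

    # Hypothetical SDF file served over HTTP.
    url = "http://example.org/some_dataset.sdf"
    hreq = requests.get(url, stream=True)
    if hreq.status_code == 200:
        # Pull only the first 10 bytes off the wire.
        first_bytes = hreq.iter_content(10).next()
        print first_bytes.startswith("# SDF")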


https://bitbucket.org/yt_analysis/yt/commits/7857e9e271a1/
Changeset:   7857e9e271a1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-20 15:20:09
Summary:     We're going to implement utilization of this later.
Affected #:  1 file

diff -r 343c4de42e932149e737429a10f63284ceaa5277 -r 7857e9e271a191725caf79d9ec2cfef996dd8100 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -193,7 +193,8 @@
             if requests is None: return False
             hreq = requests.get(args[0], stream=True)
             if hreq.status_code != 200: return False
-            line = hreq.iter_content(10).next()
+            # Grab a whole 4k page.
+            line = hreq.iter_content(4096).next()
         elif os.path.isfile(args[0]): 
             with open(args[0], "r") as f:
                 line = f.read(10).strip()


https://bitbucket.org/yt_analysis/yt/commits/88716e655fb1/
Changeset:   88716e655fb1
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-20 03:11:45
Summary:     Need a [:] in order for httparray to pick it up.
Affected #:  1 file

diff -r e0f12179cd2a465f338d9b6fa998730b9bf90b35 -r 88716e655fb105be019348c527fda264dcddf02b yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -871,7 +871,7 @@
         return lmax_lk, lmax_rk
 
     def find_max_cell(self):
-        max_cell = np.argmax(self.indexdata['len'])
+        max_cell = np.argmax(self.indexdata['len'][:])
         return max_cell
 
     def find_max_cell_center(self):


https://bitbucket.org/yt_analysis/yt/commits/05ac628d72ee/
Changeset:   05ac628d72ee
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-20 17:15:56
Summary:     Merging
Affected #:  3 files

diff -r 88716e655fb105be019348c527fda264dcddf02b -r 05ac628d72eea591e0c95aed0b8ee5fec1681038 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -42,6 +42,12 @@
     SDFIndex,\
     HTTPSDFRead
 
+try:
+    import requests
+except ImportError:
+    requests = None
+
+
 
 # currently specified by units_2HOT == 2 in header
 # in future will read directly from file
@@ -91,13 +97,13 @@
         prefix = ''
         if self.idx_filename is not None:
             prefix += 'sindex_'
-        if 'http' in filename:
+        if filename.startswith("http"):
             prefix += 'http_'
         dataset_type = prefix + 'sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
-        if 'http' in self.parameter_filename:
+        if self.parameter_filename.startswith("http"):
             self.sdf_container = HTTPSDFRead(self.parameter_filename,
                                              header=self.sdf_header)
         else:
@@ -183,7 +189,15 @@
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        if not os.path.isfile(args[0]): return False
-        with open(args[0], "r") as f:
-            line = f.readline().strip()
-            return line[:5] == "# SDF"
+        if args[0].startswith("http"):
+            if requests is None: return False
+            hreq = requests.get(args[0], stream=True)
+            if hreq.status_code != 200: return False
+            # Grab a whole 4k page.
+            line = hreq.iter_content(4096).next()
+        elif os.path.isfile(args[0]): 
+            with open(args[0], "r") as f:
+                line = f.read(10).strip()
+        else:
+            return False
+        return line.startswith("# SDF")

diff -r 88716e655fb105be019348c527fda264dcddf02b -r 05ac628d72eea591e0c95aed0b8ee5fec1681038 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -640,6 +640,9 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if args[0].startswith("http://"):
+        if not args[0].startswith("http://"):
+            return False
+        hreq = requests.get(args[0] + "/yt_index.json")
+        if hreq.status_code == 200:
             return True
         return False

diff -r 88716e655fb105be019348c527fda264dcddf02b -r 05ac628d72eea591e0c95aed0b8ee5fec1681038 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -296,9 +296,11 @@
         self._set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in
-                      range(len(self.data_source.center))
-                      if i != self.data_source.axis]
+            ax = self.data_source.axis
+            xax = self.pf.coordinates.x_axis[ax]
+            yax = self.pf.coordinates.y_axis[ax]
+            center = [self.data_source.center[xax],
+                      self.data_source.center[yax]]
             self.set_center(center)
         for field in self.frb.data.keys():
             finfo = self.data_source.pf._get_field_info(*field)


https://bitbucket.org/yt_analysis/yt/commits/5e231e6e1955/
Changeset:   5e231e6e1955
Branch:      yt-3.0
User:        samskillman
Date:        2014-06-20 17:17:36
Summary:     Put in _subspace property for other things to pick up and realize we are in a
subspace of the full volume. Useful for figuring out offsets between particles
and halo datasets
Affected #:  1 file

diff -r 05ac628d72eea591e0c95aed0b8ee5fec1681038 -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -67,6 +67,7 @@
     _particle_velocity_name = None
     _sindex = None
     _skip_cache = True
+    _subspace = False
 
 
     def __init__(self, filename, dataset_type = "sdf_particles",
@@ -80,6 +81,7 @@
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
+            self._subspace = True
             bbox = np.array(bounding_box, dtype="float32")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()


https://bitbucket.org/yt_analysis/yt/commits/1d8a407e8c26/
Changeset:   1d8a407e8c26
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 18:58:11
Summary:     Merging from yt-3.0.
Affected #:  110 files

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -568,7 +568,7 @@
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-get_ytdata xray_emissivity.h5
+[ ! -e xray_emissivity.h5 ] && get_ytdata xray_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
@@ -608,7 +608,6 @@
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -624,7 +623,6 @@
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
 echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
@@ -657,7 +655,6 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
-get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -816,6 +813,7 @@
         YT_DIR=`dirname $ORIG_PWD`
     elif [ ! -e yt-hg ]
     then
+        echo "Cloning yt"
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
@@ -824,9 +822,9 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-3.0-hg ] 
+    elif [ -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
     fi
     echo Setting YT_DIR=${YT_DIR}
 fi
@@ -943,14 +941,19 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar/done ]
+    if [ ! -e rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
-        cd Rockstar
+        if [ ! -e rockstar ]
+        then
+            ( hg clone http://bitbucket.org/MatthewTurk/rockstar 2>&1 ) 1>> ${LOG_FILE}
+        fi
+        cd rockstar
+        ( hg pull 2>&1 ) 1>> ${LOG_FILE}
+        ( hg up -C tip 2>&1 ) 1>> ${LOG_FILE}
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ b/doc/source/bootcamp/1)_Introduction.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -32,9 +33,40 @@
       "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
       "6. Volume Rendering (IsolatedGalaxy dataset)"
      ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "download_datasets = False\n",
+      "if download_datasets:\n",
+      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
+      "    print \"Got enzo_tiny_cosmology\"\n",
+      "    !tar xf enzo_tiny_cosmology.tar\n",
+      "    \n",
+      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
+      "    print \"Got Enzo_64\"\n",
+      "    !tar xf Enzo_64.tar\n",
+      "    \n",
+      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
+      "    print \"Got IsolatedGalaxy\"\n",
+      "    !tar xf IsolatedGalaxy.tar\n",
+      "    \n",
+      "    print \"All done!\""
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ b/doc/source/bootcamp/2)_Data_Inspection.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:15cdc35ddb8b1b938967237e17534149f734f4e7a61ebd37d74b675f8059da20"
+  "signature": "sha256:9d67e9e4ca5ce92dcd0658025dbfbd28be47b47ca8d4531fdac16cc2c2fa038b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *"
+      "import yt"
      ],
      "language": "python",
      "metadata": {},
@@ -38,7 +38,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ b/doc/source/bootcamp/3)_Simple_Visualization.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:eb5fbf5eb55a9c8997c687f072c8c6030e74bef0048a72b4f74a06893c11b80a"
+  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *"
+      "import yt"
      ],
      "language": "python",
      "metadata": {},
@@ -38,7 +38,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "print \"Redshift =\", ds.current_redshift"
      ],
      "language": "python",
@@ -58,7 +58,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "p = ProjectionPlot(ds, \"y\", \"density\")\n",
+      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
       "p.show()"
      ],
      "language": "python",
@@ -135,7 +135,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "p = ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
+      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
       "p.show()"
      ],
      "language": "python",
@@ -189,8 +189,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
+      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
+      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
       "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
       "s.zoom(10.0)"
      ],
@@ -243,7 +243,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "s = SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
+      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
       "s.annotate_contour(\"temperature\")\n",
       "s.zoom(2.5)"
      ],
@@ -272,4 +272,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:41293a66cd6fd5eae6da2d0343549144dc53d72e83286999faab3cf21d801f51"
+  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -22,7 +22,8 @@
      "collapsed": false,
      "input": [
       "%matplotlib inline\n",
-      "from yt.mods import *\n",
+      "import yt\n",
+      "import numpy as np\n",
       "from matplotlib import pylab\n",
       "from yt.analysis_modules.halo_finding.api import HaloFinder"
      ],
@@ -45,7 +46,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ts = DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
+      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
      ],
      "language": "python",
      "metadata": {},
@@ -87,8 +88,13 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr')"
+      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
+      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
+      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
+      "pylab.xlabel(\"Time (Gyr)\")\n",
+      "pylab.legend()\n",
+      "pylab.ylim(1e-32, 1e-21)\n",
+      "pylab.show()"
      ],
      "language": "python",
      "metadata": {},
@@ -109,13 +115,15 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
+      "from yt.units import Msun\n",
+      "\n",
       "mass = []\n",
       "zs = []\n",
       "for ds in ts:\n",
       "    halos = HaloFinder(ds)\n",
       "    dd = ds.all_data()\n",
       "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0\n",
+      "    total_in_baryons = 0.0*Msun\n",
       "    for halo in halos:\n",
       "        sp = halo.get_sphere()\n",
       "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
@@ -137,7 +145,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.loglog(zs, mass, '-xb')"
+      "pylab.semilogx(zs, mass, '-xb')\n",
+      "pylab.xlabel(\"Redshift\")\n",
+      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
+      "pylab.xlim(max(zs), min(zs))\n",
+      "pylab.ylim(-0.01, .18)"
      ],
      "language": "python",
      "metadata": {},
@@ -155,7 +167,9 @@
       "\n",
       "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
       "\n",
-      "To create a ray, we specify the start and end points."
+      "To create a ray, we specify the start and end points.\n",
+      "\n",
+      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
      ]
     },
     {
@@ -163,7 +177,7 @@
      "collapsed": false,
      "input": [
       "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(ray[\"t\"], ray[\"density\"])"
+      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
      ],
      "language": "python",
      "metadata": {},
@@ -212,10 +226,12 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "v, c = ds.find_max(\"density\")\n",
       "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"], sl[\"index\", \"z\"], sl[\"pdx\"]\n",
+      "print sl[\"index\", \"x\"]\n",
+      "print sl[\"index\", \"z\"]\n",
+      "print sl[\"pdx\"]\n",
       "print sl[\"gas\", \"density\"].shape"
      ],
      "language": "python",
@@ -251,8 +267,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.core.display import Image\n",
+      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
+      "from IPython.display import Image\n",
       "Image(filename = \"temp.png\")"
      ],
      "language": "python",
@@ -275,7 +291,7 @@
      "collapsed": false,
      "input": [
       "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [\"density\"])"
+      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
      ],
      "language": "python",
      "metadata": {},
@@ -310,7 +326,8 @@
      "collapsed": false,
      "input": [
       "pws = sl.to_pw(fields=[\"density\"])\n",
-      "pws.show()"
+      "#pws.show()\n",
+      "print pws.plots.keys()"
      ],
      "language": "python",
      "metadata": {},
@@ -362,4 +379,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

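The ray cell edited above notes that rays carry the special fields t and dts. As a hedged
illustration (assuming the IsolatedGalaxy dataset used throughout these notebooks is available
locally, and using only the ds.ray call already shown in the diff), dts can serve directly as a
path-length weight, e.g. for a weighted mean density along the ray:

import yt
import numpy as np

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

# Same ray as in the notebook: from one corner region of the box to another.
ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])

# dts is the path length through each cell along the ray, so a weighted
# average over the ray is a straightforward sum.
dts = np.array(ray["dts"])
rho = np.array(ray["density"])
mean_rho = (rho * dts).sum() / dts.sum()
print("Path-length-weighted mean density along the ray: %s" % mean_rho)
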
diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ b/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a19d451f3b4dcfeed448caa22c2cac35c46958e0646c19c226b1e467b76d0718"
+  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -22,7 +22,9 @@
      "collapsed": false,
      "input": [
       "%matplotlib inline\n",
-      "from yt.mods import *\n",
+      "import yt\n",
+      "import numpy as np\n",
+      "from yt import derived_field\n",
       "from matplotlib import pylab"
      ],
      "language": "python",
@@ -61,7 +63,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "dd = ds.all_data()\n",
       "print dd.quantities.keys()"
      ],
@@ -120,7 +122,9 @@
       "bv = sp.quantities.bulk_velocity()\n",
       "L = sp.quantities.angular_momentum_vector()\n",
       "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv, L, rho_min, rho_max"
+      "print bv\n",
+      "print L\n",
+      "print rho_min, rho_max"
      ],
      "language": "python",
      "metadata": {},
@@ -143,9 +147,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prof = Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
+      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
       "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")"
+      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
+      "pylab.xlabel('Density $(g/cm^3)$')\n",
+      "pylab.ylabel('Temperature $(K)$')"
      ],
      "language": "python",
      "metadata": {},
@@ -162,7 +168,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')"
+      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
+      "pylab.xlabel('Density $(g/cm^3)$')\n",
+      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
      ],
      "language": "python",
      "metadata": {},
@@ -179,9 +187,30 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prof = Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
+      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
       "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')"
+      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
+      "pylab.xlabel('Density $(g/cm^3)$')\n",
+      "pylab.ylabel('Cell mass $(M_\\odot)$')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
+      "prof.set_unit('cell_mass', 'Msun')\n",
+      "prof.show()"
      ],
      "language": "python",
      "metadata": {},

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ b/doc/source/bootcamp/6)_Volume_Rendering.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2929940fc3977b495aa124dee851f7602d61e073ed65407dd95e7cf597684b35"
+  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,8 +21,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *\n",
-      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
+      "import yt\n",
+      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
      ],
      "language": "python",
      "metadata": {},
@@ -43,7 +43,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tf = ColorTransferFunction((-28, -24))\n",
+      "tf = yt.ColorTransferFunction((-28, -24))\n",
       "tf.add_layers(4, w=0.01)\n",
       "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
       "cam.show()"
@@ -80,7 +80,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tf = ColorTransferFunction((-28, -25))\n",
+      "tf = yt.ColorTransferFunction((-28, -25))\n",
       "tf.add_layers(4, w=0.03)\n",
       "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
       "cam.show(clip_ratio=4.0)"

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -1,18 +1,20 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import yt
 
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
-# that this sphere is very small compared to the size of our final plot,
-# and it has a non-axially aligned L vector.
-sp = ds.sphere("m", (1.0, "kpc"))
+# Create a 15 kpc radius sphere, centered on the center of the sim volume
+sp = ds.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
 
 print "Angular momentum vector: {0}".format(L)
 
-# Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
+# Create an OffAxisSlicePlot of density centered on the object with the L 
+# vector as its normal and a width of 25 kpc on a side
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
 p.save()

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED 
+
 # Using AMRKDTree Homogenized Volumes to examine large datasets
 # at lower resolution.
 
@@ -10,17 +13,17 @@
 import yt
 from yt.utilities.amr_kdtree.api import AMRKDTree
 
-# Load up a data and print out the maximum refinement level
+# Load up a dataset
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
 
 kd = AMRKDTree(ds)
-# Print out the total volume of all the bricks
-print kd.count_volume()
-# Print out the number of cells
-print kd.count_cells()
+
+# Print out specifics of KD Tree
+print "Total volume of all bricks = %i" % kd.count_volume()
+print "Total number of cells = %i" % kd.count_cells()
 
 tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
+cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
                   tf, volume=kd)
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/average_value.py
--- a/doc/source/cookbook/average_value.py
+++ b/doc/source/cookbook/average_value.py
@@ -5,9 +5,10 @@
 field = "temperature"  # The field to average
 weight = "cell_mass"  # The weight for the average
 
-dd = ds.h.all_data()  # This is a region describing the entire box,
-                      # but note it doesn't read anything in yet!
+ad = ds.all_data()  # This is a region describing the entire box,
+                    # but note it doesn't read anything in yet!
+
 # We now use our 'quantities' call to get the average quantity
-average_value = dd.quantities["WeightedAverageQuantity"](field, weight)
+average_value = ad.quantities.weighted_average_quantity(field, weight)
 
-print "Average %s (weighted by %s) is %0.5e" % (field, weight, average_value)
+print "Average %s (weighted by %s) is %0.3e %s" % (field, weight, average_value, average_value.units)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/boolean_data_objects.py
--- a/doc/source/cookbook/boolean_data_objects.py
+++ b/doc/source/cookbook/boolean_data_objects.py
@@ -1,23 +1,32 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import yt
 
 ds = yt.load("Enzo_64/DD0043/data0043")  # load data
-# Make a few data ojbects to start.
+# Make a few data ojbects to start. Two boxes and two spheres.
 re1 = ds.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6])
 re2 = ds.region([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [0.6, 0.6, 0.6])
 sp1 = ds.sphere([0.5, 0.5, 0.5], 0.05)
 sp2 = ds.sphere([0.1, 0.2, 0.3], 0.1)
+
 # The "AND" operator. This will make a region identical to re2.
 bool1 = ds.boolean([re1, "AND", re2])
 xp = bool1["particle_position_x"]
+
 # The "OR" operator. This will make a region identical to re1.
 bool2 = ds.boolean([re1, "OR", re2])
+
 # The "NOT" operator. This will make a region like re1, but with the corner
 # that re2 covers cut out.
 bool3 = ds.boolean([re1, "NOT", re2])
+
 # Disjoint regions can be combined with the "OR" operator.
 bool4 = ds.boolean([sp1, "OR", sp2])
+
 # Find oddly-shaped overlapping regions.
 bool5 = ds.boolean([re2, "AND", sp1])
+
 # Nested logic with parentheses.
 # This is re1 with the oddly-shaped region cut out.
 bool6 = ds.boolean([re1, "NOT", "(", re1, "AND", sp1, ")"])

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,11 +1,13 @@
-import numpy as np
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
 import yt
+import numpy as np
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-dd = ds.all_data()
-mi, ma = dd.quantities["Extrema"]("density")
+ad = ds.all_data()
+mi, ma = ad.quantities.extrema("density")
 
 # Set up transfer function
 tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
@@ -40,4 +42,4 @@
 # Zoom in by a factor of 10 over 5 frames
 for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
     snapshot.write_png('camera_movement_%04i.png' % frame)
-    frame += 1
\ No newline at end of file
+    frame += 1

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/contours_on_slice.py
--- a/doc/source/cookbook/contours_on_slice.py
+++ b/doc/source/cookbook/contours_on_slice.py
@@ -1,13 +1,12 @@
 import yt
 
 # first add density contours on a density slice
-pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # load data
-p = yt.SlicePlot(pf, "x", "density")
+ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  
+p = yt.SlicePlot(ds, "x", "density")
 p.annotate_contour("density")
 p.save()
 
-# then add temperature contours on the same densty slice
-pf = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")  # load data
-p = yt.SlicePlot(pf, "x", "density")
+# then add temperature contours on the same density slice
+p = yt.SlicePlot(ds, "x", "density")
 p.annotate_contour("temperature")
-p.save(str(pf)+'_T_contour')
+p.save(str(ds)+'_T_contour')

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/extract_fixed_resolution_data.py
--- a/doc/source/cookbook/extract_fixed_resolution_data.py
+++ b/doc/source/cookbook/extract_fixed_resolution_data.py
@@ -8,21 +8,26 @@
 level = 2
 dims = ds.domain_dimensions * ds.refine_by**level
 
-# Now, we construct an object that describes the data region and structure we
-# want
-cube = ds.covering_grid(2,  # The level we are willing to extract to; higher
-                            # levels than this will not contribute to the data!
+# We construct an object that describes the data region and structure we want
+# In this case, we want all data up to the maximum "level" of refinement 
+# across the entire simulation volume.  Higher levels than this will not 
+# contribute to our covering grid.
+cube = ds.covering_grid(level,  
                         left_edge=[0.0, 0.0, 0.0],
+                        dims=dims,
                         # And any fields to preload (this is optional!)
-                        dims=dims,
                         fields=["density"])
 
 # Now we open our output file using h5py
-# Note that we open with 'w' which will overwrite existing files!
+# Note that we open with 'w' (write), which will overwrite existing files!
 f = h5py.File("my_data.h5", "w")
 
-# We create a dataset at the root note, calling it density...
+# We create a dataset at the root, calling it "density"
 f.create_dataset("/density", data=cube["density"])
 
 # We close our file
 f.close()
+
+# If we want to then access this datacube in the h5 file, we can now...
+f = h5py.File("my_data.h5", "r")
+print f["density"].value

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import numpy as np
 
 import yt

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -1,22 +1,21 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import yt
 from yt.analysis_modules.cosmological_observation.light_ray.api import LightRay
-from yt.analysis_modules.api import AbsorptionSpectrum
+from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.absorption_spectrum.api import generate_total_fit
 
 # Define and add a field to simulate OVI based on a constant relationship to HI
-def _OVI_NumberDensity(field, data):
-    return data['HI_NumberDensity']
+# Do *NOT* use this for science, because this is not how OVI actually behaves;
+# it is just an example.
 
+@yt.derived_field(name='OVI_number_density', units='cm**-3')
+def _OVI_number_density(field, data):
+    return data['HI_NumberDensity']*2.0
 
-def _convertOVI(data):
-    return 4.9E-4*.2
 
-yt.add_field('my_OVI_NumberDensity',
-             function=_OVI_NumberDensity,
-             convert_function=_convertOVI)
-
-
-# Define species andi associated parameters to add to continuum
+# Define species and associated parameters to add to continuum
 # Parameters used for both adding the transition to the spectrum
 # and for fitting
 # Note that for single species that produce multiple lines
@@ -37,7 +36,7 @@
                  'init_N': 1E14}
 
 OVI_parameters = {'name': 'OVI',
-                  'field': 'my_OVI_NumberDensity',
+                  'field': 'OVI_number_density',
                   'f': [.1325, .06580],
                   'Gamma': [4.148E8, 4.076E8],
                   'wavelength': [1031.9261, 1037.6167],

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ b/doc/source/cookbook/free_free_field.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import numpy as np
 import yt
 # Need to grab the proton mass from the constants database

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/global_phase_plots.py
--- a/doc/source/cookbook/global_phase_plots.py
+++ b/doc/source/cookbook/global_phase_plots.py
@@ -4,10 +4,10 @@
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # This is an object that describes the entire box
-ad = ds.h.all_data()
+ad = ds.all_data()
 
-# We plot the average VelocityMagnitude (mass-weighted) in our object
-# as a function of Density and temperature
+# We plot the average velocity magnitude (mass-weighted) in our object
+# as a function of density and temperature
 plot = yt.PhasePlot(ad, "density", "temperature", "velocity_magnitude")
 
 # save the plot

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/halo_merger_tree.py
--- a/doc/source/cookbook/halo_merger_tree.py
+++ b/doc/source/cookbook/halo_merger_tree.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 # This script demonstrates some of the halo merger tracking infrastructure,
 # for tracking halos across multiple datadumps in a time series.
 # Ultimately, it outputs an HDF5 file with the important quantities for the

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -1,16 +1,20 @@
-"""
-This is a mechanism for plotting circles representing identified particle halos
-on an image.  For more information, see :ref:`halo_finding`.
-"""
-from yt.mods import * # set up our namespace
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-data_pf = load("Enzo_64/RD0006/RedshiftOutput0006")
+import yt
+from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
 
-halo_pf = load('rockstar_halos/halos_0.0.bin')
+# Load the dataset
+ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")
 
-hc - HaloCatalog(halos_pf = halo_pf)
+# Load the halo list from a rockstar output for this dataset
+halos = yt.load('rockstar_halos/halos_0.0.bin')
+
+# Create the halo catalog from this halo list
+hc = HaloCatalog(halos_pf = halos)
 hc.load()
 
-p = ProjectionPlot(pf, "x", "density")
+# Create a projection with the halos overplot on top
+p = yt.ProjectionPlot(ds, "x", "density")
 p.annotate_halos(hc)
 p.save()

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -1,3 +1,6 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 from yt.mods import *
 
 from yt.analysis_modules.halo_profiler.api import *

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,11 +1,14 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import numpy as np
 import yt
 
 # Define the components of the gravitational acceleration vector field by
 # taking the gradient of the gravitational potential
 
-
-def _Grav_Accel_x(field, data):
+@yt.derived_field(name='grav_accel_x', units='cm/s**2', take_log=False)
+def grav_accel_x(field, data):
 
     # We need to set up stencils
 
@@ -19,13 +22,14 @@
     gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')
+                         dtype='float64')*gx.unit_array
     new_field[1:-1, 1:-1, 1:-1] = -gx
 
     return new_field
 
 
-def _Grav_Accel_y(field, data):
+@yt.derived_field(name='grav_accel_y', units='cm/s**2', take_log=False)
+def grav_accel_y(field, data):
 
     # We need to set up stencils
 
@@ -39,13 +43,14 @@
     gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')
+                         dtype='float64')*gx.unit_array
     new_field[1:-1, 1:-1, 1:-1] = -gy
 
     return new_field
 
 
-def _Grav_Accel_z(field, data):
+@yt.derived_field(name='grav_accel_z', units='cm/s**2', take_log=False)
+def grav_accel_z(field, data):
 
     # We need to set up stencils
 
@@ -59,7 +64,7 @@
     gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')
+                         dtype='float64')*gx.unit_array
     new_field[1:-1, 1:-1, 1:-1] = -gz
 
     return new_field
@@ -68,7 +73,8 @@
 # Define the components of the pressure gradient field
 
 
-def _Grad_Pressure_x(field, data):
+@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False)
+def grad_pressure_x(field, data):
 
     # We need to set up stencils
 
@@ -81,13 +87,14 @@
     px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
     px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
     new_field[1:-1, 1:-1, 1:-1] = px
 
     return new_field
 
 
-def _Grad_Pressure_y(field, data):
+@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False)
+def grad_pressure_y(field, data):
 
     # We need to set up stencils
 
@@ -100,13 +107,14 @@
     py = data["pressure"][1:-1, sl_right, 1:-1]/dy
     py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
     new_field[1:-1, 1:-1, 1:-1] = py
 
     return new_field
 
 
-def _Grad_Pressure_z(field, data):
+@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False)
+def grad_pressure_z(field, data):
 
     # We need to set up stencils
 
@@ -119,7 +127,7 @@
     pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
     pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
     new_field[1:-1, 1:-1, 1:-1] = pz
 
     return new_field
@@ -127,8 +135,8 @@
 
 # Define the "degree of hydrostatic equilibrium" field
 
-
-def _HSE(field, data):
+@yt.derived_field(name='HSE', units=None, take_log=False)
+def HSE(field, data):
 
     gx = data["density"]*data["Grav_Accel_x"]
     gy = data["density"]*data["Grav_Accel_y"]
@@ -138,31 +146,10 @@
     hy = data["Grad_Pressure_y"] - gy
     hz = data["Grad_Pressure_z"] - gz
 
-    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))
+    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))*gx.unit_array
 
     return h
 
-# Now add the fields to the database
-
-yt.add_field("Grav_Accel_x", function=_Grav_Accel_x, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
-
-yt.add_field("Grav_Accel_y", function=_Grav_Accel_y, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
-
-yt.add_field("Grav_Accel_z", function=_Grav_Accel_z, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["gravitational_potential"])])
-
-yt.add_field("Grad_Pressure_x", function=_Grad_Pressure_x, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["pressure"])])
-
-yt.add_field("Grad_Pressure_y", function=_Grad_Pressure_y, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["pressure"])])
-
-yt.add_field("Grad_Pressure_z", function=_Grad_Pressure_z, take_log=False,
-             validators=[yt.ValidateSpatial(1, ["pressure"])])
-
-yt.add_field("HSE", function=_HSE, take_log=False)
 
 # Open two files, one at the beginning and the other at a later time when
 # there's a lot of sloshing going on.
@@ -173,8 +160,8 @@
 # Sphere objects centered at the cluster potential minimum with a radius
 # of 200 kpc
 
-sphere_i = dsi.h.sphere(dsi.domain_center, (200, "kpc"))
-sphere_f = dsf.h.sphere(dsf.domain_center, (200, "kpc"))
+sphere_i = dsi.sphere(dsi.domain_center, (200, "kpc"))
+sphere_f = dsf.sphere(dsf.domain_center, (200, "kpc"))
 
 # Average "degree of hydrostatic equilibrium" in these spheres
 
@@ -188,9 +175,9 @@
 # of the two files
 
 slc_i = yt.SlicePlot(dsi, 2, ["density", "HSE"], center=dsi.domain_center,
-                     width=(1.0, "mpc"))
+                     width=(1.0, "Mpc"))
 slc_f = yt.SlicePlot(dsf, 2, ["density", "HSE"], center=dsf.domain_center,
-                     width=(1.0, "mpc"))
+                     width=(1.0, "Mpc"))
 
 slc_i.save("initial")
 slc_f.save("final")

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -1,21 +1,24 @@
-from yt.mods import *
-
 # This shows how to save ImageArray objects, such as those returned from 
 # volume renderings, to pngs with varying backgrounds.
 
+import yt
+import numpy as np
+
 # Lets make a fake "rendering" that has 4 channels and looks like a linear
 # gradient from the bottom to top.
+
 im = np.zeros([64,128,4])
 for i in xrange(im.shape[0]):
     for k in xrange(im.shape[2]):
         im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-im_arr = ImageArray(im)
+im_arr = yt.ImageArray(im)
 
 # in this case you would have gotten im_arr from something like:
 # im_arr = cam.snapshot() 
 
 # To save it with the default settings, we can just use write_png, where it 
 # rescales the image and uses a black background.
+
 im_arr.write_png('standard.png')
  
 # write_png accepts a background keyword argument that defaults to 'black'.
@@ -24,12 +27,8 @@
 # white (1.,1.,1.,1.)
 # None  (0.,0.,0.,0.) <-- Transparent!
 # any rgba list/array: [r,g,b,a], bounded by 0..1
+
 im_arr.write_png('black_bg.png', background='black')
 im_arr.write_png('white_bg.png', background='white')
 im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
 im_arr.write_png('transparent_bg.png', background=None)
-
-
-
-
-

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -18,9 +18,6 @@
 `here <http://yt-project.org/data/>`_, where you will find links to download 
 individual datasets.
 
-If you want to take a look at more complex recipes, or submit your own,
-check out the `yt Hub <http://hub.yt-project.org>`_.
-
 .. note:: To contribute your own recipes, please follow the instructions 
     on how to contribute documentation code: :ref:`writing_documentation`.
 

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,9 +1,13 @@
-from yt.mods import *
-from yt.analysis_modules.api import LightCone
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
+import yt
+from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
 
 # Create a LightCone object extending from z = 0 to z = 0.1
 # with a 600 arcminute field of view and a resolution of
 # 60 arcseconds.
+
 # We have already set up the redshift dumps to be
 # used for this, so we will not use any of the time
 # data dumps.

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/light_cone_with_halo_mask.py
--- a/doc/source/cookbook/light_cone_with_halo_mask.py
+++ b/doc/source/cookbook/light_cone_with_halo_mask.py
@@ -1,7 +1,10 @@
-from yt.mods import *
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-from yt.analysis_modules.api import LightCone
-from yt.analysis_modules.halo_profiler.api import *
+import yt
+
+from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
+from yt.analysis_modules.halo_profiler.api import HaloProfiler
 
 # Instantiate a light cone object as usual.
 lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ b/doc/source/cookbook/make_light_ray.py
@@ -1,13 +1,16 @@
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
 import os
 import sys
-
-from yt.mods import *
-
-from yt.analysis_modules.halo_profiler.api import *
-from yt.analysis_modules.cosmological_observation.light_ray.api import \
+import yt
+from yt.analysis_modules.halo_profiler.api import HaloProfiler
+from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
      LightRay
 
-if not os.path.isdir("LR"): os.mkdir('LR')
+# Create a directory for the light rays
+if not os.path.isdir("LR"): 
+    os.mkdir('LR')
      
 # Create a LightRay object extending from z = 0 to z = 0.1
 # and use only the redshift dumps.

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/multi_plot_3x2_FRB.py
--- a/doc/source/cookbook/multi_plot_3x2_FRB.py
+++ b/doc/source/cookbook/multi_plot_3x2_FRB.py
@@ -1,12 +1,14 @@
-from yt.mods import * # set up our namespace
+import yt
+import numpy as np
+from yt.visualization.api import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
 fn = "Enzo_64/RD0006/RedshiftOutput0006" # parameter file to load
 
-
-pf = load(fn) # load data
-v, c = pf.h.find_max("density")
+# load data and get center value and center location as maximum density location
+ds = yt.load(fn) 
+v, c = ds.find_max("density")
 
 # set up our Fixed Resolution Buffer parameters: a width, resolution, and center
 width = (1.0, 'unitary')
@@ -28,7 +30,7 @@
 # over the columns, which will become axes of slicing.
 plots = []
 for ax in range(3):
-    sli = pf.slice(ax, c[ax])
+    sli = ds.slice(ax, c[ax])
     frb = sli.to_frb(width, res)
     den_axis = axes[ax][0]
     temp_axis = axes[ax][1]
@@ -39,11 +41,16 @@
         ax.xaxis.set_visible(False)
         ax.yaxis.set_visible(False)
 
-    plots.append(den_axis.imshow(frb['density'], norm=LogNorm()))
+    # converting our fixed resolution buffers to NDarray so matplotlib can
+    # render them
+    dens = np.array(frb['density'])
+    temp = np.array(frb['temperature'])
+
+    plots.append(den_axis.imshow(dens, norm=LogNorm()))
     plots[-1].set_clim((5e-32, 1e-29))
     plots[-1].set_cmap("bds_highcontrast")
 
-    plots.append(temp_axis.imshow(frb['temperature'], norm=LogNorm()))
+    plots.append(temp_axis.imshow(temp, norm=LogNorm()))
     plots[-1].set_clim((1e3, 1e8))
     plots[-1].set_cmap("hot")
     
@@ -60,4 +67,4 @@
     cbar.set_label(t)
 
 # And now we're done!  
-fig.savefig("%s_3x2.png" % pf)
+fig.savefig("%s_3x2.png" % ds)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
-from yt.mods import * # set up our namespace
+import yt
+import numpy as np
 from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
@@ -6,7 +7,7 @@
 fn = "GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150" # parameter file to load
 orient = 'horizontal'
 
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 # There's a lot in here:
 #   From this we get a containing figure, a list-of-lists of axes into which we
@@ -17,12 +18,11 @@
 #   bw is the base-width in inches, but 4 is about right for most cases.
 fig, axes, colorbars = get_multi_plot(3, 2, colorbar=orient, bw = 4)
 
-slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
-                 center=pf.domain_center)
-proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
+slc = yt.SlicePlot(ds, 'z', fields=["density","temperature","velocity_magnitude"])
+proj = yt.ProjectionPlot(ds, 'z', "density", weight_field="density")
 
-slc_frb = slc.to_frb((1.0, "mpc"), 512)
-proj_frb = proj.to_frb((1.0, "mpc"), 512)
+slc_frb = slc.data_source.to_frb((1.0, "Mpc"), 512)
+proj_frb = proj.data_source.to_frb((1.0, "Mpc"), 512)
 
 dens_axes = [axes[0][0], axes[1][0]]
 temp_axes = [axes[0][1], axes[1][1]]
@@ -37,12 +37,22 @@
     vax.xaxis.set_visible(False)
     vax.yaxis.set_visible(False)
 
-plots = [dens_axes[0].imshow(slc_frb["density"], origin='lower', norm=LogNorm()),
-         dens_axes[1].imshow(proj_frb["density"], origin='lower', norm=LogNorm()),
-         temp_axes[0].imshow(slc_frb["temperature"], origin='lower'),    
-         temp_axes[1].imshow(proj_frb["temperature"], origin='lower'),
-         vels_axes[0].imshow(slc_frb["velocity_magnitude"], origin='lower', norm=LogNorm()),
-         vels_axes[1].imshow(proj_frb["velocity_magnitude"], origin='lower', norm=LogNorm())]
+# Converting our Fixed Resolution Buffers to numpy arrays so that matplotlib
+# can render them
+
+slc_dens = np.array(slc_frb['density'])
+proj_dens = np.array(proj_frb['density'])
+slc_temp = np.array(slc_frb['temperature'])
+proj_temp = np.array(proj_frb['temperature'])
+slc_vel = np.array(slc_frb['velocity_magnitude'])
+proj_vel = np.array(proj_frb['velocity_magnitude'])
+
+plots = [dens_axes[0].imshow(slc_dens, origin='lower', norm=LogNorm()),
+         dens_axes[1].imshow(proj_dens, origin='lower', norm=LogNorm()),
+         temp_axes[0].imshow(slc_temp, origin='lower'),    
+         temp_axes[1].imshow(proj_temp, origin='lower'),
+         vels_axes[0].imshow(slc_vel, origin='lower', norm=LogNorm()),
+         vels_axes[1].imshow(proj_vel, origin='lower', norm=LogNorm())]
          
 plots[0].set_clim((1.0e-27,1.0e-25))
 plots[0].set_cmap("bds_highcontrast")
@@ -58,12 +68,12 @@
 plots[5].set_cmap("gist_rainbow")
 
 titles=[r'$\mathrm{Density}\ (\mathrm{g\ cm^{-3}})$', 
-        r'$\mathrm{temperature}\ (\mathrm{K})$',
-        r'$\mathrm{VelocityMagnitude}\ (\mathrm{cm\ s^{-1}})$']
+        r'$\mathrm{Temperature}\ (\mathrm{K})$',
+        r'$\mathrm{Velocity Magnitude}\ (\mathrm{cm\ s^{-1}})$']
 
 for p, cax, t in zip(plots[0:6:2], colorbars, titles):
     cbar = fig.colorbar(p, cax=cax, orientation=orient)
     cbar.set_label(t)
 
 # And now we're done! 
-fig.savefig("%s_3x2" % pf)
+fig.savefig("%s_3x2" % ds)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/multi_width_image.py
--- a/doc/source/cookbook/multi_width_image.py
+++ b/doc/source/cookbook/multi_width_image.py
@@ -1,15 +1,16 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a slice plot for the dataset.  With no additional arguments,
 # the width will be the size of the domain and the center will be the
 # center of the simulation box
-slc = SlicePlot(pf,2,'density')
+slc = yt.SlicePlot(ds, 'z', 'density')
 
-# Create a list of a couple of widths and units.
-widths = [(1, 'mpc'),
+# Create a list of a couple of widths and units. 
+# (N.B. Mpc (megaparsec) != mpc (milliparsec)
+widths = [(1, 'Mpc'),
           (15, 'kpc')]
 
 # Loop through the list of widths and units.
@@ -19,12 +20,12 @@
     slc.set_width(width, unit)
 
     # Write out the image with a unique name.
-    slc.save("%s_%010d_%s" % (pf, width, unit))
+    slc.save("%s_%010d_%s" % (ds, width, unit))
 
 zoomFactors = [2,4,5]
 
 # recreate the original slice
-slc = SlicePlot(pf,2,'density')
+slc = yt.SlicePlot(ds, 'z', 'density')
 
 for zoomFactor in zoomFactors:
 
@@ -32,4 +33,4 @@
     slc.zoom(zoomFactor)
 
     # Write out the image with a unique name.
-    slc.save("%s_%i" % (pf, zoomFactor))
+    slc.save("%s_%i" % (ds, zoomFactor))

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/multiplot_2x2.py
--- a/doc/source/cookbook/multiplot_2x2.py
+++ b/doc/source/cookbook/multiplot_2x2.py
@@ -1,9 +1,9 @@
-from yt.mods import *
+import yt
 import matplotlib.pyplot as plt
 from mpl_toolkits.axes_grid1 import AxesGrid
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 fig = plt.figure()
 
@@ -22,11 +22,16 @@
                 cbar_size="3%",
                 cbar_pad="0%")
 
-fields = ['density', 'velocity_x', 'velocity_y', 'VelocityMagnitude']
+fields = ['density', 'velocity_x', 'velocity_y', 'velocity_magnitude']
 
 # Create the plot.  Since SlicePlot accepts a list of fields, we need only
 # do this once.
-p = SlicePlot(pf, 'z', fields)
+p = yt.SlicePlot(ds, 'z', fields)
+
+# Velocity is going to be both positive and negative, so let's make these
+# slices linear
+p.set_log('velocity_x', False)
+p.set_log('velocity_y', False)
 p.zoom(2)
 
 # For each plotted field, force the SlicePlot to redraw itself onto the AxesGrid

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
--- a/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
+++ b/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
@@ -1,9 +1,9 @@
-from yt.mods import *
+import yt
 import matplotlib.pyplot as plt
 from mpl_toolkits.axes_grid1 import AxesGrid
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 fig = plt.figure()
 
@@ -27,7 +27,7 @@
 
 for i, (direction, field) in enumerate(zip(cuts, fields)):
     # Load the data and create a single plot
-    p = SlicePlot(pf, direction, field)
+    p = yt.SlicePlot(ds, direction, field)
     p.zoom(40)
 
     # This forces the ProjectionPlot to redraw itself on the AxesGrid axes.

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/multiplot_2x2_time_series.py
--- a/doc/source/cookbook/multiplot_2x2_time_series.py
+++ b/doc/source/cookbook/multiplot_2x2_time_series.py
@@ -1,4 +1,4 @@
-from yt.mods import *
+import yt
 import matplotlib.pyplot as plt
 from mpl_toolkits.axes_grid1 import AxesGrid
 
@@ -23,8 +23,8 @@
 
 for i, fn in enumerate(fns):
     # Load the data and create a single plot
-    pf = load(fn) # load data
-    p = ProjectionPlot(pf, 'z', 'density', width=(55, 'Mpccm'))
+    ds = yt.load(fn) # load data
+    p = yt.ProjectionPlot(ds, 'z', 'density', width=(55, 'Mpccm'))
 
     # Ensure the colorbar limits match for all plots
     p.set_zlim('density', 1e-4, 1e-2)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -1,7 +1,8 @@
-from yt.mods import *
+import yt
+import numpy as np
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Choose a center for the render.
 c = [0.5, 0.5, 0.5]
@@ -25,10 +26,10 @@
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
 # slighly lower quality image.
-image = off_axis_projection(pf, c, L, W, Npixels, "density", no_ghost=False)
+image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
+yt.write_image(np.log10(image), "%s_offaxis_projection.png" % ds)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/offaxis_projection_colorbar.py
--- a/doc/source/cookbook/offaxis_projection_colorbar.py
+++ b/doc/source/cookbook/offaxis_projection_colorbar.py
@@ -1,8 +1,9 @@
-from yt.mods import * # set up our namespace
+import yt
+import numpy as np
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030" # parameter file to load
 
-pf = load(fn) # load data
+ds = yt.load(fn) # load data
 
 # Now we need a center of our volume to render.  Here we'll just use
 # 0.5,0.5,0.5, because volume renderings are not periodic.
@@ -31,9 +32,9 @@
 # Also note that we set the field which we want to project as "density", but
 # really we could use any arbitrary field like "temperature", "metallicity"
 # or whatever.
-image = off_axis_projection(pf, c, L, W, Npixels, "density", no_ghost=False)
+image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Image is now an NxN array representing the intensities of the various pixels.
 # And now, we call our direct image saver.  We save the log of the result.
-write_projection(image, "offaxis_projection_colorbar.png", 
-                 colorbar_label="Column Density (cm$^{-2}$)")
+yt.write_projection(image, "offaxis_projection_colorbar.png", 
+                    colorbar_label="Column Density (cm$^{-2}$)")

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -1,20 +1,15 @@
-## Opaque Volume Rendering
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-# The new version of yt also features opaque rendering, using grey opacity.
-# For example, this makes blues opaque to red and green.  In this example we
-# will explore how the opacity model you choose changes the appearance of the
-# rendering.
+import yt
+import numpy as np
 
-# Here we start by loading up a dataset, in this case galaxy0030.
-
-from yt.mods import *
-
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # We start by building a transfer function, and initializing a camera.
 
-tf = ColorTransferFunction((-30, -22))
-cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+tf = yt.ColorTransferFunction((-30, -22))
+cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
 
 # Now let's add some isocontours, and take a snapshot.
 
@@ -66,5 +61,3 @@
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  
-
-

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/overplot_grids.py
--- a/doc/source/cookbook/overplot_grids.py
+++ b/doc/source/cookbook/overplot_grids.py
@@ -1,10 +1,10 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("Enzo_64/DD0043/data0043")
+ds = yt.load("Enzo_64/DD0043/data0043")
 
 # Make a density projection.
-p = ProjectionPlot(pf, "y", "density")
+p = yt.ProjectionPlot(ds, "y", "density")
 
 # Modify the projection
 # The argument specifies the region along the line of sight

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/overplot_particles.py
--- a/doc/source/cookbook/overplot_particles.py
+++ b/doc/source/cookbook/overplot_particles.py
@@ -1,10 +1,10 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("Enzo_64/DD0043/data0043")
+ds = yt.load("Enzo_64/DD0043/data0043")
 
 # Make a density projection.
-p = ProjectionPlot(pf, "y", "density")
+p = yt.ProjectionPlot(ds, "y", "density")
 
 # Modify the projection
 # The argument specifies the region along the line of sight

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ b/doc/source/cookbook/profile_with_variance.py
@@ -1,30 +1,34 @@
-from matplotlib import pyplot
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
 
-from yt.mods import *
+import matplotlib.pyplot as plt
+import yt
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a sphere of radius 1000 kpc centered on the max density.
-sphere = pf.sphere("max", (1000, "kpc"))
+# Create a sphere of radius 1 Mpc centered on the max density location.
+sp = ds.sphere("max", (1, "Mpc"))
 
 # Calculate and store the bulk velocity for the sphere.
-bulk_velocity = sphere.quantities['BulkVelocity']()
-sphere.set_field_parameter('bulk_velocity', bulk_velocity)
+bulk_velocity = sp.quantities['BulkVelocity']()
+sp.set_field_parameter('bulk_velocity', bulk_velocity)
 
 # Create a 1D profile object for profiles over radius
 # and add a velocity profile.
-profile = BinnedProfile1D(sphere, 100, "Radiuskpc", 0.1, 1000.)
-profile.add_fields('VelocityMagnitude')
+prof = yt.ProfilePlot(sp, 'radius', 'velocity_magnitude', 
+                      weight_field='cell_mass')
+prof.set_unit('radius', 'kpc')
+prof.set_xlim(0.1, 1000)
 
 # Plot the average velocity magnitude.
-pyplot.loglog(profile['Radiuskpc'], profile['VelocityMagnitude'],
-              label='mean')
+plt.loglog(prof['radius'], prof['velocity_magnitude'],
+              label='Mean')
 # Plot the variance of the velocity magnitude.
-pyplot.loglog(profile['Radiuskpc'], profile['VelocityMagnitude_std'],
-              label='std')
-pyplot.xlabel('r [kpc]')
-pyplot.ylabel('v [cm/s]')
-pyplot.legend()
+plt.loglog(prof['radius'], prof['velocity_magnitude_std'],
+              label='Standard Deviation')
+plt.xlabel('r [kpc]')
+plt.ylabel('v [cm/s]')
+plt.legend()
 
-pyplot.savefig('velocity_profiles.png')
+plt.savefig('velocity_profiles.png')

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/rad_velocity.py
--- a/doc/source/cookbook/rad_velocity.py
+++ b/doc/source/cookbook/rad_velocity.py
@@ -1,32 +1,38 @@
-from yt.mods import *
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
+import yt
 import matplotlib.pyplot as plt
 
-pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Get the first sphere
-
-sphere0 = pf.sphere(pf.domain_center, (500., "kpc"))
+sp0 = ds.sphere(ds.domain_center, (500., "kpc"))
 
 # Compute the bulk velocity from the cells in this sphere
+bulk_vel = sp0.quantities["BulkVelocity"]()
 
-bulk_vel = sphere0.quantities["BulkVelocity"]()
 
 # Get the second sphere
-
-sphere1 = pf.sphere(pf.domain_center, (500., "kpc"))
+sp1 = ds.sphere(ds.domain_center, (500., "kpc"))
 
 # Set the bulk velocity field parameter 
-sphere1.set_field_parameter("bulk_velocity", bulk_vel)
+sp1.set_field_parameter("bulk_velocity", bulk_vel)
 
 # Radial profile without correction
 
-rad_profile0 = BinnedProfile1D(sphere0, 100, "Radiuskpc", 0.0, 500., log_space=False)
-rad_profile0.add_fields("RadialVelocity")
+rp0 = yt.ProfilePlot(sp0, 'radius', 'radial_velocity')
+rp0.set_unit('radius', 'kpc')
+rp0.set_log('radius', False)
 
 # Radial profile with correction for bulk velocity
 
-rad_profile1 = BinnedProfile1D(sphere1, 100, "Radiuskpc", 0.0, 500., log_space=False)
-rad_profile1.add_fields("RadialVelocity")
+rp1 = yt.ProfilePlot(sp1, 'radius', 'radial_velocity')
+rp1.set_unit('radius', 'kpc')
+rp1.set_log('radius', False)
+
+#rp0.save('radial_velocity_profile_uncorrected.png')
+#rp1.save('radial_velocity_profile_corrected.png')
 
 # Make a plot using matplotlib
 
@@ -41,4 +47,4 @@
 ax.set_ylabel(r"$\mathrm{v_r\ (km/s)}$")
 ax.legend(["Without Correction", "With Correction"])
 
-fig.savefig("%s_profiles.png" % pf)
\ No newline at end of file
+fig.savefig("%s_profiles.png" % ds)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/radial_profile_styles.py
--- a/doc/source/cookbook/radial_profile_styles.py
+++ b/doc/source/cookbook/radial_profile_styles.py
@@ -1,16 +1,22 @@
-from yt.mods import *
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
+import yt
 import matplotlib.pyplot as plt
 
-pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Get a sphere object
 
-sphere = pf.sphere(pf.domain_center, (500., "kpc"))
+sp = ds.sphere(ds.domain_center, (500., "kpc"))
 
 # Bin up the data from the sphere into a radial profile
 
-rad_profile = BinnedProfile1D(sphere, 100, "Radiuskpc", 0.0, 500., log_space=False)
-rad_profile.add_fields("density","temperature")
+#rp = BinnedProfile1D(sphere, 100, "Radiuskpc", 0.0, 500., log_space=False)
+#rp.add_fields("density","temperature")
+rp = yt.ProfilePlot(sp, 'radius', ['density', 'temperature'])
+rp.set_unit('radius', 'kpc')
+rp.set_log('radius', False)
 
 # Make plots using matplotlib
 
@@ -18,7 +24,7 @@
 ax = fig.add_subplot(111)
 
 # Plot the density as a log-log plot using the default settings
-dens_plot = ax.loglog(rad_profile["Radiuskpc"], rad_profile["density"])
+dens_plot = ax.loglog(rp["Radiuskpc"], rp["density"])
 
 # Here we set the labels of the plot axes
 
@@ -27,7 +33,7 @@
 
 # Save the default plot
 
-fig.savefig("density_profile_default.png" % pf)
+fig.savefig("density_profile_default.png" % ds)
 
 # The "dens_plot" object is a list of plot objects. In our case we only have one,
 # so we index the list by '0' to get it. 
@@ -51,10 +57,10 @@
 
 ax.lines = []
 
-# Since the rad_profile object also includes the standard deviation in each bin,
+# Since the radial profile object also includes the standard deviation in each bin,
 # we'll use these as errorbars. We have to make a new plot for this:
 
-dens_err_plot = ax.errorbar(rad_profile["Radiuskpc"], rad_profile["density"],
-                            yerr=rad_profile["Density_std"])
+dens_err_plot = ax.errorbar(pr["Radiuskpc"], rp["density"],
+                            yerr=rp["Density_std"])
                                                         
-fig.savefig("density_profile_with_errorbars.png")
\ No newline at end of file
+fig.savefig("density_profile_with_errorbars.png")

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,18 +1,22 @@
-from yt.mods import *
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
+import yt
+import numpy as np
 
 # Load the dataset.
-pf = load("Enzo_64/DD0043/data0043")
+ds = yt.load("Enzo_64/DD0043/data0043")
 
 # Create a data container (like a sphere or region) that
 # represents the entire domain.
-dd = pf.h.all_data()
+ad = ds.all_data()
 
 # Get the minimum and maximum densities.
-mi, ma = dd.quantities["Extrema"]("density")[0]
+mi, ma = ad.quantities.extrema("density")
 
 # Create a transfer function to map field values to colors.
 # We bump up our minimum to cut out some of the background fluid
-tf = ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
+tf = yt.ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
 # Add three Gaussians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the
@@ -37,25 +41,24 @@
 # Create a camera object.
 # This object creates the images and
 # can be moved and rotated.
-cam = pf.h.camera(c, L, W, Npixels, tf)
+cam = ds.camera(c, L, W, Npixels, tf)
 
 # Create a snapshot.
 # The return value of this function could also be accepted, modified (or saved
 # for later manipulation) and then put written out using write_bitmap.
 # clip_ratio applies a maximum to the function, which is set to that value
 # times the .std() of the array.
-im = cam.snapshot("%s_volume_rendered.png" % pf, clip_ratio=8.0)
+im = cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
 
 # Add the domain edges, with an alpha blending of 0.3:
 nim = cam.draw_domain(im, alpha=0.3)
-nim.write_png('%s_vr_domain.png' % pf)
+nim.write_png('%s_vr_domain.png' % ds)
 
 # Add the grids, colored by the grid level with the algae colormap
 nim = cam.draw_grids(im, alpha=0.3, cmap='algae')
-nim.write_png('%s_vr_grids.png' % pf)
+nim.write_png('%s_vr_grids.png' % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
 cam.draw_coordinate_vectors(nim)
-nim.write_png("%s_vr_vectors.png" % pf)
-
+nim.write_png("%s_vr_vectors.png" % ds)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -1,28 +1,31 @@
-from yt.mods import *
+### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
+### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+
+import yt
 import matplotlib.pyplot as plt
-import h5py
+import h5py as h5
 
-pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Get a sphere
 
-sp = pf.sphere(pf.domain_center, (500., "kpc"))
+sp = ds.sphere(ds.domain_center, (500., "kpc"))
 
 # Radial profile from the sphere
 
-rad_profile = BinnedProfile1D(sp, 100, "Radiuskpc", 0.0, 500., log_space=False)
-
-# Adding density and temperature fields to the profile
-
-rad_profile.add_fields(["density","temperature"])
+prof = yt.BinnedProfile1D(sp, 100, "Radiuskpc", 0.0, 500., log_space=False)
+prof = yt.ProfilePlot(sp, 'radius', ['density', 'temperature'], weight_field="cell_mass")
+prof.set_unit('radius', 'kpc')
+prof.set_log('radius', False)
+prof.set_xlim(0, 500)
 
 # Write profiles to ASCII file
 
-rad_profile.write_out("%s_profile.dat" % pf, bin_style="center")
+prof.write_out("%s_profile.dat" % ds, bin_style="center")
 
 # Write profiles to HDF5 file
 
-rad_profile.write_out_h5("%s_profile.h5" % pf, bin_style="center")
+prof.write_out_h5("%s_profile.h5" % ds, bin_style="center")
 
 # Now we will show how using NumPy, h5py, and Matplotlib the data in these
 # files may be plotted.
@@ -42,13 +45,13 @@
 ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
 ax.set_ylabel(r"$\mathrm{\rho\ (g\ cm^{-3})}$")
 ax.set_title("Density vs. Radius")
-fig1.savefig("%s_dens.png" % pf)
+fig1.savefig("%s_dens.png" % ds)
 
 # Plot temperature from HDF5 file
 
 # Get the file handle
 
-f = h5py.File("%s_profile.h5" % pf, "r")
+f = h5py.File("%s_profile.h5" % ds, "r")
 
 # Get the radius and temperature arrays from the file handle
 
@@ -66,4 +69,4 @@
 ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
 ax.set_ylabel(r"$\mathrm{T\ (K)}$")
 ax.set_title("temperature vs. Radius")
-fig2.savefig("%s_temp.png" % pf)
+fig2.savefig("%s_temp.png" % ds)

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/show_hide_axes_colorbar.py
--- a/doc/source/cookbook/show_hide_axes_colorbar.py
+++ b/doc/source/cookbook/show_hide_axes_colorbar.py
@@ -1,8 +1,8 @@
-from yt.mods import *
+import yt
 
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-slc = SlicePlot(pf, "x", "density")
+slc = yt.SlicePlot(ds, "x", "density")
 
 slc.save("default_sliceplot.png")
 

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/simple_contour_in_slice.py
--- a/doc/source/cookbook/simple_contour_in_slice.py
+++ b/doc/source/cookbook/simple_contour_in_slice.py
@@ -1,10 +1,10 @@
-from yt.mods import *
+import yt
 
 # Load the data file.
-pf = load("Sedov_3d/sedov_hdf5_chk_0002")
+ds = yt.load("Sedov_3d/sedov_hdf5_chk_0002")
 
 # Make a traditional slice plot.
-sp = SlicePlot(pf,"x","density")
+sp = yt.SlicePlot(ds, "x", "density")
 
 # Overlay the slice plot with thick red contours of density.
 sp.annotate_contour("density", ncont=3, clim=(1e-2,1e-1), label=True,

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -1,12 +1,12 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a 15 kpc radius sphere, centered on the domain center.  Note that this
 # sphere is very small compared to the size of our final plot, and it has a
 # non-axially aligned L vector.
-sp = pf.sphere("center", (15.0, "kpc"))
+sp = ds.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities["AngularMomentumVector"]()
@@ -14,5 +14,5 @@
 print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = OffAxisProjectionPlot(pf, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisProjectionPlot(ds, L, "density", sp.center, (25, "kpc"))
 p.save()

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/simple_pdf.py
--- a/doc/source/cookbook/simple_pdf.py
+++ b/doc/source/cookbook/simple_pdf.py
@@ -1,14 +1,14 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("GalaxyClusterMerger/fiducial_1to3_b0.273d_hdf5_plt_cnt_0175")
+ds = yt.load("GalaxyClusterMerger/fiducial_1to3_b0.273d_hdf5_plt_cnt_0175")
 
 # Create a data object that represents the whole box.
-ad = pf.h.all_data()
+ad = ds.h.all_data()
 
 # This is identical to the simple phase plot, except we supply 
 # the fractional=True keyword to divide the profile data by the sum. 
-plot = PhasePlot(ad, "density", "temperature", "cell_mass",
+plot = yt.PhasePlot(ad, "density", "temperature", "cell_mass",
                  weight_field=None, fractional=True)
 
 # Set a new title for the colorbar since it is now fractional.

diff -r 5e231e6e1955acdd0926a4ee6aba2819832ff2a5 -r 1d8a407e8c26237182c1a3a54a4e738df41d20da doc/source/cookbook/simple_phase.py
--- a/doc/source/cookbook/simple_phase.py
+++ b/doc/source/cookbook/simple_phase.py
@@ -1,18 +1,21 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 # Create a sphere of radius 100 kpc in the center of the domain.
-my_sphere = pf.sphere("c", (100.0, "kpc"))
+my_sphere = ds.sphere("c", (100.0, "kpc"))
 
 # Create a PhasePlot object.
 # Setting weight to None will calculate a sum.
 # Setting weight to a field will calculate an average
 # weighted by that field.
-plot = PhasePlot(my_sphere, "density", "temperature", "cell_mass",
+plot = yt.PhasePlot(my_sphere, "density", "temperature", "cell_mass",
                  weight_field=None)
 
+# Set the units of mass to be in solar masses (not the default in cgs)
+plot.set_unit('cell_mass', 'Msun')
+
 # Save the image.
 # Optionally, give a string as an argument
 # to name files with a keyword.

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5b7aedc8a816/
Changeset:   5b7aedc8a816
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 19:01:10
Summary:     Reverting change to particle_radius.
Affected #:  1 file

diff -r 1d8a407e8c26237182c1a3a54a4e738df41d20da -r 5b7aedc8a8160cf716f909c18c089c3f93bf89c8 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -329,15 +329,6 @@
         get_radius
 
     def _particle_radius(field, data):
-        dist = data["particle_position"] - data.get_field_parameter("center")
-        dw = data.pf.domain_width
-        offset = dist.copy()
-        offset[:] = 0.0
-        offset += data.pf.periodicity * (dist >  dw/2.0) * -dw/2.0
-        offset += data.pf.periodicity * (dist < -dw/2.0) *  dw/2.0
-        dist += offset
-        dist = np.sqrt((dist * dist).sum(axis=1))
-        return dist
         return get_radius(data, "particle_position_")
     registry.add_field((ptype, "particle_radius"),
               function=_particle_radius,

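The reverted block above computed particle radii with hand-rolled periodic wrapping. A minimal NumPy sketch of that minimum-image idea, with made-up positions and domain width (illustrative only, not the yt field definition itself):

    import numpy as np

    # Hypothetical domain and positions, purely for illustration.
    domain_width = np.array([1.0, 1.0, 1.0])
    periodic = np.array([True, True, True])
    center = np.array([0.05, 0.5, 0.5])
    positions = np.array([[0.95, 0.5, 0.5],   # nearest image lies across the boundary
                          [0.45, 0.5, 0.5]])

    dist = positions - center
    # Minimum-image convention, applied only along periodic axes.
    dist -= periodic * domain_width * np.round(dist / domain_width)
    radius = np.sqrt((dist * dist).sum(axis=1))
    print(radius)   # -> approximately [0.1, 0.4] rather than [0.9, 0.4]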

https://bitbucket.org/yt_analysis/yt/commits/0799076168c0/
Changeset:   0799076168c0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 19:07:48
Summary:     Minor cleanup of functions.
Affected #:  2 files

diff -r 5b7aedc8a8160cf716f909c18c089c3f93bf89c8 -r 0799076168c0fd044dc8a2e836bbd5bddc44da9b yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -106,11 +106,11 @@
 
     def _parse_parameter_file(self):
         if self.parameter_filename.startswith("http"):
-            self.sdf_container = HTTPSDFRead(self.parameter_filename,
-                                             header=self.sdf_header)
+            cls = HTTPSDFRead
         else:
-            self.sdf_container = SDFRead(self.parameter_filename,
-                                         header=self.sdf_header)
+            cls = SDFRead
+        self.sdf_container = cls(self.parameter_filename,
+                                 header=self.sdf_header)
 
         # Reference
         self.parameters = self.sdf_container.parameters
@@ -166,11 +166,10 @@
             if self.idx_filename is not None:
 
                 if 'http' in self.idx_filename:
-                    indexdata = HTTPSDFRead(self.idx_filename,
-                                            header=self.idx_header)
+                    cls = HTTPSDFRead
                 else:
-                    indexdata = SDFRead(self.idx_filename,
-                                        header=self.idx_header)
+                    cls = SDFRead
+                indexdata = cls(self.idx_filename, header=self.idx_header)
                 self._sindex = SDFIndex(self.sdf_container, indexdata,
                                         level=self.idx_level)
             else:

diff -r 5b7aedc8a8160cf716f909c18c089c3f93bf89c8 -r 0799076168c0fd044dc8a2e836bbd5bddc44da9b yt/frontends/sdf/fields.py
--- a/yt/frontends/sdf/fields.py
+++ b/yt/frontends/sdf/fields.py
@@ -47,8 +47,7 @@
         mnf = 'mass'
         for mn in possible_masses:
             if mn in pf.sdf_container.keys():
-                mnf = mn
-                self._mass_field=mn
+                mnf = self._mass_field = mn
                 break
 
         idf = pf._field_map.get("particle_index", 'ident')

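The cleanup above collapses two nearly identical constructor calls into one by selecting the class first. A small self-contained sketch of the same pattern; the stub classes and the open_sdf helper are illustrative stand-ins, not yt API:

    class SDFRead(object):
        def __init__(self, filename, header=None):
            self.filename, self.header = filename, header

    class HTTPSDFRead(SDFRead):
        pass

    def open_sdf(filename, header=None):
        # Pick the reader class once, then construct the instance in one place.
        cls = HTTPSDFRead if filename.startswith("http") else SDFRead
        return cls(filename, header=header)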

https://bitbucket.org/yt_analysis/yt/commits/71ad1a95b098/
Changeset:   71ad1a95b098
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 19:10:02
Summary:     We no longer need to check this.
Affected #:  1 file

diff -r 0799076168c0fd044dc8a2e836bbd5bddc44da9b -r 71ad1a95b0985898383761de49763aad3b0d0697 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -90,12 +90,6 @@
             pos[:,0] = x[ind:ind+npart]
             pos[:,1] = y[ind:ind+npart]
             pos[:,2] = z[ind:ind+npart]
-            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
-               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
-                raise YTDomainOverflow(pos.min(axis=0),
-                                       pos.max(axis=0),
-                                       self.pf.domain_left_edge,
-                                       self.pf.domain_right_edge)
             regions.add_data_file(pos, data_file.file_id)
             morton[ind:ind+npart] = compute_morton(
                 pos[:,0], pos[:,1], pos[:,2],


https://bitbucket.org/yt_analysis/yt/commits/353451102d24/
Changeset:   353451102d24
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 19:13:03
Summary:     Make 'verbose' a C object, not a Python object.
Affected #:  1 file

diff -r 71ad1a95b0985898383761de49763aad3b0d0697 -r 353451102d2436e550479d9800623b831e89c944 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -674,7 +674,7 @@
         cdef np.int64_t moff = octree.get_domain_offset(domain_id + domain_offset)
         cdef np.int64_t i, j, k, n, nneighbors, pind0, offset
         cdef int counter = 0
-        verbose = False
+        cdef int verbose = 0
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
@@ -713,9 +713,9 @@
         counter = 0
         cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
         cdef int inside, skip_early
-        if verbose: print >> sys.stderr, "Will be outputting every", frac
+        if verbose == 1: print >> sys.stderr, "Will be outputting every", frac
         for i in range(doff.shape[0]):
-            if verbose and counter >= frac:
+            if verbose == 1 and counter >= frac:
                 counter = 0
                 print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             counter += 1


https://bitbucket.org/yt_analysis/yt/commits/d0e052af9cec/
Changeset:   d0e052af9cec
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 19:13:44
Summary:     len is a builtin
Affected #:  1 file

diff -r 353451102d2436e550479d9800623b831e89c944 -r d0e052af9cec4b16051e8ccedd6738c85dac1506 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -15,11 +15,11 @@
     'unsigned char': 'B',
 }
 
-def get_type(vtype, len=None):
+def get_type(vtype, tlen=None):
     try:
         t = _types[vtype]
-        if len is not None:
-            t = np.dtype((t, len))
+        if tlen is not None:
+            t = np.dtype((t, tlen))
         else:
             t = np.dtype(t)
     except KeyError:

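For reference, a tiny sketch of why the rename matters: with the old signature the builtin is shadowed inside the function body (illustrative functions only, not the yt code):

    def get_type_old(vals, len=None):
        # 'len' now names the keyword argument, so the builtin is unreachable;
        # calling it when the caller passed nothing raises
        # TypeError: 'NoneType' object is not callable.
        return len(vals)

    def get_type_new(vals, tlen=None):
        # Renaming the argument leaves the builtin len() usable.
        return len(vals) if tlen is None else tlen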

https://bitbucket.org/yt_analysis/yt/commits/f8742e05b8a3/
Changeset:   f8742e05b8a3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-24 21:12:10
Summary:     Switch to logical_and
Affected #:  1 file

diff -r d0e052af9cec4b16051e8ccedd6738c85dac1506 -r f8742e05b8a3d75fe61fc4597e732eddfba8035f yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -58,10 +58,11 @@
 
         # Now get all particles that are within the bbox
         if mask is None:
-            mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
+            mask = np.all(pos >= left, axis=1) 
+            np.logical_and(mask, np.all(pos < right, axis=1), mask)
         else:
-            np.multiply(mask, np.all(pos >= left, axis=1), mask)
-            np.multiply(mask, np.all(pos < right, axis=1), mask)
+            np.logical_and(mask, np.all(pos >= left, axis=1), mask)
+            np.logical_and(mask, np.all(pos < right, axis=1), mask)
         return mask
 
     return myfilter

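A short self-contained sketch of the change above, with synthetic positions rather than SDF data: np.logical_and with an explicit output array updates the existing mask in place, instead of multiplying boolean arrays and rebinding the result.

    import numpy as np

    pos = np.random.random((8, 3))
    left = np.full(3, 0.25)
    right = np.full(3, 0.75)

    mask = np.all(pos >= left, axis=1)
    np.logical_and(mask, np.all(pos < right, axis=1), mask)   # writes into mask

    # Old form: boolean multiplication, allocating a fresh array each time.
    mask_old = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1)
    assert (mask == mask_old).all()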

https://bitbucket.org/yt_analysis/yt/commits/6680b1a8dc41/
Changeset:   6680b1a8dc41
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-02 20:03:09
Summary:     Do not use full midx info to mask here, just loop over indices that count
Affected #:  1 file

diff -r f8742e05b8a3d75fe61fc4597e732eddfba8035f -r 6680b1a8dc41542a56f4421b191ebf3bc4a75f54 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -571,7 +571,9 @@
         # a space.
         if self.valid_indexdata:
             indices = indices[indices < self._max_key]
-            indices = indices[self.indexdata['len'][indices] > 0]
+            #indices = indices[self.indexdata['len'][indices] > 0]
+            # Faster for sparse lookups. Need better heuristic.
+            indices = indices[np.array([(self.indexdata['len'][ind] > 0) for ind in indices])]
 
         #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
         # Here we sort the indices to batch consecutive reads together.

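A hedged sketch of the trade-off being made above. With plain in-memory arrays both forms select the same indices; the win comes when indexdata is backed by remote or on-disk reads, where the per-index lookups touch far less data than pulling the full 'len' column:

    import numpy as np

    lengths = np.random.randint(0, 4, size=100000)   # stand-in for indexdata['len']
    indices = np.array([10, 5000, 99999])            # sparse candidate keys

    dense = indices[lengths[indices] > 0]                           # vectorized form
    sparse = indices[np.array([lengths[i] > 0 for i in indices])]   # per-index form
    assert (dense == sparse).all()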

https://bitbucket.org/yt_analysis/yt/commits/72fd9adbe6d7/
Changeset:   72fd9adbe6d7
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-02 20:04:00
Summary:     sindex -> midx
Affected #:  3 files

diff -r 6680b1a8dc41542a56f4421b191ebf3bc4a75f54 -r 72fd9adbe6d70c1ccba9f41c226e48c198f2e336 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -65,7 +65,7 @@
     _particle_mass_name = None
     _particle_coordinates_name = None
     _particle_velocity_name = None
-    _sindex = None
+    _midx = None
     _skip_cache = True
     _subspace = False
 
@@ -98,7 +98,7 @@
         self._field_map = field_map
         prefix = ''
         if self.idx_filename is not None:
-            prefix += 'sindex_'
+            prefix += 'midx_'
         if filename.startswith("http"):
             prefix += 'http_'
         dataset_type = prefix + 'sdf_particles'
@@ -161,8 +161,8 @@
         self.file_count = 1
 
     @property
-    def sindex(self):
-        if self._sindex is None:
+    def midx(self):
+        if self._midx is None:
             if self.idx_filename is not None:
 
                 if 'http' in self.idx_filename:
@@ -170,11 +170,11 @@
                 else:
                     cls = SDFRead
                 indexdata = cls(self.idx_filename, header=self.idx_header)
-                self._sindex = SDFIndex(self.sdf_container, indexdata,
+                self._midx = SDFIndex(self.sdf_container, indexdata,
                                         level=self.idx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
-        return self._sindex
+        return self._midx
 
     def _set_code_unit_attributes(self):
         self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))

diff -r 6680b1a8dc41542a56f4421b191ebf3bc4a75f54 -r 72fd9adbe6d70c1ccba9f41c226e48c198f2e336 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -166,13 +166,13 @@
 
 
 class IOHandlerSIndexSDF(IOHandlerSDF):
-    _dataset_type = "sindex_sdf_particles"
+    _dataset_type = "midx_sdf_particles"
 
 
     def _read_particle_coords(self, chunks, ptf):
         dle = self.pf.domain_left_edge.in_units("code_length").d
         dre = self.pf.domain_right_edge.in_units("code_length").d
-        for dd in self.pf.sindex.iter_bbox_data(
+        for dd in self.pf.midx.iter_bbox_data(
             dle, dre,
             ['x','y','z']):
             yield "dark_matter", (
@@ -187,7 +187,7 @@
                 if field == "mass": continue
                 required_fields.append(field)
 
-        for dd in self.pf.sindex.iter_bbox_data(
+        for dd in self.pf.midx.iter_bbox_data(
             dle, dre,
             required_fields):
 
@@ -210,7 +210,7 @@
         dle = self.pf.domain_left_edge.in_units("code_length").d
         dre = self.pf.domain_right_edge.in_units("code_length").d
         pcount = 0
-        for dd in self.pf.sindex.iter_bbox_data(
+        for dd in self.pf.midx.iter_bbox_data(
             dle, dre,
             ['x']):
             pcount += dd['x'].size
@@ -219,7 +219,7 @@
         ind = 0
 
         chunk_id = 0
-        for dd in self.pf.sindex.iter_bbox_data(
+        for dd in self.pf.midx.iter_bbox_data(
             dle, dre,
             ['x','y','z']):
             npart = dd['x'].size
@@ -244,13 +244,13 @@
     def _count_particles(self, data_file):
         dle = self.pf.domain_left_edge.in_units("code_length").d
         dre = self.pf.domain_right_edge.in_units("code_length").d
-        pcount_estimate = self.pf.sindex.get_nparticles_bbox(dle, dre)
+        pcount_estimate = self.pf.midx.get_nparticles_bbox(dle, dre)
         if pcount_estimate > 1e9:
             mylog.warning("Filtering %i particles to find total."
                           % pcount_estimate + \
                           " You may want to reconsider your bounding box.")
         pcount = 0
-        for dd in self.pf.sindex.iter_bbox_data(
+        for dd in self.pf.midx.iter_bbox_data(
             dle, dre,
             ['x']):
             pcount += dd['x'].size
@@ -263,5 +263,5 @@
 
 
 class IOHandlerSIndexHTTPSDF(IOHandlerSIndexSDF):
-    _dataset_type = "sindex_http_sdf_particles"
+    _dataset_type = "midx_http_sdf_particles"
 

diff -r 6680b1a8dc41542a56f4421b191ebf3bc4a75f54 -r 72fd9adbe6d70c1ccba9f41c226e48c198f2e336 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -908,7 +908,7 @@
         return self.get_contiguous_chunk(lk, rk, fields)
 
     def get_cell_bbox(self, level, cell_iarr):
-        """Get floating point bounding box for a given sindex cell
+        """Get floating point bounding box for a given midx cell
 
         Returns:
             bbox: array-like, shape (3,2)
@@ -933,7 +933,7 @@
 
         Example:
 
-        for chunk in sindex.iter_padded_bbox_data(
+        for chunk in midx.iter_padded_bbox_data(
             6, np.array([128]*3), 8.0, ['x','y','z','ident']):
 
             print chunk['x'].max()
@@ -1020,7 +1020,7 @@
         Returns:
             data: A list of dictionaries of data.
 
-        chunks = sindex.get_padded_bbox_data(6, np.array([128]*3),
+        chunks = midx.get_padded_bbox_data(6, np.array([128]*3),
                                              8.0, ['x','y','z','ident'])
 
         """


https://bitbucket.org/yt_analysis/yt/commits/af7590b44c64/
Changeset:   af7590b44c64
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-02 20:10:29
Summary:     tlen, not len.
Affected #:  1 file

diff -r 72fd9adbe6d70c1ccba9f41c226e48c198f2e336 -r af7590b44c64c8250aa71f531b165bba0c419fc4 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -43,7 +43,7 @@
         if '[' in vnames[0]:
             num = int(vnames[0].split('[')[-1].strip(']'))
             #num = int(re.sub("\D", "", vnames[0]))
-    ctype = get_type(ctype, len=num)
+    ctype = get_type(ctype, tlen=num)
     return ctype, vnames
 
 def bbox_filter(left, right, domain_width):


https://bitbucket.org/yt_analysis/yt/commits/c976fd72af64/
Changeset:   c976fd72af64
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-02 20:10:48
Summary:     idx -> midx
Affected #:  1 file

diff -r af7590b44c64c8250aa71f531b165bba0c419fc4 -r c976fd72af64aad6078e419af39e6e162daa772f yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -74,9 +74,9 @@
                  n_ref = 64, over_refine_factor = 1,
                  bounding_box = None,
                  sdf_header = None,
-                 idx_filename = None,
-                 idx_header = None,
-                 idx_level = None,
+                 midx_filename = None,
+                 midx_header = None,
+                 midx_level = None,
                  field_map = None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
@@ -90,14 +90,14 @@
         else:
             self.domain_left_edge = self.domain_right_edge = None
         self.sdf_header = sdf_header
-        self.idx_filename = idx_filename
-        self.idx_header = idx_header
-        self.idx_level = idx_level
+        self.midx_filename = midx_filename
+        self.midx_header = midx_header
+        self.midx_level = midx_level
         if field_map is None:
             field_map = {}
         self._field_map = field_map
         prefix = ''
-        if self.idx_filename is not None:
+        if self.midx_filename is not None:
             prefix += 'midx_'
         if filename.startswith("http"):
             prefix += 'http_'
@@ -163,15 +163,15 @@
     @property
     def midx(self):
         if self._midx is None:
-            if self.idx_filename is not None:
+            if self.midx_filename is not None:
 
-                if 'http' in self.idx_filename:
+                if 'http' in self.midx_filename:
                     cls = HTTPSDFRead
                 else:
                     cls = SDFRead
-                indexdata = cls(self.idx_filename, header=self.idx_header)
+                indexdata = cls(self.midx_filename, header=self.midx_header)
                 self._midx = SDFIndex(self.sdf_container, indexdata,
-                                        level=self.idx_level)
+                                        level=self.midx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
         return self._midx


https://bitbucket.org/yt_analysis/yt/commits/6d5f6d3d6f16/
Changeset:   6d5f6d3d6f16
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-02 20:56:38
Summary:     Merging from yt-3.0 tip
Affected #:  58 files

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -57,3 +57,12 @@
 serial the operation ``for pf in ts:`` would also have worked identically.
 
 .. yt_cookbook:: time_series.py
+
+Complex Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe estimates the ratio of gravitational and pressure forces in a galaxy
+cluster simulation.  This shows how to create and work with vector derived 
+fields.
+
+.. yt_cookbook:: hse_field.py

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -36,7 +36,7 @@
 axes.  To focus on what's happening in the x-y plane, we make an additional
 Temperature slice for the bottom-right subpanel.
 
-.. yt-cookbook:: multiplot_2x2_coordaxes_slice.py
+.. yt_cookbook:: multiplot_2x2_coordaxes_slice.py
 
 Multi-Plot Slice and Projections
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/fits_xray_images.rst
--- a/doc/source/cookbook/fits_xray_images.rst
+++ b/doc/source/cookbook/fits_xray_images.rst
@@ -1,6 +1,6 @@
 .. _xray_fits:
 
 FITS X-ray Images in yt
-----------------------
+-----------------------
 
-.. notebook:: fits_xray_images.ipynb
\ No newline at end of file
+.. notebook:: fits_xray_images.ipynb

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -7,8 +7,10 @@
 # Define the components of the gravitational acceleration vector field by
 # taking the gradient of the gravitational potential
 
-@yt.derived_field(name='grav_accel_x', units='cm/s**2', take_log=False)
-def grav_accel_x(field, data):
+@yt.derived_field(name='gravitational_acceleration_x',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_x(field, data):
 
     # We need to set up stencils
 
@@ -16,20 +18,22 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'][0]
 
     gx = data["gravitational_potential"][sl_right, 1:-1, 1:-1]/dx
     gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gx.uq
     new_field[1:-1, 1:-1, 1:-1] = -gx
 
     return new_field
 
 
-@yt.derived_field(name='grav_accel_y', units='cm/s**2', take_log=False)
-def grav_accel_y(field, data):
+@yt.derived_field(name='gravitational_acceleration_y',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_y(field, data):
 
     # We need to set up stencils
 
@@ -37,20 +41,23 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     gy = data["gravitational_potential"][1:-1, sl_right, 1:-1]/dy
     gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gy.uq
+
     new_field[1:-1, 1:-1, 1:-1] = -gy
 
     return new_field
 
 
-@yt.derived_field(name='grav_accel_z', units='cm/s**2', take_log=False)
-def grav_accel_z(field, data):
+@yt.derived_field(name='gravitational_acceleration_z',
+                  units='cm/s**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
+def gravitational_acceleration_z(field, data):
 
     # We need to set up stencils
 
@@ -58,13 +65,13 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     gz = data["gravitational_potential"][1:-1, 1:-1, sl_right]/dz
     gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
 
     new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.unit_array
+                         dtype='float64')*gz.uq
     new_field[1:-1, 1:-1, 1:-1] = -gz
 
     return new_field
@@ -73,7 +80,8 @@
 # Define the components of the pressure gradient field
 
 
-@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_x(field, data):
 
     # We need to set up stencils
@@ -82,18 +90,19 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dx = div_fac * data['dx'].flat[0]
+    dx = div_fac * data['dx'].flatten()[0]
 
     px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
     px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.uq
     new_field[1:-1, 1:-1, 1:-1] = px
 
     return new_field
 
 
-@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_y(field, data):
 
     # We need to set up stencils
@@ -102,18 +111,19 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dy = div_fac * data['dy'].flat[0]
+    dy = div_fac * data['dy'].flatten()[0]
 
     py = data["pressure"][1:-1, sl_right, 1:-1]/dy
     py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*py.uq
     new_field[1:-1, 1:-1, 1:-1] = py
 
     return new_field
 
 
-@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False)
+@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False,
+                  validators=[yt.ValidateSpatial(1,["pressure"])])
 def grad_pressure_z(field, data):
 
     # We need to set up stencils
@@ -122,12 +132,12 @@
     sl_right = slice(2, None, None)
     div_fac = 2.0
 
-    dz = div_fac * data['dz'].flat[0]
+    dz = div_fac * data['dz'].flatten()[0]
 
     pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
     pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
 
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.unit_array
+    new_field = np.zeros(data["pressure"].shape, dtype='float64')*pz.uq
     new_field[1:-1, 1:-1, 1:-1] = pz
 
     return new_field
@@ -135,49 +145,29 @@
 
 # Define the "degree of hydrostatic equilibrium" field
 
-@yt.derived_field(name='HSE', units=None, take_log=False)
+@yt.derived_field(name='HSE', units=None, take_log=False,
+                  display_name='Hydrostatic Equilibrium')
 def HSE(field, data):
 
-    gx = data["density"]*data["Grav_Accel_x"]
-    gy = data["density"]*data["Grav_Accel_y"]
-    gz = data["density"]*data["Grav_Accel_z"]
+    gx = data["density"]*data["gravitational_acceleration_x"]
+    gy = data["density"]*data["gravitational_acceleration_y"]
+    gz = data["density"]*data["gravitational_acceleration_z"]
 
-    hx = data["Grad_Pressure_x"] - gx
-    hy = data["Grad_Pressure_y"] - gy
-    hz = data["Grad_Pressure_z"] - gz
+    hx = data["grad_pressure_x"] - gx
+    hy = data["grad_pressure_y"] - gy
+    hz = data["grad_pressure_z"] - gz
 
-    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))*gx.unit_array
+    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))
 
     return h
 
 
-# Open two files, one at the beginning and the other at a later time when
-# there's a lot of sloshing going on.
+# Open a dataset from when there's a lot of sloshing going on.
 
-dsi = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0000")
-dsf = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
+ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
-# Sphere objects centered at the cluster potential minimum with a radius
-# of 200 kpc
 
-sphere_i = dsi.sphere(dsi.domain_center, (200, "kpc"))
-sphere_f = dsf.sphere(dsf.domain_center, (200, "kpc"))
+# Take a slice through the center of the domain
+slc = yt.SlicePlot(ds, 2, ["density", "HSE"], width=(1, 'Mpc'))
 
-# Average "degree of hydrostatic equilibrium" in these spheres
-
-hse_i = sphere_i.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-hse_f = sphere_f.quantities["WeightedAverageQuantity"]("HSE", "cell_mass")
-
-print "Degree of hydrostatic equilibrium initially: ", hse_i
-print "Degree of hydrostatic equilibrium later: ", hse_f
-
-# Just for good measure, take slices through the center of the domains
-# of the two files
-
-slc_i = yt.SlicePlot(dsi, 2, ["density", "HSE"], center=dsi.domain_center,
-                     width=(1.0, "Mpc"))
-slc_f = yt.SlicePlot(dsf, 2, ["density", "HSE"], center=dsf.domain_center,
-                     width=(1.0, "Mpc"))
-
-slc_i.save("initial")
-slc_f.save("final")
+slc.save("hse")

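The hse_field.py recipe above builds its gradients with slice stencils. A minimal stand-alone sketch of that centered difference on a synthetic array (the one-cell border stays zero, which is why the derived fields request a ghost zone via yt.ValidateSpatial(1, ...)):

    import numpy as np

    phi = np.random.random((8, 8, 8))   # stand-in for the potential on a grid patch
    dx = 0.1

    sl_left = slice(None, -2, None)
    sl_right = slice(2, None, None)
    div_fac = 2.0

    # (phi[i+1] - phi[i-1]) / (2 * dx) on interior cells only.
    gx = (phi[sl_right, 1:-1, 1:-1] - phi[sl_left, 1:-1, 1:-1]) / (div_fac * dx)

    grad_x = np.zeros_like(phi)
    grad_x[1:-1, 1:-1, 1:-1] = gx
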
diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/profile_with_variance.py
--- a/doc/source/cookbook/profile_with_variance.py
+++ b/doc/source/cookbook/profile_with_variance.py
@@ -16,17 +16,15 @@
 
 # Create a 1D profile object for profiles over radius
 # and add a velocity profile.
-prof = yt.ProfilePlot(sp, 'radius', 'velocity_magnitude', 
-                      weight_field='cell_mass')
-prof.set_unit('radius', 'kpc')
-prof.set_xlim(0.1, 1000)
+prof = yt.create_profile(sp, 'radius', 'velocity_magnitude',
+                         units = {'radius': 'kpc'},
+                         extrema = {'radius': ((0.1, 'kpc'), (1000.0, 'kpc'))},
+                         weight_field='cell_mass')
 
 # Plot the average velocity magnitude.
-plt.loglog(prof['radius'], prof['velocity_magnitude'],
-              label='Mean')
+plt.loglog(prof.x, prof['velocity_magnitude'], label='Mean')
 # Plot the variance of the velocity magnitude.
-plt.loglog(prof['radius'], prof['velocity_magnitude_std'],
-              label='Standard Deviation')
+plt.loglog(prof.x, prof['velocity_magnitude_std'], label='Standard Deviation')
 plt.xlabel('r [kpc]')
 plt.ylabel('v [cm/s]')
 plt.legend()

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/rad_velocity.py
--- a/doc/source/cookbook/rad_velocity.py
+++ b/doc/source/cookbook/rad_velocity.py
@@ -21,18 +21,15 @@
 
 # Radial profile without correction
 
-rp0 = yt.ProfilePlot(sp0, 'radius', 'radial_velocity')
-rp0.set_unit('radius', 'kpc')
-rp0.set_log('radius', False)
+rp0 = yt.create_profile(sp0, 'radius', 'radial_velocity',
+        units = {'radius': 'kpc'},
+        logs = {'radius': False})
 
 # Radial profile with correction for bulk velocity
 
-rp1 = yt.ProfilePlot(sp1, 'radius', 'radial_velocity')
-rp1.set_unit('radius', 'kpc')
-rp1.set_log('radius', False)
-
-#rp0.save('radial_velocity_profile_uncorrected.png')
-#rp1.save('radial_velocity_profile_corrected.png')
+rp1 = yt.create_profile(sp1, 'radius', 'radial_velocity',
+        units = {'radius': 'kpc'},
+        logs = {'radius': False})
 
 # Make a plot using matplotlib
 
@@ -40,8 +37,8 @@
 ax = fig.add_subplot(111)
 
 # Here we scale the velocities by 1.0e5 to get into km/s
-ax.plot(rad_profile0["Radiuskpc"], rad_profile0["RadialVelocity"]/1.0e5,
-		rad_profile1["Radiuskpc"], rad_profile1["RadialVelocity"]/1.0e5)
+ax.plot(rp0.x, rp0["radial_velocity"].in_units("km/s"),
+		rp1.x, rp1["radial_velocity"].in_units("km/s"))
 
 ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
 ax.set_ylabel(r"$\mathrm{v_r\ (km/s)}$")

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/radial_profile_styles.py
--- a/doc/source/cookbook/radial_profile_styles.py
+++ b/doc/source/cookbook/radial_profile_styles.py
@@ -12,11 +12,9 @@
 
 # Bin up the data from the sphere into a radial profile
 
-#rp = BinnedProfile1D(sphere, 100, "Radiuskpc", 0.0, 500., log_space=False)
-#rp.add_fields("density","temperature")
-rp = yt.ProfilePlot(sp, 'radius', ['density', 'temperature'])
-rp.set_unit('radius', 'kpc')
-rp.set_log('radius', False)
+rp = yt.create_profile(sp, 'radius', ['density', 'temperature'],
+                       units = {'radius': 'kpc'},
+                       logs = {'radius': False})
 
 # Make plots using matplotlib
 
@@ -24,7 +22,7 @@
 ax = fig.add_subplot(111)
 
 # Plot the density as a log-log plot using the default settings
-dens_plot = ax.loglog(rp["Radiuskpc"], rp["density"])
+dens_plot = ax.loglog(rp.x, rp["density"])
 
 # Here we set the labels of the plot axes
 
@@ -52,15 +50,3 @@
 dens_plot[0].set_markersize(10)
 
 fig.savefig("density_profile_thick_with_xs.png")
-
-# Now get rid of the line on the axes plot
-
-ax.lines = []
-
-# Since the radial profile object also includes the standard deviation in each bin,
-# we'll use these as errorbars. We have to make a new plot for this:
-
-dens_err_plot = ax.errorbar(pr["Radiuskpc"], rp["density"],
-                            yerr=rp["Density_std"])
-                                                        
-fig.savefig("density_profile_with_errorbars.png")

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ /dev/null
@@ -1,72 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-import matplotlib.pyplot as plt
-import h5py as h5
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-# Get a sphere
-
-sp = ds.sphere(ds.domain_center, (500., "kpc"))
-
-# Radial profile from the sphere
-
-prof = yt.BinnedProfile1D(sp, 100, "Radiuskpc", 0.0, 500., log_space=False)
-prof = yt.ProfilePlot(sp, 'radius', ['density', 'temperature'], weight_field="cell_mass")
-prof.set_unit('radius', 'kpc')
-prof.set_log('radius', False)
-prof.set_xlim(0, 500)
-
-# Write profiles to ASCII file
-
-prof.write_out("%s_profile.dat" % ds, bin_style="center")
-
-# Write profiles to HDF5 file
-
-prof.write_out_h5("%s_profile.h5" % ds, bin_style="center")
-
-# Now we will show how using NumPy, h5py, and Matplotlib the data in these
-# files may be plotted.
-
-# Plot density from ASCII file
-
-# Open the text file using NumPy's "loadtxt" method. In order to get the 
-# separate columns into separate NumPy arrays, it is essential to set unpack=True.
-
-r, dens, std_dens, temp, std_temp = \
-	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
-
-fig1 = plt.figure()
-
-ax = fig1.add_subplot(111)
-ax.plot(r, dens)
-ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
-ax.set_ylabel(r"$\mathrm{\rho\ (g\ cm^{-3})}$")
-ax.set_title("Density vs. Radius")
-fig1.savefig("%s_dens.png" % ds)
-
-# Plot temperature from HDF5 file
-
-# Get the file handle
-
-f = h5py.File("%s_profile.h5" % ds, "r")
-
-# Get the radius and temperature arrays from the file handle
-
-r = f["/Radiuskpc-1d"].attrs["x-axis-Radiuskpc"][:]
-temp = f["/Radiuskpc-1d/temperature"][:]
-
-# Close the file handle
-
-f.close()
-
-fig2 = plt.figure()
-
-ax = fig2.add_subplot(111)
-ax.plot(r, temp)
-ax.set_xlabel(r"$\mathrm{r\ (kpc)}$")
-ax.set_ylabel(r"$\mathrm{T\ (K)}$")
-ax.set_title("temperature vs. Radius")
-fig2.savefig("%s_temp.png" % ds)

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/simple_profile.py
--- a/doc/source/cookbook/simple_profile.py
+++ b/doc/source/cookbook/simple_profile.py
@@ -12,6 +12,7 @@
 sphere = ds.sphere("c", (100., "kpc"))
 plot = yt.ProfilePlot(sphere, "density", ["temperature", "velocity_x"],
                       weight_field="cell_mass")
+plot.set_log("velocity_x", False)
 
 # Save the image.
 # Optionally, give a string as an argument

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -7,5 +7,5 @@
 ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
 
 # Create density slices of several fields along the x axis
-yt.SlicePlot(ds, 'x', ['density','temperature','pressure','vorticity_squared'], 
+yt.SlicePlot(ds, 'x', ['density','temperature','pressure'], 
              width = (800.0, 'kpc')).save()

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -51,7 +51,7 @@
 
 If you are developing new functionality, it is sometimes more convenient to use
 the Nose command line interface, ``nosetests``. You can run the unit tests
-using `no`qsetets` by navigating to the base directory of the yt mercurial
+using ``nose`` by navigating to the base directory of the yt mercurial
 repository and invoking ``nosetests``:
 
 .. code-block:: bash

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:cd145d8cadbf1a0065d0f9fb4ea107c215fcd53245b3bb7d29303af46f063552"
+  "signature": "sha256:5fc7783d6c99659c353a35348bb21210fcb7572d5357f32dd61755d4a7f8fe6c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -443,7 +443,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits.gz\")\n",
+      "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
       "f.info()"
      ],
      "language": "python",
@@ -462,7 +462,7 @@
      "collapsed": false,
      "input": [
       "data = {}\n",
-      "for hdu in f[1:]:\n",
+      "for hdu in f:\n",
       "    name = hdu.name.lower()\n",
       "    data[name] = (hdu.data,\"km/s\")\n",
       "print data.keys()"

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -711,11 +711,13 @@
 ``spectral_factor``
 ~~~~~~~~~~~~~~~~~~~
 
-Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
-scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
-or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
- axis, set ``spectral_factor`` equal to a constant which gives the desired scaling,
- or set it to ``"auto"`` to make the width the same as the largest axis in the sky plane.
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt
+sets the pixel scale as the ``code_length``, certain visualizations (such as
+volume renderings) may look extended or distended in ways that are
+undesirable. To adjust the width in ``code_length`` of the spectral axis, set
+``spectral_factor`` equal to a constant which gives the desired scaling, or set
+it to ``"auto"`` to make the width the same as the largest axis in the sky
+plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -792,11 +794,11 @@
 PyNE Data
 ---------
 
-.. _loading-numpy-array:
-
 Generic Array Data
 ------------------
 
+See :ref:`loading-numpy-array` for more detail.
+
 Even if your data is not strictly related to fields commonly used in
 astrophysical codes or your code is not supported yet, you can still feed it to
 ``yt`` to use its advanced visualization and analysis facilities. The only
@@ -848,6 +850,8 @@
 Generic AMR Data
 ----------------
 
+See :ref:`loading-numpy-array` for more detail.
+
 It is possible to create native ``yt`` parameter file from Python's dictionary
 that describes set of rectangular patches of data of possibly varying
 resolution. 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -11,10 +11,31 @@
    :toctree: generated/
 
    ~yt.visualization.plot_window.SlicePlot
+   ~yt.visualization.plot_window.AxisAlignedSlicePlot
    ~yt.visualization.plot_window.OffAxisSlicePlot
    ~yt.visualization.plot_window.ProjectionPlot
    ~yt.visualization.plot_window.OffAxisProjectionPlot
 
+ProfilePlot and PhasePlot
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.visualization.profile_plotter.ProfilePlot
+   ~yt.visualization.profile_plotter.PhasePlot
+
+Fixed Resolution Pixelization
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.visualization.fixed_resolution.FixedResolutionBuffer
+   ~yt.visualization.fixed_resolution.CylindricalFixedResolutionBuffer
+   ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
+   ~yt.visualization.fixed_resolution.OffAxisProjectionFixedResolutionBuffer
+
 Data Sources
 ------------
 
@@ -91,6 +112,33 @@
    ~yt.data_objects.time_series.TimeSeriesQuantitiesContainer
    ~yt.data_objects.time_series.AnalysisTaskProxy
 
+Geometry Handlers
+-----------------
+
+These objects generate an "index" into multiresolution data.
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.geometry.geometry_handler.Index
+   ~yt.geometry.grid_geometry_handler.GridIndex
+   ~yt.geometry.oct_geometry_handler.OctreeIndex
+   ~yt.geometry.particle_geometry_handler.ParticleIndex
+   ~yt.geometry.unstructured_mesh_handler.UnstructuredIndex
+
+Units
+-----
+
+These classes enable yt's symbolic unit handling system.
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.units.unit_object.Unit
+   ~yt.units.unit_registry.UnitRegistry
+   ~yt.units.yt_array.YTArray
+   ~yt.units.yt_array.YTQuantity
+
 Frontends
 ---------
 
@@ -145,6 +193,22 @@
    ~yt.frontends.boxlib.io.IOHandlerNyx
    ~yt.frontends.boxlib.io.IOHandlerOrion
 
+Chombo
+^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.chombo.data_structures.ChomboGrid
+   ~yt.frontends.chombo.data_structures.ChomboHierarchy
+   ~yt.frontends.chombo.data_structures.ChomboDataset
+   ~yt.frontends.chombo.data_structures.Orion2Hierarchy
+   ~yt.frontends.chombo.data_structures.Orion2Dataset
+   ~yt.frontends.chombo.io.IOHandlerChomboHDF5
+   ~yt.frontends.chombo.io.IOHandlerChombo2DHDF5
+   ~yt.frontends.chombo.io.IOHandlerChombo1DHDF5
+   ~yt.frontends.chombo.io.IOHandlerOrion2HDF5
+
 Enzo
 ^^^^
 
@@ -194,6 +258,17 @@
    ~yt.frontends.flash.fields.FLASHFieldInfo
    ~yt.frontends.flash.io.IOHandlerFLASH
 
+GDF
+^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.gdf.data_structures.GDFGrid
+   ~yt.frontends.gdf.data_structures.GDFHierarchy
+   ~yt.frontends.gdf.data_structures.GDFDataset
+   ~yt.frontends.gdf.io.IOHandlerGDFHDF5
+
 Halo Catalogs
 ^^^^^^^^^^^^^
 
@@ -281,6 +356,19 @@
    ~yt.frontends.stream.io.IOHandlerStreamOctree
    ~yt.frontends.stream.io.StreamParticleIOHandler
 
+Loading Data
+------------
+
+.. autosummary::
+   :toctree: generated/
+
+   yt.convenience.load
+   yt.convenience.simulation
+   yt.frontends.stream.data_structures.load_uniform_grid
+   yt.frontends.stream.data_structures.load_amr_grids
+   yt.frontends.stream.data_structures.load_particles
+   yt.frontends.stream.data_structures.load_hexahedral_mesh
+
 Derived Datatypes
 -----------------
 
@@ -288,16 +376,19 @@
 ^^^^^^^^^^^^^^^^^^^^^^^
 
 These types are used to sum data up and either return that sum or return an
-average.  Typically they are more easily used through the
-`yt.visualization.plot_collection` interface.
+average.  Typically they are more easily used through the ``ProfilePlot`` and
+``PhasePlot`` interfaces. We also provide the ``create_profile`` function
+to create these objects in a uniform manner.
 
 
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.profiles.BinnedProfile1D
-   ~yt.data_objects.profiles.BinnedProfile2D
-   ~yt.data_objects.profiles.BinnedProfile3D
+   ~yt.data_objects.profiles.ProfileND
+   ~yt.data_objects.profiles.Profile1D
+   ~yt.data_objects.profiles.Profile2D
+   ~yt.data_objects.profiles.Profile3D
+   ~yt.data_objects.profiles.create_profile
 
 Halo Finding and Particle Functions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -415,8 +506,6 @@
    :toctree: generated/
 
    ~yt.data_objects.image_array.ImageArray
-   ~yt.data_objects.image_array.ImageArray.write_png
-   ~yt.data_objects.image_array.ImageArray.write_hdf5
 
 Extension Types
 ---------------

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,6 +120,8 @@
 .. python-script::
    
    from yt.mods import *
+   from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+
    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/visualizing/_images/mapserver.png
Binary file doc/source/visualizing/_images/mapserver.png has changed

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -478,8 +478,7 @@
     :ref:`cookbook-amrkdtree_to_uniformgrid`.
 
 System Requirements
--------------------
-.. versionadded:: 3.0
++++++++++++++++++++
 
 Nvidia graphics card - The memory limit of the graphics card sets the limit
                        on the size of the data source.
@@ -490,7 +489,7 @@
 the common/inc samples shipped with CUDA. The following shows an example
 in bash with CUDA 5.5 installed in /usr/local :
 
-export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
+    export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
 
 PyCUDA must also be installed to use Theia. 
 
@@ -503,13 +502,13 @@
 
 
 Tutorial
---------
-.. versionadded:: 3.0
+++++++++
 
 Currently rendering only works on uniform grids. Here is an example
 on a 1024 cube of float32 scalars.
 
 .. code-block:: python
+
    from yt.visualization.volume_rendering.theia.scene import TheiaScene
    from yt.visualization.volume_rendering.algorithms.front_to_back import FrontToBackRaycaster
    import numpy as np
@@ -528,28 +527,27 @@
 .. _the-theiascene-interface:
 
 The TheiaScene Interface
---------------------
-.. versionadded:: 3.0
+++++++++++++++++++++++++
 
 A TheiaScene object has been created to provide a high level entry point for
-controlling the raycaster's view onto the data. The class  
-:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates
- a Camera object and a TheiaSource that intern encapsulates
-a volume. The :class:`~yt.visualization.volume_rendering.theia.Camera`
-provides controls for rotating, translating, and zooming into the volume.
-Using the :class:`~yt.visualization.volume_rendering.theia.TheiaSource`
-automatically transfers the volume to the graphic's card texture memory.
+controlling the raycaster's view onto the data. The class
+:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates a
+Camera object and a TheiaSource that in turn encapsulates a volume. The
+:class:`~yt.visualization.volume_rendering.theia.Camera` provides controls for
+rotating, translating, and zooming into the volume.  Using the
+:class:`~yt.visualization.volume_rendering.theia.TheiaSource` automatically
+transfers the volume to the graphics card's texture memory.
 
 Example Cookbooks
----------------
++++++++++++++++++
 
 OpenGL Example for interactive volume rendering:
 :ref:`cookbook-opengl_volume_rendering`.
 
-OpenGL Stereoscopic Example :
 .. warning::  Frame rate will suffer significantly from stereoscopic rendering.
               ~2x slower since the volume must be rendered twice.
-:ref:`cookbook-opengl_stereo_volume_rendering`.
+
+OpenGL Stereoscopic Example: :ref:`cookbook-opengl_stereo_volume_rendering`.
 
 Pseudo-Realtime video rendering with ffmpeg :
 :ref:`cookbook-ffmpeg_volume_rendering`.

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -59,6 +59,8 @@
     output_dir : str
         The top level directory into which analysis output will be written.
         Default: "."
+    finder_kwargs : dict
+        Arguments to pass to the halo finder if finder_method is given.
 
     Examples
     --------
@@ -98,6 +100,7 @@
     
     def __init__(self, halos_pf=None, data_pf=None, 
                  data_source=None, finder_method=None, 
+                 finder_kwargs=None,
                  output_dir="halo_catalogs/catalog"):
         ParallelAnalysisInterface.__init__(self)
         self.halos_pf = halos_pf
@@ -122,8 +125,11 @@
         self.data_source = data_source
 
         if finder_method is not None:
-            finder_method = finding_method_registry.find(finder_method)
+            finder_method = finding_method_registry.find(finder_method,
+                        **finder_kwargs)
         self.finder_method = finder_method            
+        if finder_kwargs is None:
+            finder_kwargs = {}
         
         # all of the analysis actions to be performed: callbacks, filters, and quantities
         self.actions = []
@@ -203,6 +209,7 @@
             field_type = kwargs.pop("field_type")
         else:
             field_type = None
+        prepend = kwargs.pop("prepend",False)
         if field_type is None:
             quantity = quantity_registry.find(key, *args, **kwargs)
         elif (field_type, key) in self.halos_pf.field_info:
@@ -210,7 +217,10 @@
         else:
             raise RuntimeError("HaloCatalog quantity must be a registered function or a field of a known type.")
         self.quantities.append(key)
-        self.actions.append(("quantity", (key, quantity)))
+        if prepend:
+            self.actions.insert(0, ("quantity", (key, quantity)))
+        else:
+            self.actions.append(("quantity", (key, quantity)))
 
     def add_filter(self, halo_filter, *args, **kwargs):
         r"""
@@ -430,10 +440,10 @@
         out_file.close()
 
     def add_default_quantities(self, field_type='halos'):
-        self.add_quantity("particle_identifier", field_type=field_type)
-        self.add_quantity("particle_mass", field_type=field_type)
-        self.add_quantity("particle_position_x", field_type=field_type)
-        self.add_quantity("particle_position_y", field_type=field_type)
-        self.add_quantity("particle_position_z", field_type=field_type)
-        self.add_quantity("virial_radius", field_type=field_type)
+        self.add_quantity("particle_identifier", field_type=field_type,prepend=True)
+        self.add_quantity("particle_mass", field_type=field_type,prepend=True)
+        self.add_quantity("particle_position_x", field_type=field_type,prepend=True)
+        self.add_quantity("particle_position_y", field_type=field_type,prepend=True)
+        self.add_quantity("particle_position_z", field_type=field_type,prepend=True)
+        self.add_quantity("virial_radius", field_type=field_type,prepend=True)
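
A sketch of the new ``finder_kwargs`` hook, using the Enzo_64 sample dataset referenced
elsewhere in this changeset. Note that the ``__init__`` hunk above expands ``finder_kwargs``
before the ``None`` default is applied, so a dict should be passed explicitly whenever
``finder_method`` is given; the HOP ``threshold`` value here is only illustrative.

    from yt.mods import *
    from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog

    data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    # finder_kwargs is forwarded to the underlying halo finder.
    hc = HaloCatalog(data_pf=data_pf, finder_method='hop',
                     finder_kwargs={'threshold': 160})
    hc.create()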
 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -44,27 +44,27 @@
     def __call__(self, ds):
         return self.function(ds, *self.args, **self.kwargs)
 
-def _hop_method(pf):
+def _hop_method(pf, **finder_kwargs):
     r"""
     Run the Hop halo finding method.
     """
     
-    halo_list = HOPHaloFinder(pf)
+    halo_list = HOPHaloFinder(pf, **finder_kwargs)
     halos_pf = _parse_old_halo_list(pf, halo_list)
     return halos_pf
 add_finding_method("hop", _hop_method)
 
-def _fof_method(pf):
+def _fof_method(pf, **finder_kwargs):
     r"""
     Run the FoF halo finding method.
     """
 
-    halo_list = FOFHaloFinder(pf)
+    halo_list = FOFHaloFinder(pf, **finder_kwargs)
     halos_pf = _parse_old_halo_list(pf, halo_list)
     return halos_pf
 add_finding_method("fof", _fof_method)
 
-def _rockstar_method(pf):
+def _rockstar_method(pf, **finder_kwargs):
     r"""
     Run the Rockstar halo finding method.
     """
@@ -74,7 +74,7 @@
     from yt.analysis_modules.halo_finding.rockstar.api import \
      RockstarHaloFinder
     
-    rh = RockstarHaloFinder(pf)
+    rh = RockstarHaloFinder(pf, **finder_kwargs)
     rh.run()
 
 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -170,29 +170,26 @@
     To use the script below you must run it using MPI:
     mpirun -np 4 python run_rockstar.py --parallel
 
-    run_rockstar.py:
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.halo_finding.rockstar.api import \
+    ... RockstarHaloFinder
+    >>> from yt.data_objects.particle_filters import \
+    ... particle_filter
 
-    from yt.mods import *
+    >>> # create a particle filter to remove star particles
+    >>> @particle_filter("dark_matter", requires=["creation_time"])
+    ... def _dm_filter(pfilter, data):
+    ...     return data["creation_time"] <= 0.0
 
-    from yt.analysis_modules.halo_finding.rockstar.api import \
-        RockstarHaloFinder
-    from yt.data_objects.particle_filters import \
-        particle_filter
+    >>> def setup_pf(pf):
+    ...     pf.add_particle_filter("dark_matter")
 
-    # create a particle filter to remove star particles
-    @particle_filter("dark_matter", requires=["creation_time"])
-    def _dm_filter(pfilter, data):
-        return data["creation_time"] <= 0.0
+    >>> es = simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
+    >>> es.get_time_series(setup_function=setup_pf, redshift_data=False)
 
-    def setup_pf(pf):
-        pf.add_particle_filter("dark_matter")
-
-    es = simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
-    es.get_time_series(setup_function=setup_pf, redshift_data=False)
-
-    rh = RockstarHaloFinder(es, num_readers=1, num_writers=2,
-                            particle_type="dark_matter")
-    rh.run()
+    >>> rh = RockstarHaloFinder(es, num_readers=1, num_writers=2,
+    ...                         particle_type="dark_matter")
+    >>> rh.run()
 
     """
     def __init__(self, ts, num_readers = 1, num_writers = None,

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -201,7 +201,8 @@
             if self.suppress_logging:
                 old_level = int(ytcfg.get("yt","loglevel"))
                 mylog.setLevel(40)
-            dd_first = self.data_series[0].all_data()
+            ds_first = self.data_series[0]
+            dd_first = ds_first.all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -156,7 +156,8 @@
 
     def _create_intensity(self, i):
         def _intensity(field, data):
-            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            vlos = data["v_los"]
+            w = np.abs(vlos-self.vmid[i])/self.dv.in_units(vlos.units)
             w = 1.-w
             w[w < 0.0] = 0.0
             return data[self.field]*w
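
As a standalone sketch of the weighting fixed above, with made-up velocities and a channel
width deliberately given in different units to show why ``in_units`` is needed:

    import numpy as np
    from yt import YTArray, YTQuantity

    vlos = YTArray([-150., 0., 150.], 'km/s')   # line-of-sight velocities
    vmid = YTQuantity(0., 'km/s')               # center of this channel
    dv = YTQuantity(2.0e7, 'cm/s')              # channel width (200 km/s)
    # Convert dv to the units of vlos before dividing, as done above.
    w = 1.0 - np.abs(vlos - vmid) / dv.in_units(vlos.units)
    w[w < 0.0] = 0.0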

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -623,11 +623,13 @@
                                         fluids, self, self._current_chunk)
         for f, v in read_fluids.items():
             self.field_data[f] = self.pf.arr(v, input_units = finfos[f].units)
+            self.field_data[f].convert_to_units(finfos[f].output_units)
 
         read_particles, gen_particles = self.index._read_particle_fields(
                                         particles, self, self._current_chunk)
         for f, v in read_particles.items():
             self.field_data[f] = self.pf.arr(v, input_units = finfos[f].units)
+            self.field_data[f].convert_to_units(finfos[f].output_units)
 
         fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
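
The two ``convert_to_units`` calls convert each freshly read field, in place, to the
``output_units`` declared on the field (added to ``DerivedField`` later in this changeset).
In isolation, the same operation looks like:

    from yt import YTArray

    a = YTArray([1.0, 2.0, 3.0], 'km')
    a.convert_to_units('cm')       # in-place unit conversion
    print(a)                       # [ 100000.  200000.  300000.] cm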

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -50,6 +50,7 @@
         return
 
     def __call__(self, *args, **kwargs):
+        """Calculate results for the derived quantity"""
         self.count_values(*args, **kwargs)
         chunks = self.data_source.chunks([], chunking_style="io")
         storage = {}

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -754,6 +754,7 @@
         self.weight_values = np.zeros(size, dtype="float64")
 
 class ProfileND(ParallelAnalysisInterface):
+    """The profile object class"""
     def __init__(self, data_source, weight_field = None):
         self.data_source = data_source
         self.pf = data_source.pf
@@ -763,6 +764,14 @@
         ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
 
     def add_fields(self, fields):
+        """Add fields to profile
+
+        Parameters
+        ----------
+        fields : list of field names
+            A list of fields to create profile histograms for
+        
+        """
         fields = ensure_list(fields)
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
         cfields = fields + list(self.bin_fields)
@@ -774,7 +783,7 @@
     def set_field_unit(self, field, new_unit):
         """Sets a new unit for the requested field
 
-        parameters
+        Parameters
         ----------
         field : string or field tuple
            The name of the field that is to be changed.
@@ -802,6 +811,8 @@
         blank = ~temp_storage.used
         self.used = temp_storage.used
         if self.weight_field is not None:
+            # This is unnecessary, but it will suppress division errors.
+            temp_storage.weight_values[blank] = 1e-30
             temp_storage.values /= temp_storage.weight_values[...,None]
             self.weight = temp_storage.weight_values[...,None]
             self.weight[blank] = 0.0
@@ -871,6 +882,28 @@
             return np.linspace(mi, ma, n+1)
 
 class Profile1D(ProfileND):
+    """An object that represents a 1D profile.
+
+    Parameters
+    ----------
+
+    data_source : AMR3DData object
+        The data object to be profiled
+    x_field : string field name
+        The field to profile as a function of
+    x_n : integer
+        The number of bins along the x direction.
+    x_min : float
+        The minimum value of the x profile field.
+    x_max : float
+        The maximum value of the x profile field.
+    x_log : boolean
+        Controls whether or not the bins for the x field are evenly
+        spaced in linear (False) or log (True) space.
+    weight_field : string field name
+        The field to weight the profiled fields by.
+
+    """
     def __init__(self, data_source, x_field, x_n, x_min, x_max, x_log,
                  weight_field = None):
         super(Profile1D, self).__init__(data_source, weight_field)
@@ -911,6 +944,39 @@
         return ((self.x_bins[0], self.x_bins[-1]),)
 
 class Profile2D(ProfileND):
+    """An object that represents a 2D profile.
+
+    Parameters
+    ----------
+
+    data_source : AMR3DData object
+        The data object to be profiled
+    x_field : string field name
+        The field to profile as a function of along the x axis.
+    x_n : integer
+        The number of bins along the x direction.
+    x_min : float
+        The minimum value of the x profile field.
+    x_max : float
+        The maximum value of the x profile field.
+    x_log : boolean
+        Controls whether or not the bins for the x field are evenly
+        spaced in linear (False) or log (True) space.
+    y_field : string field name
+        The field to profile as a function of along the y axis
+    y_n : integer
+        The number of bins along the y direction.
+    y_min : float
+        The minimum value of the y profile field.
+    y_max : float
+        The maximum value of the y profile field.
+    y_log : boolean
+        Controls whether or not the bins for the y field are evenly
+        spaced in linear (False) or log (True) space.
+    weight_field : string field name
+        The field to weight the profiled fields by.
+
+    """
     def __init__(self, data_source,
                  x_field, x_n, x_min, x_max, x_log,
                  y_field, y_n, y_min, y_max, y_log,
@@ -975,6 +1041,50 @@
                 (self.y_bins[0], self.y_bins[-1]))
 
 class Profile3D(ProfileND):
+    """An object that represents a 3D profile.
+
+    Parameters
+    ----------
+
+    data_source : AMR3DData object
+        The data object to be profiled
+    x_field : string field name
+        The field to profile as a function of along the x axis.
+    x_n : integer
+        The number of bins along the x direction.
+    x_min : float
+        The minimum value of the x profile field.
+    x_max : float
+        The maximum value of the x profile field.
+    x_log : boolean
+        Controls whether or not the bins for the x field are evenly
+        spaced in linear (False) or log (True) space.
+    y_field : string field name
+        The field to profile as a function of along the y axis
+    y_n : integer
+        The number of bins along the y direction.
+    y_min : float
+        The minimum value of the y profile field.
+    y_max : float
+        The maximum value of the y profile field.
+    y_log : boolean
+        Controls whether or not the bins for the y field are evenly
+        spaced in linear (False) or log (True) space.
+    z_field : string field name
+        The field to profile as a function of along the z axis
+    z_n : integer
+        The number of bins along the z direction.
+    z_min : float
+        The minimum value of the z profile field.
+    z_max : float
+        The maximum value of the z profile field.
+    z_log : boolean
+        Controls whether or not the bins for the z field are evenly
+        spaced in linear (False) or log (True) space.
+    weight_field : string field name
+        The field to weight the profiled fields by.
+
+    """
     def __init__(self, data_source,
                  x_field, x_n, x_min, x_max, x_log,
                  y_field, y_n, y_min, y_max, y_log,

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -717,3 +717,6 @@
 
     def _calculate_offsets(self, fields):
         pass
+
+    def __cmp__(self, other):
+        return cmp(self.filename, other.filename)
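
Given the neighboring ``_calculate_offsets``, this ``__cmp__`` presumably lands on the
particle data-file class, making data files orderable by filename under Python 2 ``cmp``
semantics, which is what the many ``sorted(data_files)`` edits in this changeset rely on.
A toy illustration:

    class DataFile(object):
        def __init__(self, filename):
            self.filename = filename
        def __cmp__(self, other):
            return cmp(self.filename, other.filename)

    files = set(DataFile(fn) for fn in ["p002.h5", "p000.h5", "p001.h5"])
    print([df.filename for df in sorted(files)])   # ['p000.h5', 'p001.h5', 'p002.h5']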

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -45,28 +45,25 @@
     def _specific_angular_momentum_x(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         center = data.get_field_parameter('center')
-        v_vec = obtain_rvec(data)
-        v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-        v_vec = data.pf.arr(v_vec, input_units = data["index", "x"].units)
-        rv = v_vec - center
+        rv = obtain_rvec(data)
+        rv = np.rollaxis(rv, 0, len(rv.shape))
+        rv = data.pf.arr(rv, input_units = data["index", "x"].units)
         return yv * rv[...,2] - zv * rv[...,1]
 
     def _specific_angular_momentum_y(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         center = data.get_field_parameter('center')
-        v_vec = obtain_rvec(data)
-        v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-        v_vec = data.pf.arr(v_vec, input_units = data["index", "x"].units)
-        rv = v_vec - center
+        rv = obtain_rvec(data)
+        rv = np.rollaxis(rv, 0, len(rv.shape))
+        rv = data.pf.arr(rv, input_units = data["index", "x"].units)
         return - (xv * rv[...,2] - zv * rv[...,0])
 
     def _specific_angular_momentum_z(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         center = data.get_field_parameter('center')
-        v_vec = obtain_rvec(data)
-        v_vec = np.rollaxis(v_vec, 0, len(v_vec.shape))
-        v_vec = data.pf.arr(v_vec, input_units = data["index", "x"].units)
-        rv = v_vec - center
+        rv = obtain_rvec(data)
+        rv = np.rollaxis(rv, 0, len(rv.shape))
+        rv = data.pf.arr(rv, input_units = data["index", "x"].units)
         return xv * rv[...,1] - yv * rv[...,0]
 
     registry.add_field((ftype, "specific_angular_momentum_x"),

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -78,11 +78,15 @@
        Used for baryon fields from the data that are not in all the grids
     display_name : str
        A name used in the plots
+    output_units : str
+       For fields that exist on disk, which we may want to convert to other
+       fields or that get aliased to themselves, we can specify a different
+       desired output unit than the unit found on disk.
     """
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None):
+                 not_in_all=False, display_name=None, output_units = None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -90,6 +94,8 @@
         self.display_field = display_field
         self.particle_type = particle_type
         self.vector_field = vector_field
+        if output_units is None: output_units = units
+        self.output_units = output_units
 
         self._function = function
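
A small sketch of the new keyword, using only the constructor arguments visible in this
hunk (the field function is a stand-in):

    from yt.fields.derived_field import DerivedField

    def _alias(field, data):
        # Illustrative only; a real field function returns an array of values.
        return data["particle_mass"]

    f = DerivedField("particle_mass", _alias, units="code_mass",
                     particle_type=True, output_units="g")
    print(f.output_units)   # "g"; if omitted, output_units falls back to units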
 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -66,14 +66,22 @@
         pass
 
     def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
+        skip_output_units = ("code_length",)
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
             units = self.pf.field_units.get((ptype, f), units)
+            if (f in aliases or ptype not in self.pf.particle_types_raw) and \
+                units not in skip_output_units:
+                u = Unit(units, registry = self.pf.unit_registry)
+                output_units = str(u.get_cgs_equivalent())
+            else:
+                output_units = units
             self.add_output_field((ptype, f),
-                units = units, particle_type = True, display_name = dn)
+                units = units, particle_type = True, display_name = dn,
+                output_units = output_units)
             if (ptype, f) not in self.field_list:
                 continue
             for alias in aliases:
-                self.alias((ptype, alias), (ptype, f))
+                self.alias((ptype, alias), (ptype, f), units = output_units)
 
         # We'll either have particle_position or particle_position_[xyz]
         if (ptype, "particle_position") in self.field_list or \

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -145,14 +145,14 @@
 
         # look for fluid fields
         output_fields = []
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith("component"):
                 output_fields.append(val)
         self.field_list = [("chombo", c) for c in output_fields]
 
         # look for particle fields
         particle_fields = []
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith("particle"):
                 particle_fields.append(val)
         self.field_list.extend([("io", c) for c in particle_fields])        
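
In plain h5py terms, the attribute scan above now reads component names directly from the
file handle's root attributes; a standalone sketch (the file name is a placeholder):

    import h5py

    with h5py.File("plt0000.hdf5", "r") as f:
        # Collect the values of all root attributes named component*.
        fields = [val for key, val in f.attrs.items()
                  if key.startswith("component")]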

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -37,7 +37,7 @@
         if self._field_dict is not None:
             return self._field_dict
         field_dict = {}
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith('component_'):
                 comp_number = int(re.match('component_(\d)', key).groups()[0])
                 field_dict[val] = comp_number
@@ -50,7 +50,7 @@
         if self._particle_field_index is not None:
             return self._particle_field_index
         field_dict = {}
-        for key, val in self._handle['/'].attrs.items():
+        for key, val in self._handle.attrs.items():
             if key.startswith('particle_'):
                 comp_number = int(re.match('particle_component_(\d)', key).groups()[0])
                 field_dict[val] = comp_number
@@ -58,8 +58,8 @@
         return self._particle_field_index        
         
     def _read_field_names(self,grid):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
+        ncomp = int(self._handle.attrs['num_components'])
+        fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
     
     def _read_data(self,grid,field):
 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -49,7 +49,7 @@
 
 class EnzoFieldInfo(FieldInfoContainer):
     known_other_fields = (
-        ("Cooling_Time", ("code_time", ["cooling_time"], None)),
+        ("Cooling_Time", ("s", ["cooling_time"], None)),
         ("HI_kph", ("1/code_time", [], None)),
         ("HeI_kph", ("1/code_time", [], None)),
         ("HeII_kph", ("1/code_time", [], None)),
@@ -80,16 +80,16 @@
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),
-        ("particle_velocity_x", (vel_units, [], None)),
-        ("particle_velocity_y", (vel_units, [], None)),
-        ("particle_velocity_z", (vel_units, [], None)),
+        ("particle_velocity_x", (vel_units, ["particle_velocity_x"], None)),
+        ("particle_velocity_y", (vel_units, ["particle_velocity_y"], None)),
+        ("particle_velocity_z", (vel_units, ["particle_velocity_z"], None)),
         ("creation_time", ("code_time", [], None)),
         ("dynamical_time", ("code_time", [], None)),
         ("metallicity_fraction", ("code_metallicity", [], None)),
         ("metallicity", ("", [], None)),
         ("particle_type", ("", [], None)),
         ("particle_index", ("", [], None)),
-        ("particle_mass", ("code_mass", [], None)),
+        ("particle_mass", ("code_mass", ["particle_mass"], None)),
         ("GridID", ("", [], None)),
         ("identifier", ("", ["particle_index"], None)),
         ("level", ("", [], None)),

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -283,7 +283,8 @@
                     else :
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
-                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn)) 
+                        mylog.info("{0} {1} overwrites a simulation "
+                                   "scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
         if self._flash_version == 7:
             for hn in hns:
@@ -300,7 +301,8 @@
                     else :
                         pval = val
                     if vn in self.parameters and self.parameters[vn] != pval:
-                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
+                        mylog.info("{0} {1} overwrites a simulation "
+                                   "scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
         
         # Determine block size
@@ -363,7 +365,7 @@
         try:
             self.gamma = self.parameters["gamma"]
         except:
-            mylog.warning("Cannot find Gamma")
+            mylog.info("Cannot find Gamma")
             pass
 
         # Get the simulation time

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/io.py
@@ -43,7 +43,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 x = f['particle_position_x'].value.astype("float64")
@@ -61,7 +61,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/halo_catalogs/owls_subfind/io.py
--- a/yt/frontends/halo_catalogs/owls_subfind/io.py
+++ b/yt/frontends/halo_catalogs/owls_subfind/io.py
@@ -44,7 +44,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]
@@ -78,7 +78,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     pcount = data_file.total_particles[ptype]

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -45,7 +45,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
@@ -65,7 +65,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -47,7 +47,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             yield "dark_matter", (
                 self._handle['x'], self._handle['y'], self._handle['z'])
@@ -61,7 +61,7 @@
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
         assert(len(data_files) == 1)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             pcount = self._handle['x'].size
             for ptype, field_list in sorted(ptf.items()):
                 x = self._handle['x']

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -77,7 +77,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -93,7 +93,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
                 g = f["/%s" % ptype]
@@ -251,7 +251,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -268,7 +268,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -498,7 +498,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -519,7 +519,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             poff = data_file.field_offsets
             tp = data_file.total_particles
             f = open(data_file.filename, "rb")
@@ -725,7 +725,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype in ptf:
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")
@@ -738,7 +738,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             for ptype, field_list in sorted(ptf.items()):
                 s = self._open_stream(data_file, (ptype, "Coordinates"))
                 c = np.frombuffer(s, dtype="float64")

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -519,16 +519,17 @@
 
     This should allow a uniform grid of data to be loaded directly into yt and
     analyzed as would any others.  This comes with several caveats:
-        * Units will be incorrect unless the unit system is explicitly
-          specified.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
-        * Particles may be difficult to integrate.
+
+    * Units will be incorrect unless the unit system is explicitly
+      specified.
+    * Some functions may behave oddly, and parallelism will be
+      disappointing or non-existent in most cases.
+    * Particles may be difficult to integrate.
 
     Particle fields are detected as one-dimensional fields. The number of
     particles is set by the "number_of_particles" key in data.
     
-Parameters
+    Parameters
     ----------
     data : dict
         This is a dict of numpy arrays or (numpy array, unit spec) tuples.
@@ -690,13 +691,16 @@
     This should allow a sequence of grids of varying resolution of data to be
     loaded directly into yt and analyzed as would any others.  This comes with
     several caveats:
-        * Units will be incorrect unless the unit system is explicitly specified.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
-        * Particles may be difficult to integrate.
-        * No consistency checks are performed on the index
-Parameters
+
+    * Units will be incorrect unless the unit system is explicitly specified.
+    * Some functions may behave oddly, and parallelism will be
+      disappointing or non-existent in most cases.
+    * Particles may be difficult to integrate.
+    * No consistency checks are performed on the index
+
+    Parameters
     ----------
+
     grid_data : list of dicts
         This is a list of dicts. Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
@@ -751,6 +755,7 @@
     ...
     >>> units = dict(Density='g/cm**3')
     >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+
     """
 
     domain_dimensions = np.array(domain_dimensions)
@@ -971,10 +976,11 @@
 
     This should allow a collection of particle data to be loaded directly into
     yt and analyzed as would any others.  This comes with several caveats:
-        * Units will be incorrect unless the data has already been converted to
-          cgs.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
+
+    * Units will be incorrect unless the data has already been converted to
+      cgs.
+    * Some functions may behave oddly, and parallelism will be
+      disappointing or non-existent in most cases.
 
     This will initialize an Octree of data.  Note that fluid fields will not
     work yet, or possibly ever.
@@ -1142,11 +1148,12 @@
 
     This should allow a semistructured grid of data to be loaded directly into
     yt and analyzed as would any others.  This comes with several caveats:
-        * Units will be incorrect unless the data has already been converted to
-          cgs.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
-        * Particles may be difficult to integrate.
+
+    * Units will be incorrect unless the data has already been converted to
+      cgs.
+    * Some functions may behave oddly, and parallelism will be
+      disappointing or non-existent in most cases.
+    * Particles may be difficult to integrate.
 
     Particle fields are detected as one-dimensional fields. The number of particles
     is set by the "number_of_particles" key in data.
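
The docstring fixes above cover the stream-frontend loaders; a minimal sketch of
``load_uniform_grid``, mirroring the ``load_amr_grids`` example earlier in this diff
(the field name and units are illustrative, and the trailing ``1.0`` is the length unit
following that example):

    import numpy as np
    from yt.frontends.stream.data_structures import load_uniform_grid

    # A dict of (numpy array, unit spec) tuples, per the docstring above.
    data = {"density": (np.random.random((32, 32, 32)), "g/cm**3")}
    pf = load_uniform_grid(data, [32, 32, 32], 1.0)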

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -109,7 +109,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
@@ -135,7 +135,7 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        for data_file in data_files:
+        for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
                 if (ptype, "particle_position") in f:

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -60,7 +60,10 @@
     convert scalar, list or tuple argument passed to functions using Cython.
     """
     if isinstance(obj, np.ndarray):
-        return obj
+        if obj.shape == ():
+            return np.array([obj])
+        # We cast to ndarray to catch ndarray subclasses
+        return np.array(obj)
     elif isinstance(obj, (types.ListType, types.TupleType)):
         return np.asarray(obj)
     else:
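
Assuming this hunk sits in ``yt.funcs.ensure_numpy_array`` (the docstring fragment above
matches that helper), a quick sketch of the new behavior:

    import numpy as np
    from yt import YTArray
    from yt.funcs import ensure_numpy_array

    print(ensure_numpy_array(np.array(3.0)).shape)              # (1,): 0-d input is promoted
    print(type(ensure_numpy_array(YTArray([1, 2, 3], 'cm'))))   # plain ndarray: subclass stripped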

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -38,6 +38,7 @@
 from yt.utilities.exceptions import YTFieldNotFound
 
 class Index(ParallelAnalysisInterface):
+    """The base index class"""
     _global_mesh = True
     _unsupported_objects = ()
     _index_properties = ()

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -37,6 +37,7 @@
 from yt.data_objects.data_containers import data_object_registry
 
 class GridIndex(Index):
+    """The index class for patch and block AMR datasets. """
     float_type = 'float64'
     _preload_implemented = False
     _index_properties = ("grid_left_edge", "grid_right_edge",
@@ -240,6 +241,40 @@
         ind = pts.find_points_in_tree()
         return self.grids[ind], ind
 
+    def find_field_value_at_point(self, fields, coord):
+        r"""Find the value of fields at a coordinate.
+
+        Returns the values [field1, field2,...] of the fields at the given
+        (x, y, z) points. Returns a list of field values in the same order as
+        the input *fields*.
+
+        Parameters
+        ----------
+        fields : string or list of strings
+            The field(s) that will be returned.
+
+        coord : list or array of coordinates
+            The location for which field values will be returned.
+
+        Examples
+        --------
+        >>> pf.h.find_field_value_at_point(['Density', 'Temperature'],
+            [0.4, 0.3, 0.8])
+        [2.1489e-24, 1.23843e4]
+        """
+        this = self.find_points(*coord)[0][-1]
+        cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions
+        mark = np.zeros(3).astype('int')
+        # Find the index for the cell containing this point.
+        for dim in xrange(len(coord)):
+            mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim])
+        out = []
+        fields = ensure_list(fields)
+        # Pull out the values and add it to the out list.
+        for field in fields:
+            out.append(this[field][mark[0], mark[1], mark[2]])
+        return out
+
     def get_grid_tree(self) :
 
         left_edge = self.pf.arr(np.zeros((self.num_grids, 3)),

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -18,6 +18,7 @@
 cimport numpy as np
 import numpy as np
 from selection_routines cimport SelectorObject
+from libc.math cimport floor
 cimport selection_routines
 
 ORDER_MAX = 20
@@ -278,7 +279,7 @@
         cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            ind[i] = <np.int64_t> (floor((ppos[i] - self.DLE[i])/dds[i]))
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
             ind32[i] = ind[i]
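
The move from a bare integer cast to ``floor`` matters when the offset is negative:
truncation rounds toward zero while ``floor`` rounds down, which is what the oct indexing
needs. A two-line illustration:

    import numpy as np

    offset = -0.25                      # e.g. (ppos - DLE)/dds slightly negative
    print(int(offset))                  # 0  : truncation, the old behavior
    print(int(np.floor(offset)))        # -1 : what the octree indexing needs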

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -35,7 +35,7 @@
 from yt.data_objects.data_containers import data_object_registry
 
 class OctreeIndex(Index):
-
+    """The Index subclass for oct AMR datasets"""
     def _setup_geometry(self):
         mylog.debug("Initializing Octree Geometry Handler.")
         self._initialize_oct_handler()

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -38,6 +38,7 @@
 from yt.data_objects.octree_subset import ParticleOctreeSubset
 
 class ParticleIndex(Index):
+    """The Index subclass for particle datasets"""
     _global_mesh = False
 
     def __init__(self, pf, dataset_type):

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/geometry/unstructured_mesh_handler.py
--- a/yt/geometry/unstructured_mesh_handler.py
+++ b/yt/geometry/unstructured_mesh_handler.py
@@ -22,6 +22,7 @@
 from yt.utilities.lib.mesh_utilities import smallest_fwidth
 
 class UnstructuredIndex(Index):
+    """The Index subclass for unstructured and hexahedral mesh datasets. """
     _global_mesh = False
     _unsupported_objects = ('proj', 'covering_grid', 'smoothed_covering_grid')
 

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -756,7 +756,6 @@
     yield assert_array_equal, yt_arr, YTArray(yt_arr.to_astropy())
     yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
 
-
 def test_subclass():
 
     class YTASubclass(YTArray):

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -22,6 +22,7 @@
     pass
 
 class UnitRegistry:
+    """A registry for unit symbols"""
     def __init__(self, add_default_symbols=True, lut=None):
         if lut:
             self.lut = lut

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -178,6 +178,61 @@
 
 class YTArray(np.ndarray):
     """
+    An ndarray subclass that attaches a symbolic unit object to the array data.
+
+    Parameters
+    ----------
+
+    input_array : ndarray or ndarray subclass
+        An array to attach units to
+    input_units : String unit specification, unit symbol object, or astropy units
+        The units of the array. Powers must be specified using python
+        syntax (cm**3, not cm^3).
+    registry : A UnitRegistry object
+        The registry to create units from. If input_units is already associated
+        with a unit registry and this is specified, this will be used instead of
+        the registry associated with the unit object.
+    dtype : string or NumPy dtype object
+        The dtype of the array data.
+
+    Examples
+    --------
+
+    >>> from yt import YTArray
+    >>> a = YTArray([1,2,3], 'cm')
+    >>> b = YTArray([4,5,6], 'm')
+    >>> a + b
+    YTArray([ 401.,  502.,  603.]) cm
+    >>> b + a
+    YTArray([ 4.01,  5.02,  6.03]) m
+
+    NumPy ufuncs will pass through units where appropriate.
+
+    >>> import numpy as np
+    >>> a = YTArray(np.arange(8), 'g/cm**3')
+    >>> np.ones_like(a)
+    YTArray([1, 1, 1, 1, 1, 1, 1, 1]) g/cm**3
+
+    and strip them when it would be annoying to deal with them.
+
+    >>> np.log10(a)
+    array([       -inf,  0.        ,  0.30103   ,  0.47712125,  0.60205999,
+            0.69897   ,  0.77815125,  0.84509804])
+
+    YTArray is tightly integrated with yt datasets:
+
+    >>> import yt
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> a = ds.arr(np.ones(5), 'code_length')
+    >>> a.in_cgs()
+    YTArray([  3.08600000e+24,   3.08600000e+24,   3.08600000e+24,
+             3.08600000e+24,   3.08600000e+24]) cm
+
+    This is equivalent to:
+
+    >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry)
+    >>> np.all(a == b)
+    True
 
     """
     _ufunc_registry = {
@@ -1006,10 +1061,68 @@
         return type(self)(ret, copy.deepcopy(self.units))
 
 class YTQuantity(YTArray):
-    def __new__(cls, input, input_units=None, registry=None, dtype=np.float64):
-        if not isinstance(input, (numeric_type, np.number, np.ndarray)):
+    """
+    A scalar associated with a unit.
+
+    Parameters
+    ----------
+
+    input_scalar : ndarray or ndarray subclass
+        An array to attach units to
+    input_units : String unit specification, unit symbol object, or astropy units
+        The units of the array. Powers must be specified using python
+        syntax (cm**3, not cm^3).
+    registry : A UnitRegistry object
+        The registry to create units from. If input_units is already associated
+        with a unit registry and this is specified, this will be used instead of
+        the registry associated with the unit object.
+    dtype : string or NumPy dtype object
+        The dtype of the array data.
+
+    Examples
+    --------
+
+    >>> from yt import YTQuantity
+    >>> a = YTQuantity(1, 'cm')
+    >>> b = YTQuantity(2, 'm')
+    >>> a + b
+    201.0 cm
+    >>> b + a
+    2.01 m
+
+    NumPy ufuncs will pass through units where appropriate.
+
+    >>> import numpy as np
+    >>> a = YTQuantity(12, 'g/cm**3')
+    >>> np.ones_like(a)
+    1 g/cm**3
+
+    and strip them when it would be annoying to deal with them.
+
+    >>> print np.log10(a)
+    1.07918124605
+
+    YTQuantity is tightly integrated with yt datasets:
+
+    >>> import yt
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> a = ds.quan(5, 'code_length')
+    >>> a.in_cgs()
+    1.543e+25 cm
+
+    This is equivalent to:
+
+    >>> b = YTQuantity(5, 'code_length', registry=ds.unit_registry)
+    >>> np.all(a == b)
+    True
+
+    """
+    def __new__(cls, input_scalar, input_units=None, registry=None,
+                dtype=np.float64):
+        if not isinstance(input_scalar, (numeric_type, np.number, np.ndarray)):
             raise RuntimeError("YTQuantity values must be numeric")
-        ret = YTArray.__new__(cls, input, input_units, registry, dtype=dtype)
+        ret = YTArray.__new__(cls, input_scalar, input_units, registry,
+                              dtype=dtype)
         if ret.size > 1:
             raise RuntimeError("YTQuantity instances must be scalars")
         return ret

diff -r c976fd72af64aad6078e419af39e6e162daa772f -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 yt/utilities/file_handler.py
--- a/yt/utilities/file_handler.py
+++ b/yt/utilities/file_handler.py
@@ -40,6 +40,10 @@
     def keys(self):
         return self.handle.keys
 
+    @property
+    def items(self):
+        return self.handle.items
+
 class FITSFileHandler(HDF5FileHandler):
     def __init__(self, filename):
         from yt.utilities.on_demand_imports import _astropy

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5234dba63f93/
Changeset:   5234dba63f93
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-02 20:58:34
Summary:     Adding load_sdf convenience to pick between http and local
Affected #:  2 files

diff -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 -r 5234dba63f938e422f911e5b8666394d29bd05ff yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -309,6 +309,14 @@
         self.parameters['header_offset'] = hoff
 
 
+def load_sdf(filename, header=None):
+    if 'http' in filename:
+        sdf = HTTPSDFRead(filename, header=header)
+    else:
+        sdf = SDFRead(filename, header=header)
+    return sdf
+
+
 class SDFIndex(object):
 
     """docstring for SDFIndex

diff -r 6d5f6d3d6f161743ded86f6c7e7e15a46f5e4cd3 -r 5234dba63f938e422f911e5b8666394d29bd05ff yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -813,7 +813,7 @@
                 elif np.nanmax(image) <= 0:
                     msg = "Plot image for field %s has no positive " \
                           "values.  Max = %d." % (f, np.nanmax(image))
-                elif np.all(np.logical_not(np.isfinite(image))):
+                elif not np.any(np.isfinite(image)):
                     msg = "Plot image for field %s is filled with NaNs." % (f,)
                 if msg is not None:
                     mylog.warning(msg)
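
For reference, a minimal usage sketch of the load_sdf convenience function added above; the file names and URL are hypothetical:

    from yt.utilities.sdf import load_sdf

    # A plain path falls through to SDFRead ...
    local = load_sdf("/data/ds14_snapshot.sdf")
    # ... while anything containing 'http' is routed to HTTPSDFRead.
    remote = load_sdf("http://data.example.org/ds14_snapshot.sdf",
                      header="http://data.example.org/ds14_snapshot.hdr")

The plot_window.py tweak in the same changeset is purely a simplification: not np.any(np.isfinite(image)) is equivalent to np.all(np.logical_not(np.isfinite(image))), but it skips building the intermediate logical_not array.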


https://bitbucket.org/yt_analysis/yt/commits/0d6f3ba1be58/
Changeset:   0d6f3ba1be58
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-03 20:03:23
Summary:     Allow a separate header to be passed to yt.load(). Add support for non-ASCII parameters by finding all binary data that have length 1.
Left to do:
* Add renaming methods for gadget -> sdf parameters
Affected #:  2 files

diff -r 5234dba63f938e422f911e5b8666394d29bd05ff -r 0d6f3ba1be58aa32db62c796e7383012b1258008 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -106,10 +106,10 @@
 
     def _parse_parameter_file(self):
         if self.parameter_filename.startswith("http"):
-            cls = HTTPSDFRead
+            sdf_class = HTTPSDFRead
         else:
-            cls = SDFRead
-        self.sdf_container = cls(self.parameter_filename,
+            sdf_class = SDFRead
+        self.sdf_container = sdf_class(self.parameter_filename,
                                  header=self.sdf_header)
 
         # Reference
@@ -124,7 +124,11 @@
 
 
         if None in (self.domain_left_edge, self.domain_right_edge):
-            R0 = self.parameters['R0']
+            if 'R0' in self.parameters:
+                R0 = self.parameters['R0']
+            elif 'BoxSize' in self.parameters:
+                R0 = self.parameters['BoxSize']/2
+
             if 'offset_center' in self.parameters and self.parameters['offset_center']:
                 self.domain_left_edge = np.array([0, 0, 0])
                 self.domain_right_edge = np.array([
@@ -147,14 +151,18 @@
         self.cosmological_simulation = 1
 
         self.current_redshift = self.parameters.get("redshift", 0.0)
-        self.omega_lambda = self.parameters["Omega0_lambda"]
-        self.omega_matter = self.parameters["Omega0_m"]
-        if "Omega0_fld" in self.parameters:
-            self.omega_lambda += self.parameters["Omega0_fld"]
-        if "Omega0_r" in self.parameters:
-            # not correct, but most codes can't handle Omega0_r
-            self.omega_matter += self.parameters["Omega0_r"]
-        self.hubble_constant = self.parameters["h_100"]
+        try:
+            self.omega_matter = self.parameters["Omega0_m"]
+            if "Omega0_fld" in self.parameters:
+                self.omega_lambda += self.parameters["Omega0_fld"]
+            if "Omega0_r" in self.parameters:
+                # not correct, but most codes can't handle Omega0_r
+                self.omega_matter += self.parameters["Omega0_r"]
+            self.hubble_constant = self.parameters["h_100"]
+        except:
+            self.omega_matter = 1.0
+            self.omega_lambda = 0.0
+            self.hubble_constant = 1.0
         self.current_time = units_2HOT_v2_time * self.parameters.get("tpos", 0.0)
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
         self.filename_template = self.parameter_filename
@@ -166,10 +174,10 @@
             if self.midx_filename is not None:
 
                 if 'http' in self.midx_filename:
-                    cls = HTTPSDFRead
+                    sdf_class = HTTPSDFRead
                 else:
-                    cls = SDFRead
-                indexdata = cls(self.midx_filename, header=self.midx_header)
+                    sdf_class = SDFRead
+                indexdata = sdf_class(self.midx_filename, header=self.midx_header)
                 self._midx = SDFIndex(self.sdf_container, indexdata,
                                         level=self.midx_level)
             else:
@@ -190,14 +198,16 @@
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        if args[0].startswith("http"):
+        sdf_header = kwargs.get('sdf_header', args[0])
+        print 'Parsing sdf_header: %s' % sdf_header
+        if sdf_header.startswith("http"):
             if requests is None: return False
-            hreq = requests.get(args[0], stream=True)
+            hreq = requests.get(sdf_header, stream=True)
             if hreq.status_code != 200: return False
             # Grab a whole 4k page.
             line = hreq.iter_content(4096).next()
-        elif os.path.isfile(args[0]): 
-            with open(args[0], "r") as f:
+        elif os.path.isfile(sdf_header):
+            with open(sdf_header, "r") as f:
                 line = f.read(10).strip()
         else:
             return False

diff -r 5234dba63f938e422f911e5b8666394d29bd05ff -r 0d6f3ba1be58aa32db62c796e7383012b1258008 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -8,11 +8,14 @@
 
 _types = {
     'int': 'int32',
+    'int32_t': 'int32',
+    'uint32_t': 'uint32',
     'int64_t': 'int64',
     'float': 'float32',
     'double': 'float64',
     'unsigned int': 'I',
     'unsigned char': 'B',
+    'char': 'B',
 }
 
 def get_type(vtype, tlen=None):
@@ -119,6 +122,15 @@
         for k in self.dtype.names:
             self.data[k] = self.handle[k]
 
+    def __del__(self):
+        if self.handle:
+            try:
+                self.handle.close()
+            except AttributeError:
+                pass
+            del self.handle
+            self.handle = None
+
     def __getitem__(self, key):
         mask = None
         kt = type(key)
@@ -188,7 +200,7 @@
 
     """docstring for SDFRead"""
 
-    _eof = 'SDF-EOH'
+    _eof = 'SDF-EO'
     _data_struct = DataStruct
 
     def __init__(self, filename, header=None):
@@ -202,6 +214,7 @@
         self.parse_header()
         self.set_offsets()
         self.load_memmaps()
+        self.get_binary_parameters()
 
     def parse_header(self):
         """docstring for parse_header"""
@@ -232,6 +245,11 @@
             return
 
         spl = lstrip(line.split("="))
+        if len(spl) == 1:
+            # No equals were found.
+            self.parse_C_param(line, ascfile)
+            return
+
         vtype, vname = lstrip(spl[0].split())
         vname = vname.strip("[]")
         vval = spl[-1].strip(";")
@@ -248,9 +266,54 @@
 
         self.parameters[vname] = vval
 
+    def parse_C(self, line, ascfile):
+        vtype, vnames = get_struct_vars(line)
+
+        # Test for array
+        test = line.split("}")
+        if len(test) > 1:
+            num = test[-1].strip("{}}[]")
+            num = num.strip("\;\\\n]")
+            if len(num) > 0:
+                num = int(num)
+            else:
+                num = 1
+        else:
+            num = 1
+        str_types = []
+        comments = []
+        str_lines = []
+        vtype, vnames = get_struct_vars(line)
+        for v in vnames:
+            str_types.append((v, vtype))
+        struct = self._data_struct(str_types, num, self.filename)
+        return struct
+
+    def parse_C_param(self, line, ascfile):
+        self.structs.append(self.parse_C(line, ascfile))
+        return
+
+    def get_binary_parameters(self):
+        for struct in self.structs:
+            for vname, vval in struct.data.iteritems():
+                print 'Searching for parameters:', vname, vval.size
+                if vval.size == 1:
+                    self.parameters[vname] = vval[0]
+                    del struct
+                    param_struct = self.pop(vname)
+                    del param_struct
+
     def parse_struct(self, line, ascfile):
         assert 'struct' in line
+        if "}" in line:
+            # Struct is in a single line
+            line = line.split("{")[-1].strip()
+            print line
+            struct = self.parse_C(line, ascfile)
+            self.structs.append(struct)
+            return
 
+        # Otherwise multi-line struct
         str_types = []
         comments = []
         str_lines = []
@@ -289,7 +352,6 @@
     """docstring for SDFRead"""
 
     _data_struct = HTTPDataStruct
-    _eof = 'SDF-EOH'
 
     def parse_header(self):
         """docstring for parse_header"""


https://bitbucket.org/yt_analysis/yt/commits/ebd95737ede1/
Changeset:   ebd95737ede1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-09 05:57:42
Summary:     Adding ptype_dn
Affected #:  1 file

diff -r 5234dba63f938e422f911e5b8666394d29bd05ff -r ebd95737ede1727053e2e91ae3768c94b8ce2301 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -72,6 +72,7 @@
 
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
     orig = set(registry.keys())
+    ptype_dn = ptype.replace("_","\/").title()
     def particle_count(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
@@ -81,7 +82,7 @@
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype)
+             display_name = "\\mathrm{%s Count}" % ptype_dn)
 
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
@@ -92,7 +93,7 @@
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
+             display_name = "\\mathrm{%s Mass}" % ptype_dn,
              units = "g")
              
     def particle_density(field, data):
@@ -108,7 +109,7 @@
     registry.add_field(("deposit", "%s_density" % ptype),
              function = particle_density,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % ptype,
+             display_name = "\\mathrm{%s Density}" % ptype_dn,
              units = "g/cm**3")
 
     def particle_cic(field, data):
@@ -121,7 +122,7 @@
     registry.add_field(("deposit", "%s_cic" % ptype),
              function = particle_cic,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
              units = "g/cm**3")
 
     # Now some translation functions.
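
The new ptype_dn is only a prettified LaTeX display name derived from the raw particle type string; a quick sketch with a hypothetical particle type:

    ptype = "dark_matter"                      # hypothetical particle type
    ptype_dn = ptype.replace("_", "\/").title()
    print "\\mathrm{%s Density}" % ptype_dn    # \mathrm{Dark\/Matter Density}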


https://bitbucket.org/yt_analysis/yt/commits/97bb830b1295/
Changeset:   97bb830b1295
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-11 17:19:30
Summary:     Updating sdf.py to be more explicit about 32/64 bit ints.
Affected #:  1 file

diff -r ebd95737ede1727053e2e91ae3768c94b8ce2301 -r 97bb830b12950c602489ae45f68a96920b9c4a44 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -370,7 +370,8 @@
         self.set_bounds()
         self._midx_version = self.indexdata.parameters.get('midx_version', 0)
         if self._midx_version >= 1.0:
-            max_key = self.get_key(np.array([2**self.level - 1]*3))
+            max_key = self.get_key(np.array([2**self.level - 1]*3,
+                                            dtype="int64"))
         else:
             max_key = self.indexdata['index'][-1]
         self._max_key = max_key
@@ -456,7 +457,7 @@
     def get_key(self, iarr, level=None):
         if level is None:
             level = self.level
-        i1, i2, i3 = iarr
+        i1, i2, i3 = (v.astype("int64") for v in iarr)
         return self.spread_bits(i1, level) | self.spread_bits(i2, level) << 1 | self.spread_bits(i3, level) << 2
 
     def spread_bitsv(self, ival, level=None):
@@ -464,16 +465,20 @@
             level = self.level
         res = np.zeros_like(ival, dtype='int64')
         for i in range(level):
-            res |= np.bitwise_and((ival>>i), 1)<<(i*3);
+            ares = np.bitwise_and(ival, 1<<i) << (i*2)
+            np.bitwise_or(res, ares, res)
         return res
 
     def get_keyv(self, iarr, level=None):
         if level is None:
             level = self.level
-        i1, i2, i3 = iarr
-        return np.bitwise_or(
-            np.bitwise_or(self.spread_bitsv(i1, level) , self.spread_bitsv(i2, level) << 1 ),
-            self.spread_bitsv(i3, level) << 2)
+        i1, i2, i3 = (v.astype("int64") for v in iarr)
+        i1 = self.spread_bitsv(i1, level)
+        i2 = self.spread_bitsv(i2, level) << 1
+        i3 = self.spread_bitsv(i3, level) << 2
+        np.bitwise_or(i1, i2, i1)
+        np.bitwise_or(i1, i3, i1)
+        return i1
 
     def get_key_slow(self, iarr, level=None):
         if level is None:
@@ -517,8 +522,8 @@
         set of offsets+lengths into the sdf data.
         """
         mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-        ileft = np.array(ileft)
-        iright = np.array(iright)
+        ileft = np.array(ileft, dtype="int64")
+        iright = np.array(iright, dtype="int64")
         for i in range(3):
             left_key = self.get_slice_key(ileft[i], dim=i)
             right_key= self.get_slice_key(iright[i], dim=i)
@@ -546,9 +551,9 @@
                            ileft[0]:iright[0]+1.01]
 
         mask = slice(0, -1, None)
-        X = X[mask, mask, mask].astype('int32').ravel()
-        Y = Y[mask, mask, mask].astype('int32').ravel()
-        Z = Z[mask, mask, mask].astype('int32').ravel()
+        X = X[mask, mask, mask].astype('int64').ravel()
+        Y = Y[mask, mask, mask].astype('int64').ravel()
+        Z = Z[mask, mask, mask].astype('int64').ravel()
 
         if self.wandering_particles:
             # Need to get padded bbox around the border to catch
@@ -581,7 +586,11 @@
             indices = indices[indices < self._max_key]
             #indices = indices[self.indexdata['len'][indices] > 0]
             # Faster for sparse lookups. Need better heuristic.
-            indices = indices[np.array([(self.indexdata['len'][ind] > 0) for ind in indices])]
+            new_indices = []
+            for ind in indices:
+                if self.indexdata['len'][ind] > 0:
+                    new_indices.append(ind)
+            indices = np.array(new_indices, dtype="int64")
 
         #indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
         # Here we sort the indices to batch consecutive reads together.
@@ -888,7 +897,8 @@
     def find_max_cell_center(self):
         max_cell = self.find_max_cell()
         cell_ijk = np.array(
-            self.get_ind_from_key(self.indexdata['index'][max_cell]))
+            self.get_ind_from_key(self.indexdata['index'][max_cell]),
+            dtype="int64")
         position = (cell_ijk + 0.5) * (self.domain_width / self.domain_dims) +\
                 self.rmin
         return position
@@ -910,7 +920,7 @@
             cell_data: dict
                 Dictionary of field_name, field_data
         """
-        cell_iarr = np.array(cell_iarr)
+        cell_iarr = np.array(cell_iarr, dtype="int64")
         lk, rk =self.get_key_bounds(level, cell_iarr)
         mylog.debug("Reading contiguous chunk from %i to %i" % (lk, rk))
         return self.get_contiguous_chunk(lk, rk, fields)
@@ -922,7 +932,7 @@
             bbox: array-like, shape (3,2)
 
         """
-        cell_iarr = np.array(cell_iarr)
+        cell_iarr = np.array(cell_iarr, dtype="int64")
         cell_width = self.get_cell_width(level)
         le = self.rmin + cell_iarr*cell_width
         re = le+cell_width
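
For reference, a self-contained sketch of the vectorized Morton-key interleaving this changeset pins to int64 (at level 20, for example, the highest interleaved bit lands at position 3*19 + 2 = 59, beyond what int32 can represent):

    import numpy as np

    def spread_bitsv(ival, level):
        # Place bit i of each value at bit position 3*i, leaving two empty
        # positions between them for the other two coordinates.
        res = np.zeros_like(ival, dtype='int64')
        for i in range(level):
            res |= np.bitwise_and(ival, 1 << i) << (i * 2)
        return res

    def get_keyv(iarr, level):
        i1, i2, i3 = (v.astype('int64') for v in iarr)
        return (spread_bitsv(i1, level)
                | spread_bitsv(i2, level) << 1
                | spread_bitsv(i3, level) << 2)

    ijk = np.array([[1], [2], [3]], dtype='int64')
    print get_keyv(ijk, 2)   # [53] -- bits of x, y, z interleaved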


https://bitbucket.org/yt_analysis/yt/commits/8d5437e60d1f/
Changeset:   8d5437e60d1f
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-12 18:51:05
Summary:     Adding install_requires of thingking.
Affected #:  1 file

diff -r 97bb830b12950c602489ae45f68a96920b9c4a44 -r 8d5437e60d1f75173c96a55c6819ea032ee8a9f6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -279,6 +279,7 @@
         data_files=REASON_FILES,
         cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
                   'build_src': my_build_src, 'install_data': my_install_data},
+        install_requires=["thingking"],
     )
     return
 


https://bitbucket.org/yt_analysis/yt/commits/e15dca490068/
Changeset:   e15dca490068
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-12 19:07:50
Summary:     Back out changes dealing with reading gadget with an sdf header.
Affected #:  2 files

diff -r 0d6f3ba1be58aa32db62c796e7383012b1258008 -r e15dca490068768f2ca59c0eb137d32ce931b4d9 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -124,11 +124,7 @@
 
 
         if None in (self.domain_left_edge, self.domain_right_edge):
-            if 'R0' in self.parameters:
-                R0 = self.parameters['R0']
-            elif 'BoxSize' in self.parameters:
-                R0 = self.parameters['BoxSize']/2
-
+            R0 = self.parameters['R0']
             if 'offset_center' in self.parameters and self.parameters['offset_center']:
                 self.domain_left_edge = np.array([0, 0, 0])
                 self.domain_right_edge = np.array([
@@ -151,18 +147,14 @@
         self.cosmological_simulation = 1
 
         self.current_redshift = self.parameters.get("redshift", 0.0)
-        try:
-            self.omega_matter = self.parameters["Omega0_m"]
-            if "Omega0_fld" in self.parameters:
-                self.omega_lambda += self.parameters["Omega0_fld"]
-            if "Omega0_r" in self.parameters:
-                # not correct, but most codes can't handle Omega0_r
-                self.omega_matter += self.parameters["Omega0_r"]
-            self.hubble_constant = self.parameters["h_100"]
-        except:
-            self.omega_matter = 1.0
-            self.omega_lambda = 0.0
-            self.hubble_constant = 1.0
+        self.omega_lambda = self.parameters["Omega0_lambda"]
+        self.omega_matter = self.parameters["Omega0_m"]
+        if "Omega0_fld" in self.parameters:
+            self.omega_lambda += self.parameters["Omega0_fld"]
+        if "Omega0_r" in self.parameters:
+            # not correct, but most codes can't handle Omega0_r
+            self.omega_matter += self.parameters["Omega0_r"]
+        self.hubble_constant = self.parameters["h_100"]
         self.current_time = units_2HOT_v2_time * self.parameters.get("tpos", 0.0)
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
         self.filename_template = self.parameter_filename

diff -r 0d6f3ba1be58aa32db62c796e7383012b1258008 -r e15dca490068768f2ca59c0eb137d32ce931b4d9 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -214,7 +214,6 @@
         self.parse_header()
         self.set_offsets()
         self.load_memmaps()
-        self.get_binary_parameters()
 
     def parse_header(self):
         """docstring for parse_header"""
@@ -245,11 +244,6 @@
             return
 
         spl = lstrip(line.split("="))
-        if len(spl) == 1:
-            # No equals were found.
-            self.parse_C_param(line, ascfile)
-            return
-
         vtype, vname = lstrip(spl[0].split())
         vname = vname.strip("[]")
         vval = spl[-1].strip(";")
@@ -266,54 +260,9 @@
 
         self.parameters[vname] = vval
 
-    def parse_C(self, line, ascfile):
-        vtype, vnames = get_struct_vars(line)
-
-        # Test for array
-        test = line.split("}")
-        if len(test) > 1:
-            num = test[-1].strip("{}}[]")
-            num = num.strip("\;\\\n]")
-            if len(num) > 0:
-                num = int(num)
-            else:
-                num = 1
-        else:
-            num = 1
-        str_types = []
-        comments = []
-        str_lines = []
-        vtype, vnames = get_struct_vars(line)
-        for v in vnames:
-            str_types.append((v, vtype))
-        struct = self._data_struct(str_types, num, self.filename)
-        return struct
-
-    def parse_C_param(self, line, ascfile):
-        self.structs.append(self.parse_C(line, ascfile))
-        return
-
-    def get_binary_parameters(self):
-        for struct in self.structs:
-            for vname, vval in struct.data.iteritems():
-                print 'Searching for parameters:', vname, vval.size
-                if vval.size == 1:
-                    self.parameters[vname] = vval[0]
-                    del struct
-                    param_struct = self.pop(vname)
-                    del param_struct
-
     def parse_struct(self, line, ascfile):
         assert 'struct' in line
-        if "}" in line:
-            # Struct is in a single line
-            line = line.split("{")[-1].strip()
-            print line
-            struct = self.parse_C(line, ascfile)
-            self.structs.append(struct)
-            return
 
-        # Otherwise multi-line struct
         str_types = []
         comments = []
         str_lines = []


https://bitbucket.org/yt_analysis/yt/commits/6649d323511a/
Changeset:   6649d323511a
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-12 19:20:16
Summary:     Merging
Affected #:  2 files

diff -r 8d5437e60d1f75173c96a55c6819ea032ee8a9f6 -r 6649d323511aebaf9a26535c96935fb14315a0f7 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -106,10 +106,10 @@
 
     def _parse_parameter_file(self):
         if self.parameter_filename.startswith("http"):
-            cls = HTTPSDFRead
+            sdf_class = HTTPSDFRead
         else:
-            cls = SDFRead
-        self.sdf_container = cls(self.parameter_filename,
+            sdf_class = SDFRead
+        self.sdf_container = sdf_class(self.parameter_filename,
                                  header=self.sdf_header)
 
         # Reference
@@ -166,10 +166,10 @@
             if self.midx_filename is not None:
 
                 if 'http' in self.midx_filename:
-                    cls = HTTPSDFRead
+                    sdf_class = HTTPSDFRead
                 else:
-                    cls = SDFRead
-                indexdata = cls(self.midx_filename, header=self.midx_header)
+                    sdf_class = SDFRead
+                indexdata = sdf_class(self.midx_filename, header=self.midx_header)
                 self._midx = SDFIndex(self.sdf_container, indexdata,
                                         level=self.midx_level)
             else:
@@ -190,14 +190,16 @@
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        if args[0].startswith("http"):
+        sdf_header = kwargs.get('sdf_header', args[0])
+        print 'Parsing sdf_header: %s' % sdf_header
+        if sdf_header.startswith("http"):
             if requests is None: return False
-            hreq = requests.get(args[0], stream=True)
+            hreq = requests.get(sdf_header, stream=True)
             if hreq.status_code != 200: return False
             # Grab a whole 4k page.
             line = hreq.iter_content(4096).next()
-        elif os.path.isfile(args[0]): 
-            with open(args[0], "r") as f:
+        elif os.path.isfile(sdf_header):
+            with open(sdf_header, "r") as f:
                 line = f.read(10).strip()
         else:
             return False

diff -r 8d5437e60d1f75173c96a55c6819ea032ee8a9f6 -r 6649d323511aebaf9a26535c96935fb14315a0f7 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -8,11 +8,14 @@
 
 _types = {
     'int': 'int32',
+    'int32_t': 'int32',
+    'uint32_t': 'uint32',
     'int64_t': 'int64',
     'float': 'float32',
     'double': 'float64',
     'unsigned int': 'I',
     'unsigned char': 'B',
+    'char': 'B',
 }
 
 def get_type(vtype, tlen=None):
@@ -119,6 +122,15 @@
         for k in self.dtype.names:
             self.data[k] = self.handle[k]
 
+    def __del__(self):
+        if self.handle:
+            try:
+                self.handle.close()
+            except AttributeError:
+                pass
+            del self.handle
+            self.handle = None
+
     def __getitem__(self, key):
         mask = None
         kt = type(key)
@@ -188,7 +200,7 @@
 
     """docstring for SDFRead"""
 
-    _eof = 'SDF-EOH'
+    _eof = 'SDF-EO'
     _data_struct = DataStruct
 
     def __init__(self, filename, header=None):
@@ -289,7 +301,6 @@
     """docstring for SDFRead"""
 
     _data_struct = HTTPDataStruct
-    _eof = 'SDF-EOH'
 
     def parse_header(self):
         """docstring for parse_header"""


https://bitbucket.org/yt_analysis/yt/commits/042ef7fd843c/
Changeset:   042ef7fd843c
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-12 19:27:10
Summary:     Merging from yt tip, one manual merge in setup.py
Affected #:  61 files

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -7,6 +7,7 @@
 rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -2,15 +2,21 @@
 
 Contributors:   
                 Tom Abel (tabel at stanford.edu)
-                David Collins (dcollins at physics.ucsd.edu)
+                Gabriel Altay (gabriel.altay at gmail.com)
+                Kenza Arraki (karraki at gmail.com)
+                Alex Bogert (fbogert at ucsc.edu)
+                David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
+                Miguel de Val-Borro (miguel.deval at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
                 John Forces (jforbes at ucolick.org)
+                Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Cameron Hummels (chummels at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
@@ -21,18 +27,23 @@
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
                 Chris Moody (cemoody at ucsc.edu)
+                Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
                 Jill Naiman (jnaiman at ucolick.org)
+                Desika Narayanan (dnarayan at haverford.edu)
                 Kaylea Nelson (kaylea.nelson at yale.edu)
                 Jeff Oishi (jsoishi at gmail.com)
+                Brian O'Shea (bwoshea at gmail.com)
                 Jean-Claude Passy (jcpassy at uvic.ca)
+                John Regan (john.regan at helsinki.fi)
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
-                Devin Silvia (devin.silvia at colorado.edu)
+                Pat Shriwise (shriwise at wisc.edu)
+                Devin Silvia (devin.silvia at gmail.com)
                 Sam Skillman (samskillman at gmail.com)
                 Stephen Skory (s at skory.us)
                 Britton Smith (brittonsmith at gmail.com)
@@ -42,8 +53,10 @@
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)
+                Michael S. Warren (mswarren at gmail.com)
                 Andrew Wetzel (andrew.wetzel at yale.edu)
                 John Wise (jwise at physics.gatech.edu)
+                Michael Zingale (michael.zingale at stonybrook.edu)
                 John ZuHone (jzuhone at gmail.com)
 
 Several items included in the yt/extern directory were written by other

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,4 +12,3 @@
 prune tests
 graft yt/gui/reason/html/resources
 exclude clean.sh .hgchurn
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -567,8 +567,10 @@
 
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
-echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-[ ! -e xray_emissivity.h5 ] && get_ytdata xray_emissivity.h5
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  cloudy_emissivity.h5' > cloudy_emissivity.h5.sha512
+[ ! -e cloudy_emissivity.h5 ] && get_ytdata cloudy_emissivity.h5
+echo '0f714ae2eace0141b1381abf1160dc8f8a521335e886f99919caf3beb31df1fe271d67c7b2a804b1467949eb16b0ef87a3d53abad0e8160fccac1e90d8d9e85f  apec_emissivity.h5' > apec_emissivity.h5.sha512
+[ ! -e apec_emissivity.h5 ] && get_ytdata apec_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,6 +1,11 @@
 Constructing Mock X-ray Observations
 ------------------------------------
 
+.. note::
+
+  If you just want to create derived fields for X-ray emission,
+  you should go `here <xray_emission_fields.html>`_ instead.
+
 The ``photon_simulator`` analysis module enables the creation of
 simulated X-ray photon lists of events from datasets that ``yt`` is able
 to read. The simulated events then can be exported to X-ray telescope

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/analyzing/analysis_modules/running_halofinder.rst
--- a/doc/source/analyzing/analysis_modules/running_halofinder.rst
+++ b/doc/source/analyzing/analysis_modules/running_halofinder.rst
@@ -300,40 +300,11 @@
 Therefore Parallel HOP is not a direct substitution for
 normal HOP, but is very similar.
 
-.. _fkd_setup:
-
-Fortran kD Tree Setup
-^^^^^^^^^^^^^^^^^^^^^
-
-Parallel HOP will not build automatically with yt. Please follow the instructions
-below in order to setup Parallel HOP.
-
-  #. Download `Forthon <http://hifweb.lbl.gov/Forthon/>`_. Extract the files
-     (e.g. tar -zxvf Forthon.tgz) and cd into the new Forthon directory. 
-     Making sure you're using the same version of python you use with yt, invoke
-     ``python setup.py install``.
-  #. Change directory to your yt source. Starting from the top level, cd into
-     ``yt/utilities/kdtree``.
-  #. Inside that directory, you should see these files:
-  
-     .. code-block:: bash
-     
-        % ls
-        Makefile        fKD.f90         fKD_source.f90
-        __init__.py     fKD.v           test.py
-  
-  #. Type ``make``. If that is successful, there should be a file in the
-     directory named ``fKDpy.so``. If there are problems, please contact the
-     `yt-users email list <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_.
-  #. Go to the top level of the yt source directory, which from the ``kdtree``
-     directory is three levels up ``cd ../../..``, and invoke
-     ``python setup.py install``.
-  #. Parallel HOP should now work.
-     
-
 Running Parallel HOP
 ^^^^^^^^^^^^^^^^^^^^
 
+Note: This is probably broken now that the Fortran kdtree has been removed.
+
 In the simplest form, Parallel HOP is run very similarly to the other halo finders.
 In the example below, Parallel HOP will be run on a dataset with all the default
 values. Parallel HOP can be run in serial, but as mentioned above, it is

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/analyzing/analysis_modules/xray_emission_fields.rst
--- a/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
+++ b/doc/source/analyzing/analysis_modules/xray_emission_fields.rst
@@ -2,41 +2,46 @@
 
 X-ray Emission Fields
 =====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
+.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>, John ZuHone <jzuhone at gmail.com>
+
+.. note::
+
+  If you came here trying to figure out how to create simulated X-ray photons and observations,
+  you should go `here <photon_simulator.html>`_ instead.
 
 This functionality provides the ability to create metallicity-dependent 
-X-ray luminosity, emissivity, and photo emissivity fields for a given 
+X-ray luminosity, emissivity, and photon emissivity fields for a given
 photon energy range.  This works by interpolating from emission tables 
-created with the photoionization code, `Cloudy <http://nublado.org/>`_.  
-If you installed yt with the install script, the data should be located in 
-the *data* directory inside the installation directory.  Emission fields can 
-be made for any interval between 0.1 keV and 100 keV.
+created from the photoionization code `Cloudy <http://nublado.org/>`_ or
+the collisional ionization database `AtomDB <http://www.atomdb.org>`_. If
+you installed yt with the install script, these data files should be located in
+the *data* directory inside the installation directory, or can be downloaded
+from `<http://yt-project.org/data>`_. Emission fields can be made for any
+interval between 0.1 keV and 100 keV.
 
 Adding Emission Fields
 ----------------------
 
-Fields can be created for luminosity (erg/s), emissivity (erg/s/cm^3), 
-and photon emissivity (photons/s/cm^3).  The only required arguments are 
-the minimum and maximum energies.
+Fields will be created for luminosity :math:`{\rm (erg~s^{-1})}`, emissivity :math:`{\rm (erg~s^{-1}~cm^{-3})}`,
+and photon emissivity :math:`{\rm (photons~s^{-1}~cm^{-3})}`.  The only required arguments are the
+dataset object, and the minimum and maximum energies of the energy band.
 
 .. code-block:: python
 
-  from yt.mods import *
+  import yt
   from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_luminosity_field, \
-       add_xray_emissivity_field, \
-       add_xray_photon_emissivity_field
+       add_xray_emissivity_field
 
-  add_xray_luminosity_field(0.5, 7)
-  add_xray_emissivity_field(0.5, 7)
-  add_xray_photon_emissivity_field(0.5, 7)
+  xray_fields = add_xray_emissivity_field(0.5, 7.0)
 
 Additional keyword arguments are:
 
- * **filename**  (*string*): Path to data file containing emissivity 
-   values.  If None, a file called xray_emissivity.h5 is used.  This file 
-   contains emissivity tables for primordial elements and for metals at 
-   solar metallicity for the energy range 0.1 to 100 keV.  Default: None.
+ * **filename** (*string*): Path to data file containing emissivity values. If None,
+   a file called "cloudy_emissivity.h5" is used, for photoionized plasmas. A second
+   option, for collisionally ionized plasmas, is in the file "apec_emissivity.h5",
+   available at http://yt-project.org/data. These files contain emissivity tables
+   for primordial elements and for metals at solar metallicity for the energy range
+   0.1 to 100 keV. Default: None.
 
  * **with_metals** (*bool*): If True, use the metallicity field to add the 
    contribution from metals.  If False, only the emission from H/He is 
@@ -46,24 +51,27 @@
    metallicity for the emission from metals.  The *with_metals* keyword 
    must be set to False to use this.  Default: None.
 
-The resulting fields can be used like all normal fields.
+The resulting fields can be used like all normal fields. The function will return the names of
+the created fields in a Python list.
 
-.. python-script::
+.. code-block:: python
 
-  from yt.mods import *
+  import yt
   from yt.analysis_modules.spectral_integrator.api import \
-       add_xray_luminosity_field, \
-       add_xray_emissivity_field, \
-       add_xray_photon_emissivity_field
+       add_xray_emissivity_field
 
-  add_xray_luminosity_field(0.5, 7)
-  add_xray_emissivity_field(0.5, 7)
-  add_xray_photon_emissivity_field(0.5, 7)
+  xray_fields = add_xray_emissivity_field(0.5, 7.0, filename="apec_emissivity.h5")
 
-  pf = load("enzo_tiny_cosmology/DD0046/DD0046")
-  plot = SlicePlot(pf, 'x', 'Xray_Luminosity_0.5_7keV')
+  ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+  plot = yt.SlicePlot(ds, 'x', 'xray_luminosity_0.5_7.0_keV')
   plot.save()
-  plot = ProjectionPlot(pf, 'x', 'Xray_Emissivity_0.5_7keV')
+  plot = yt.ProjectionPlot(ds, 'x', 'xray_emissivity_0.5_7.0_keV')
   plot.save()
-  plot = ProjectionPlot(pf, 'x', 'Xray_Photon_Emissivity_0.5_7keV')
+  plot = yt.ProjectionPlot(ds, 'x', 'xray_photon_emissivity_0.5_7.0_keV')
   plot.save()
+
+.. warning::
+
+  The X-ray fields depend on the number density of hydrogen atoms, in the yt field
+  ``H_number_density``. If this field is not defined (either in the dataset or by the user),
+  the primordial hydrogen mass fraction (X = 0.76) will be used to construct it.
\ No newline at end of file

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:b7541e0167001c6dd74306c8490385ace7bdb0533a829286f0505c0b24c67f16"
+  "signature": "sha256:882b31591c60bfe6ad4cb0f8842953d2e94fb8a12ce742be831a65642eea72c9"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -325,8 +325,7 @@
      "input": [
       "from astropy import units as u\n",
       "x = 42.0 * u.meter\n",
-      "y = YTQuantity(x)\n",
-      "y2 = YTQuantity.from_astropy(x) # Another way to create the quantity"
+      "y = YTQuantity.from_astropy(x) "
      ],
      "language": "python",
      "metadata": {},
@@ -337,8 +336,7 @@
      "collapsed": false,
      "input": [
       "print x, type(x)\n",
-      "print y, type(y)\n",
-      "print y2, type(y2)"
+      "print y, type(y)"
      ],
      "language": "python",
      "metadata": {},
@@ -349,8 +347,7 @@
      "collapsed": false,
      "input": [
       "a = np.random.random(size=10) * u.km/u.s\n",
-      "b = YTArray(a)\n",
-      "b2 = YTArray.from_astropy(a) # Another way to create the quantity"
+      "b = YTArray.from_astropy(a)"
      ],
      "language": "python",
      "metadata": {},
@@ -361,8 +358,7 @@
      "collapsed": false,
      "input": [
       "print a, type(a)\n",
-      "print b, type(b)\n",
-      "print b2, type(b2)"
+      "print b, type(b)"
      ],
      "language": "python",
      "metadata": {},
@@ -438,7 +434,7 @@
      "collapsed": false,
      "input": [
       "k1 = kboltz.to_astropy()\n",
-      "k2 = YTQuantity(kb)\n",
+      "k2 = YTQuantity.from_astropy(kb)\n",
       "print k1 == k2"
      ],
      "language": "python",
@@ -449,7 +445,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "c = YTArray(a)\n",
+      "c = YTArray.from_astropy(a)\n",
       "d = c.to_astropy()\n",
       "print a == d"
      ],

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ /dev/null
@@ -1,20 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-# Load the dataset.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-
-# Create a 15 kpc radius sphere, centered on the center of the sim volume
-sp = ds.sphere("center", (15.0, "kpc"))
-
-# Get the angular momentum vector for the sphere.
-L = sp.quantities.angular_momentum_vector()
-
-print "Angular momentum vector: {0}".format(L)
-
-# Create an OffAxisSlicePlot of density centered on the object with the L 
-# vector as its normal and a width of 25 kpc on a side
-p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
-p.save()

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 import numpy as np
 

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -81,8 +81,7 @@
      "collapsed": false,
      "input": [
       "from yt.frontends.fits.misc import PlotWindowWCS\n",
-      "wcs_slc = PlotWindowWCS(slc)\n",
-      "wcs_slc[\"intensity\"]"
+      "PlotWindowWCS(slc)\n"
      ],
      "language": "python",
      "metadata": {},
@@ -99,7 +98,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "wcs_slc.save()"
+      "slc.save()"
      ],
      "language": "python",
      "metadata": {},
@@ -463,4 +462,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/halo_plotting.py
--- a/doc/source/cookbook/halo_plotting.py
+++ b/doc/source/cookbook/halo_plotting.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
 

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/rad_velocity.py
--- a/doc/source/cookbook/rad_velocity.py
+++ b/doc/source/cookbook/rad_velocity.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 import matplotlib.pyplot as plt
 

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/radial_profile_styles.py
--- a/doc/source/cookbook/radial_profile_styles.py
+++ b/doc/source/cookbook/radial_profile_styles.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 import matplotlib.pyplot as plt
 

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/simple_off_axis_projection.py
--- a/doc/source/cookbook/simple_off_axis_projection.py
+++ b/doc/source/cookbook/simple_off_axis_projection.py
@@ -3,9 +3,7 @@
 # Load the dataset.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
+# Create a 15 kpc radius sphere, centered on the center of the sim volume
 sp = ds.sphere("center", (15.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
@@ -13,6 +11,7 @@
 
 print "Angular momentum vector: {0}".format(L)
 
-# Create an OffAxisSlicePlot on the object with the L vector as its normal
+# Create an OffAxisProjectionPlot of density centered on the object with the L 
+# vector as its normal and a width of 25 kpc on a side
 p = yt.OffAxisProjectionPlot(ds, L, "density", sp.center, (25, "kpc"))
 p.save()

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/simple_off_axis_slice.py
--- /dev/null
+++ b/doc/source/cookbook/simple_off_axis_slice.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+# Create a 15 kpc radius sphere, centered on the center of the sim volume
+sp = ds.sphere("center", (15.0, "kpc"))
+
+# Get the angular momentum vector for the sphere.
+L = sp.quantities.angular_momentum_vector()
+
+print "Angular momentum vector: {0}".format(L)
+
+# Create an OffAxisSlicePlot of density centered on the object with the L 
+# vector as its normal and a width of 25 kpc on a side
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (25, "kpc"))
+p.save()

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -49,8 +49,7 @@
 Simple Radial Profiles
 ~~~~~~~~~~~~~~~~~~~~~~
 
-This shows how to make a profile of a quantity with respect to the radius, in
-this case the radius in Mpc.
+This shows how to make a profile of a quantity with respect to the radius.
 
 .. yt_cookbook:: simple_radial_profile.py
 
@@ -87,17 +86,17 @@
 Off-Axis Slicing
 ~~~~~~~~~~~~~~~~
 
-A cutting plane allows you to slice at some angle that isn't aligned with the
-axes.
+One can create slices from any arbitrary angle, not just those aligned with
+the x,y,z axes.
 
-.. yt_cookbook:: aligned_cutting_plane.py
+.. yt_cookbook:: simple_off_axis_slice.py
 
 .. _cookbook-simple-off-axis-projection:
 
 Off-Axis Projection
 ~~~~~~~~~~~~~~~~~~~
 
-Like cutting planes, off-axis projections can be created from any arbitrary 
+Like off-axis slices, off-axis projections can be created from any arbitrary 
 viewing angle.
 
 .. yt_cookbook:: simple_off_axis_projection.py
@@ -119,6 +118,8 @@
 
 .. yt_cookbook:: show_hide_axes_colorbar.py
 
+.. _matplotlib-primitives:
+
 Accessing and Modifying Plots Directly
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/simple_slice_with_multiple_fields.py
--- a/doc/source/cookbook/simple_slice_with_multiple_fields.py
+++ b/doc/source/cookbook/simple_slice_with_multiple_fields.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 
 # Load the dataset

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/cookbook/time_series_profiles.py
--- a/doc/source/cookbook/time_series_profiles.py
+++ b/doc/source/cookbook/time_series_profiles.py
@@ -1,6 +1,3 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import yt
 
 # Create a time-series object.

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -15,6 +15,7 @@
    ~yt.visualization.plot_window.OffAxisSlicePlot
    ~yt.visualization.plot_window.ProjectionPlot
    ~yt.visualization.plot_window.OffAxisProjectionPlot
+   ~yt.visualization.plot_window.WindowPlotMPL
 
 ProfilePlot and PhasePlot
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -24,6 +25,7 @@
 
    ~yt.visualization.profile_plotter.ProfilePlot
    ~yt.visualization.profile_plotter.PhasePlot
+   ~yt.visualization.profile_plotter.PhasePlotMPL
 
 Fixed Resolution Pixelization
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -550,8 +552,6 @@
    ~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum
    ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.EmissivityIntegrator
    ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_emissivity_field
-   ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_luminosity_field
-   ~yt.analysis_modules.spectral_integrator.spectral_frequency_integrator.add_xray_photon_emissivity_field
 
 Absorption spectra fitting:
 

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -1,370 +1,445 @@
+Arrow callback
+~~~~~~~~~~~~~~
+
 .. function:: annotate_arrow(self, pos, code_size, plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.ArrowCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.ArrowCallback`.)
 
-   This adds an arrow pointing at *pos* with size
-   *code_size* in code units.  *plot_args* is a dict fed to
+   This adds an arrow pointing at ``pos`` with size
+   ``code_size`` in code units.  ``plot_args`` is a dict fed to
    matplotlib with arrow properties.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'), center='max')
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(10,'kpc'), center='c')
    slc.annotate_arrow((0.5, 0.5, 0.5), (1, 'kpc'))
    slc.save()
 
--------------
+Clump Finder Callback
+~~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_clumps(self, clumps, plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.ClumpContourCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.ClumpContourCallback`.)
 
-   Take a list of *clumps* and plot them as a set of
+   Take a list of ``clumps`` and plot them as a set of
    contours.
 
 .. python-script::
 
-   from yt.mods import *
-   from yt.analysis_modules.level_sets.api import *
+   import yt
+   import numpy as np
+   from yt.analysis_modules.level_sets.api import \
+       Clump, find_clumps, get_lowest_clumps
 
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                           (8., 'kpc'), (1., 'kpc'))
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                         (8., 'kpc'), (1., 'kpc'))
 
    c_min = 10**np.floor(np.log10(data_source['density']).min()  )
    c_max = 10**np.floor(np.log10(data_source['density']).max()+1)
 
-   function = 'self.data[\'Density\'].size > 20'
+   function = 'self.data[\'density\'].size > 20'
    master_clump = Clump(data_source, None, 'density', function=function)
    find_clumps(master_clump, c_min, c_max, 2.0)
    leaf_clumps = get_lowest_clumps(master_clump)
 
-   prj = ProjectionPlot(pf, 2, 'density', center='c', width=(20,'kpc'))
+   prj = yt.ProjectionPlot(ds, 2, 'density', center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')
 
--------------
+Overplot Contours
+~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None, plot_args=None):
+.. function:: annotate_contour(self, field, ncont=5, factor=4, take_log=False,
+                               clim=None, plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.ContourCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.ContourCallback`.)
 
-   Add contours in *field* to the plot.  *ncont* governs the
-   number of contours generated, *factor* governs the number
-   of points used in the interpolation, *take_log* governs
-   how it is contoured and *clim* gives the (upper, lower)
-   limits for contouring.
+   Add contours in ``field`` to the plot.  ``ncont`` governs the number of
+   contours generated, ``factor`` governs the number of points used in the
+   interpolation, ``take_log`` governs how it is contoured and ``clim`` gives
+   the (upper, lower) limits for contouring.
 
 .. python-script::
-   
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   s = SlicePlot(pf, "x", ["density"], center="max")
+
+   import yt
+   ds = yt.load("Enzo_64/DD0043/data0043")
+   s = yt.SlicePlot(ds, "x", "density", center="max")
    s.annotate_contour("temperature")
    s.save()
 
--------------
+Overplot quivers
+~~~~~~~~~~~~~~~~
+
+Axis-Aligned data sources
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. function:: annotate_quiver(self, field_x, field_y, factor, scale=None,
+                              scale_units=None, normalize=False):
+
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.QuiverCallback`.)
+
+   Adds a 'quiver' plot to any plot, using the ``field_x`` and ``field_y`` from
+   the associated data, skipping every ``factor`` datapoints. ``scale`` is the
+   data units per arrow length unit using ``scale_units`` (see
+   matplotlib.axes.Axes.quiver for more info).
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.ProjectionPlot(ds, 'z', 'density', center=[0.5, 0.5, 0.5],
+                         weight_field='density', width=(20, 'kpc'))
+   p.annotate_quiver('velocity_x', 'velocity_y', 16)
+   p.save()
+
+Off-axis Data Sources
+^^^^^^^^^^^^^^^^^^^^^
 
 .. function:: annotate_cquiver(self, field_x, field_y, factor):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.CuttingQuiverCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.CuttingQuiverCallback`.)
 
-   Get a quiver plot on top of a cutting plane, using
-   *field_x* and *field_y*, skipping every *factor*
-   datapoint in the discretization.
+   Get a quiver plot on top of a cutting plane, using ``field_x`` and
+   ``field_y``, skipping every ``factor`` datapoint in the discretization.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   s = OffAxisSlicePlot(pf, [1,1,0], ["density"], center="c")
+   import yt
+   ds = yt.load("Enzo_64/DD0043/data0043")
+   s = yt.OffAxisSlicePlot(ds, [1,1,0], ["density"], center="c")
    s.annotate_cquiver('cutting_plane_velocity_x', 'cutting_plane_velocity_y', 10)
    s.zoom(1.5)
    s.save()
 
--------------
+Overplot grids
+~~~~~~~~~~~~~~
 
-.. function:: annotate_grids(self, alpha=1.0, min_pix=1, annotate=False, periodic=True):
+.. function:: annotate_grids(self, alpha=1.0, min_pix=1, annotate=False,
+                             periodic=True):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.GridBoundaryCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.GridBoundaryCallback`.)
 
-   Adds grid boundaries to a plot, optionally with
-   *alpha*-blending. Cuttoff for display is at *min_pix*
-   wide. *annotate* puts the grid id in the corner of the
-   grid.  (Not so great in projections...)
+   Adds grid boundaries to a plot, optionally with alpha-blending via the
+   ``alpha`` keyword. Cutoff for display is at ``min_pix`` wide. ``annotate``
+   puts the grid id in the corner of the grid.  (Not so great in projections...)
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'), center='max')
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(10,'kpc'), center='max')
    slc.annotate_grids()
    slc.save()
 
--------------
+Overplot Halo Annotations
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_halos(self, halo_catalog, col='white', alpha =1, width= None):
+.. function:: annotate_halos(self, halo_catalog, col='white', alpha =1,
+                             width=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
-   Accepts a :class:`yt.HaloCatalog` *HaloCatalog* and plots 
-   a circle at the location of each halo with the radius of
-   the circle corresponding to the virial radius of the halo.
-   If *width* is set to None (default) all halos are plotted.
-   Otherwise, only halos that fall within a slab with width
-   *width* centered on the center of the plot data. The 
-   color and transparency of the circles can be controlled with
-   *col* and *alpha* respectively.
+   Accepts a :class:`yt.HaloCatalog` and plots a circle at the location of each
+   halo with the radius of the circle corresponding to the virial radius of the
+   halo.  If ``width`` is set to None (default), all halos are plotted.
+   Otherwise, only halos that fall within a slab of width ``width`` centered
+   on the center of the plot data are plotted. The color and transparency of
+   the circles can be controlled with ``col`` and ``alpha`` respectively.
 
 .. python-script::
-   
-   from yt.mods import *
+
+   import yt
    from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
 
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   halos_pf = load('rockstar_halos/halos_0.0.bin')
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
 
-   hc = HaloCatalog(halos_pf=halos_pf)
+   hc = HaloCatalog(halos_pf=halos_ds)
    hc.create()
 
-   prj = ProjectionPlot(data_pf, 'z', 'density')
+   prj = yt.ProjectionPlot(data_ds, 'z', 'density')
    prj.annotate_halos(hc)
    prj.save()
 
--------------
+Overplot a Straight Line
+~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_image_line(self, p1, p2, data_coords=False, plot_args=None):
+.. function:: annotate_image_line(self, p1, p2, data_coords=False,
+                                  plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.ImageLineCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.ImageLineCallback`.)
 
-   Plot from *p1* to *p2* (normalized image plane coordinates) with
-   *plot_args* fed into the plot.
+   Plot from ``p1`` to ``p2`` (normalized image plane coordinates) with
+   ``plot_args`` fed into the plot.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.ProjectionPlot(ds, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_image_line((0.3, 0.4), (0.8, 0.9), plot_args={'linewidth':5})
    p.save()
 
--------------
+Overplot a line plot
+~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_line(self, x, y, plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.LinePlotCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.LinePlotCallback`.)
 
-   Over plot *x* and *y* (in code units) with *plot_args* fed into the plot.
+   Overplot numpy arrays or lists ``x`` and ``y`` (in code units) with
+   ``plot_args`` fed into the plot.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   p.annotate_line([-6, -4, -2, 0, 2, 4, 6], [3.6, 1.6, 0.4, 0, 0.4, 1.6, 3.6], plot_args={'linewidth':5})
+   import yt
+   import numpy as np
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.ProjectionPlot(ds, 'z', 'density', center='m', width=(20, 'kpc'))
+   x = np.array([-6, -4, -2, 0, 2, 4, 6])
+   y = x**2/10
+   p.annotate_line(x, y, plot_args={'linewidth':5})
    p.save()
 
--------------
+Overplot Magnetic Field Quivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_magnetic_field(self, factor=16, scale=None, scale_units=None, normalize=False):
+.. function:: annotate_magnetic_field(self, factor=16, scale=None,
+                                      scale_units=None, normalize=False):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.MagFieldCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.MagFieldCallback`.)
 
-   Adds a 'quiver' plot of magnetic field to the plot,
-   skipping all but every *factor* datapoint. *scale* is the
-   data units per arrow length unit using *scale_units* (see
-   matplotlib.axes.Axes.quiver for more info). if
-   *normalize* is True, the magnetic fields will be scaled
-   by their local (in-plane) length, allowing morphological
-   features to be more clearly seen for fields with
-   substantial variation in field strength.
+   Adds a 'quiver' plot of magnetic field to the plot, skipping all but every
+   ``factor`` datapoint. ``scale`` is the data units per arrow length unit using
+   ``scale_units`` (see matplotlib.axes.Axes.quiver for more info).  If
+   ``normalize`` is ``True``, the magnetic fields will be scaled by their local
+   (in-plane) length, allowing morphological features to be more clearly seen
+   for fields with substantial variation in field strength.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("MHDSloshing/virgo_low_res.0054.vtk",
-             parameters={"TimeUnits":3.1557e13, "LengthUnits":3.0856e24,
-                         "DensityUnits":6.770424595218825e-27})
-   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(300, 'kpc'))
+   import yt
+   ds = yt.load("MHDSloshing/virgo_low_res.0054.vtk",
+                parameters={"time_unit":(1, 'Myr'), "length_unit":(1, 'Mpc'),
+                            "mass_unit":(1e17, 'Msun')})
+   p = yt.ProjectionPlot(ds, 'z', 'density', center='c', width=(300, 'kpc'))
    p.annotate_magnetic_field()
    p.save()
 
--------------
+Annotate a Point With a Marker
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_marker(self, pos, marker='x', plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.MarkerAnnotateCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.MarkerAnnotateCallback`.)
 
-   Adds text *marker* at *pos* in code coordinates.
-   *plot_args* is a dict that will be forwarded to the plot
+   Adds ``marker`` at ``pos`` in code coordinates.
+   ``plot_args`` is a dict that will be forwarded to the plot
    command.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   s = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = yt.SlicePlot(ds, 'z', 'density', center='c', width=(10, 'kpc'))
    s.annotate_marker([0.5, 0.5, 0.5], plot_args={'s':10000})
-   s.save()   
+   s.save()
 
--------------
+Overplotting Particle Positions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_particles(self, width, p_size=1.0, col='k', marker='o', stride=1.0, ptype=None, stars_only=False, dm_only=False, minimum_mass=None):
+.. function:: annotate_particles(self, width, p_size=1.0, col='k', marker='o',
+                                 stride=1.0, ptype=None, stars_only=False,
+                                 dm_only=False, minimum_mass=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.ParticleCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.ParticleCallback`.)
 
-   Adds particle positions, based on a thick slab along
-   *axis* with a *width* along the line of sight.  *p_size*
-   controls the number of pixels per particle, and *col*
-   governs the color.  *ptype* will restrict plotted
-   particles to only those that are of a given type.
-   *minimum_mass* will require that the particles be of a
-   given mass, calculated via ParticleMassMsun, to be
-   plotted.
+   Adds particle positions, based on a thick slab along ``axis`` with a
+   ``width`` along the line of sight.  ``p_size`` controls the number of pixels
+   per particle, and ``col`` governs the color.  ``ptype`` will restrict plotted
+   particles to only those that are of a given type.  ``minimum_mass`` will
+   require that the particles be above a given minimum mass (in solar masses)
+   in order to be plotted.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
+   import yt
+   ds = yt.load("Enzo_64/DD0043/data0043")
+   p = yt.ProjectionPlot(ds, "x", "density", center='m', width=(10, 'Mpc'))
    p.annotate_particles((10, 'Mpc'))
    p.save()
 
--------------
+Annotate a point with text
+~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_point(self, pos, text, text_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.PointAnnotateCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.PointAnnotateCallback`.)
 
-   This adds *text* at position *pos*, where *pos* is in
-   code-space. *text_args* is a dict fed to the text
-   placement code.
+   This adds ``text`` at position ``pos``, where ``pos`` is in
+   code-space. ``text_args`` is a dict fed to the text placement code.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   p.annotate_point([0.5, 0.496, 0.5], "What's going on here?", text_args={'size':'xx-large', 'color':'w'})
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.ProjectionPlot(ds, 'z', 'density', center='m', width=(10, 'kpc'))
+   p.annotate_point([0.5, 0.496, 0.5], "What's going on here?",
+                    text_args={'size':'xx-large', 'color':'w'})
    p.save()
 
--------------
+Overplot a circle on a plot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_quiver(self, field_x, field_y, factor, scale=None, scale_units=None, normalize=False):
+.. function:: annotate_sphere(self, center, radius, circle_args=None, text=None,
+                              text_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.QuiverCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.SphereCallback`.)
 
-   Adds a 'quiver' plot to any plot, using the *field_x* and
-   *field_y* from the associated data, skipping every
-   *factor* datapoints *scale* is the data units per arrow
-   length unit using *scale_units*  (see
-   matplotlib.axes.Axes.quiver for more info)
+   A sphere centered at ``center`` in code units with radius ``radius`` in code
+   units will be created, with optional ``circle_args``, ``text``, and
+   ``text_args``.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.5, 0.5, 0.5], 
-                      weight_field='density', width=(20, 'kpc'))
-   p.annotate_quiver('velocity_x', 'velocity_y', 16)
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.ProjectionPlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+   p.annotate_sphere([0.5, 0.5, 0.5], (2, 'kpc'), {'fill':True})
    p.save()
 
--------------
+Overplot streamlines
+~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_sphere(self, center, radius, circle_args=None, text=None, text_args=None):
+.. function:: annotate_streamlines(self, field_x, field_y, factor=6.0, nx=16,
+                                   ny=16, xstart=(0, 1), ystart=(0, 1),
+                                   nsample=256, start_at_xedge=False,
+                                   start_at_yedge=False, plot_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.SphereCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.StreamlineCallback`.)
 
-   A sphere centered at *center* in code units with radius
-   *radius* in code units will be created, with optional
-   *circle_args*, *text*, and *text_args*.
+   Add streamlines to any plot, using the ``field_x`` and ``field_y`` from the
+   associated data, using ``nx`` and ``ny`` starting points that are bounded by
+   ``xstart`` and ``ystart``.  To begin streamlines from the left edge of the
+   plot, set ``start_at_xedge`` to ``True``; for the bottom edge, use
+   ``start_at_yedge``.  A line with the qmean vector magnitude will cover
+   1.0/``factor`` of the image.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
-   p.annotate_sphere([0.5, 0.5, 0.5], (2, 'kpc'), {'fill':True})
-   p.save()
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+   s.annotate_streamlines('velocity_x', 'velocity_y')
+   s.save()
 
--------------
+Add text
+~~~~~~~~
 
-.. function:: annotate_streamlines(self, field_x, field_y, factor=6.0, nx=16, ny=16, xstart=(0, 1), ystart=(0, 1), nsample=256, start_at_xedge=False, start_at_yedge=False, plot_args=None):
+.. function:: annotate_text(self, pos, text, data_coords=False, text_args=None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.StreamlineCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.TextLabelCallback`.)
 
-   Add streamlines to any plot, using the *field_x* and
-   *field_y* from the associated data, using *nx* and *ny*
-   starting points that are bounded by *xstart* and
-   *ystart*.  To begin streamlines from the left edge of the
-   plot, set *start_at_xedge* to True; for the bottom edge,
-   use *start_at_yedge*.  A line with the qmean vector
-   magnitude will cover 1.0/*factor* of the image.
+   Accepts a position in (0..1, 0..1) of the image, some text, and optionally
+   some text arguments.  If ``data_coords`` is ``True``, the position will be
+   in code units instead of image coordinates.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   s = SlicePlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
-   s.annotate_streamlines('velocity_x', 'velocity_y')
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = yt.SlicePlot(ds, 'z', 'density', center='m', width=(10, 'kpc'))
+   s.annotate_text((0.5, 0.5), 'Sample text',
+                   text_args={'size':'xx-large', 'color':'w'})
    s.save()
 
--------------
+Add a title to the plot
+~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_text(self, pos, text, data_coords=False, text_args=None):
+.. function:: annotate_title(self, title='Plot'):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.TextLabelCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.TitleCallback`.)
 
-   Accepts a position in (0..1, 0..1) of the image, some
-   text and optionally some text arguments. If data_coords
-   is True, position will be in code units instead of image
-   coordinates.
+   Accepts a ``title`` and adds it to the plot.
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   s = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   s.annotate_text((0.5, 0.5), 'Sample text', text_args={'size':'xx-large', 'color':'w'})
-   s.save()
-
--------------
-
-.. function:: annotate_title(self, title='Plot'):
-
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.TitleCallback`.)
-
-   Accepts a *title* and adds it to the plot
-
-.. python-script::
-
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
-   p.annotate_title('Density plot')
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.ProjectionPlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+   p.annotate_title('Density Plot')
    p.save()
 
--------------
+Overplot quivers for the velocity field
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_velocity(self, factor=16, scale=None, scale_units=None, normalize=False):
+.. function:: annotate_velocity(self, factor=16, scale=None, scale_units=None,
+                                normalize=False):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.VelocityCallback`.)
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.VelocityCallback`.)
 
-   Adds a 'quiver' plot of velocity to the plot, skipping
-   all but every *factor* datapoint. *scale* is the data
-   units per arrow length unit using *scale_units* (see
-   matplotlib.axes.Axes.quiver for more info). if
-   *normalize* is True, the velocity fields will be scaled
-   by their local (in-plane) length, allowing morphological
-   features to be more clearly seen for fields with
-   substantial variation in field strength (normalize is not
+   Adds a 'quiver' plot of velocity to the plot, skipping all but every
+   ``factor`` datapoint. ``scale`` is the data units per arrow length unit using
+   ``scale_units`` (see matplotlib.axes.Axes.quiver for more info).  If
+   ``normalize`` is ``True``, the velocity fields will be scaled by their local
+   (in-plane) length, allowing morphological features to be more clearly seen
+   for fields with substantial variation in field strength (normalize is not
    implemented and thus ignored for Cutting Planes).
 
 .. python-script::
 
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   p = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.SlicePlot(ds, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_velocity()
    p.save()
+
+Add a Timestamp Inset Box
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. function:: annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                                 normalized=False, bbox_dict=None, **kwargs)
+
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.TimestampCallback`.)
+
+   Adds the current time to the plot at the point given by ``x`` and ``y``.  If
+   ``units`` is given ('s', 'ms', 'ns', etc), the time will be converted to
+   that basis.  If ``units`` is None, an appropriate scaling will be chosen
+   automatically.  The ``format`` keyword is a template string that will be
+   evaluated and displayed on the plot.  If ``normalized`` is ``True``, ``x``
+   and ``y`` are interpreted as normalized plot coordinates (0,0 is lower-left
+   and 1,1 is upper-right); otherwise ``x`` and ``y`` are assumed to be in plot
+   coordinates.  ``bbox_dict`` is an optional dict of arguments for the bbox
+   that frames the timestamp; see matplotlib's text annotation guide for more
+   details.  All other ``kwargs`` will be passed to the text() method on the
+   plot axes.  See matplotlib's text() functions for more information.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+   p.annotate_timestamp()
+   p.save()
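
The callbacks above can also be chained on a single plot object.  A minimal
sketch combining a few of the annotations documented in this file (assuming
the same IsolatedGalaxy sample dataset used in the examples):

   import yt
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   slc = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
   # Overlay grid boundaries, a velocity quiver field, and a title on one plot.
   slc.annotate_grids()
   slc.annotate_velocity()
   slc.annotate_title('Annotated Density Slice')
   slc.save('annotated')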

diff -r 6649d323511aebaf9a26535c96935fb14315a0f7 -r 042ef7fd843c29d73c353afb39a3bd1f30092033 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -35,11 +35,12 @@
 .. python-script::
    
    import pylab as P
-   from yt.mods import *
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   import numpy as np
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-   c = pf.h.find_max('density')[1]
-   proj = pf.proj('density', 0)
+   c = ds.find_max('density')[1]
+   proj = ds.proj('density', 0)
 
    width = (10, 'kpc') # we want a 1.5 mpc view
    res = [1000, 1000] # create an image with 1000x1000 pixels
@@ -64,22 +65,33 @@
 Line Plots
 ----------
 
-This is perhaps the simplest thing to do. ``yt`` provides a number of one dimensional objects, and these return a 1-D numpy array of their contents with direct dictionary access. As a simple example, take a :class:`~yt.data_objects.data_containers.AMROrthoRayBase` object, which can be created from a index by calling ``pf.ortho_ray(axis, center)``. 
+This is perhaps the simplest thing to do. ``yt`` provides a number of one
+dimensional objects, and these return a 1-D numpy array of their contents with
+direct dictionary access. As a simple example, take a
+:class:`~yt.data_objects.data_containers.AMROrthoRayBase` object, which can be
+created from an index by calling ``ds.ortho_ray(axis, center)``.
 
 .. python-script::
 
-   from yt.mods import *
+   import yt
+   import numpy as np
    import pylab as P
-   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   c = pf.h.find_max("density")[1]
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   c = ds.find_max("density")[1]
    ax = 0 # take a line cut along the x axis
-   ray = pf.ortho_ray(ax, (c[1], c[2])) # cutting through the y0,z0 such that we hit the max density
+
+   # cutting through the y0,z0 such that we hit the max density
+   ray = ds.ortho_ray(ax, (c[1], c[2]))
+
+   # Sort the ray values by 'x' so there are no discontinuities
+   # in the line plot
+   srt = np.argsort(ray['x'])
 
    P.subplot(211)
-   P.semilogy(np.array(ray['x']), np.array(ray['density']))
+   P.semilogy(np.array(ray['x'][srt]), np.array(ray['density'][srt]))
    P.ylabel('density')
    P.subplot(212)
-   P.semilogy(np.array(ray['x']), np.array(ray['temperature']))
+   P.semilogy(np.array(ray['x'][srt]), np.array(ray['temperature'][srt]))
    P.xlabel('x')
    P.ylabel('temperature')
 

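The first hunk above sets up a 1000x1000 pixel image of a projection; a
minimal sketch (not part of the diff) of one way to turn such a projection
into a fixed-resolution image with ``to_frb``, assuming the same dataset:

   import numpy as np
   import pylab as P
   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   c = ds.find_max('density')[1]
   proj = ds.proj('density', 0)

   # Pixelize the projection onto a fixed-resolution buffer centered on the
   # density maximum, then display it with pylab.
   frb = proj.to_frb((10, 'kpc'), [1000, 1000], center=c)
   P.imshow(np.log10(np.array(frb['density'])))
   P.colorbar()
   P.savefig('projection_frb.png')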
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/8be075b216ca/
Changeset:   8be075b216ca
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-15 16:31:29
Summary:     Merging from ytanalysis/yt-3.0
Affected #:  46 files

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -2,15 +2,15 @@
 
 Light Cone Generator
 ====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Light cones are projections made by stacking multiple datasets together to 
-continuously span a given redshift interval.  The width of individual 
-projection slices is adjusted such that each slice has the same angular size.  
-Each projection slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  Since deeper 
-images sample earlier epochs of the simulation, light cones represent the 
-closest thing to synthetic imaging observations.
+Light cones are created by stacking multiple datasets together to 
+continuously span a given redshift interval.  To make a projection of a 
+field through a light cone, the width of individual slices is adjusted 
+such that each slice has the same angular size.  
+Each slice is randomly shifted and projected along a random axis to 
+ensure that the same structures are not sampled multiple times.  A 
+recipe for creating a simple light cone projection can be found in 
+the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
@@ -23,46 +23,41 @@
 Configuring the Light Cone Generator
 ------------------------------------
 
-A recipe for creating a simple light cone projection can be found in the 
-cookbook.  The required arguments to instantiate a ``LightCone`` objects are 
+The required arguments to instantiate a ``LightCone`` object are 
 the path to the simulation parameter file, the simulation type, the nearest 
 redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightCone
+  from yt.analysis_modules.cosmological_observation.api import \
+       LightCone
 
   lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                  'Enzo', 0., 0.1)
 
 The additional keyword arguments are:
 
- * **field_of_view_in_arcminutes** (*float*): The field of view of the image 
-   in units of arcminutes.  Default: 600.0.
-
- * **image_resolution_in_arcseconds** (*float*): The size of each image pixel 
-   in units of arcseconds.  Default: 60.0.
-
- * **use_minimum_datasets** (*bool*):  If True, the minimum number of datasets 
-   is used to connect the initial and final redshift.  If false, the light 
-   cone solution will contain as many entries as possible within the redshift 
-   interval.  Default: True.
+ * **use_minimum_datasets** (*bool*):  If True, the minimum number of 
+   datasets is used to connect the initial and final redshift.  If False, 
+   the light cone solution will contain as many entries as possible within 
+   the redshift interval.  Default: True.
 
  * **deltaz_min** (*float*): Specifies the minimum Delta-z between 
    consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This 
-   was invented to allow light cones with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light cones with 
+   thin slices to sample coherent large scale structure, but in practice does 
+   not work so well.  Try setting this parameter to 1 and see what happens.  
+   Default: 0.0.
 
  * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
-   gathering datasets for time series.  Default: True.
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
    pf.parameters.  Default: None.
@@ -76,10 +71,10 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets and the width, depth, 
-center, and axis of the projection to be made for that slice.  The 
-:meth:`LightCone.calculate_light_cone_solution` function is used to 
-calculate the random shifting and projection axis:
+A light cone solution consists of a list of datasets spanning a redshift 
+interval with a random orientation for each dataset.  A new solution 
+is calculated with the :meth:`LightCone.calculate_light_cone_solution` 
+function:
 
 .. code-block:: python
 
@@ -87,70 +82,39 @@
 
 The keyword argument are:
 
- * **seed** (*int*): the seed for the random number generator.  Any light cone 
-   solution can be reproduced by giving the same random seed.  Default: None 
-   (each solution will be distinct).
+ * **seed** (*int*): the seed for the random number generator.  Any light 
+   cone solution can be reproduced by giving the same random seed.  
+   Default: None.
 
  * **filename** (*str*): if given, a text file detailing the solution will be 
    written out.  Default: None.
 
-If a new solution for the same LightCone object is desired, the 
-:meth:`rerandomize_light_cone_solution` method should be called in place of 
-:meth:`calculate_light_cone_solution`:
-
-.. code-block:: python
-
-  new_seed = 987654321
-  lc.rerandomize_light_cone_solution(new_seed, Recycle=True, 
-                                     filename='new_lightcone.dat')
-
-Additional keyword arguments are:
-
- * **recycle** (*bool*): if True, the new solution will have the same shift in 
-   the line of sight as the original solution.  Since the projections of each 
-   slice are serialized and stored for the entire width of the box (even if 
-   the width used is left than the total box), the projection data can be 
-   deserialized instead of being remade from scratch.  This can greatly speed 
-   up the creation of a large number of light cone projections.  Default: True.
-
- * **filename** (*str*): if given, a text file detailing the solution will be 
-   written out.  Default: None.
-
-If :meth:`rerandomize_light_cone_solution` is used, the LightCone object will 
-keep a copy of the original solution that can be returned to at any time by 
-calling :meth:`restore_master_solution`:
-
-.. code-block:: python
-
-  lc.restore_master_solution()
-
-.. note:: All light cone solutions made with the above method will still use 
-   the same list of datasets.  Only the shifting and projection axis will be 
-   different.
-
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution set, projections can be made of any available 
-field:
+With the light cone solution in place, projections with a given field of 
+view and resolution can be made of any available field:
 
 .. code-block:: python
 
   field = 'density'
-  lc.project_light_cone(field , weight_field=None, 
+  field_of_view = (600.0, "arcmin")
+  resolution = (60.0, "arcsec")
+  lc.project_light_cone(field_of_view, resolution,
+                        field, weight_field=None,
                         save_stack=True, 
                         save_slice_images=True)
 
+The field of view and resolution can be specified either as a tuple of 
+value and unit string or as a unitful ``YTQuantity``.  
 Additional keyword arguments:
 
- * **weight_field** (*str*): the weight field of the projection.  This has the 
-   same meaning as in standard projections.  Default: None.
+ * **weight_field** (*str*): the weight field of the projection.  This has 
+   the same meaning as in standard projections.  Default: None.
 
- * **apply_halo_mask** (*bool*): if True, a boolean mask is apply to the light 
-   cone projection.  See below for a description of halo masks.  Default: False.
-
- * **node** (*str*): a prefix to be prepended to the node name under which the 
-   projection data is serialized.  Default: None.
+ * **photon_field** (*bool*): if True, the projection data for each slice is 
+   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+   distance between the observer and the slice redshift.  Default: False.
 
  * **save_stack** (*bool*): if True, the unflatted light cone data including 
    each individual slice is written to an hdf5 file.  Default: True.
@@ -161,13 +125,7 @@
  * **save_slice_images** (*bool*): save images for each individual projection 
    slice.  Default: False.
 
- * **flatten_stack** (*bool*): if True, the light cone stack is continually 
-   flattened each time a slice is added in order to save memory.  This is 
-   generally not necessary.  Default: False.
-
- * **photon_field** (*bool*): if True, the projection data for each slice is 
-   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
-   distance between the observer and the slice redshift.  Default: False.
+ * **cmap_name** (*string*): color map for images.  Default: "algae".
 
  * **njobs** (*int*): The number of parallel jobs over which the light cone 
    projection will be split.  Choose -1 for one processor per individual
@@ -177,34 +135,4 @@
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Sampling Unique Light Cone Volumes
-----------------------------------
-
-When making a large number of light cones, particularly for statistical 
-analysis, it is important to have a handle on the amount of sampled volume in 
-common from one projection to another.  Any statistics may untrustworthy if a 
-set of light cones have too much volume in common, even if they may all be 
-entirely different in appearance.  LightCone objects have the ability to 
-calculate the volume in common between two solutions with the same dataset 
-ist.  The :meth:`find_unique_solutions` and 
-:meth:`project_unique_light_cones` functions can be used to create a set of 
-light cone solutions that have some maximum volume in common and create light 
-cone projections for those solutions.  If specified, the code will attempt to 
-use recycled solutions that can use the same serialized projection objects 
-that have already been created.  This can greatly increase the speed of making 
-multiple light cone projections.  See the cookbook for an example of doing this.
-
-Making Light Cones with a Halo Mask
------------------------------------
-
-The situation may arise where it is necessary or desirable to know the 
-location of halos within the light cone volume, and specifically their 
-location in the final image.  This can be useful for developing algorithms to 
-find galaxies or clusters in image data.  The light cone generator does this 
-by running the HaloProfiler (see :ref:`halo_profiling`) on each of the 
-datasets used in the light cone and shifting them accordingly with the light 
-cone solution.  The ability also exists to create a boolean mask with the 
-dimensions of the final light cone image that can be used to mask out the 
-halos in the image.  It is left as an exercise to the reader to find a use for 
-this functionality.  This process is somewhat complicated, but not terribly.  
-See the recipe in the cookbook for an example of this functionality.
+.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.
\ No newline at end of file

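The updated text above notes that the field of view and resolution may also be
given as unitful ``YTQuantity`` objects rather than (value, unit string)
tuples.  A minimal sketch of that variant (assuming the same
enzo_tiny_cosmology dataset; the argument values here are illustrative only):

   from yt.units.yt_array import YTQuantity
   from yt.analysis_modules.cosmological_observation.api import \
        LightCone

   lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                  'Enzo', 0., 0.1, time_data=False)
   lc.calculate_light_cone_solution(seed=123456789)

   # Pass the field of view and resolution as YTQuantity objects instead of
   # (value, unit string) tuples.
   lc.project_light_cone(YTQuantity(600.0, "arcmin"),
                         YTQuantity(60.0, "arcsec"),
                         'density', weight_field=None,
                         save_final_image=True)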
diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -1,20 +1,21 @@
 .. _light-ray-generator:
 
 Light Ray Generator
-====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
+===================
 
 Light rays are similar to light cones (:ref:`light-cone-generator`) in how  
 they stack multiple datasets together to span a redshift interval.  Unlike 
-light cones, which which stack randomly oriented projections from each 
+light cones, which stack randomly oriented projections from each 
 dataset to create synthetic images, light rays use thin pencil beams to 
-simulate QSO sight lines.
+simulate QSO sight lines.  A sample script can be found in the cookbook 
+under :ref:`cookbook-light_ray`.
 
 .. image:: _images/lightray.png
 
-A ray segment records the information of all grid cells intersected by the ray 
-as well as the path length, dl, of the ray through the cell.  Column densities 
-can be calculated by multiplying physical densities by the path length.
+A ray segment records the information of all grid cells intersected by the 
+ray as well as the path length, dl, of the ray through the cell.  Column 
+densities can be calculated by multiplying physical densities by the path 
+length.
 
 Configuring the Light Ray Generator
 -----------------------------------
@@ -36,22 +37,22 @@
    ray solution will contain as many entries as possible within the redshift
    interval.  Default: True.
 
- * **deltaz_min** (*float*):  Specifies the minimum Delta-z between consecutive
-   datasets in the returned list.  Default: 0.0.
+ * **deltaz_min** (*float*):  Specifies the minimum Delta-z between 
+   consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This
-   was invented to allow light rays with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light rays with 
+   thin slices to sample coherent large scale structure, but in practice 
+   does not work so well.  Try setting this parameter to 1 and see what 
+   happens.  Default: 0.0.
 
- * **time_data** (*bool*): Whether or not to include time outputs when gathering
-   datasets for time series.  Default: True.
-
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
+ * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
 Making Light Ray Data
 ---------------------
@@ -74,7 +75,21 @@
 
  * **seed** (*int*): Seed for the random number generator.  Default: None.
 
- * **fields** (*list*): A list of fields for which to get data.  Default: None.
+ * **start_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the starting position of the 
+   ray.  Default: None.
+
+ * **end_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the ending position of the ray.
+   Default: None.
+
+ * **trajectory** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The (r, theta, phi) direction of the light ray.  
+   Use either **end_position** or **trajectory**, not both.  
+   Default: None.
+
+ * **fields** (*list*): A list of fields for which to get data.  
+   Default: None.
 
  * **solution_filename** (*string*): Path to a text file where the 
    trajectories of each subray is written out.  Default: None.
@@ -83,51 +98,17 @@
    Default: None.
 
  * **get_los_velocity** (*bool*): If True, the line of sight velocity is 
-   calculated for each point in the ray.  Default: False.
+   calculated for each point in the ray.  Default: True.
 
- * **get_nearest_halo** (*bool*): If True, the HaloProfiler will be used to 
-   calculate the distance and mass of the nearest halo for each point in the
-   ray.  This option requires additional information to be included.  See 
-   the cookbook for an example.  Default: False.
-
- * **nearest_halo_fields** (*list*): A list of fields to be calculated for the 
-   halos nearest to every pixel in the ray.  Default: None.
-
- * **halo_list_file** (*str*): Filename containing a list of halo properties to be used 
-   for getting the nearest halos to absorbers.  Default: None.
-
- * **halo_profiler_parameters** (*dict*): A dictionary of parameters to be 
-   passed to the HaloProfiler to create the appropriate data used to get 
-   properties for the nearest halos.  Default: None.
-
- * **njobs** (*int*): The number of parallel jobs over which the slices for the
-   halo mask will be split.  Choose -1 for one processor per individual slice 
-   and 1 to have all processors work together on each projection.  Default: 1
+ * **njobs** (*int*): The number of parallel jobs over which the slices for 
+   the halo mask will be split.  Choose -1 for one processor per individual 
+   slice and 1 to have all processors work together on each projection.  
+   Default: 1
 
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Getting The Nearest Galaxies
-----------------------------
-
-The light ray tool will use the HaloProfiler to calculate the distance and 
-mass of the nearest halo to that pixel.  In order to do this, a dictionary 
-called halo_profiler_parameters is used to pass instructions to the 
-HaloProfiler.  This dictionary has three additional keywords:
-
- * **halo_profiler_kwargs** (*dict*): A dictionary of standard HaloProfiler 
-   keyword arguments and values to be given to the HaloProfiler.
-
- * **halo_profiler_actions** (*list*): A list of actions to be performed by 
-   the HaloProfiler.  Each item in the list should be a dictionary with the 
-   following entries: "function", "args", and "kwargs", for the function to 
-   be performed, the arguments supplied to that function, and the keyword 
-   arguments.
-
- * **halo_list** (*string*): 'all' to use the full halo list, or 'filtered' 
-   to use the filtered halo list created after calling make_profiles.
-
-See the recipe in the cookbook for am example.
+.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -29,6 +29,8 @@
 
 .. yt_cookbook:: halo_merger_tree.py
 
+.. _cookbook-light_cone:
+
 Light Cone Projection
 ~~~~~~~~~~~~~~~~~~~~~
 This script creates a light cone projection, a synthetic observation 
@@ -37,27 +39,15 @@
 
 .. yt_cookbook:: light_cone_projection.py
 
-Light Cone with Halo Mask
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script combines the light cone generator with the halo profiler to 
-make a light cone projection with all of the halos cut out of the image.
+.. _cookbook-light_ray:
 
-.. yt_cookbook:: light_cone_with_halo_mask.py 
+Light Ray
+~~~~~~~~~
+This script demonstrates how to make a synthetic quasar sight line that 
+extends over multiple datasets and can be used to generate a synthetic 
+absorption spectrum.
 
-Making Unique Light Cones
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a series of light cone projections
-that only have a maximum amount of volume in common.
-
-.. yt_cookbook:: unique_light_cone_projections.py 
-
-Making Light Rays
-~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a synthetic quasar sight line and 
-uses the halo profiler to record information about halos close to the 
-line of sight.
-
-.. yt_cookbook:: make_light_ray.py 
+.. yt_cookbook:: light_ray.py 
 
 Creating and Fitting Absorption Spectra
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,12 +1,8 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+import yt
+from yt.analysis_modules.cosmological_observation.api import \
+     LightCone
 
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Create a LightCone object extending from z = 0 to z = 0.1
-# with a 600 arcminute field of view and a resolution of
-# 60 arcseconds.
+# Create a LightCone object extending from z = 0 to z = 0.1.
 
 # We have already set up the redshift dumps to be
 # used for this, so we will not use any of the time
@@ -14,20 +10,21 @@
 lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                'Enzo', 0., 0.1,
                observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
                time_data=False)
 
 # Calculate a randomization of the solution.
-lc.calculate_light_cone_solution(seed=123456789)
+lc.calculate_light_cone_solution(seed=123456789, filename="LC/solution.txt")
 
 # Choose the field to be projected.
-field = 'SZY'
+field = 'szy'
 
+# Use the LightCone object to make a projection with a 600 arcminute 
+# field of view and a resolution of 60 arcseconds.
 # Set njobs to -1 to have one core work on each projection
-# in parallel.  Set save_slice_images to True to see an
-# image for each individual slice.
-lc.project_light_cone(field, save_stack=False,
+# in parallel.
+lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field,
+                      weight_field=None,
+                      save_stack=True,
                       save_final_image=True,
-                      save_slice_images=False,
+                      save_slice_images=True,
                       njobs=-1)

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/cookbook/light_cone_with_halo_mask.py
--- a/doc/source/cookbook/light_cone_with_halo_mask.py
+++ /dev/null
@@ -1,78 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-
-# Instantiate a light cone object as usual.
-lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
-               'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
-               time_data=False,
-               output_dir='LC_HM', output_prefix='LightCone')
-
-# Calculate the light cone solution.
-lc.calculate_light_cone_solution(seed=123456789,
-                                 filename='LC_HM/lightcone.dat')
-
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir': 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Get the halo list for the active solution of this light cone using
-# the HaloProfiler settings set up above.
-# Write the boolean map to an hdf5 file called 'halo_mask.h5'.
-# Write a text file detailing the location, redshift, radius, and mass
-# of each halo in light cone projection.
-lc.get_halo_mask(mask_file='LC_HM/halo_mask.h5',
-                 map_file='LC_HM/halo_map.out',
-                 cube_file='LC_HM/halo_cube.h5',
-                 virial_overdensity=100,
-                 halo_profiler_parameters=halo_profiler_parameters,
-                 njobs=1, dynamic=False)
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make the light cone projection and apply the halo mask.
-pc = lc.project_light_cone(field, save_stack=False,
-                           save_final_image=True,
-                           save_slice_images=False,
-                           apply_halo_mask=True)

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/cookbook/light_ray.py
--- /dev/null
+++ b/doc/source/cookbook/light_ray.py
@@ -0,0 +1,25 @@
+import os
+import sys
+import yt
+from yt.analysis_modules.cosmological_observation.api import \
+     LightRay
+
+# Create a directory for the light rays
+if not os.path.isdir("LR"): 
+    os.mkdir('LR')
+     
+# Create a LightRay object extending from z = 0 to z = 0.1
+# and use only the redshift dumps.
+lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
+              'Enzo', 0.0, 0.1,
+              use_minimum_datasets=True,
+              time_data=False)
+
+# Make a light ray, and set njobs to -1 to use one core
+# per dataset.
+lr.make_light_ray(seed=123456789,
+                  solution_filename='LR/lightraysolution.txt',
+                  data_filename='LR/lightray.h5',
+                  fields=['temperature', 'density'],
+                  get_los_velocity=True,
+                  njobs=-1)

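Once the recipe above has run, the ray data lands in 'LR/lightray.h5'.  A
small sketch of inspecting that file with h5py (the exact group and field
names stored inside are not spelled out here, so treat this as a way to
discover them rather than a documented layout):

   import h5py

   # List whatever datasets the light ray wrote out, e.g. the requested
   # 'temperature' and 'density' fields plus the ray geometry.
   with h5py.File('LR/lightray.h5', 'r') as f:
       print(list(f.keys()))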
diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ /dev/null
@@ -1,69 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import os
-import sys
-import yt
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
-     LightRay
-
-# Create a directory for the light rays
-if not os.path.isdir("LR"): 
-    os.mkdir('LR')
-     
-# Create a LightRay object extending from z = 0 to z = 0.1
-# and use only the redshift dumps.
-lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
-              'Enzo', 0.0, 0.1,
-              use_minimum_datasets=True,
-              time_data=False)
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir' : 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Make a light ray, and set njobs to -1 to use one core
-# per dataset.
-lr.make_light_ray(seed=123456789,
-                  solution_filename='LR/lightraysolution.txt',
-                  data_filename='LR/lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_nearest_halo=True,
-                  nearest_halo_fields=['TotalMassMsun_100',
-                                       'RadiusMpc_100'],
-                  halo_profiler_parameters=halo_profiler_parameters,
-                  get_los_velocity=True,
-                  njobs=-1)

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/cookbook/unique_light_cone_projections.py
--- a/doc/source/cookbook/unique_light_cone_projections.py
+++ /dev/null
@@ -1,34 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Instantiate a light cone.
-lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", 'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=120.0,
-               image_resolution_in_arcseconds=60.0,
-               use_minimum_datasets=True,
-               time_data=False,
-               output_dir='LC_U', output_prefix='LightCone')
-
-# Try to find 10 solutions that have at most 10% volume in
-# common and give up after 50 consecutive failed attempts.
-# The recycle=True setting tells the code to first attempt
-# to use solutions with the same projection axes as other
-# solutions.  This will save time when making the projection.
-yt.find_unique_solutions(lc, max_overlap=0.10, failures=50,
-                         seed=123456789, recycle=True,
-                         solutions=10, filename='LC_U/unique.dat')
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make light cone projections with each of the random seeds
-# found above.  All output files will be written with unique
-# names based on the random seed numbers.
-yt.project_unique_light_cones(lc, 'LC_U/unique.dat', field,
-                              save_stack=False,
-                              save_final_image=True,
-                              save_slice_images=False)

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -224,6 +224,8 @@
 :class:`~yt.visualization.plot_window.ProjectionPlot` for the full
 class description.
 
+.. _off-axis-projections:
+
 Off Axis Projection Plots
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -772,8 +774,8 @@
    ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art")
    center = ds.arr([64.0, 64.0, 64.0], 'code_length')
    rvir = ds.quan(1e-1, "Mpccm/h")
+   sph = ds.sphere(center, rvir)
 
-   sph = ds.sphere(center, rvir)
    plot = yt.PhasePlot(sph, "density", "temperature", "cell_mass",
                        weight_field=None)
    plot.set_unit('density', 'Msun/pc**3')
@@ -782,6 +784,29 @@
    plot.set_ylim(1,1e7)
    plot.save()
 
+It is also possible to construct a custom 2D profile object and then use the
+``from_profile`` method to create a ``PhasePlot`` using the profile object.
+This will sometimes be faster, especially if you need custom x and y axes
+limits.  The following example illustrates this workflow:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art")
+   center = ds.arr([64.0, 64.0, 64.0], 'code_length')
+   rvir = ds.quan(1e-1, "Mpccm/h")
+   sph = ds.sphere(center, rvir)
+   units = dict(density='Msun/pc**3', cell_mass='Msun')
+   extrema = dict(density=(1e-5, 1e1), temperature=(1, 1e7))
+
+   profile = yt.create_profile(sph, ['density', 'temperature'],
+                               n_bins=[128, 128], fields=['cell_mass'],
+                               weight_field=None, units=units, extrema=extrema)
+
+   plot = yt.PhasePlot.from_profile(profile)
+
+   plot.save()
+
 Probability Distribution Functions and Accumulation
 ---------------------------------------------------
 

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 yt/analysis_modules/cosmological_observation/api.py
--- a/yt/analysis_modules/cosmological_observation/api.py
+++ b/yt/analysis_modules/cosmological_observation/api.py
@@ -17,9 +17,7 @@
     CosmologySplice
 
 from .light_cone.api import \
-    LightCone, \
-    find_unique_solutions, \
-    project_unique_light_cones
+    LightCone
 
 from .light_ray.api import \
     LightRay

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -78,8 +78,9 @@
 
         Examples
         --------
-        >>> cosmo = es.create_cosmology_splice(1.0, 0.0, minimal=True,
-                                               deltaz_min=0.0)
+
+        >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
+        >>> cosmo = co.create_cosmology_splice(1.0, 0.0)
 
         """
 
@@ -133,12 +134,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (np.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.abs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redsfhit.
-                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.abs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -153,20 +154,20 @@
 
                     if current_slice is cosmology_splice[-1]:
                         near_redshift = cosmology_splice[-1]['redshift'] - \
-                          cosmology_splice[-1]['deltazMax']
+                          cosmology_splice[-1]['dz_max']
                         mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                         break
                     else:
                         cosmology_splice.append(current_slice)
 
                 z = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redsfhit.
-            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
-                                                                    obj['redshift']))
+            self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
+                                                           obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
 
@@ -175,14 +176,14 @@
                 if (nextOutput['redshift'] <= near_redshift):
                     break
                 if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) >
-                    cosmology_splice[-1]['deltazMin']):
+                    cosmology_splice[-1]['dz_min']):
                     cosmology_splice.append(nextOutput)
                 nextOutput = nextOutput['next']
             if (cosmology_splice[-1]['redshift'] -
-                cosmology_splice[-1]['deltazMax']) > near_redshift:
+                cosmology_splice[-1]['dz_max']) > near_redshift:
                 mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                 near_redshift = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
@@ -253,7 +254,7 @@
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
-            outputs.append({'redshift': z, 'deltazMax': deltaz_max})
+            outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
         mylog.info("%d data dumps will be needed to get from z = %f to %f." %
@@ -282,28 +283,24 @@
             # at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.1 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
-            output['deltazMax'] = np.fabs(z2 - z)
-
+            output['dz_max'] = np.abs(z2 - z)
+            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
@@ -322,28 +319,24 @@
             # top grid pixel at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.01 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
+            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,24 +350,20 @@
         # box at a given redshift.
         z1 = z
         z2 = z1 - 0.1 # just an initial guess
-        distance1 = 0.0
+        distance1 = self.simulation.quan(0.0, "Mpccm / h")
+        distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1
 
-        # Convert comoving radial distance into Mpc / h,
-        # since that's how box size is stored.
-        distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-          self.cosmology.hubble_constant
-
-        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
-            z2 = ((target_distance - distance2) / m) + z2
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.cosmology.hubble_constant
+            z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration += 1
             if (iteration > max_Iterations):
-                mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, np.fabs(z2 - z)))
+                mylog.error("deltaz_forward: Warning - max iterations " +
+                            "exceeded for z = %f (delta z = %f)." %
+                            (z, np.abs(z2 - z)))
                 break
-        return np.fabs(z2 - z)
+        return np.abs(z2 - z)

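As a point of reference, the secant-style root finding that these hunks switch over to unit-aware quantities can be sketched in isolation as follows. The comoving-distance function below is a toy stand-in (not yt's Cosmology API) and the numbers are made up; it only illustrates the iteration shared by _deltaz_forward and the two _calculate_deltaz_* methods.

    import numpy as np

    def toy_comoving_distance(z_low, z_high, hubble_distance=3000.0):
        # Crude low-redshift stand-in for Cosmology.comoving_radial_distance;
        # in the real code this returns a YTQuantity in "Mpccm / h".
        return hubble_distance * (z_high - z_low)

    def deltaz_forward(z, target_distance, tol=1e-4, max_iter=100):
        """Find dz such that the comoving distance from (z - dz) to z hits target_distance."""
        z1, z2 = z, z - 0.1                  # initial guesses
        d1, d2 = 0.0, toy_comoving_distance(z2, z)
        for _ in range(max_iter):
            if np.abs(d2 - target_distance) / d2 <= tol:
                break
            m = (d2 - d1) / (z2 - z1)        # secant slope
            z1, d1 = z2, d2
            z2 = (target_distance - d2) / m + z2
            d2 = toy_comoving_distance(z2, z)
        return np.abs(z2 - z)

    print(deltaz_forward(1.0, 32.0))         # dz needed to span a toy box size of 32
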
diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 yt/analysis_modules/cosmological_observation/light_cone/api.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/api.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/api.py
@@ -1,5 +1,5 @@
 """
-API for lightcone
+API for light_cone
 
 
 
@@ -15,7 +15,3 @@
 
 from .light_cone import \
     LightCone
-
-from .unique_solution import \
-    project_unique_light_cones, \
-    find_unique_solutions

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Function to calculate volume in common between two n-cubes, with optional
-periodic boundary conditions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-def common_volume(n_cube_1, n_cube_2, periodic=None):
-    "Return the n-volume in common between the two n-cubes."
-
-    # Check for proper args.
-    if ((len(np.shape(n_cube_1)) != 2) or
-        (np.shape(n_cube_1)[1] != 2) or
-        (np.shape(n_cube_1) != np.shape(n_cube_2))):
-        print "Arguments must be 2 (n, 2) numpy array."
-        return 0
-
-    if ((periodic is not None) and
-        (np.shape(n_cube_1) != np.shape(periodic))):
-        print "periodic argument must be (n, 2) numpy array."
-        return 0
-
-    nCommon = 1.0
-    for q in range(np.shape(n_cube_1)[0]):
-        if (periodic is None):
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
-        else:
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q],
-                                      periodic=periodic[q])
-
-    return nCommon
-
-def common_segment(seg1, seg2, periodic=None):
-    "Return the length of the common segment."
-
-    # Check for proper args.
-    if ((len(seg1) != 2) or (len(seg2) != 2)):
-        print "Arguments must be arrays of size 2."
-        return 0
-
-    # If not periodic, then this is very easy.
-    if periodic is None:
-        seg1.sort()
-        len1 = seg1[1] - seg1[0]
-        seg2.sort()
-        len2 = seg2[1] - seg2[0]
-
-        common = 0.0
-
-        add = seg1[1] - seg2[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        add = seg2[1] - seg1[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        common = min(common, len1, len2)
-        return common
-
-    # If periodic, it's a little more complicated.
-    else:
-        if len(periodic) != 2:
-            print "periodic array must be of size 2."
-            return 0
-
-        seg1.sort()
-        flen1 = seg1[1] - seg1[0]
-        len1 = flen1 - int(flen1)
-        seg2.sort()
-        flen2 = seg2[1] - seg2[0]
-        len2 = flen2 - int(flen2)
-
-        periodic.sort()
-        scale = periodic[1] - periodic[0]
-
-        if (abs(int(flen1)-int(flen2)) >= scale):
-            return min(flen1, flen2)
-
-        # Adjust for periodicity
-        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
-        seg1[1] = seg1[0] + len1
-        if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
-        seg2[1] = seg2[0] + len2
-        if (seg2[1] > periodic[1]): seg2[1] -= scale
-
-        # create list of non-periodic segments
-        pseg1 = []
-        if (seg1[0] >= seg1[1]):
-            pseg1.append([seg1[0], periodic[1]])
-            pseg1.append([periodic[0], seg1[1]])
-        else:
-            pseg1.append(seg1)
-        pseg2 = []
-        if (seg2[0] >= seg2[1]):
-            pseg2.append([seg2[0], periodic[1]])
-            pseg2.append([periodic[0], seg2[1]])
-        else:
-            pseg2.append(seg2)
-
-        # Add up common segments.
-        common = min(int(flen1), int(flen2))
-
-        for subseg1 in pseg1:
-            for subseg2 in pseg2:
-                common += common_segment(subseg1, subseg2)
-
-        return common

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Light cone halo mask functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import h5py
-import numpy as np
-
-from yt.funcs import *
-from yt.analysis_modules.halo_profiler.api import \
-     HaloProfiler
-from yt.convenience import load
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_objects, \
-     parallel_root_only
-
-def _light_cone_halo_mask(lightCone, cube_file=None,
-                          mask_file=None, map_file=None,
-                          halo_profiler_parameters=None,
-                          virial_overdensity=200,
-                          njobs=1, dynamic=False):
-    "Make a boolean mask to cut clusters out of light cone projections."
-
-    if halo_profiler_parameters is None:
-        halo_profiler_parameters = {}
-
-    pixels = int(lightCone.field_of_view_in_arcminutes * 60.0 /
-                 lightCone.image_resolution_in_arcseconds)
-
-    # Loop through files in light cone solution and get virial quantities.
-    halo_map_storage = {}
-    for my_storage, my_slice in \
-      parallel_objects(lightCone.light_cone_solution,
-                       njobs=njobs, dynamic=dynamic,
-                       storage=halo_map_storage):
-        halo_list = _get_halo_list(my_slice['filename'],
-                                   **halo_profiler_parameters)
-        my_storage.result = \
-          {'mask': _make_slice_mask(my_slice, halo_list, pixels,
-                                    virial_overdensity)}
-        if map_file is not None:
-            my_storage.result['map'] = \
-              _make_slice_halo_map(my_slice, halo_list,
-                                   virial_overdensity)
-
-    # Reassemble halo mask and map lists.
-    light_cone_mask = []
-    halo_map = []
-    all_slices = halo_map_storage.keys()
-    all_slices.sort()
-    for i in all_slices:
-        light_cone_mask.append(halo_map_storage[i]['mask'])
-        if map_file is not None:
-            halo_map.extend(halo_map_storage[i]['map'])
-    del halo_map_storage
-
-    # Write out cube of masks from each slice.
-    if cube_file is not None:
-        _write_halo_mask(cube_file, np.array(light_cone_mask))
-
-    # Write out a text list of all halos in the image.
-    if map_file is not None:
-        _write_halo_map(map_file, halo_map)
-
-    # Write out final mask.
-    if mask_file is not None:
-        # Final mask is simply the product of the mask from each slice.
-        final_mask = np.ones(shape=(pixels, pixels))
-        for mask in light_cone_mask:
-            final_mask *= mask
-        _write_halo_mask(mask_file, final_mask)
-
-    return light_cone_mask
-
- at parallel_root_only
-def _write_halo_mask(filename, halo_mask):
-    r"""Write out an hdf5 file with the halo mask that
-    can be applied to an image.
-    """
-
-    mylog.info("Saving halo mask to %s." % filename)
-    output = h5py.File(filename, 'a')
-    if 'HaloMask' in output.keys():
-        del output['HaloMask']
-    output.create_dataset('HaloMask', data=np.array(halo_mask))
-    output.close()
-
- at parallel_root_only
-def _write_halo_map(filename, halo_map):
-    "Write a text list of halos in a light cone image."
-
-    mylog.info("Saving halo map to %s." % filename)
-    f = open(filename, 'w')
-    f.write("#z       x         y        r_image   r_mpc     m_Msun\n")
-    for halo in halo_map:
-        f.write("%7.4f %9.6f %9.6f %9.3e %9.3e %9.3e\n" % \
-                    (halo['redshift'], halo['x'], halo['y'],
-                     halo['radius_image'], halo['radius_mpc'],
-                     halo['mass']))
-    f.close()
-
-def _get_halo_list(dataset, halo_profiler_kwargs=None,
-                   halo_profiler_actions=None, halo_list='all'):
-    "Load a list of halos for the dataset."
-
-    if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
-    if halo_profiler_actions is None: halo_profiler_actions = []
-
-    hp = HaloProfiler(dataset, **halo_profiler_kwargs)
-    for action in halo_profiler_actions:
-        if not action.has_key('args'): action['args'] = ()
-        if not action.has_key('kwargs'): action['kwargs'] = {}
-        action['function'](hp, *action['args'], **action['kwargs'])
-
-    if halo_list == 'all':
-        return_list = copy.deepcopy(hp.all_halos)
-    elif halo_list == 'filtered':
-        return_list = copy.deepcopy(hp.filtered_halos)
-    else:
-        mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-        return_list = None
-
-    del hp
-    return return_list
-
-def _make_slice_mask(slice, halo_list, pixels, virial_overdensity):
-    "Make halo mask for one slice in light cone solution."
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Make boolean mask and cut out halos.
-    dx = slice['box_width_fraction'] / pixels
-    x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
-
-    # Cut out any pixel that has any part at all in the circle.
-    for q in range(len(all_halo_radius)):
-        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
-                              np.array(range(pixels))) != 0
-        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
-                              np.array(range(pixels))) != 0
-
-        xDistance = (np.abs(x - all_halo_x[q]) -
-                     (0.5 * dx)) * dif_xIndex
-        yDistance = (np.abs(x - all_halo_y[q]) -
-                     (0.5 * dx)) * dif_yIndex
-
-        distance = np.array([np.sqrt(w**2 + xDistance**2)
-                             for w in yDistance])
-        haloMask *= (distance >= all_halo_radius[q])
-
-    return haloMask
-
-def _make_slice_halo_map(slice, halo_list, virial_overdensity):
-    "Make list of halos for one slice in light cone solution."
-
-    # Get units to convert virial radii back to physical units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Construct list of halos
-    halo_map = []
-
-    for q in range(len(all_halo_x)):
-        # Give radius in both physics units and
-        # units of the image (0 to 1).
-        radius_mpc = all_halo_radius[q] * Mpc_units
-        radius_image = all_halo_radius[q] / slice['box_width_fraction']
-
-        halo_map.append({'x': all_halo_x[q] / slice['box_width_fraction'],
-                         'y': all_halo_y[q] / slice['box_width_fraction'],
-                         'redshift': slice['redshift'],
-                         'radius_mpc': radius_mpc,
-                         'radius_image': radius_image,
-                         'mass': all_halo_mass[q]})
-
-    return halo_map
-
-def _make_slice_halo_list(slice, halo_list, virial_overdensity):
-    "Make shifted, tiled list of halos for halo mask and halo map."
-
-   # Make numpy arrays for halo centers and virial radii.
-    halo_x = []
-    halo_y = []
-    halo_depth = []
-    halo_radius = []
-    halo_mass = []
-
-    # Get units to convert virial radii to code units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    for halo in halo_list:
-        if halo is not None:
-            center = copy.deepcopy(halo['center'])
-            halo_depth.append(center.pop(slice['projection_axis']))
-            halo_x.append(center[0])
-            halo_y.append(center[1])
-            halo_radius.append(halo['RadiusMpc_%d' % virial_overdensity] /
-                               Mpc_units)
-            halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
-
-    halo_x = np.array(halo_x)
-    halo_y = np.array(halo_y)
-    halo_depth = np.array(halo_depth)
-    halo_radius = np.array(halo_radius)
-    halo_mass = np.array(halo_mass)
-
-    # Adjust halo centers along line of sight.
-    depth_center = slice['projection_center'][slice['projection_axis']]
-    depth_left = depth_center - 0.5 * slice['box_depth_fraction']
-    depth_right = depth_center + 0.5 * slice['box_depth_fraction']
-
-    # Make boolean mask to pick out centers in region along line of sight.
-    # Halos near edges may wrap around to other side.
-    add_left = (halo_depth + halo_radius) > 1 # should be box width
-    add_right = (halo_depth - halo_radius) < 0
-
-    halo_depth = np.concatenate([halo_depth,
-                                 (halo_depth[add_left]-1),
-                                 (halo_depth[add_right]+1)])
-    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = np.concatenate([halo_radius,
-                                  halo_radius[add_left],
-                                  halo_radius[add_right]])
-    halo_mass = np.concatenate([halo_mass,
-                                halo_mass[add_left],
-                                halo_mass[add_right]])
-
-    del add_left, add_right
-
-    # Cut out the halos outside the region of interest.
-    if (slice['box_depth_fraction'] < 1):
-        if (depth_left < 0):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right)) | \
-                ((halo_depth + halo_radius >= depth_left + 1) &
-                 (halo_depth - halo_radius <= 1))
-        elif (depth_right > 1):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right - 1)) | \
-                ((halo_depth + halo_radius >= depth_left) &
-                 (halo_depth - halo_radius <= 1))
-        else:
-            mask = (halo_depth + halo_radius >= depth_left) & \
-              (halo_depth - halo_radius <= depth_right)
-
-        halo_x = halo_x[mask]
-        halo_y = halo_y[mask]
-        halo_radius = halo_radius[mask]
-        halo_mass = halo_mass[mask]
-        del mask
-    del halo_depth
-
-    all_halo_x = np.array([])
-    all_halo_y = np.array([])
-    all_halo_radius = np.array([])
-    all_halo_mass = np.array([])
-
-    # Tile halos of width box fraction is greater than one.
-    # Copy original into offset positions to make tiles.
-    for x in range(int(np.ceil(slice['box_width_fraction']))):
-        for y in range(int(np.ceil(slice['box_width_fraction']))):
-            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
-
-    del halo_x, halo_y, halo_radius, halo_mass
-
-    # Shift centers laterally.
-    offset = copy.deepcopy(slice['projection_center'])
-    del offset[slice['projection_axis']]
-
-    # Shift x and y positions.
-    all_halo_x -= offset[0]
-    all_halo_y -= offset[1]
-
-    # Wrap off-edge centers back around to
-    # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
-
-    # After shifting, some centers have fractional coverage
-    # on both sides of the box.
-    # Find those centers and make copies to be placed on the other side.
-
-    # Centers hanging off the right edge.
-    add_x_right = all_halo_x + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
-    add_x_halo_y = all_halo_y[add_x_right]
-    add_x_halo_radius = all_halo_radius[add_x_right]
-    add_x_halo_mass = all_halo_mass[add_x_right]
-    del add_x_right
-
-    # Centers hanging off the left edge.
-    add_x_left = all_halo_x - all_halo_radius < 0
-    add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
-    add2_x_halo_y = all_halo_y[add_x_left]
-    add2_x_halo_radius = all_halo_radius[add_x_left]
-    add2_x_halo_mass = all_halo_mass[add_x_left]
-    del add_x_left
-
-    # Centers hanging off the top edge.
-    add_y_right = all_halo_y + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_y_halo_x = all_halo_x[add_y_right]
-    add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
-    add_y_halo_radius = all_halo_radius[add_y_right]
-    add_y_halo_mass = all_halo_mass[add_y_right]
-    del add_y_right
-
-    # Centers hanging off the bottom edge.
-    add_y_left = all_halo_y - all_halo_radius < 0
-    add2_y_halo_x = all_halo_x[add_y_left]
-    add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
-    add2_y_halo_radius = all_halo_radius[add_y_left]
-    add2_y_halo_mass = all_halo_mass[add_y_left]
-    del add_y_left
-
-    # Add the hanging centers back to the projection data.
-    all_halo_x = np.concatenate([all_halo_x,
-                                 add_x_halo_x, add2_x_halo_x,
-                                 add_y_halo_x, add2_y_halo_x])
-    all_halo_y = np.concatenate([all_halo_y,
-                                 add_x_halo_y, add2_x_halo_y,
-                                 add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = np.concatenate([all_halo_radius,
-                                      add_x_halo_radius,
-                                      add2_x_halo_radius,
-                                      add_y_halo_radius,
-                                      add2_y_halo_radius])
-    all_halo_mass = np.concatenate([all_halo_mass,
-                                    add_x_halo_mass,
-                                    add2_x_halo_mass,
-                                    add_y_halo_mass,
-                                    add2_y_halo_mass])
-
-    del add_x_halo_x, add_x_halo_y, add_x_halo_radius
-    del add2_x_halo_x, add2_x_halo_y, add2_x_halo_radius
-    del add_y_halo_x, add_y_halo_y, add_y_halo_radius
-    del add2_y_halo_x, add2_y_halo_y, add2_y_halo_radius
-
-    # Cut edges to proper width.
-    cut_mask = (all_halo_x - all_halo_radius <
-                slice['box_width_fraction']) & \
-        (all_halo_y - all_halo_radius <
-         slice['box_width_fraction'])
-    all_halo_x = all_halo_x[cut_mask]
-    all_halo_y = all_halo_y[cut_mask]
-    all_halo_radius = all_halo_radius[cut_mask]
-    all_halo_mass = all_halo_mass[cut_mask]
-    del cut_mask
-
-    return (all_halo_x, all_halo_y,
-            all_halo_radius, all_halo_mass)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/6bc4901848e8/
Changeset:   6bc4901848e8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-15 16:33:38
Summary:     Merging once more
Affected #:  2 files

diff -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 -r 6bc4901848e8eb7a79fb77bc47ea1e777548e6ad yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -55,32 +55,37 @@
           my_slice["projection_center"][my_slice["projection_axis"]] \
             + 0.5 * my_slice["box_depth_fraction"]
         if (depthLeft < 0):
-            cut_mask = ("((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | " + \
-              "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))") % \
+            cut_mask = ("((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= 0) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= %f)) | " + \
+              "((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= %f) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= 1))") % \
                 (axis, axis, axis, axis, depthRight, 
                  axis, axis, (depthLeft+1), axis, axis)
         elif (depthRight > 1):
-            cut_mask = ("((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= 0) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= %f)) | " + \
-              "((grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"d%s\"] <= 1))") % \
+            cut_mask = ("((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= 0) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= %f)) | " + \
+              "((obj[\"%s\"] + 0.5*obj[\"d%s\"] >= %f) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"d%s\"] <= 1))") % \
                 (axis, axis, axis, axis, (depthRight-1),
                  axis, axis, depthLeft, axis, axis)
         else:
-            cut_mask = ("(grid[\"%s\"] + 0.5*grid[\"d%s\"] >= %f) & " + \
-              "(grid[\"%s\"] - 0.5*grid[\"%s\"] <= %f)") % \
+            cut_mask = ("(obj[\"%s\"] + 0.5*obj[\"d%s\"] >= %f) & " + \
+              "(obj[\"%s\"] - 0.5*obj[\"%s\"] <= %f)") % \
               (axis, axis, depthLeft, axis, axis, depthRight)
 
         these_field_cuts.append(cut_mask)
 
+    data_source = my_slice["object"].all_data()
+    cut_region = data_source.cut_region(these_field_cuts)
+        
     # Make projection.
     proj = my_slice["object"].proj(field, my_slice["projection_axis"], 
         weight_field, center=region_center,
-        field_parameters=dict(field_cuts=these_field_cuts))
+        data_source=cut_region)
     proj_field = proj.field[0]
 
+    del data_source, cut_region
+    
     # 2. The Tile Problem
     # Tile projection to specified width.
 

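The change above replaces the old field_cuts field parameter with an explicit cut_region passed as the projection's data_source. A minimal sketch of that pattern, assuming a loaded yt-3.0 dataset (the dataset path and slab bounds below are illustrative only):

    import yt

    ds = yt.load("enzo_tiny_cosmology/32Mpc_32.enzo")  # sample dataset used elsewhere in these docs
    ad = ds.all_data()
    # Keep only cells whose z-extent lies inside a slab of the box, mirroring
    # the depth cuts constructed above.
    cuts = ['(obj["z"] + 0.5*obj["dz"] >= 0.25) & (obj["z"] - 0.5*obj["dz"] <= 0.75)']
    slab = ad.cut_region(cuts)

    # Project through the slab only, rather than through the whole domain.
    proj = ds.proj("density", "z", data_source=slab)
    frb = proj.to_frb(ds.domain_width[0], 512)
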
diff -r 8be075b216cabe5bc7b7b04b013e8d7e485c33b8 -r 6bc4901848e8eb7a79fb77bc47ea1e777548e6ad yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -14,6 +14,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.funcs import *
+from yt.units.unit_object import Unit
 from .volume_rendering.api import off_axis_projection
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.misc_utilities import \
@@ -418,7 +419,10 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        units = Unit(ds.pf.field_info[item].units, registry=ds.pf.unit_registry)
+        if ds.weight_field is None:
+            units *= Unit('cm', registry=ds.pf.unit_registry)
+        ia = ImageArray(buff.swapaxes(0,1), input_units=units, info=self._get_info(item))
         self[item] = ia
         return ia 
 


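The extra factor of 'cm' above reflects the fact that an unweighted projection integrates the field along the line of sight, so its units pick up one power of length. A small unit-arithmetic sketch (the array values are made up):

    import numpy as np
    from yt.units.yt_array import YTArray, YTQuantity

    density = YTArray(np.ones((4, 4)), "g/cm**3")   # a toy density buffer
    path_length = YTQuantity(3.0857e21, "cm")       # roughly 1 kpc of depth
    column = density * path_length
    print(column.units)                             # g/cm**2
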
https://bitbucket.org/yt_analysis/yt/commits/899c25a81030/
Changeset:   899c25a81030
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-15 16:35:56
Summary:     Make HTTPArray and PageCacheURL importerror-friendly.
Affected #:  2 files

diff -r 6bc4901848e8eb7a79fb77bc47ea1e777548e6ad -r 899c25a81030450ced8dfad72be218b36411f9a4 setup.py
--- a/setup.py
+++ b/setup.py
@@ -224,7 +224,6 @@
         zip_safe=False,
         data_files=REASON_FILES,
         cmdclass={'build_py': my_build_py, 'build_src': my_build_src},
-        install_requires=["thingking"],
     )
     return
 

diff -r 6bc4901848e8eb7a79fb77bc47ea1e777548e6ad -r 899c25a81030450ced8dfad72be218b36411f9a4 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -2,8 +2,11 @@
 import re
 import os
 import numpy as np
-from thingking.httpmmap import HTTPArray
-from thingking.arbitrary_page import PageCacheURL
+try:
+    from thingking.httpmmap import HTTPArray
+    from thingking.arbitrary_page import PageCacheURL
+except ImportError:
+    HTTPArray = PageCacheURL = None
 from yt.funcs import mylog
 
 _types = {
@@ -176,6 +179,8 @@
 
     def __init__(self, *args, **kwargs):
         super(HTTPDataStruct, self).__init__(*args, **kwargs)
+        if None in (PageCacheURL, HTTPArray):
+            raise ImportError("thingking")
         self.pcu = PageCacheURL(self.filename)
 
     def set_offset(self, offset):
@@ -302,6 +307,11 @@
 
     _data_struct = HTTPDataStruct
 
+    def __init__(self, *args, **kwargs):
+        if None in (PageCacheURL, HTTPArray):
+            raise ImportError("thingking")
+        super(HTTPSDFRead, self).__init__(*args, **kwargs)
+
     def parse_header(self):
         """docstring for parse_header"""
         # Pre-process


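The pattern here is the usual optional-dependency guard: attempt the import at module load, and defer the ImportError until remote access is actually requested. A generic sketch (the helper function below is hypothetical, not part of yt):

    try:
        from thingking.httpmmap import HTTPArray
    except ImportError:
        HTTPArray = None

    def open_remote_array(url):
        # Hypothetical helper: fail only when an HTTP-backed array is actually needed.
        if HTTPArray is None:
            raise ImportError("thingking is required for HTTP-backed SDF access")
        return HTTPArray(url)
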
https://bitbucket.org/yt_analysis/yt/commits/7d6db463bca0/
Changeset:   7d6db463bca0
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-12 23:31:30
Summary:     prefixing internal functions with _, changing SINDEX to MIDX, cleaning up a few bits.
Affected #:  1 file

diff -r 042ef7fd843c29d73c353afb39a3bd1f30092033 -r 7d6db463bca0ad7254e772651516d881f3834020 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -18,7 +18,8 @@
     'char': 'B',
 }
 
-def get_type(vtype, tlen=None):
+
+def _get_type(vtype, tlen=None):
     try:
         t = _types[vtype]
         if tlen is not None:
@@ -29,13 +30,15 @@
         t = eval("np."+vtype)
     return t
 
-def lstrip(text_list):
+
+def _lstrip(text_list):
     return [t.strip() for t in text_list]
 
-def get_struct_vars(line):
-    spl = lstrip(line.split(";"))
-    multiv = lstrip(spl[0].split(","))
-    ret = lstrip(multiv[0].split())
+
+def _get_struct_vars(line):
+    spl = _lstrip(line.split(";"))
+    multiv = _lstrip(spl[0].split(","))
+    ret = _lstrip(multiv[0].split())
     ctype = ret[0]
     vnames = [ret[-1]] + multiv[1:]
     vnames = [v.strip() for v in vnames]
@@ -46,9 +49,10 @@
         if '[' in vnames[0]:
             num = int(vnames[0].split('[')[-1].strip(']'))
             #num = int(re.sub("\D", "", vnames[0]))
-    ctype = get_type(ctype, tlen=num)
+    ctype = _get_type(ctype, tlen=num)
     return ctype, vnames
 
+
 def bbox_filter(left, right, domain_width):
 
     def myfilter(chunk, mask=None):
@@ -89,7 +93,7 @@
 
     return myfilter
 
-def ensure_xzy_fields(fields):
+def _ensure_xyz_fields(fields):
     for f in 'xyz':
         if f not in fields:
             fields.append(f)
@@ -243,8 +247,8 @@
             self.comments.append(line)
             return
 
-        spl = lstrip(line.split("="))
-        vtype, vname = lstrip(spl[0].split())
+        spl = _lstrip(line.split("="))
+        vtype, vname = _lstrip(spl[0].split())
         vname = vname.strip("[]")
         vval = spl[-1].strip(";")
         if vtype == 'parameter':
@@ -268,7 +272,7 @@
         str_lines = []
         l = ascfile.readline()
         while "}" not in l:
-            vtype, vnames = get_struct_vars(l)
+            vtype, vnames = _get_struct_vars(l)
             for v in vnames:
                 str_types.append((v, vtype))
             l = ascfile.readline()
@@ -453,8 +457,8 @@
             self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
             self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
 
-        mylog.debug("SINDEX rmin: %s, rmax: %s" % (self.rmin, self.rmax))
-        mylog.debug("SINDEX: domain_width: %s, domain_dims: %s, domain_active_dims: %s " %
+        mylog.debug("MIDX rmin: %s, rmax: %s" % (self.rmin, self.rmax))
+        mylog.debug("MIDX: domain_width: %s, domain_dims: %s, domain_active_dims: %s " %
                     (self.domain_width, self.domain_dims, self.domain_active_dims))
 
     def spread_bits(self, ival, level=None):
@@ -554,7 +558,7 @@
         #print 'Getting data from ileft to iright:',  ileft, iright
 
         ix, iy, iz = (iright-ileft)*1j
-        mylog.debug('SINDEX IBBOX: %s %s %s %s %s' % (ileft, iright, ix, iy, iz))
+        mylog.debug('MIDX IBBOX: %s %s %s %s %s' % (ileft, iright, ix, iy, iz))
 
         # plus 1 that is sliced, plus a bit since mgrid is not inclusive
         Z, Y, X = np.mgrid[ileft[2]:iright[2]+1.01,
@@ -670,7 +674,7 @@
     def iter_data(self, inds, fields):
         num_inds = len(inds)
         num_reads = 0
-        mylog.debug('SINDEX Reading %i chunks' % num_inds)
+        mylog.debug('MIDX Reading %i chunks' % num_inds)
         i = 0
         while (i < num_inds):
             ind = inds[i]
@@ -799,8 +803,8 @@
                 yield f, data[f][mask]
 
     def iter_bbox_data(self, left, right, fields):
-        ensure_xzy_fields(fields)
-        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
+        _ensure_xyz_fields(fields)
+        mylog.debug('MIDX Loading region from %s to %s' %(left, right))
         inds = self.get_bbox(left, right)
         # Need to put left/right in float32 to avoid fp roundoff errors
         # in the bbox later.
@@ -819,8 +823,8 @@
         #    yield dd
 
     def iter_sphere_data(self, center, radius, fields):
-        ensure_xzy_fields(fields)
-        mylog.debug('SINDEX Loading spherical region %s to %s' %(center, radius))
+        _ensure_xyz_fields(fields)
+        mylog.debug('MIDX Loading spherical region %s to %s' %(center, radius))
         inds = self.get_bbox(center-radius, center+radius)
 
         my_filter = sphere_filter(center, radius, self.true_domain_width)
@@ -831,7 +835,7 @@
             yield dd
 
     def iter_ibbox_data(self, left, right, fields):
-        mylog.debug('SINDEX Loading region from %s to %s' %(left, right))
+        mylog.debug('MIDX Loading region from %s to %s' %(left, right))
         inds = self.get_ibbox(left, right)
         return self.iter_data(inds, fields)
 
@@ -969,7 +973,7 @@
 
         """
 
-        ensure_xzy_fields(fields)
+        _ensure_xyz_fields(fields)
         bbox = self.get_cell_bbox(level, cell_iarr)
         filter_left = bbox[:, 0] - pad
         filter_right = bbox[:, 1] + pad
@@ -1053,7 +1057,7 @@
                                              8.0, ['x','y','z','ident'])
 
         """
-        ensure_xzy_fields(fields)
+        _ensure_xyz_fields(fields)
         bbox = self.get_cell_bbox(level, cell_iarr)
         filter_left = bbox[:, 0] - pad
         filter_right = bbox[:, 1] + pad


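Besides the SINDEX to MIDX renaming, note that ensure_xzy_fields is both renamed and made private as _ensure_xyz_fields; it simply guarantees that particle positions are always read alongside whatever fields the caller asked for. Standalone, the helper behaves like this:

    def _ensure_xyz_fields(fields):
        # Append any missing position fields in place.
        for f in "xyz":
            if f not in fields:
                fields.append(f)

    fields = ["ident", "mass"]
    _ensure_xyz_fields(fields)
    print(fields)   # ['ident', 'mass', 'x', 'y', 'z']
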
https://bitbucket.org/yt_analysis/yt/commits/0f353ef72304/
Changeset:   0f353ef72304
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-13 00:14:14
Summary:     Adding docstrings for SDFRead
Affected #:  1 file

diff -r 7d6db463bca0ad7254e772651516d881f3834020 -r 0f353ef7230400941b5d0a248fa99dd3c3c02d51 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -202,12 +202,45 @@
 
 class SDFRead(dict):
 
-    """docstring for SDFRead"""
-
     _eof = 'SDF-EO'
     _data_struct = DataStruct
 
     def __init__(self, filename, header=None):
+        r""" Read an SDF file, loading parameters and variables.
+
+        Given an SDF file (see http://bitbucket.org/JohnSalmon/sdf), parse the
+        ASCII header and construct numpy memmap array
+        access.
+
+        Parameters
+        ----------
+        filename: string
+        The filename associated with the data to be loaded.
+        header: string, optional
+        If separate from the data file, a file containing the
+        header can be specified. Default: None.
+
+        Returns
+        -------
+        self : SDFRead object
+        Dict-like container of parameters and data.
+
+
+        References
+        ----------
+        SDF is described here:
+
+            J. K. Salmon and M. S. Warren. Self-Describing File (SDF) Library.
+            Zenodo, Jun 2014. URL http://bitbucket.org/JohnSalmon/sdf.
+
+        Examples
+        --------
+
+        >>> sdf = SDFRead("data.sdf", header="data.hdr")
+        >>> print sdf.parameters
+        >>> print sdf['x']
+
+        """
         self.filename = filename
         if header is None:
             header = filename
@@ -219,6 +252,16 @@
         self.set_offsets()
         self.load_memmaps()
 
+    def __repr__(self):
+        disp = "<SDFRead Object> file: %s\n" % self.filename
+        disp += "parameters: \n"
+        for k, v in self.parameters.iteritems():
+            disp += "\t%s: %s\n" % (k, v)
+        disp += "arrays: \n"
+        for k, v in self.iteritems():
+            disp += "\t%s[%s]\n" % (k, v.size)
+        return disp
+
     def parse_header(self):
         """docstring for parse_header"""
         # Pre-process


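Putting the new docstring together, typical usage looks like the sketch below; the file names are the docstring's own examples and assume local SDF data is available:

    from yt.utilities.sdf import SDFRead

    sdf = SDFRead("data.sdf", header="data.hdr")
    print(sdf)               # the new __repr__ lists parameters and array names/sizes
    print(sdf.parameters)    # header parameters as a dict
    print(sdf["x"][:10])     # memmapped particle x positions
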
https://bitbucket.org/yt_analysis/yt/commits/48fce424fc99/
Changeset:   48fce424fc99
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-13 00:23:05
Summary:     Adding docstrings to HTTPSDFRead and load_sdf
Affected #:  1 file

diff -r 0f353ef7230400941b5d0a248fa99dd3c3c02d51 -r 48fce424fc99191b402b7b2faf6df138a4cc18eb yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -345,7 +345,41 @@
 
 class HTTPSDFRead(SDFRead):
 
-    """docstring for SDFRead"""
+    r""" Read an SDF file hosted on the internet.
+
+    Given an SDF file (see http://bitbucket.org/JohnSalmon/sdf), parse the
+    ASCII header and construct numpy memmap array
+    access.
+
+    Parameters
+    ----------
+    filename: string
+    The filename associated with the data to be loaded.
+    header: string, optional
+    If separate from the data file, a file containing the
+    header can be specified. Default: None.
+
+    Returns
+    -------
+    self : SDFRead object
+    Dict-like container of parameters and data.
+
+
+    References
+    ----------
+    SDF is described here:
+
+        J. K. Salmon and M. S. Warren. Self-Describing File (SDF) Library.
+        Zenodo, Jun 2014. URL http://bitbucket.org/JohnSalmon/sdf.
+
+    Examples
+    --------
+
+    >>> sdf = SDFRead("data.sdf", header="data.hdr")
+    >>> print sdf.parameters
+    >>> print sdf['x']
+
+    """
 
     _data_struct = HTTPDataStruct
 
@@ -368,6 +402,41 @@
 
 
 def load_sdf(filename, header=None):
+    r""" Load an SDF file.
+
+    Given an SDF file (see http://bitbucket.org/JohnSalmon/sdf), parse the
+    ASCII header and construct numpy memmap array access. The file can
+    be either local (on a hard drive, for example), or remote (on the World
+    Wide Web).
+
+    Parameters
+    ----------
+    filename: string
+        The filename or WWW address associated with the data to be loaded.
+    header: string, optional
+        If separate from the data file, a file containing the
+        header can be specified. Default: None.
+
+    Returns
+    -------
+    sdf : SDFRead object
+        Dict-like container of parameters and data.
+
+    References
+    ----------
+    SDF is described here:
+
+        J. K. Salmon and M. S. Warren. Self-Describing File (SDF) Library.
+        Zenodo, Jun 2014. URL http://bitbucket.org/JohnSalmon/sdf.
+
+    Examples
+    --------
+
+    >>> sdf = load_sdf("data.sdf", header="data.hdr")
+    >>> print sdf.parameters
+    >>> print sdf['x']
+
+    """
     if 'http' in filename:
         sdf = HTTPSDFRead(filename, header=header)
     else:


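As the snippet above shows, load_sdf simply dispatches on the filename: anything containing 'http' goes through HTTPSDFRead (and therefore needs the thingking package), everything else through the memmapped SDFRead. The path and URL below are placeholders:

    from yt.utilities.sdf import load_sdf

    local = load_sdf("data.sdf", header="data.hdr")
    remote = load_sdf("http://example.com/halos.sdf")   # requires thingking for HTTP access
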
https://bitbucket.org/yt_analysis/yt/commits/8733c491a096/
Changeset:   8733c491a096
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-13 00:28:47
Summary:     some PEP8 and a bugfix for the DataStruct.__del__
Affected #:  1 file

diff -r 48fce424fc99191b402b7b2faf6df138a4cc18eb -r 8733c491a0965f9d028a997537427ac2e5529ef8 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -58,14 +58,14 @@
     def myfilter(chunk, mask=None):
         pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
 
-        # This hurts, but is useful for periodicity. Probably should check first
-        # if it is even needed for a given left/right
+        # This hurts, but is useful for periodicity. Probably should check
+        # first if it is even needed for a given left/right
         for i in range(3):
-            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
+            pos[:, i] = np.mod(pos[:, i] - left[i], domain_width[i]) + left[i]
 
         # Now get all particles that are within the bbox
         if mask is None:
-            mask = np.all(pos >= left, axis=1) 
+            mask = np.all(pos >= left, axis=1)
             np.logical_and(mask, np.all(pos < right, axis=1), mask)
         else:
             np.logical_and(mask, np.all(pos >= left, axis=1), mask)
@@ -74,15 +74,17 @@
 
     return myfilter
 
+
 def sphere_filter(center, radius, domain_width):
 
     def myfilter(chunk, mask=None):
         pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
+        left = center-radius
 
-        # This hurts, but is useful for periodicity. Probably should check first
-        # if it is even needed for a given left/right
+        # This hurts, but is useful for periodicity. Probably should check
+        # first if it is even needed for a given left/right
         for i in range(3):
-            pos[:,i] = np.mod(pos[:,i] - left[i], domain_width[i]) + left[i]
+            pos[:, i] = np.mod(pos[:, i] - left[i], domain_width[i]) + left[i]
 
         # Now get all particles that are within the radius
         if mask is None:
@@ -93,11 +95,13 @@
 
     return myfilter
 
+
 def _ensure_xyz_fields(fields):
     for f in 'xyz':
         if f not in fields:
             fields.append(f)
 
+
 class DataStruct(object):
     """docstring for DataStruct"""
 
@@ -121,13 +125,13 @@
 
     def build_memmap(self):
         assert(self.size != -1)
-        self.handle = np.memmap(self.filename, dtype=self.dtype,
-                        mode='r', shape=self.size, offset=self._offset)
+        self.handle = np.memmap(self.filename, dtype=self.dtype, mode='r',
+                                shape=self.size, offset=self._offset)
         for k in self.dtype.names:
             self.data[k] = self.handle[k]
 
     def __del__(self):
-        if self.handle:
+        if self.handle is not None:
             try:
                 self.handle.close()
             except AttributeError:


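For context, build_memmap (touched above) boils down to a plain numpy memmap over a binary column block at a known byte offset, so the data is never read eagerly. A standalone sketch with an invented dtype, offset, and file name:

    import numpy as np

    dtype = np.dtype([("x", "f4"), ("y", "f4"), ("z", "f4")])
    handle = np.memmap("particles.bin", dtype=dtype, mode="r",
                       shape=1000, offset=4096)
    x = handle["x"]      # a lazily-read view of the x column
    print(x[:5])
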
https://bitbucket.org/yt_analysis/yt/commits/8b014cbf298b/
Changeset:   8b014cbf298b
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-15 17:21:26
Summary:     Merging
Affected #:  48 files

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -2,15 +2,15 @@
 
 Light Cone Generator
 ====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Light cones are projections made by stacking multiple datasets together to 
-continuously span a given redshift interval.  The width of individual 
-projection slices is adjusted such that each slice has the same angular size.  
-Each projection slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  Since deeper 
-images sample earlier epochs of the simulation, light cones represent the 
-closest thing to synthetic imaging observations.
+Light cones are created by stacking multiple datasets together to 
+continuously span a given redshift interval.  To make a projection of a 
+field through a light cone, the width of individual slices is adjusted 
+such that each slice has the same angular size.  
+Each slice is randomly shifted and projected along a random axis to 
+ensure that the same structures are not sampled multiple times.  A 
+recipe for creating a simple light cone projection can be found in 
+the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
@@ -23,46 +23,41 @@
 Configuring the Light Cone Generator
 ------------------------------------
 
-A recipe for creating a simple light cone projection can be found in the 
-cookbook.  The required arguments to instantiate a ``LightCone`` objects are 
+The required arguments to instantiate a ``LightCone`` object are 
 the path to the simulation parameter file, the simulation type, the nearest 
 redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightCone
+  from yt.analysis_modules.cosmological_observation.api import \
+       LightCone
 
   lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                  'Enzo', 0., 0.1)
 
 The additional keyword arguments are:
 
- * **field_of_view_in_arcminutes** (*float*): The field of view of the image 
-   in units of arcminutes.  Default: 600.0.
-
- * **image_resolution_in_arcseconds** (*float*): The size of each image pixel 
-   in units of arcseconds.  Default: 60.0.
-
- * **use_minimum_datasets** (*bool*):  If True, the minimum number of datasets 
-   is used to connect the initial and final redshift.  If false, the light 
-   cone solution will contain as many entries as possible within the redshift 
-   interval.  Default: True.
+ * **use_minimum_datasets** (*bool*):  If True, the minimum number of 
+   datasets is used to connect the initial and final redshift.  If False, 
+   the light cone solution will contain as many entries as possible within 
+   the redshift interval.  Default: True.
 
  * **deltaz_min** (*float*): Specifies the minimum Delta-z between 
    consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This 
-   was invented to allow light cones with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light cones with 
+   thin slices to sample coherent large scale structure, but in practice does
+   not work so well.  Try setting this parameter to 1 and see what happens.  
+   Default: 0.0.
 
  * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
-   gathering datasets for time series.  Default: True.
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
    pf.parameters.  Default: None.
@@ -76,10 +71,10 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets and the width, depth, 
-center, and axis of the projection to be made for that slice.  The 
-:meth:`LightCone.calculate_light_cone_solution` function is used to 
-calculate the random shifting and projection axis:
+A light cone solution consists of a list of datasets spanning a redshift 
+interval with a random orientation for each dataset.  A new solution 
+is calculated with the :meth:`LightCone.calculate_light_cone_solution` 
+function:
 
 .. code-block:: python
 
@@ -87,70 +82,39 @@
 
 The keyword argument are:
 
- * **seed** (*int*): the seed for the random number generator.  Any light cone 
-   solution can be reproduced by giving the same random seed.  Default: None 
-   (each solution will be distinct).
+ * **seed** (*int*): the seed for the random number generator.  Any light 
+   cone solution can be reproduced by giving the same random seed.  
+   Default: None.
 
  * **filename** (*str*): if given, a text file detailing the solution will be 
    written out.  Default: None.
 
-If a new solution for the same LightCone object is desired, the 
-:meth:`rerandomize_light_cone_solution` method should be called in place of 
-:meth:`calculate_light_cone_solution`:
-
-.. code-block:: python
-
-  new_seed = 987654321
-  lc.rerandomize_light_cone_solution(new_seed, Recycle=True, 
-                                     filename='new_lightcone.dat')
-
-Additional keyword arguments are:
-
- * **recycle** (*bool*): if True, the new solution will have the same shift in 
-   the line of sight as the original solution.  Since the projections of each 
-   slice are serialized and stored for the entire width of the box (even if 
-   the width used is left than the total box), the projection data can be 
-   deserialized instead of being remade from scratch.  This can greatly speed 
-   up the creation of a large number of light cone projections.  Default: True.
-
- * **filename** (*str*): if given, a text file detailing the solution will be 
-   written out.  Default: None.
-
-If :meth:`rerandomize_light_cone_solution` is used, the LightCone object will 
-keep a copy of the original solution that can be returned to at any time by 
-calling :meth:`restore_master_solution`:
-
-.. code-block:: python
-
-  lc.restore_master_solution()
-
-.. note:: All light cone solutions made with the above method will still use 
-   the same list of datasets.  Only the shifting and projection axis will be 
-   different.
-
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution set, projections can be made of any available 
-field:
+With the light cone solution in place, projections with a given field of 
+view and resolution can be made of any available field:
 
 .. code-block:: python
 
   field = 'density'
-  lc.project_light_cone(field , weight_field=None, 
+  field_of_view = (600.0, "arcmin")
+  resolution = (60.0, "arcsec")
+  lc.project_light_cone(field_of_view, resolution,
+                        field, weight_field=None, 
                         save_stack=True, 
                         save_slice_images=True)
 
+The field of view and resolution can be specified either as a tuple of a 
+value and a unit string or as a unitful ``YTQuantity``.  
 Additional keyword arguments:
 
- * **weight_field** (*str*): the weight field of the projection.  This has the 
-   same meaning as in standard projections.  Default: None.
+ * **weight_field** (*str*): the weight field of the projection.  This has 
+   the same meaning as in standard projections.  Default: None.
 
- * **apply_halo_mask** (*bool*): if True, a boolean mask is apply to the light 
-   cone projection.  See below for a description of halo masks.  Default: False.
-
- * **node** (*str*): a prefix to be prepended to the node name under which the 
-   projection data is serialized.  Default: None.
+ * **photon_field** (*bool*): if True, the projection data for each slice is 
+   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+   distance between the observer and the slice redshift.  Default: False.
 
  * **save_stack** (*bool*): if True, the unflattened light cone data including 
    each individual slice is written to an hdf5 file.  Default: True.
@@ -161,13 +125,7 @@
  * **save_slice_images** (*bool*): save images for each individual projection 
    slice.  Default: False.
 
- * **flatten_stack** (*bool*): if True, the light cone stack is continually 
-   flattened each time a slice is added in order to save memory.  This is 
-   generally not necessary.  Default: False.
-
- * **photon_field** (*bool*): if True, the projection data for each slice is 
-   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
-   distance between the observer and the slice redshift.  Default: False.
+ * **cmap_name** (*string*): color map for images.  Default: "algae".
 
  * **njobs** (*int*): The number of parallel jobs over which the light cone 
    projection will be split.  Choose -1 for one processor per individual
@@ -177,34 +135,4 @@
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Sampling Unique Light Cone Volumes
-----------------------------------
-
-When making a large number of light cones, particularly for statistical 
-analysis, it is important to have a handle on the amount of sampled volume in 
-common from one projection to another.  Any statistics may untrustworthy if a 
-set of light cones have too much volume in common, even if they may all be 
-entirely different in appearance.  LightCone objects have the ability to 
-calculate the volume in common between two solutions with the same dataset 
-ist.  The :meth:`find_unique_solutions` and 
-:meth:`project_unique_light_cones` functions can be used to create a set of 
-light cone solutions that have some maximum volume in common and create light 
-cone projections for those solutions.  If specified, the code will attempt to 
-use recycled solutions that can use the same serialized projection objects 
-that have already been created.  This can greatly increase the speed of making 
-multiple light cone projections.  See the cookbook for an example of doing this.
-
-Making Light Cones with a Halo Mask
------------------------------------
-
-The situation may arise where it is necessary or desirable to know the 
-location of halos within the light cone volume, and specifically their 
-location in the final image.  This can be useful for developing algorithms to 
-find galaxies or clusters in image data.  The light cone generator does this 
-by running the HaloProfiler (see :ref:`halo_profiling`) on each of the 
-datasets used in the light cone and shifting them accordingly with the light 
-cone solution.  The ability also exists to create a boolean mask with the 
-dimensions of the final light cone image that can be used to mask out the 
-halos in the image.  It is left as an exercise to the reader to find a use for 
-this functionality.  This process is somewhat complicated, but not terribly.  
-See the recipe in the cookbook for an example of this functionality.
+.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.
\ No newline at end of file

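For reference, the pieces documented above fit together as in the minimal sketch below (not part of the patch).  The dataset path, seed, and output names mirror the cookbook recipe further down; passing the field of view as a yt.YTQuantity instead of a (value, unit) tuple is an assumption based on the note about unitful quantities.

import yt
from yt.analysis_modules.cosmological_observation.api import LightCone

# Light cone spanning z = 0.1 to z = 0, using the fewest datasets that
# cover the interval and only the redshift outputs.
lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo', 0.0, 0.1,
               use_minimum_datasets=True,
               deltaz_min=0.0,
               time_data=False)

# Random shifts and projection axes; the same seed reproduces the solution.
lc.calculate_light_cone_solution(seed=123456789, filename='LC/solution.txt')

# Field of view as a unitful quantity, resolution as a (value, unit) tuple.
field_of_view = yt.YTQuantity(600.0, 'arcmin')
resolution = (60.0, 'arcsec')
lc.project_light_cone(field_of_view, resolution, 'density',
                      weight_field=None,
                      save_stack=True,
                      save_slice_images=False,
                      njobs=-1)
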
diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -1,20 +1,21 @@
 .. _light-ray-generator:
 
 Light Ray Generator
-====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
+===================
 
 Light rays are similar to light cones (:ref:`light-cone-generator`) in how  
 they stack multiple datasets together to span a redshift interval.  Unlike 
-light cones, which which stack randomly oriented projections from each 
+light cones, which stack randomly oriented projections from each 
 dataset to create synthetic images, light rays use thin pencil beams to 
-simulate QSO sight lines.
+simulate QSO sight lines.  A sample script can be found in the cookbook 
+under :ref:`cookbook-light_ray`.
 
 .. image:: _images/lightray.png
 
-A ray segment records the information of all grid cells intersected by the ray 
-as well as the path length, dl, of the ray through the cell.  Column densities 
-can be calculated by multiplying physical densities by the path length.
+A ray segment records the information of all grid cells intersected by the 
+ray as well as the path length, dl, of the ray through the cell.  Column 
+densities can be calculated by multiplying physical densities by the path 
+length.
 
 Configuring the Light Ray Generator
 -----------------------------------
@@ -36,22 +37,22 @@
    ray solution will contain as many entries as possible within the redshift
    interval.  Default: True.
 
- * **deltaz_min** (*float*):  Specifies the minimum Delta-z between consecutive
-   datasets in the returned list.  Default: 0.0.
+ * **deltaz_min** (*float*):  Specifies the minimum Delta-z between 
+   consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This
-   was invented to allow light rays with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light rays with 
+   thin slices to sample coherent large scale structure, but in practice 
+   does not work so well.  Try setting this parameter to 1 and see what 
+   happens.  Default: 0.0.
 
- * **time_data** (*bool*): Whether or not to include time outputs when gathering
-   datasets for time series.  Default: True.
-
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
+ * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
 Making Light Ray Data
 ---------------------
@@ -74,7 +75,21 @@
 
  * **seed** (*int*): Seed for the random number generator.  Default: None.
 
- * **fields** (*list*): A list of fields for which to get data.  Default: None.
+ * **start_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the starting position of the 
+   ray.  Default: None.
+
+ * **end_position** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The coordinates of the ending position of the ray.
+   Default: None.
+
+ * **trajectory** (*list* of floats): Used only if creating a light ray 
+   from a single dataset.  The (r, theta, phi) direction of the light ray.  
+   Use either **end_position** or **trajectory**, not both.  
+   Default: None.
+
+ * **fields** (*list*): A list of fields for which to get data.  
+   Default: None.
 
  * **solution_filename** (*string*): Path to a text file where the 
   trajectories of each subray are written out.  Default: None.
@@ -83,51 +98,17 @@
    Default: None.
 
  * **get_los_velocity** (*bool*): If True, the line of sight velocity is 
-   calculated for each point in the ray.  Default: False.
+   calculated for each point in the ray.  Default: True.
 
- * **get_nearest_halo** (*bool*): If True, the HaloProfiler will be used to 
-   calculate the distance and mass of the nearest halo for each point in the
-   ray.  This option requires additional information to be included.  See 
-   the cookbook for an example.  Default: False.
-
- * **nearest_halo_fields** (*list*): A list of fields to be calculated for the 
-   halos nearest to every pixel in the ray.  Default: None.
-
- * **halo_list_file** (*str*): Filename containing a list of halo properties to be used 
-   for getting the nearest halos to absorbers.  Default: None.
-
- * **halo_profiler_parameters** (*dict*): A dictionary of parameters to be 
-   passed to the HaloProfiler to create the appropriate data used to get 
-   properties for the nearest halos.  Default: None.
-
- * **njobs** (*int*): The number of parallel jobs over which the slices for the
-   halo mask will be split.  Choose -1 for one processor per individual slice 
-   and 1 to have all processors work together on each projection.  Default: 1
+ * **njobs** (*int*): The number of parallel jobs over which the slices for 
+   the halo mask will be split.  Choose -1 for one processor per individual 
+   slice and 1 to have all processors work together on each projection.  
+   Default: 1
 
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
 
-Getting The Nearest Galaxies
-----------------------------
-
-The light ray tool will use the HaloProfiler to calculate the distance and 
-mass of the nearest halo to that pixel.  In order to do this, a dictionary 
-called halo_profiler_parameters is used to pass instructions to the 
-HaloProfiler.  This dictionary has three additional keywords:
-
- * **halo_profiler_kwargs** (*dict*): A dictionary of standard HaloProfiler 
-   keyword arguments and values to be given to the HaloProfiler.
-
- * **halo_profiler_actions** (*list*): A list of actions to be performed by 
-   the HaloProfiler.  Each item in the list should be a dictionary with the 
-   following entries: "function", "args", and "kwargs", for the function to 
-   be performed, the arguments supplied to that function, and the keyword 
-   arguments.
-
- * **halo_list** (*string*): 'all' to use the full halo list, or 'filtered' 
-   to use the filtered halo list created after calling make_profiles.
-
-See the recipe in the cookbook for am example.
+.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

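To make the column-density remark above concrete ("Column densities can be calculated by multiplying physical densities by the path length"), here is a short post-processing sketch that is not part of the patch.  It assumes the HDF5 file written by make_light_ray stores each requested field, plus the path length 'dl', as flat arrays; the dataset names and units are assumptions and may differ between versions.

import h5py
import numpy as np

# Read the ray written by the cookbook recipe (LR/lightray.h5) and sum
# density * dl over the ray elements to get a total column density.
with h5py.File('LR/lightray.h5', 'r') as f:
    density = f['density'][:]   # assumed to be in g/cm**3
    dl = f['dl'][:]             # assumed path length per element, in cm

column_density = np.sum(density * dl)  # g/cm**2 along the full sight line
print(column_density)
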
diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -29,6 +29,8 @@
 
 .. yt_cookbook:: halo_merger_tree.py
 
+.. _cookbook-light_cone:
+
 Light Cone Projection
 ~~~~~~~~~~~~~~~~~~~~~
 This script creates a light cone projection, a synthetic observation 
@@ -37,27 +39,15 @@
 
 .. yt_cookbook:: light_cone_projection.py
 
-Light Cone with Halo Mask
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script combines the light cone generator with the halo profiler to 
-make a light cone projection with all of the halos cut out of the image.
+.. _cookbook-light_ray:
 
-.. yt_cookbook:: light_cone_with_halo_mask.py 
+Light Ray
+~~~~~~~~~
+This script demonstrates how to make a synthetic quasar sight line that 
+extends over multiple datasets and can be used to generate a synthetic 
+absorption spectrum.
 
-Making Unique Light Cones
-~~~~~~~~~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a series of light cone projections
-that only have a maximum amount of volume in common.
-
-.. yt_cookbook:: unique_light_cone_projections.py 
-
-Making Light Rays
-~~~~~~~~~~~~~~~~~
-This script demonstrates how to make a synthetic quasar sight line and 
-uses the halo profiler to record information about halos close to the 
-line of sight.
-
-.. yt_cookbook:: make_light_ray.py 
+.. yt_cookbook:: light_ray.py 
 
 Creating and Fitting Absorption Spectra
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/cookbook/light_cone_projection.py
--- a/doc/source/cookbook/light_cone_projection.py
+++ b/doc/source/cookbook/light_cone_projection.py
@@ -1,12 +1,8 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
+import yt
+from yt.analysis_modules.cosmological_observation.api import \
+     LightCone
 
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Create a LightCone object extending from z = 0 to z = 0.1
-# with a 600 arcminute field of view and a resolution of
-# 60 arcseconds.
+# Create a LightCone object extending from z = 0 to z = 0.1.
 
 # We have already set up the redshift dumps to be
 # used for this, so we will not use any of the time
@@ -14,20 +10,21 @@
 lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                'Enzo', 0., 0.1,
                observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
                time_data=False)
 
 # Calculate a randomization of the solution.
-lc.calculate_light_cone_solution(seed=123456789)
+lc.calculate_light_cone_solution(seed=123456789, filename="LC/solution.txt")
 
 # Choose the field to be projected.
-field = 'SZY'
+field = 'szy'
 
+# Use the LightCone object to make a projection with a 600 arcminute 
+# field of view and a resolution of 60 arcseconds.
 # Set njobs to -1 to have one core work on each projection
-# in parallel.  Set save_slice_images to True to see an
-# image for each individual slice.
-lc.project_light_cone(field, save_stack=False,
+# in parallel.
+lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"), field,
+                      weight_field=None,
+                      save_stack=True,
                       save_final_image=True,
-                      save_slice_images=False,
+                      save_slice_images=True,
                       njobs=-1)

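A brief usage note, not part of the patch: njobs=-1 only splits work across processors when the script runs under MPI, which in yt-3.0 generally means enabling parallelism near the top of the script and launching with mpirun.  The exact invocation below is illustrative.

# Near the top of the script, before constructing the LightCone:
import yt
yt.enable_parallelism()

# Then launch with as many processes as you want projections in flight, e.g.
#   mpirun -np 4 python light_cone_projection.py
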
diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/cookbook/light_cone_with_halo_mask.py
--- a/doc/source/cookbook/light_cone_with_halo_mask.py
+++ /dev/null
@@ -1,78 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-
-# Instantiate a light cone object as usual.
-lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
-               'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=600.0,
-               image_resolution_in_arcseconds=60.0,
-               time_data=False,
-               output_dir='LC_HM', output_prefix='LightCone')
-
-# Calculate the light cone solution.
-lc.calculate_light_cone_solution(seed=123456789,
-                                 filename='LC_HM/lightcone.dat')
-
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir': 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Get the halo list for the active solution of this light cone using
-# the HaloProfiler settings set up above.
-# Write the boolean map to an hdf5 file called 'halo_mask.h5'.
-# Write a text file detailing the location, redshift, radius, and mass
-# of each halo in light cone projection.
-lc.get_halo_mask(mask_file='LC_HM/halo_mask.h5',
-                 map_file='LC_HM/halo_map.out',
-                 cube_file='LC_HM/halo_cube.h5',
-                 virial_overdensity=100,
-                 halo_profiler_parameters=halo_profiler_parameters,
-                 njobs=1, dynamic=False)
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make the light cone projection and apply the halo mask.
-pc = lc.project_light_cone(field, save_stack=False,
-                           save_final_image=True,
-                           save_slice_images=False,
-                           apply_halo_mask=True)

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/cookbook/light_ray.py
--- /dev/null
+++ b/doc/source/cookbook/light_ray.py
@@ -0,0 +1,25 @@
+import os
+import sys
+import yt
+from yt.analysis_modules.cosmological_observation.api import \
+     LightRay
+
+# Create a directory for the light rays
+if not os.path.isdir("LR"): 
+    os.mkdir('LR')
+     
+# Create a LightRay object extending from z = 0 to z = 0.1
+# and use only the redshift dumps.
+lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
+              'Enzo', 0.0, 0.1,
+              use_minimum_datasets=True,
+              time_data=False)
+
+# Make a light ray, and set njobs to -1 to use one core
+# per dataset.
+lr.make_light_ray(seed=123456789,
+                  solution_filename='LR/lightraysolution.txt',
+                  data_filename='LR/lightray.h5',
+                  fields=['temperature', 'density'],
+                  get_los_velocity=True,
+                  njobs=-1)

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/cookbook/make_light_ray.py
--- a/doc/source/cookbook/make_light_ray.py
+++ /dev/null
@@ -1,69 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import os
-import sys
-import yt
-from yt.analysis_modules.halo_profiler.api import HaloProfiler
-from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
-     LightRay
-
-# Create a directory for the light rays
-if not os.path.isdir("LR"): 
-    os.mkdir('LR')
-     
-# Create a LightRay object extending from z = 0 to z = 0.1
-# and use only the redshift dumps.
-lr = LightRay("enzo_tiny_cosmology/32Mpc_32.enzo",
-              'Enzo', 0.0, 0.1,
-              use_minimum_datasets=True,
-              time_data=False)
-
-# Configure the HaloProfiler.
-# These are keyword arguments given when creating a
-# HaloProfiler object.
-halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out',
-                        'output_dir' : 'halo_analysis'}
-
-# Create a list of actions for the HaloProfiler to take.
-halo_profiler_actions = []
-
-# Each item in the list is a dictionary containing three things:
-# 1. 'function' - the function to be called.
-# 2. 'args' - a list of arguments given with the function.
-# 3. 'kwargs' - a dictionary of keyword arguments.
-
-# Add a virial filter.
-halo_profiler_actions.append({'function': HaloProfiler.add_halo_filter,
-                              'args': [VirialFilter],
-                              'kwargs': {'must_be_virialized':False,
-                                         'overdensity_field':'ActualOverdensity',
-                                         'virial_overdensity':100,
-                                         'virial_filters':[['TotalMassMsun','>','1e5']],
-                                         'virial_quantities':['TotalMassMsun','RadiusMpc']}})
-
-# Add a call to make the profiles.
-halo_profiler_actions.append({'function': HaloProfiler.make_profiles,
-                              'kwargs': {'filename': "VirializedHalos.out"}})
-
-# Specify the desired halo list is the filtered list.
-# If 'all' is given instead, the full list will be used.
-halo_list = 'filtered'
-
-# Put them all into one dictionary.
-halo_profiler_parameters=dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                              halo_profiler_actions=halo_profiler_actions,
-                              halo_list=halo_list)
-
-# Make a light ray, and set njobs to -1 to use one core
-# per dataset.
-lr.make_light_ray(seed=123456789,
-                  solution_filename='LR/lightraysolution.txt',
-                  data_filename='LR/lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_nearest_halo=True,
-                  nearest_halo_fields=['TotalMassMsun_100',
-                                       'RadiusMpc_100'],
-                  halo_profiler_parameters=halo_profiler_parameters,
-                  get_los_velocity=True,
-                  njobs=-1)

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/cookbook/unique_light_cone_projections.py
--- a/doc/source/cookbook/unique_light_cone_projections.py
+++ /dev/null
@@ -1,34 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import yt
-from yt.analysis_modules.cosmological_observation.light_cone.light_cone import LightCone
-
-# Instantiate a light cone.
-lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", 'Enzo', 0, 0.1,
-               observer_redshift=0.0,
-               field_of_view_in_arcminutes=120.0,
-               image_resolution_in_arcseconds=60.0,
-               use_minimum_datasets=True,
-               time_data=False,
-               output_dir='LC_U', output_prefix='LightCone')
-
-# Try to find 10 solutions that have at most 10% volume in
-# common and give up after 50 consecutive failed attempts.
-# The recycle=True setting tells the code to first attempt
-# to use solutions with the same projection axes as other
-# solutions.  This will save time when making the projection.
-yt.find_unique_solutions(lc, max_overlap=0.10, failures=50,
-                         seed=123456789, recycle=True,
-                         solutions=10, filename='LC_U/unique.dat')
-
-# Choose the field to be projected.
-field = 'SZY'
-
-# Make light cone projections with each of the random seeds
-# found above.  All output files will be written with unique
-# names based on the random seed numbers.
-yt.project_unique_light_cones(lc, 'LC_U/unique.dat', field,
-                              save_stack=False,
-                              save_final_image=True,
-                              save_slice_images=False)

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -224,6 +224,8 @@
 :class:`~yt.visualization.plot_window.ProjectionPlot` for the full
 class description.
 
+.. _off-axis-projections:
+
 Off Axis Projection Plots
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -772,8 +774,8 @@
    ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art")
    center = ds.arr([64.0, 64.0, 64.0], 'code_length')
    rvir = ds.quan(1e-1, "Mpccm/h")
+   sph = ds.sphere(center, rvir)
 
-   sph = ds.sphere(center, rvir)
    plot = yt.PhasePlot(sph, "density", "temperature", "cell_mass",
                        weight_field=None)
    plot.set_unit('density', 'Msun/pc**3')
@@ -782,6 +784,29 @@
    plot.set_ylim(1,1e7)
    plot.save()
 
+It is also possible to construct a custom 2D profile object and then use the
+``from_profile`` method to create a ``PhasePlot`` using the profile object.
+This will sometimes be faster, especially if you need custom x- and y-axis
+limits.  The following example illustrates this workflow:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art")
+   center = ds.arr([64.0, 64.0, 64.0], 'code_length')
+   rvir = ds.quan(1e-1, "Mpccm/h")
+   sph = ds.sphere(center, rvir)
+   units = dict(density='Msun/pc**3', cell_mass='Msun')
+   extrema = dict(density=(1e-5, 1e1), temperature=(1, 1e7))
+
+   profile = yt.create_profile(sph, ['density', 'temperature'],
+                               n_bins=[128, 128], fields=['cell_mass'],
+                               weight_field=None, units=units, extrema=extrema)
+
+   plot = yt.PhasePlot.from_profile(profile)
+
+   plot.save()
+
 Probability Distribution Functions and Accumulation
 ---------------------------------------------------
 

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d setup.py
--- a/setup.py
+++ b/setup.py
@@ -224,7 +224,6 @@
         zip_safe=False,
         data_files=REASON_FILES,
         cmdclass={'build_py': my_build_py, 'build_src': my_build_src},
-        install_requires=["thingking"],
     )
     return
 

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d yt/analysis_modules/cosmological_observation/api.py
--- a/yt/analysis_modules/cosmological_observation/api.py
+++ b/yt/analysis_modules/cosmological_observation/api.py
@@ -17,9 +17,7 @@
     CosmologySplice
 
 from .light_cone.api import \
-    LightCone, \
-    find_unique_solutions, \
-    project_unique_light_cones
+    LightCone
 
 from .light_ray.api import \
     LightRay

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -78,8 +78,9 @@
 
         Examples
         --------
-        >>> cosmo = es.create_cosmology_splice(1.0, 0.0, minimal=True,
-                                               deltaz_min=0.0)
+
+        >>> co = CosmologySplice("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
+        >>> cosmo = co.create_cosmology_splice(1.0, 0.0)
 
         """
 
@@ -133,12 +134,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (np.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.abs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redshift.
-                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.abs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -153,20 +154,20 @@
 
                     if current_slice is cosmology_splice[-1]:
                         near_redshift = cosmology_splice[-1]['redshift'] - \
-                          cosmology_splice[-1]['deltazMax']
+                          cosmology_splice[-1]['dz_max']
                         mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                         break
                     else:
                         cosmology_splice.append(current_slice)
 
                 z = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redshift.
-            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
-                                                                    obj['redshift']))
+            self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
+                                                           obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
 
@@ -175,14 +176,14 @@
                 if (nextOutput['redshift'] <= near_redshift):
                     break
                 if ((cosmology_splice[-1]['redshift'] - nextOutput['redshift']) >
-                    cosmology_splice[-1]['deltazMin']):
+                    cosmology_splice[-1]['dz_min']):
                     cosmology_splice.append(nextOutput)
                 nextOutput = nextOutput['next']
             if (cosmology_splice[-1]['redshift'] -
-                cosmology_splice[-1]['deltazMax']) > near_redshift:
+                cosmology_splice[-1]['dz_max']) > near_redshift:
                 mylog.error("Cosmology splice incomplete due to insufficient data outputs.")
                 near_redshift = cosmology_splice[-1]['redshift'] - \
-                  cosmology_splice[-1]['deltazMax']
+                  cosmology_splice[-1]['dz_max']
 
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
@@ -253,7 +254,7 @@
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
-            outputs.append({'redshift': z, 'deltazMax': deltaz_max})
+            outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
         mylog.info("%d data dumps will be needed to get from z = %f to %f." %
@@ -282,28 +283,24 @@
             # at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.1 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
-            output['deltazMax'] = np.fabs(z2 - z)
-
+            output['dz_max'] = np.abs(z2 - z)
+            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
@@ -322,28 +319,24 @@
             # top grid pixel at a given redshift using Newton's method.
             z1 = z
             z2 = z1 - 0.01 # just an initial guess
-            distance1 = 0.0
+            distance1 = self.simulation.quan(0.0, "Mpccm / h")
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration = 1
 
-            # Convert comoving radial distance into Mpc / h,
-            # since that's how box size is stored.
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.simulation.hubble_constant
-
-            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
-                z2 = ((target_distance - distance2) / m) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-                  self.simulation.hubble_constant
+                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+                distance2 = self.cosmology.comoving_radial_distance(z2, z)
                 iteration += 1
                 if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, np.fabs(z2 - z)))
+                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
+                                "exceeded for z = %f (delta z = %f)." %
+                                (z, np.abs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
+            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,24 +350,20 @@
         # box at a given redshift.
         z1 = z
         z2 = z1 - 0.1 # just an initial guess
-        distance1 = 0.0
+        distance1 = self.simulation.quan(0.0, "Mpccm / h")
+        distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1
 
-        # Convert comoving radial distance into Mpc / h,
-        # since that's how box size is stored.
-        distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-          self.cosmology.hubble_constant
-
-        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.abs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
-            z2 = ((target_distance - distance2) / m) + z2
-            distance2 = self.cosmology.comoving_radial_distance(z2, z) * \
-              self.cosmology.hubble_constant
+            z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
+            distance2 = self.cosmology.comoving_radial_distance(z2, z)
             iteration += 1
             if (iteration > max_Iterations):
-                mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, np.fabs(z2 - z)))
+                mylog.error("deltaz_forward: Warning - max iterations " +
+                            "exceeded for z = %f (delta z = %f)." %
+                            (z, np.abs(z2 - z)))
                 break
-        return np.fabs(z2 - z)
+        return np.abs(z2 - z)

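Replacing the hand-converted Mpc/h distances with unitful quantities leaves the underlying root find unchanged: _deltaz_forward (and the deltaz max/min helpers) use a secant iteration on the comoving radial distance.  The standalone sketch below, not part of the patch, shows the same iteration with a linear toy distance function standing in for Cosmology.comoving_radial_distance; all names and numbers are illustrative only.

import numpy as np

def toy_comoving_distance(z_near, z_far, c_over_h0=2997.92458):
    # Very rough low-z approximation: d ~ (c/H0) * (z_far - z_near), in Mpc.
    return c_over_h0 * (z_far - z_near)

def deltaz_forward(z, target_distance, tol=1e-4, max_iter=100):
    # Find dz such that the comoving distance from (z - dz) to z equals
    # target_distance, using the same secant update as _deltaz_forward.
    z1, z2 = z, z - 0.1            # initial guesses
    d1 = 0.0
    d2 = toy_comoving_distance(z2, z)
    for _ in range(max_iter):
        if np.abs(d2 - target_distance) / d2 <= tol:
            break
        m = (d2 - d1) / (z2 - z1)  # local slope d(distance)/dz
        z1, d1 = z2, d2
        z2 = (target_distance - d2) / m + z2
        d2 = toy_comoving_distance(z2, z)
    return np.abs(z2 - z)

# Delta-z needed to traverse ~32 Mpc starting from z = 0.1 (toy numbers).
print(deltaz_forward(0.1, 32.0))
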
diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d yt/analysis_modules/cosmological_observation/light_cone/api.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/api.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/api.py
@@ -1,5 +1,5 @@
 """
-API for lightcone
+API for light_cone
 
 
 
@@ -15,7 +15,3 @@
 
 from .light_cone import \
     LightCone
-
-from .unique_solution import \
-    project_unique_light_cones, \
-    find_unique_solutions

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Function to calculate volume in common between two n-cubes, with optional
-periodic boundary conditions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-def common_volume(n_cube_1, n_cube_2, periodic=None):
-    "Return the n-volume in common between the two n-cubes."
-
-    # Check for proper args.
-    if ((len(np.shape(n_cube_1)) != 2) or
-        (np.shape(n_cube_1)[1] != 2) or
-        (np.shape(n_cube_1) != np.shape(n_cube_2))):
-        print "Arguments must be 2 (n, 2) numpy array."
-        return 0
-
-    if ((periodic is not None) and
-        (np.shape(n_cube_1) != np.shape(periodic))):
-        print "periodic argument must be (n, 2) numpy array."
-        return 0
-
-    nCommon = 1.0
-    for q in range(np.shape(n_cube_1)[0]):
-        if (periodic is None):
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
-        else:
-            nCommon *= common_segment(n_cube_1[q], n_cube_2[q],
-                                      periodic=periodic[q])
-
-    return nCommon
-
-def common_segment(seg1, seg2, periodic=None):
-    "Return the length of the common segment."
-
-    # Check for proper args.
-    if ((len(seg1) != 2) or (len(seg2) != 2)):
-        print "Arguments must be arrays of size 2."
-        return 0
-
-    # If not periodic, then this is very easy.
-    if periodic is None:
-        seg1.sort()
-        len1 = seg1[1] - seg1[0]
-        seg2.sort()
-        len2 = seg2[1] - seg2[0]
-
-        common = 0.0
-
-        add = seg1[1] - seg2[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        add = seg2[1] - seg1[0]
-        if ((add > 0) and (add <= max(len1, len2))):
-            common += add
-        common = min(common, len1, len2)
-        return common
-
-    # If periodic, it's a little more complicated.
-    else:
-        if len(periodic) != 2:
-            print "periodic array must be of size 2."
-            return 0
-
-        seg1.sort()
-        flen1 = seg1[1] - seg1[0]
-        len1 = flen1 - int(flen1)
-        seg2.sort()
-        flen2 = seg2[1] - seg2[0]
-        len2 = flen2 - int(flen2)
-
-        periodic.sort()
-        scale = periodic[1] - periodic[0]
-
-        if (abs(int(flen1)-int(flen2)) >= scale):
-            return min(flen1, flen2)
-
-        # Adjust for periodicity
-        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
-        seg1[1] = seg1[0] + len1
-        if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
-        seg2[1] = seg2[0] + len2
-        if (seg2[1] > periodic[1]): seg2[1] -= scale
-
-        # create list of non-periodic segments
-        pseg1 = []
-        if (seg1[0] >= seg1[1]):
-            pseg1.append([seg1[0], periodic[1]])
-            pseg1.append([periodic[0], seg1[1]])
-        else:
-            pseg1.append(seg1)
-        pseg2 = []
-        if (seg2[0] >= seg2[1]):
-            pseg2.append([seg2[0], periodic[1]])
-            pseg2.append([periodic[0], seg2[1]])
-        else:
-            pseg2.append(seg2)
-
-        # Add up common segments.
-        common = min(int(flen1), int(flen2))
-
-        for subseg1 in pseg1:
-            for subseg2 in pseg2:
-                common += common_segment(subseg1, subseg2)
-
-        return common

diff -r 8733c491a0965f9d028a997537427ac2e5529ef8 -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ /dev/null
@@ -1,383 +0,0 @@
-"""
-Light cone halo mask functions.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import h5py
-import numpy as np
-
-from yt.funcs import *
-from yt.analysis_modules.halo_profiler.api import \
-     HaloProfiler
-from yt.convenience import load
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_objects, \
-     parallel_root_only
-
-def _light_cone_halo_mask(lightCone, cube_file=None,
-                          mask_file=None, map_file=None,
-                          halo_profiler_parameters=None,
-                          virial_overdensity=200,
-                          njobs=1, dynamic=False):
-    "Make a boolean mask to cut clusters out of light cone projections."
-
-    if halo_profiler_parameters is None:
-        halo_profiler_parameters = {}
-
-    pixels = int(lightCone.field_of_view_in_arcminutes * 60.0 /
-                 lightCone.image_resolution_in_arcseconds)
-
-    # Loop through files in light cone solution and get virial quantities.
-    halo_map_storage = {}
-    for my_storage, my_slice in \
-      parallel_objects(lightCone.light_cone_solution,
-                       njobs=njobs, dynamic=dynamic,
-                       storage=halo_map_storage):
-        halo_list = _get_halo_list(my_slice['filename'],
-                                   **halo_profiler_parameters)
-        my_storage.result = \
-          {'mask': _make_slice_mask(my_slice, halo_list, pixels,
-                                    virial_overdensity)}
-        if map_file is not None:
-            my_storage.result['map'] = \
-              _make_slice_halo_map(my_slice, halo_list,
-                                   virial_overdensity)
-
-    # Reassemble halo mask and map lists.
-    light_cone_mask = []
-    halo_map = []
-    all_slices = halo_map_storage.keys()
-    all_slices.sort()
-    for i in all_slices:
-        light_cone_mask.append(halo_map_storage[i]['mask'])
-        if map_file is not None:
-            halo_map.extend(halo_map_storage[i]['map'])
-    del halo_map_storage
-
-    # Write out cube of masks from each slice.
-    if cube_file is not None:
-        _write_halo_mask(cube_file, np.array(light_cone_mask))
-
-    # Write out a text list of all halos in the image.
-    if map_file is not None:
-        _write_halo_map(map_file, halo_map)
-
-    # Write out final mask.
-    if mask_file is not None:
-        # Final mask is simply the product of the mask from each slice.
-        final_mask = np.ones(shape=(pixels, pixels))
-        for mask in light_cone_mask:
-            final_mask *= mask
-        _write_halo_mask(mask_file, final_mask)
-
-    return light_cone_mask
-
- at parallel_root_only
-def _write_halo_mask(filename, halo_mask):
-    r"""Write out an hdf5 file with the halo mask that
-    can be applied to an image.
-    """
-
-    mylog.info("Saving halo mask to %s." % filename)
-    output = h5py.File(filename, 'a')
-    if 'HaloMask' in output.keys():
-        del output['HaloMask']
-    output.create_dataset('HaloMask', data=np.array(halo_mask))
-    output.close()
-
- at parallel_root_only
-def _write_halo_map(filename, halo_map):
-    "Write a text list of halos in a light cone image."
-
-    mylog.info("Saving halo map to %s." % filename)
-    f = open(filename, 'w')
-    f.write("#z       x         y        r_image   r_mpc     m_Msun\n")
-    for halo in halo_map:
-        f.write("%7.4f %9.6f %9.6f %9.3e %9.3e %9.3e\n" % \
-                    (halo['redshift'], halo['x'], halo['y'],
-                     halo['radius_image'], halo['radius_mpc'],
-                     halo['mass']))
-    f.close()
-
-def _get_halo_list(dataset, halo_profiler_kwargs=None,
-                   halo_profiler_actions=None, halo_list='all'):
-    "Load a list of halos for the dataset."
-
-    if halo_profiler_kwargs is None: halo_profiler_kwargs = {}
-    if halo_profiler_actions is None: halo_profiler_actions = []
-
-    hp = HaloProfiler(dataset, **halo_profiler_kwargs)
-    for action in halo_profiler_actions:
-        if not action.has_key('args'): action['args'] = ()
-        if not action.has_key('kwargs'): action['kwargs'] = {}
-        action['function'](hp, *action['args'], **action['kwargs'])
-
-    if halo_list == 'all':
-        return_list = copy.deepcopy(hp.all_halos)
-    elif halo_list == 'filtered':
-        return_list = copy.deepcopy(hp.filtered_halos)
-    else:
-        mylog.error("Keyword, halo_list, must be either 'all' or 'filtered'.")
-        return_list = None
-
-    del hp
-    return return_list
-
-def _make_slice_mask(slice, halo_list, pixels, virial_overdensity):
-    "Make halo mask for one slice in light cone solution."
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Make boolean mask and cut out halos.
-    dx = slice['box_width_fraction'] / pixels
-    x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
-
-    # Cut out any pixel that has any part at all in the circle.
-    for q in range(len(all_halo_radius)):
-        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
-                              np.array(range(pixels))) != 0
-        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
-                              np.array(range(pixels))) != 0
-
-        xDistance = (np.abs(x - all_halo_x[q]) -
-                     (0.5 * dx)) * dif_xIndex
-        yDistance = (np.abs(x - all_halo_y[q]) -
-                     (0.5 * dx)) * dif_yIndex
-
-        distance = np.array([np.sqrt(w**2 + xDistance**2)
-                             for w in yDistance])
-        haloMask *= (distance >= all_halo_radius[q])
-
-    return haloMask
-
-def _make_slice_halo_map(slice, halo_list, virial_overdensity):
-    "Make list of halos for one slice in light cone solution."
-
-    # Get units to convert virial radii back to physical units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    # Get shifted, tiled halo list.
-    all_halo_x, all_halo_y, \
-      all_halo_radius, all_halo_mass = \
-      _make_slice_halo_list(slice, halo_list, virial_overdensity)
-
-    # Construct list of halos
-    halo_map = []
-
-    for q in range(len(all_halo_x)):
-        # Give radius in both physics units and
-        # units of the image (0 to 1).
-        radius_mpc = all_halo_radius[q] * Mpc_units
-        radius_image = all_halo_radius[q] / slice['box_width_fraction']
-
-        halo_map.append({'x': all_halo_x[q] / slice['box_width_fraction'],
-                         'y': all_halo_y[q] / slice['box_width_fraction'],
-                         'redshift': slice['redshift'],
-                         'radius_mpc': radius_mpc,
-                         'radius_image': radius_image,
-                         'mass': all_halo_mass[q]})
-
-    return halo_map
-
-def _make_slice_halo_list(slice, halo_list, virial_overdensity):
-    "Make shifted, tiled list of halos for halo mask and halo map."
-
-   # Make numpy arrays for halo centers and virial radii.
-    halo_x = []
-    halo_y = []
-    halo_depth = []
-    halo_radius = []
-    halo_mass = []
-
-    # Get units to convert virial radii to code units.
-    dataset_object = load(slice['filename'])
-    Mpc_units = dataset_object.units['mpc']
-    del dataset_object
-
-    for halo in halo_list:
-        if halo is not None:
-            center = copy.deepcopy(halo['center'])
-            halo_depth.append(center.pop(slice['projection_axis']))
-            halo_x.append(center[0])
-            halo_y.append(center[1])
-            halo_radius.append(halo['RadiusMpc_%d' % virial_overdensity] /
-                               Mpc_units)
-            halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
-
-    halo_x = np.array(halo_x)
-    halo_y = np.array(halo_y)
-    halo_depth = np.array(halo_depth)
-    halo_radius = np.array(halo_radius)
-    halo_mass = np.array(halo_mass)
-
-    # Adjust halo centers along line of sight.
-    depth_center = slice['projection_center'][slice['projection_axis']]
-    depth_left = depth_center - 0.5 * slice['box_depth_fraction']
-    depth_right = depth_center + 0.5 * slice['box_depth_fraction']
-
-    # Make boolean mask to pick out centers in region along line of sight.
-    # Halos near edges may wrap around to other side.
-    add_left = (halo_depth + halo_radius) > 1 # should be box width
-    add_right = (halo_depth - halo_radius) < 0
-
-    halo_depth = np.concatenate([halo_depth,
-                                 (halo_depth[add_left]-1),
-                                 (halo_depth[add_right]+1)])
-    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = np.concatenate([halo_radius,
-                                  halo_radius[add_left],
-                                  halo_radius[add_right]])
-    halo_mass = np.concatenate([halo_mass,
-                                halo_mass[add_left],
-                                halo_mass[add_right]])
-
-    del add_left, add_right
-
-    # Cut out the halos outside the region of interest.
-    if (slice['box_depth_fraction'] < 1):
-        if (depth_left < 0):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right)) | \
-                ((halo_depth + halo_radius >= depth_left + 1) &
-                 (halo_depth - halo_radius <= 1))
-        elif (depth_right > 1):
-            mask = ((halo_depth + halo_radius >= 0) &
-                    (halo_depth - halo_radius <= depth_right - 1)) | \
-                ((halo_depth + halo_radius >= depth_left) &
-                 (halo_depth - halo_radius <= 1))
-        else:
-            mask = (halo_depth + halo_radius >= depth_left) & \
-              (halo_depth - halo_radius <= depth_right)
-
-        halo_x = halo_x[mask]
-        halo_y = halo_y[mask]
-        halo_radius = halo_radius[mask]
-        halo_mass = halo_mass[mask]
-        del mask
-    del halo_depth
-
-    all_halo_x = np.array([])
-    all_halo_y = np.array([])
-    all_halo_radius = np.array([])
-    all_halo_mass = np.array([])
-
-    # Tile halos of width box fraction is greater than one.
-    # Copy original into offset positions to make tiles.
-    for x in range(int(np.ceil(slice['box_width_fraction']))):
-        for y in range(int(np.ceil(slice['box_width_fraction']))):
-            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
-
-    del halo_x, halo_y, halo_radius, halo_mass
-
-    # Shift centers laterally.
-    offset = copy.deepcopy(slice['projection_center'])
-    del offset[slice['projection_axis']]
-
-    # Shift x and y positions.
-    all_halo_x -= offset[0]
-    all_halo_y -= offset[1]
-
-    # Wrap off-edge centers back around to
-    # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
-
-    # After shifting, some centers have fractional coverage
-    # on both sides of the box.
-    # Find those centers and make copies to be placed on the other side.
-
-    # Centers hanging off the right edge.
-    add_x_right = all_halo_x + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
-    add_x_halo_y = all_halo_y[add_x_right]
-    add_x_halo_radius = all_halo_radius[add_x_right]
-    add_x_halo_mass = all_halo_mass[add_x_right]
-    del add_x_right
-
-    # Centers hanging off the left edge.
-    add_x_left = all_halo_x - all_halo_radius < 0
-    add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
-    add2_x_halo_y = all_halo_y[add_x_left]
-    add2_x_halo_radius = all_halo_radius[add_x_left]
-    add2_x_halo_mass = all_halo_mass[add_x_left]
-    del add_x_left
-
-    # Centers hanging off the top edge.
-    add_y_right = all_halo_y + all_halo_radius > \
-      np.ceil(slice['box_width_fraction'])
-    add_y_halo_x = all_halo_x[add_y_right]
-    add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
-    add_y_halo_radius = all_halo_radius[add_y_right]
-    add_y_halo_mass = all_halo_mass[add_y_right]
-    del add_y_right
-
-    # Centers hanging off the bottom edge.
-    add_y_left = all_halo_y - all_halo_radius < 0
-    add2_y_halo_x = all_halo_x[add_y_left]
-    add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
-    add2_y_halo_radius = all_halo_radius[add_y_left]
-    add2_y_halo_mass = all_halo_mass[add_y_left]
-    del add_y_left
-
-    # Add the hanging centers back to the projection data.
-    all_halo_x = np.concatenate([all_halo_x,
-                                 add_x_halo_x, add2_x_halo_x,
-                                 add_y_halo_x, add2_y_halo_x])
-    all_halo_y = np.concatenate([all_halo_y,
-                                 add_x_halo_y, add2_x_halo_y,
-                                 add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = np.concatenate([all_halo_radius,
-                                      add_x_halo_radius,
-                                      add2_x_halo_radius,
-                                      add_y_halo_radius,
-                                      add2_y_halo_radius])
-    all_halo_mass = np.concatenate([all_halo_mass,
-                                    add_x_halo_mass,
-                                    add2_x_halo_mass,
-                                    add_y_halo_mass,
-                                    add2_y_halo_mass])
-
-    del add_x_halo_x, add_x_halo_y, add_x_halo_radius
-    del add2_x_halo_x, add2_x_halo_y, add2_x_halo_radius
-    del add_y_halo_x, add_y_halo_y, add_y_halo_radius
-    del add2_y_halo_x, add2_y_halo_y, add2_y_halo_radius
-
-    # Cut edges to proper width.
-    cut_mask = (all_halo_x - all_halo_radius <
-                slice['box_width_fraction']) & \
-        (all_halo_y - all_halo_radius <
-         slice['box_width_fraction'])
-    all_halo_x = all_halo_x[cut_mask]
-    all_halo_y = all_halo_y[cut_mask]
-    all_halo_radius = all_halo_radius[cut_mask]
-    all_halo_mass = all_halo_mass[cut_mask]
-    del cut_mask
-
-    return (all_halo_x, all_halo_y,
-            all_halo_radius, all_halo_mass)

This diff is so big that we needed to truncate the remainder.
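
The truncated diff above removes light-cone halo-map code that tiles halo centers across periodic copies of the box and duplicates any halo whose radius hangs over an edge. A minimal numpy sketch of that wrap-and-duplicate idea, using made-up inputs rather than yt's halo arrays:

import numpy as np

def wrap_and_duplicate(x, radius, width):
    # Wrap centers into [0, width), then duplicate any circle that
    # crosses either edge so its periodic image is kept as well.
    x = np.mod(x, width)
    off_right = x + radius > width
    off_left = x - radius < 0
    x_extra = np.concatenate([x[off_right] - width, x[off_left] + width])
    r_extra = np.concatenate([radius[off_right], radius[off_left]])
    return np.concatenate([x, x_extra]), np.concatenate([radius, r_extra])

x = np.array([0.05, 0.50, 0.98])
r = np.array([0.10, 0.10, 0.05])
print(wrap_and_duplicate(x, r, 1.0))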

https://bitbucket.org/yt_analysis/yt/commits/0ee31f797d7d/
Changeset:   0ee31f797d7d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-15 20:22:51
Summary:     We do not need this, nor did we ever, really.
Affected #:  1 file

diff -r 8b014cbf298bec2fa1b12a43b9b616674a05b42d -r 0ee31f797d7dd9082a115f0a9758de094271990e yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -50,7 +50,6 @@
 def _ensure_code(arr):
     if hasattr(arr, "convert_to_units"):
         arr.convert_to_units("code_length")
-        return arr.d
     return arr
 
 @cython.boundscheck(False)
@@ -702,7 +701,6 @@
         _ensure_code(dobj.right_edge)
         _ensure_code(dobj.left_edge)
         DW = _ensure_code(dobj.pf.domain_width.copy())
-        DW = DW.view(np.ndarray)
 
         for i in range(3):
             region_width = dobj.right_edge[i] - dobj.left_edge[i]
@@ -735,7 +733,7 @@
             self.left_edge[i] = dobj.left_edge[i]
             self.right_edge[i] = dobj.right_edge[i]
             self.right_edge_shift[i] = \
-                (dobj.right_edge).to_ndarray()[i] - domain_width
+                (dobj.right_edge).to_ndarray()[i] - domain_width.to_ndarray()
             if not self.periodicity[i]:
                 self.right_edge_shift[i] = -np.inf
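
The last hunk keeps right_edge_shift as a plain number: the region's right edge minus the bare ndarray of the domain width (now that _ensure_code no longer strips units), with -inf switching the wrapped comparison off on non-periodic axes. A small numpy sketch of that bookkeeping, using bare ndarrays in place of yt's unit-aware arrays:

import numpy as np

def right_edge_shift(right_edge, domain_width, periodicity):
    # Shifted right edge used to test points that wrap across the
    # periodic boundary; -inf disables the wrapped test on an axis.
    shift = np.asarray(right_edge, dtype="float64") - domain_width
    shift[~np.asarray(periodicity)] = -np.inf
    return shift

print(right_edge_shift([0.9, 1.2, 0.5], 1.0, [True, True, False]))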
 


https://bitbucket.org/yt_analysis/yt/commits/944990baefe9/
Changeset:   944990baefe9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-15 21:09:05
Summary:     Fixing case of field name collision
Affected #:  1 file

diff -r 0ee31f797d7dd9082a115f0a9758de094271990e -r 944990baefe91e4c009690c925ae68a5220632a5 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -90,6 +90,9 @@
                    "particle_position", "particle_velocity",
                    self)
         else:
+            if (ptype, "particle_position") in self and \
+                 self[ptype, "particle_position"]._function == NullFunc:
+                self.pop((ptype, "particle_position"))
             particle_vector_functions(ptype,
                     ["particle_position_%s" % ax for ax in 'xyz'],
                     ["particle_velocity_%s" % ax for ax in 'xyz'],


https://bitbucket.org/yt_analysis/yt/commits/1b12b9fc5f79/
Changeset:   1b12b9fc5f79
Branch:      yt-3.0
User:        samskillman
Date:        2014-07-15 23:12:57
Summary:     Adding a bit of text about the thingking import.
Affected #:  1 file

diff -r 944990baefe91e4c009690c925ae68a5220632a5 -r 1b12b9fc5f79e9543c5b29d83cf730e38dd67d94 yt/utilities/sdf.py
--- a/yt/utilities/sdf.py
+++ b/yt/utilities/sdf.py
@@ -188,7 +188,7 @@
     def __init__(self, *args, **kwargs):
         super(HTTPDataStruct, self).__init__(*args, **kwargs)
         if None in (PageCacheURL, HTTPArray):
-            raise ImportError("thingking")
+            raise ImportError("'thingking' is required for loading of remote HTTP data.")
         self.pcu = PageCacheURL(self.filename)
 
     def set_offset(self, offset):
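
The clearer message above is part of the usual optional-dependency pattern: try the import once at module load, remember the failure, and raise an actionable error only when the feature is actually used. A generic sketch of that pattern (the RemoteReader class below is illustrative, not yt's):

try:
    import thingking  # optional; only needed for remote HTTP SDF data
except ImportError:
    thingking = None

class RemoteReader(object):
    def __init__(self, url):
        if thingking is None:
            raise ImportError(
                "'thingking' is required for loading of remote HTTP data.")
        self.url = url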


https://bitbucket.org/yt_analysis/yt/commits/61258ae50537/
Changeset:   61258ae50537
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-16 15:08:37
Summary:     Adding explanatory comment.
Affected #:  1 file

diff -r 1b12b9fc5f79e9543c5b29d83cf730e38dd67d94 -r 61258ae505375115fd53ed06c0f66c110b6b4c2b yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -90,6 +90,13 @@
                    "particle_position", "particle_velocity",
                    self)
         else:
+            # We need to check to make sure that there's a "known field" that
+            # overlaps with one of the vector fields.  For instance, if we are
+            # in the Stream frontend, and we have a set of scalar position
+            # fields, they will overlap with -- and be overridden by -- the
+            # "known" vector field that the frontend creates.  So the easiest
+            # thing to do is to simply remove the on-disk field (which doesn't
+            # exist) and replace it with a derived field.
             if (ptype, "particle_position") in self and \
                  self[ptype, "particle_position"]._function == NullFunc:
                 self.pop((ptype, "particle_position"))
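
The new pop() resolves a registry collision: a leftover scalar entry for (ptype, "particle_position") whose function is still NullFunc would shadow the derived vector field that is about to be registered, so the placeholder is removed first. A toy dict-based sketch of that pop-then-register order (NullFunc and the registry below are stand-ins, not yt's classes):

NullFunc = object()  # stand-in sentinel for "no generating function attached"

def register_vector_position(registry, ptype):
    key = (ptype, "particle_position")
    # Drop a placeholder so it cannot shadow the derived vector field.
    if registry.get(key) is NullFunc:
        registry.pop(key)
    registry[key] = lambda data: "stack the x/y/z scalars into an (N, 3) array"

registry = {("io", "particle_position"): NullFunc}
register_vector_position(registry, "io")
print(registry[("io", "particle_position")](None))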


https://bitbucket.org/yt_analysis/yt/commits/990c27c2dd3c/
Changeset:   990c27c2dd3c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-17 15:16:31
Summary:     Merged in darkskysims/yt-dark/yt-3.0 (pull request #955)

SDF frontend and Dark Sky Sims work
Affected #:  25 files

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -32,11 +32,15 @@
     PyArrayObject    *xpos, *ypos, *zpos;
     xpos=ypos=zpos=NULL;
     float link = 0.2;
+    float fPeriod[3] = {1.0, 1.0, 1.0};
+	int nMembers = 8;
 
     int i;
 
-    if (!PyArg_ParseTuple(args, "OOO|f",
-        &oxpos, &oypos, &ozpos, &link))
+    if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
+        &oxpos, &oypos, &ozpos, &link,
+        &fPeriod[0], &fPeriod[1], &fPeriod[2],
+        &nMembers))
     return PyErr_Format(_FOFerror,
             "EnzoFOF: Invalid parameters.");
 
@@ -74,8 +78,8 @@
 
 	KDFOF kd;
 	int nBucket,j;
-	float fPeriod[3],fEps;
-	int nMembers,nGroup,bVerbose=1;
+	float fEps;
+	int nGroup,bVerbose=1;
 	int sec,usec;
 	
 	/* linking length */
@@ -83,9 +87,6 @@
 	fEps = link;
 	
 	nBucket = 16;
-	nMembers = 8;
-
-	for (j=0;j<3;++j) fPeriod[j] = 1.0;
 
     /* initialize the kd FOF structure */
 

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -32,30 +32,33 @@
         float pos[6]
         float corevel[3]
         float bulkvel[3]
-        float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+        float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
         float J[3]
         float energy, spin
         float alt_m[4]
         float Xoff, Voff, b_to_a, c_to_a
         float A[3]
-        float bullock_spin, kin_to_pot
+        float b_to_a2, c_to_a2
+        float A2[3]
+        float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-        float min_pos_err, min_vel_err, min_bulkvel_err
+        float min_pos_err, min_vel_err, min_bulkvel_err, _pad
 
-ctypedef packed struct haloflat:
+ctypedef struct haloflat:
     np.int64_t id
-    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float pos_x, pos_y, pos_z, vel_x, vel_y, vel_z
     float corevel_x, corevel_y, corevel_z
     float bulkvel_x, bulkvel_y, bulkvel_z
-    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-    float J1, J2, J3
+    float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
+    float Jx, Jy, Jz
     float energy, spin
     float alt_m1, alt_m2, alt_m3, alt_m4
     float Xoff, Voff, b_to_a, c_to_a
-    float A1, A2, A3
-    float bullock_spin, kin_to_pot
+    float Ax, Ay, Az
+    float b_to_a2, c_to_a2, A2x, A2y, A2z
+    float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
     np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-    float min_pos_err, min_vel_err, min_bulkvel_err
+    float min_pos_err, min_vel_err, min_bulkvel_err, _pad
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
@@ -68,6 +71,10 @@
     void free_particle_copies() nogil
     void alloc_particle_copies(np.int64_t total_copies) nogil
     void free_halos() nogil
+    float max_halo_radius(halo *h) nogil
+
+# global in groupies.c
+cdef extern double particle_thresh_dens[5]
 
 # For outputing halos, rockstar style
 
@@ -80,6 +87,9 @@
     void setup_config() nogil
     void output_config(char *fn) nogil
 
+cdef import from "distance.h":
+    void init_cosmology() nogil
+
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
     # definition of their variables.
@@ -87,13 +97,21 @@
     np.float64_t PARTICLE_MASS
 
     char *MASS_DEFINITION
+    char *MASS_DEFINITION2
+    char *MASS_DEFINITION3
+    char *MASS_DEFINITION4
+    char *MASS_DEFINITION5
+    np.int64_t STRICT_SO_MASSES
     np.int64_t MIN_HALO_OUTPUT_SIZE
     np.float64_t FORCE_RES
+    np.float64_t FORCE_RES_PHYS_MAX
 
     np.float64_t SCALE_NOW
     np.float64_t h0
     np.float64_t Ol
     np.float64_t Om
+    np.float64_t W0
+    np.float64_t WA
 
     np.int64_t GADGET_ID_BYTES
     np.float64_t GADGET_MASS_CONVERSION
@@ -111,6 +129,7 @@
     char *INBASE
     char *FILENAME
     np.int64_t STARTING_SNAP
+    np.int64_t RESTART_SNAP
     np.int64_t NUM_SNAPS
     np.int64_t NUM_BLOCKS
     np.int64_t NUM_READERS
@@ -130,10 +149,13 @@
     np.int64_t FULL_PARTICLE_CHUNKS
     char *BGC2_SNAPNAMES
 
+    np.int64_t SHAPE_ITERATIONS
+    np.int64_t WEIGHTED_SHAPES
     np.int64_t BOUND_PROPS
     np.int64_t BOUND_OUT_TO_HALO_EDGE
     np.int64_t DO_MERGER_TREE_ONLY
     np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t EXACT_LL_CALC
     np.float64_t TRIM_OVERLAP
     np.float64_t ROUND_AFTER_TRIM
     np.int64_t LIGHTCONE
@@ -147,20 +169,20 @@
 
     np.int64_t SWAP_ENDIANNESS
     np.int64_t GADGET_VARIANT
+    np.int64_t ART_VARIANT
 
     np.float64_t FOF_FRACTION
     np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INITIAL_METRIC_SCALING
     np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
-    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
     np.int64_t TEMPORAL_HALO_FINDING
     np.int64_t MIN_HALO_PARTICLES
     np.float64_t UNBOUND_THRESHOLD
     np.int64_t ALT_NFW_METRIC
+    np.int64_t EXTRA_PROFILING
 
     np.int64_t TOTAL_PARTICLES
     np.float64_t BOX_SIZE
-    np.int64_t OUTPUT_HMAD
-    np.int64_t OUTPUT_PARTICLES
     np.int64_t OUTPUT_LEVELS
     np.float64_t DUMP_PARTICLES[3]
 
@@ -179,16 +201,20 @@
         self.pf = pf
 
     def setup_rockstar(self,
-                        particle_mass,
-                        int periodic = 1, force_res=None,
-                        int min_halo_size = 25, outbase = "None",
-                        callbacks = None):
+                       particle_mass,
+                       int periodic = 1, force_res = None,
+                       int min_halo_size = 25, outbase = "None",
+                       write_config = False,  exact_ll_calc = False,
+                       lightcone = False, lightcone_origin = [0,0,0],
+                       callbacks = None):
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
-        
+        global OUTPUT_FORMAT, EXTRA_PROFILING
+        global STRICT_SO_MASSES, EXACT_LL_CALC
+        global LIGHTCONE, LIGHTCONE_ORIGIN
 
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
@@ -197,7 +223,7 @@
         
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
-        OUTPUT_FORMAT = "ASCII"
+        OUTPUT_FORMAT = "BOTH"
         MIN_HALO_OUTPUT_SIZE=min_halo_size
         
         pf = self.pf
@@ -213,27 +239,95 @@
             #workaround is to make a new directory
             OUTBASE = outbase 
 
-
         PARTICLE_MASS = particle_mass.in_units('Msun/h')
         PERIODIC = periodic
         BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
 
+        if exact_ll_calc: EXACT_LL_CALC = 1
+        STRICT_SO_MASSES = 1    # presumably unused in our code path
+        EXTRA_PROFILING = 0
+
+        if lightcone:
+            LIGHTCONE = 1
+            LIGHTCONE_ORIGIN[0] = lightcone_origin[0]
+            LIGHTCONE_ORIGIN[1] = lightcone_origin[1]
+            LIGHTCONE_ORIGIN[2] = lightcone_origin[2]
+
         # Set up the configuration options
         setup_config()
 
         # Needs to be called so rockstar can use the particle mass parameter
         # to calculate virial quantities properly
+        init_cosmology()
         calc_mass_definition()
 
-    def output_halos(self):
-        output_halos(0, 0, 0, NULL) 
+        if write_config: output_config(NULL)
+
+    def particle_thresh_dens(self):
+        cdef np.ndarray d = np.array([particle_thresh_dens[0],
+                                      particle_thresh_dens[1],
+                                      particle_thresh_dens[2],
+                                      particle_thresh_dens[3],
+                                      particle_thresh_dens[4]],
+                                     dtype=np.float64)
+        return d
+
+    def assign_masses(self, h, np.ndarray[np.float32_t, ndim=1] r, float force_res, \
+                      double pmass, np.ndarray[np.float64_t, ndim=1] dens_thresh):
+        """Assign spherical overdensity masses to halos.  r must be sorted"""
+        cdef double total_mass = 0.0
+        cdef double m = 0.0
+        cdef double alt_m1 = 0.0
+        cdef double alt_m2 = 0.0
+        cdef double alt_m3 = 0.0
+        cdef double alt_m4 = 0.0
+        cdef double rr
+        cdef double cur_dens
+        for rr in r:
+            if rr < force_res: rr = force_res
+            total_mass += pmass
+            cur_dens = total_mass/(rr*rr*rr)
+            if cur_dens > dens_thresh[0]: m = total_mass
+            if cur_dens > dens_thresh[1]: alt_m1 = total_mass
+            if cur_dens > dens_thresh[2]: alt_m2 = total_mass
+            if cur_dens > dens_thresh[3]: alt_m3 = total_mass
+            if cur_dens > dens_thresh[4]: alt_m4 = total_mass
+            if cur_dens <= dens_thresh[1]:
+                break
+        h['m'] = m
+        h['alt_m1'] = alt_m1
+        h['alt_m2'] = alt_m2
+        h['alt_m3'] = alt_m3
+        h['alt_m4'] = alt_m4
+        # if cur_dens > dens_thresh[1]:
+            # This is usually a subhalo problem, and we don't know who is a subhalo
+            # print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!"
+            # print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1])
+
+    def max_halo_radius(self, int i):
+        return max_halo_radius(&halos[i])
+
+    def output_halos(self, np.int64_t idoffset, np.ndarray[np.float32_t, ndim=2] bbox):
+        cdef float bounds[6]
+        if idoffset is None: idoffset = 0
+        if bbox is None:
+            output_halos(idoffset, 0, 0, NULL) 
+        else:
+            for i in range(3):
+                bounds[i] = bbox[i,0]
+                bounds[i+3] = bbox[i,1]
+            output_halos(idoffset, 0, 0, bounds) 
+
+    def output_config(self):
+        output_config(NULL) 
 
     def return_halos(self):
         cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
-        rv = np.asarray(haloview).copy()
+        return np.asarray(haloview)
+
+    def finish(self):
         rockstar_cleanup()
         free_halos()
-        return rv
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -242,6 +336,7 @@
                                 np.ndarray[anyfloat, ndim=2] pos,
                                 np.ndarray[anyfloat, ndim=2] vel):
 
+        verbose = False
         # Define fof object
 
         # Find number of particles
@@ -271,7 +366,7 @@
                 j += 1
         if j > max_count:
             max_count = j
-        #print >> sys.stderr, "Most frequent occurrance: %s" % max_count
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0
         cdef int counter = 0, ndone = 0
@@ -300,7 +395,7 @@
                 pcounts[ndone] = fof_obj.num_p
                 counter += 1
                 ndone += 1
-                if counter == frac:
+                if verbose and counter == frac:
                     print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
                         (100.0 * ndone)/pcounts.size,
                         fof_obj.particles[0].pos[2],
@@ -311,4 +406,5 @@
                 # Now we reset
                 fof_obj.num_p = j = 0
         free(fof_obj.particles)
+        global_particles = NULL
         return pcounts
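
assign_masses above walks particles outward in sorted radius, accumulating mass and keeping the enclosed mass for each overdensity definition while the running density total_mass / r**3 still exceeds the corresponding threshold (geometric factors such as 4*pi/3 are presumably folded into dens_thresh, as the bare total_mass/(rr*rr*rr) comparison suggests). A standalone numpy sketch of the same cumulative-density idea for a single threshold, with mock inputs:

import numpy as np

def so_mass(r, pmass, dens_thresh, force_res):
    # Largest enclosed mass whose mean density total_mass / r**3
    # still exceeds dens_thresh; radii below force_res are softened.
    r = np.maximum(np.sort(r), force_res)
    total_mass = pmass * np.arange(1, r.size + 1)
    dens = total_mass / r**3
    above = np.nonzero(dens > dens_thresh)[0]
    return total_mass[above[-1]] if above.size else 0.0

r = np.random.random(1000) ** (1.0 / 3.0)  # mock radii, uniform-density sphere
print(so_mass(r, pmass=1.0, dens_thresh=200.0, force_res=1e-3))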

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -328,8 +328,10 @@
     def morton(self):
         self.validate()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         morton = compute_morton(
             self[:,0], self[:,1], self[:,2],
             LE, RE)
@@ -340,8 +342,10 @@
         mi = self.morton
         mi.sort()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         octree = ParticleOctreeContainer(dims, LE, RE, 
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
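
Both hunks in octree_subset.py replace a flat pad of one epsilon (in code_length) with a pad proportional to the coordinate itself, so bounding boxes whose edges are far from unity still get a representable nudge. A quick float64 illustration of why the relative pad matters (the cm-scale values are only an example):

import numpy as np

eps = np.finfo("float64").eps
LE = np.array([3.0857e21, -3.0857e21])   # e.g. box edges in cm

absolute = LE - eps                  # flat pad of ~2.2e-16
relative = LE - np.abs(LE) * eps     # pad scaled by the coordinate

print(absolute - LE)   # [ 0.  0.] -- the flat pad is rounded away entirely
print(relative - LE)   # each entry moves by roughly one ulp of its magnitude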

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -110,7 +110,7 @@
                 data[field][ub] /= weight_data[field][ub]
                 std_data[field][ub] /= weight_data[field][ub]
             self[field] = data[field]
-            #self["%s_std" % field] = np.sqrt(std_data[field])
+            self["%s_std" % field] = np.sqrt(std_data[field])
         self["UsedBins"] = used
 
         if fractional:
@@ -852,7 +852,7 @@
         if self.weight_field is not None:
             weight_data = chunk[self.weight_field]
         else:
-            weight_data = np.ones(chunk.ires.size, dtype="float64")
+            weight_data = np.ones(filter.size, dtype="float64")
         weight_data = weight_data[filter]
         # So that we can pass these into
         return arr, weight_data, bin_fields
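
The profiles change switches the *_std output back on, storing the square root of the accumulated, weight-normalized spread next to each weighted mean. One common way to write a weighted mean and standard deviation for a single bin, as a hedged sketch rather than yt's exact accumulation:

import numpy as np

def weighted_mean_std(values, weights):
    # Weighted mean and standard deviation of one bin's samples.
    wsum = weights.sum()
    mean = (weights * values).sum() / wsum
    var = (weights * (values - mean) ** 2).sum() / wsum
    return mean, np.sqrt(var)

vals = np.array([1.0, 2.0, 4.0])
wts = np.array([1.0, 1.0, 2.0])
print(weighted_mean_std(vals, wts))   # (2.75, ~1.30)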

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -34,7 +34,7 @@
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
+        np.multiply(r, r, r)
         np.add(radius2, r, radius2)
         if data.pf.dimensionality < i+1:
             break
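
np.multiply(r, r, r) squares the per-axis offsets in place, which is cheaper than the generic np.power path the old line used; the surrounding loop builds the periodic squared radius one axis at a time. A hedged numpy sketch of the same accumulation pattern (the diff folds the wrap in slightly differently, via |r - DW|):

import numpy as np

def periodic_radius2(pos, center, domain_width):
    # Squared minimum-image distance from center, accumulated per axis
    # with in-place ufuncs, mirroring the pattern in the diff.
    radius2 = np.zeros(pos.shape[0], dtype="float64")
    for i in range(pos.shape[1]):
        r = np.abs(pos[:, i] - center[i])
        np.minimum(r, domain_width[i] - r, r)   # wrap across the boundary
        np.multiply(r, r, r)                    # in-place square
        np.add(radius2, r, radius2)
    return radius2

pos = np.array([[0.95, 0.5], [0.10, 0.5]])
print(periodic_radius2(pos, center=[0.05, 0.5], domain_width=[1.0, 1.0]))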

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -90,6 +90,16 @@
                    "particle_position", "particle_velocity",
                    self)
         else:
+            # We need to check to make sure that there's a "known field" that
+            # overlaps with one of the vector fields.  For instance, if we are
+            # in the Stream frontend, and we have a set of scalar position
+            # fields, they will overlap with -- and be overridden by -- the
+            # "known" vector field that the frontend creates.  So the easiest
+            # thing to do is to simply remove the on-disk field (which doesn't
+            # exist) and replace it with a derived field.
+            if (ptype, "particle_position") in self and \
+                 self[ptype, "particle_position"]._function == NullFunc:
+                self.pop((ptype, "particle_position"))
             particle_vector_functions(ptype,
                     ["particle_position_%s" % ax for ax in 'xyz'],
                     ["particle_velocity_%s" % ax for ax in 'xyz'],

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -72,6 +72,7 @@
 
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
     orig = set(registry.keys())
+    ptype_dn = ptype.replace("_","\/").title()
     def particle_count(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
@@ -81,7 +82,7 @@
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype)
+             display_name = "\\mathrm{%s Count}" % ptype_dn)
 
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
@@ -92,7 +93,7 @@
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
+             display_name = "\\mathrm{%s Mass}" % ptype_dn,
              units = "g")
              
     def particle_density(field, data):
@@ -108,7 +109,7 @@
     registry.add_field(("deposit", "%s_density" % ptype),
              function = particle_density,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % ptype,
+             display_name = "\\mathrm{%s Density}" % ptype_dn,
              units = "g/cm**3")
 
     def particle_cic(field, data):
@@ -121,7 +122,7 @@
     registry.add_field(("deposit", "%s_cic" % ptype),
              function = particle_cic,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
              units = "g/cm**3")
 
     # Now some translation functions.
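
ptype_dn turns the raw particle type into a LaTeX-friendly display name, so deposited fields render as, e.g., Dark\/Matter Density rather than dark_matter Density. The transformation itself is plain string munging (a raw string is used below to avoid escape warnings; the behavior is the same):

ptype = "dark_matter"
ptype_dn = ptype.replace("_", r"\/").title()
print(ptype_dn)                             # Dark\/Matter
print("\\mathrm{%s Density}" % ptype_dn)    # \mathrm{Dark\/Matter Density}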

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -30,7 +30,9 @@
     ("box_size", 1, "f"),
     ("particle_mass", 1, "f"),
     ("particle_type", 1, "q"),
-    ("unused", BINARY_HEADER_SIZE - 4*12 - 8*6, "c")
+    ("format_revision", 1, "i"),
+    ("version", 12, "c"),
+    ("unused", BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12, "c")
 )
 
 halo_dt = np.dtype([
@@ -38,17 +40,17 @@
     ('particle_position_x', np.float32),
     ('particle_position_y', np.float32),
     ('particle_position_z', np.float32),
-    ('particle_mposition_x', np.float32),
-    ('particle_mposition_y', np.float32),
-    ('particle_mposition_z', np.float32),
     ('particle_velocity_x', np.float32),
     ('particle_velocity_y', np.float32),
     ('particle_velocity_z', np.float32),
-    ('particle_bvelocity_x', np.float32),
-    ('particle_bvelocity_y', np.float32),
-    ('particle_bvelocity_z', np.float32),
+    ('particle_corevel_x', np.float32),
+    ('particle_corevel_y', np.float32),
+    ('particle_corevel_z', np.float32),
+    ('particle_bulkvel_x', np.float32),
+    ('particle_bulkvel_y', np.float32),
+    ('particle_bulkvel_z', np.float32),
     ('particle_mass', np.float32),
-    ('virial_radius', np.float32),
+    ('radius', np.float32),
     ('child_r', np.float32),
     ('vmax_r', np.float32),
     ('mgrav', np.float32),
@@ -57,9 +59,9 @@
     ('rs', np.float32),
     ('klypin_rs', np.float32),
     ('vrms', np.float32),
-    ('JX', np.float32),
-    ('JY', np.float32),
-    ('JZ', np.float32),
+    ('Jx', np.float32),
+    ('Jy', np.float32),
+    ('Jz', np.float32),
     ('energy', np.float32),
     ('spin', np.float32),
     ('alt_m1', np.float32),
@@ -73,8 +75,15 @@
     ('Ax', np.float32),
     ('Ay', np.float32),
     ('Az', np.float32),
+    ('b_to_a2', np.float32),
+    ('c_to_a2', np.float32),
+    ('A2x', np.float32),
+    ('A2y', np.float32),
+    ('A2z', np.float32),
     ('bullock_spin', np.float32),
     ('kin_to_pot', np.float32),
+    ('m_pe_b', np.float32),
+    ('m_pe_d', np.float32),
     ('num_p', np.int64),
     ('num_child_particles', np.int64),
     ('p_start', np.int64),
@@ -84,8 +93,7 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-    ('padding2', np.float32),
-])
+], align=True)
 
 particle_dt = np.dtype([
     ('particle_identifier', np.int64),
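
halo_dt now mirrors rockstar's on-disk halo struct, and align=True asks numpy to insert the same padding a C compiler would, so whole records can be memory-mapped or read with np.fromfile without manual offset bookkeeping. A small self-contained illustration of what align changes (the struct below is hypothetical, not rockstar's):

import numpy as np

fields = [("id", np.int64), ("x", np.float32),
          ("y", np.float32), ("z", np.float32)]

packed = np.dtype(fields)               # 8 + 4 + 4 + 4 = 20 bytes per record
aligned = np.dtype(fields, align=True)  # padded to 24 so int64 stays 8-aligned

print("%d %d" % (packed.itemsize, aligned.itemsize))   # 20 24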

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/halo_catalogs/rockstar/fields.py
--- a/yt/frontends/halo_catalogs/rockstar/fields.py
+++ b/yt/frontends/halo_catalogs/rockstar/fields.py
@@ -40,17 +40,17 @@
         ("particle_position_x", (p_units, [], None)),
         ("particle_position_y", (p_units, [], None)),
         ("particle_position_z", (p_units, [], None)),
-        ("particle_mposition_x", (p_units, [], None)),
-        ("particle_mposition_y", (p_units, [], None)),
-        ("particle_mposition_z", (p_units, [], None)),
         ("particle_velocity_x", (v_units, [], None)),
         ("particle_velocity_y", (v_units, [], None)),
         ("particle_velocity_z", (v_units, [], None)),
-        ("particle_bvelocity_x", (v_units, [], None)),
-        ("particle_bvelocity_y", (v_units, [], None)),
-        ("particle_bvelocity_z", (v_units, [], None)),
-        ("particle_mass", (m_units, [], "Virial Mass")),
-        ("virial_radius", (r_units, [], "Virial Radius")),
+        ("particle_corevel_x", (v_units, [], None)),
+        ("particle_corevel_y", (v_units, [], None)),
+        ("particle_corevel_z", (v_units, [], None)),
+        ("particle_bulkvel_x", (v_units, [], None)),
+        ("particle_bulkvel_y", (v_units, [], None)),
+        ("particle_bulkvel_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Mass")),
+        ("virial_radius", (r_units, [], "Radius")),
         ("child_r", (r_units, [], None)),
         ("vmax_r", (v_units, [], None)),
     # These fields I don't have good definitions for yet.
@@ -60,9 +60,9 @@
     ('rs', (r_units, [], "R_s")),
     ('klypin_rs', (r_units, [], "Klypin R_s")),
     ('vrms', (v_units, [], "V_{rms}")),
-    ('JX', ("", [], "J_x")),
-    ('JY', ("", [], "J_y")),
-    ('JZ', ("", [], "J_z")),
+    ('Jx', ("", [], "J_x")),
+    ('Jy', ("", [], "J_y")),
+    ('Jz', ("", [], "J_z")),
     ('energy', ("", [], None)),
     ('spin', ("", [], "Spin Parameter")),
     ('alt_m1', (m_units, [], None)),
@@ -76,8 +76,15 @@
     ('Ax', ("", [], "A_x")),
     ('Ay', ("", [], "A_y")),
     ('Az', ("", [], "A_z")),
+    ('b_to_a2', ("", [], None)),
+    ('c_to_a2', ("", [], None)),
+    ('A2x', ("", [], "A2_x")),
+    ('A2y', ("", [], "A2_y")),
+    ('A2z', ("", [], "A2_z")),
     ('bullock_spin', ("", [], "Bullock Spin Parameter")),
     ('kin_to_pot', ("", [], "Kinetic to Potential")),
+    ('m_pe_b', ("", [], None)),
+    ('m_pe_d', ("", [], None)),
     ('num_p', ("", [], "Number of Particles")),
     ('num_child_particles', ("", [], "Number of Child Particles")),
     ('p_start', ("", [], None)),

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -29,17 +29,31 @@
     ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
-from yt.utilities.physical_constants import \
-    G, \
+from yt.utilities.physical_ratios import \
     cm_per_kpc, \
-    mass_sun_cgs
-from yt.utilities.cosmology import Cosmology
+    mass_sun_grams, \
+    sec_per_Gyr
 from .fields import \
     SDFFieldInfo
 from .io import \
-    IOHandlerSDF, \
+    IOHandlerSDF
+from yt.utilities.sdf import \
     SDFRead,\
-    SDFIndex
+    SDFIndex,\
+    HTTPSDFRead
+
+try:
+    import requests
+except ImportError:
+    requests = None
+
+
+
+# currently specified by units_2HOT == 2 in header
+# in future will read directly from file
+units_2HOT_v2_length = 3.08567802e21
+units_2HOT_v2_mass = 1.98892e43
+units_2HOT_v2_time = 3.1558149984e16
 
 class SDFFile(ParticleFile):
     pass
@@ -51,19 +65,24 @@
     _particle_mass_name = None
     _particle_coordinates_name = None
     _particle_velocity_name = None
-    _sindex = None
+    _midx = None
+    _skip_cache = True
+    _subspace = False
+
 
     def __init__(self, filename, dataset_type = "sdf_particles",
                  n_ref = 64, over_refine_factor = 1,
                  bounding_box = None,
                  sdf_header = None,
-                 idx_filename = None,
-                 idx_header = None,
-                 idx_level = 9):
+                 midx_filename = None,
+                 midx_header = None,
+                 midx_level = None,
+                 field_map = None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
-            bbox = np.array(bounding_box, dtype="float64")
+            self._subspace = True
+            bbox = np.array(bounding_box, dtype="float32")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
             self.domain_left_edge = bbox[:,0]
@@ -71,69 +90,117 @@
         else:
             self.domain_left_edge = self.domain_right_edge = None
         self.sdf_header = sdf_header
-        self.idx_filename = idx_filename
-        self.idx_header = idx_header
-        self.idx_level = idx_level
+        self.midx_filename = midx_filename
+        self.midx_header = midx_header
+        self.midx_level = midx_level
+        if field_map is None:
+            field_map = {}
+        self._field_map = field_map
+        prefix = ''
+        if self.midx_filename is not None:
+            prefix += 'midx_'
+        if filename.startswith("http"):
+            prefix += 'http_'
+        dataset_type = prefix + 'sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
-        self.sdf_container = SDFRead(self.parameter_filename,
-                                     header=self.sdf_header)
+        if self.parameter_filename.startswith("http"):
+            sdf_class = HTTPSDFRead
+        else:
+            sdf_class = SDFRead
+        self.sdf_container = sdf_class(self.parameter_filename,
+                                 header=self.sdf_header)
+
         # Reference
         self.parameters = self.sdf_container.parameters
         self.dimensionality = 3
         self.refine_by = 2
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        try:
+            self.unique_identifier = \
+                int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        except:
+            self.unique_identifier = time.time()
+
 
         if None in (self.domain_left_edge, self.domain_right_edge):
             R0 = self.parameters['R0']
-            self.domain_left_edge = np.array([
-              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
-            self.domain_right_edge = np.array([
-              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            if 'offset_center' in self.parameters and self.parameters['offset_center']:
+                self.domain_left_edge = np.array([0, 0, 0])
+                self.domain_right_edge = np.array([
+                 2.0 * self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            else:
+                self.domain_left_edge = np.array([
+                    -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                self.domain_right_edge = np.array([
+                    +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
             self.domain_left_edge *= self.parameters.get("a", 1.0)
             self.domain_right_edge *= self.parameters.get("a", 1.0)
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
-        self.periodicity = (True, True, True)
+        if "do_periodic" in self.parameters and self.parameters["do_periodic"]:
+            self.periodicity = (True, True, True)
+        else:
+            self.periodicity = (False, False, False)
 
         self.cosmological_simulation = 1
 
         self.current_redshift = self.parameters.get("redshift", 0.0)
         self.omega_lambda = self.parameters["Omega0_lambda"]
         self.omega_matter = self.parameters["Omega0_m"]
+        if "Omega0_fld" in self.parameters:
+            self.omega_lambda += self.parameters["Omega0_fld"]
+        if "Omega0_r" in self.parameters:
+            # not correct, but most codes can't handle Omega0_r
+            self.omega_matter += self.parameters["Omega0_r"]
         self.hubble_constant = self.parameters["h_100"]
-        # Now we calculate our time based on the cosmology.
-        cosmo = Cosmology(self.hubble_constant,
-                          self.omega_matter, self.omega_lambda)
-        self.current_time = cosmo.hubble_time(self.current_redshift)
+        self.current_time = units_2HOT_v2_time * self.parameters.get("tpos", 0.0)
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
         self.filename_template = self.parameter_filename
         self.file_count = 1
 
     @property
-    def sindex(self):
-        if self._sindex is None:
-            if self.idx_filename is not None:
-                indexdata = SDFRead(self.idx_filename,
-                                    header=self.idx_header)
-                self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
+    def midx(self):
+        if self._midx is None:
+            if self.midx_filename is not None:
+
+                if 'http' in self.midx_filename:
+                    sdf_class = HTTPSDFRead
+                else:
+                    sdf_class = SDFRead
+                indexdata = sdf_class(self.midx_filename, header=self.midx_header)
+                self._midx = SDFIndex(self.sdf_container, indexdata,
+                                        level=self.midx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
-        else:
-            return self._sindex
+        return self._midx
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "kpc")
-        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
-        self.time_unit = self.quan(1.0, "Gyr")
-        self.mass_unit = self.quan(1e10, "Msun")
+        self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
+        self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
+        self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
+        mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
+        if ' ' in mass_unit:
+            factor, unit = mass_unit.split(' ')
+        else:
+            factor = 1.0
+            unit = mass_unit
+        self.mass_unit = self.quan(float(factor), unit)
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        if not os.path.isfile(args[0]): return False
-        with open(args[0], "r") as f:
-            line = f.readline().strip()
-            return line == "# SDF 1.0"
+        sdf_header = kwargs.get('sdf_header', args[0])
+        print 'Parsing sdf_header: %s' % sdf_header
+        if sdf_header.startswith("http"):
+            if requests is None: return False
+            hreq = requests.get(sdf_header, stream=True)
+            if hreq.status_code != 200: return False
+            # Grab a whole 4k page.
+            line = hreq.iter_content(4096).next()
+        elif os.path.isfile(sdf_header):
+            with open(sdf_header, "r") as f:
+                line = f.read(10).strip()
+        else:
+            return False
+        return line.startswith("# SDF")
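
_set_code_unit_attributes now reads unit strings such as '1e10 Msun' straight from the SDF parameters, peeling an optional numeric factor off the unit name. A standalone sketch of that parsing, returning a (factor, unit) pair instead of constructing a yt quantity:

def parse_mass_unit(mass_unit="1e10 Msun"):
    # Split an optional leading factor off a unit string.
    if " " in mass_unit:
        factor, unit = mass_unit.split(" ")
    else:
        factor, unit = 1.0, mass_unit
    return float(factor), unit

print(parse_mass_unit("1e10 Msun"))   # (1e+10, 'Msun')
print(parse_mass_unit("g"))           # (1.0, 'g')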

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sdf/fields.py
--- a/yt/frontends/sdf/fields.py
+++ b/yt/frontends/sdf/fields.py
@@ -35,13 +35,39 @@
 class SDFFieldInfo(FieldInfoContainer):
     known_other_fields = ()
 
-    known_particle_fields = (
-        ("mass", ("code_mass", ["particle_mass"], None)),
-        ("x", ("code_length", ["particle_position_x"], None)),
-        ("y", ("code_length", ["particle_position_y"], None)),
-        ("z", ("code_length", ["particle_position_z"], None)),
-        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
-        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
-        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
-        ("ident", ("", ["particle_index"], None)),
-    )
+    known_particle_fields = ()
+    _mass_field = None
+
+    def __init__(self, pf, field_list):
+
+        if 'mass' in field_list:
+            self.known_particle_fields.append(("mass", "code_mass",
+                                               ["particle_mass"], None))
+        possible_masses = ['mass', 'm200b', 'mvir']
+        mnf = 'mass'
+        for mn in possible_masses:
+            if mn in pf.sdf_container.keys():
+                mnf = self._mass_field = mn
+                break
+
+        idf = pf._field_map.get("particle_index", 'ident')
+        xf = pf._field_map.get("particle_position_x", 'x')
+        yf = pf._field_map.get("particle_position_y", 'y')
+        zf = pf._field_map.get("particle_position_z", 'z')
+        vxf = pf._field_map.get("particle_velocity_x", 'vx')
+        vyf = pf._field_map.get("particle_velocity_y", 'vy')
+        vzf = pf._field_map.get("particle_velocity_z", 'vz')
+
+        self.known_particle_fields = (
+            (idf, ('dimensionless', ['particle_index'], None)),
+            (xf,  ('code_length', ['particle_position_x'], None)),
+            (yf,  ('code_length', ['particle_position_y'], None)),
+            (zf,  ('code_length', ['particle_position_z'], None)),
+            (vxf, ('code_velocity', ['particle_velocity_x'], None)),
+            (vyf, ('code_velocity', ['particle_velocity_y'], None)),
+            (vzf, ('code_velocity', ['particle_velocity_z'], None)),
+            (mnf, ('code_mass', ['particle_mass'], None)),
+        )
+        super(SDFFieldInfo, self).__init__(pf, field_list)
+
+
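
SDFFieldInfo builds its known_particle_fields from an optional field_map, falling back to the conventional on-disk column names ('x', 'vx', 'ident', ...) when a yt field name is not remapped. A toy version of that lookup; the default names below are illustrative, not the full table:

defaults = {
    "particle_index": "ident",
    "particle_position_x": "x",
    "particle_position_y": "y",
    "particle_position_z": "z",
    "particle_velocity_x": "vx",
    "particle_velocity_y": "vy",
    "particle_velocity_z": "vz",
}

def resolve_field_names(field_map):
    # Map yt particle field names to on-disk column names,
    # honoring any user overrides supplied in field_map.
    return dict((yt_name, field_map.get(yt_name, disk_name))
                for yt_name, disk_name in defaults.items())

print(resolve_field_names({"particle_index": "id64"}))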

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -14,11 +14,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
-import h5py
 import numpy as np
 from yt.funcs import *
 from yt.utilities.exceptions import *
+from yt.units.yt_array import YTArray
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -82,6 +81,7 @@
     def _initialize_index(self, data_file, regions):
         x, y, z = (self._handle[ax] for ax in 'xyz')
         pcount = x.size
+
         morton = np.empty(pcount, dtype='uint64')
         ind = 0
         while ind < pcount:
@@ -90,12 +90,6 @@
             pos[:,0] = x[ind:ind+npart]
             pos[:,1] = y[ind:ind+npart]
             pos[:,2] = z[ind:ind+npart]
-            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
-               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
-                raise YTDomainOverflow(pos.min(axis=0),
-                                       pos.max(axis=0),
-                                       self.pf.domain_left_edge,
-                                       self.pf.domain_right_edge)
             regions.add_data_file(pos, data_file.file_id)
             morton[ind:ind+npart] = compute_morton(
                 pos[:,0], pos[:,1], pos[:,2],
@@ -104,461 +98,170 @@
             ind += CHUNKSIZE
         return morton
 
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
     def _count_particles(self, data_file):
-        return {'dark_matter': self._handle['x'].size}
+        pcount = self._handle['x'].size
+        if (pcount > 1e9):
+            mylog.warn("About to load %i particles into memory. " % (pcount) +
+                       "You may want to consider a midx-enabled load")
+        return {'dark_matter': pcount}
+
+
+class IOHandlerHTTPSDF(IOHandlerSDF):
+    _dataset_type = "http_sdf_particles"
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'][:pcount], self._handle['y'][:pcount], self._handle['z'][:pcount])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x'][:pcount]
+                y = self._handle['y'][:pcount]
+                z = self._handle['z'][:pcount]
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        if self.pf.field_info._mass_field is None:
+                            pm = 1.0
+                            if 'particle_mass' in self.pf.parameters:
+                                pm = self.pf.parameters['particle_mass']
+                            else:
+                                raise RuntimeError
+                            data = pm * np.ones(mask.sum(), dtype="float64")
+                        else:
+                            data = self._handle[self.pf.field_info._mass_field][mask]
+                    else:
+                        data = self._handle[field][mask]
+                    yield (ptype, field), data
+
+    def _count_particles(self, data_file):
+        return {'dark_matter': self._handle['x'].http_array.shape}
+
+
+class IOHandlerSIndexSDF(IOHandlerSDF):
+    _dataset_type = "midx_sdf_particles"
+
+
+    def _read_particle_coords(self, chunks, ptf):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            yield "dark_matter", (
+                dd['x'], dd['y'], dd['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        required_fields = []
+        for ptype, field_list in sorted(ptf.items()):
+            for field in field_list:
+                if field == "mass": continue
+                required_fields.append(field)
+
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            required_fields):
+
+            for ptype, field_list in sorted(ptf.items()):
+                x = dd['x']
+                y = dd['y']
+                z = dd['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = dd[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount = 0
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x']):
+            pcount += dd['x'].size
+
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+
+        chunk_id = 0
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            npart = dd['x'].size
+            pos = np.empty((npart, 3), dtype=dd['x'].dtype)
+            pos[:,0] = dd['x']
+            pos[:,1] = dd['y']
+            pos[:,2] = dd['z']
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, chunk_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += npart
+        return morton
+
+    def _count_particles(self, data_file):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount_estimate = self.pf.midx.get_nparticles_bbox(dle, dre)
+        if pcount_estimate > 1e9:
+            mylog.warning("Filtering %i particles to find total."
+                          % pcount_estimate + \
+                          " You may want to reconsider your bounding box.")
+        pcount = 0
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x']):
+            pcount += dd['x'].size
+        return {'dark_matter': pcount}
 
     def _identify_fields(self, data_file):
         fields = [("dark_matter", v) for v in self._handle.keys()]
         fields.append(("dark_matter", "mass"))
         return fields, {}
 
-import re
-import os
 
-_types = {
-    'int': 'int32',
-    'int64_t': 'int64',
-    'float': 'float32',
-    'double': 'float64',
-    'unsigned int': 'I',
-    'unsigned char': 'B',
-}
+class IOHandlerSIndexHTTPSDF(IOHandlerSIndexSDF):
+    _dataset_type = "midx_http_sdf_particles"
 
-def get_type(vtype, len=None):
-    try:
-        t = _types[vtype]
-        if len is not None:
-            t = np.dtype((t, len))
-        else:
-            t = np.dtype(t)
-    except KeyError:
-        t = eval("np."+vtype)
-    return t
-
-def lstrip(text_list):
-    return [t.strip() for t in text_list]
-
-def get_struct_vars(line):
-    spl = lstrip(line.split(";"))
-    multiv = lstrip(spl[0].split(","))
-    ret = lstrip(multiv[0].split())
-    ctype = ret[0]
-    vnames = [ret[-1]] + multiv[1:]
-    vnames = [v.strip() for v in vnames]
-    for vtype in ret[1:-1]:
-        ctype += ' ' + vtype
-    num = None
-    if len(vnames) == 1:
-        if '[' in vnames[0]:
-            num = int(vnames[0].split('[')[-1].strip(']'))
-            #num = int(re.sub("\D", "", vnames[0]))
-    ctype = get_type(ctype, len=num)
-    return ctype, vnames
-
-class DataStruct(object):
-    """docstring for DataStruct"""
-
-    _offset = 0
-
-    def __init__(self, dtypes, num, filename):
-        self.filename = filename
-        self.dtype = np.dtype(dtypes)
-        self.size = num
-        self.itemsize = self.dtype.itemsize
-        self.data = {}
-        self.handle = None
-
-    def set_offset(self, offset):
-        self._offset = offset
-        if self.size == -1:
-            file_size = os.path.getsize(self.filename)
-            file_size -= offset
-            self.size = float(file_size) / self.itemsize
-            assert(int(self.size) == self.size)
-
-    def build_memmap(self):
-        assert(self.size != -1)
-        self.handle = np.memmap(self.filename, dtype=self.dtype,
-                        mode='r', shape=self.size, offset=self._offset)
-        for k in self.dtype.names:
-            self.data[k] = self.handle[k]
-
-class SDFRead(dict):
-
-    """docstring for SDFRead"""
-
-    _eof = 'SDF-EOH'
-
-    def __init__(self, filename, header=None):
-        self.filename = filename
-        if header is None:
-            header = filename
-        self.header = header
-        self.parameters = {}
-        self.structs = []
-        self.comments = []
-        self.parse_header()
-        self.set_offsets()
-        self.load_memmaps()
-
-    def parse_header(self):
-        """docstring for parse_header"""
-        # Pre-process
-        ascfile = open(self.header, 'r')
-        while True:
-            l = ascfile.readline()
-            if self._eof in l: break
-
-            self.parse_line(l, ascfile)
-
-        hoff = ascfile.tell()
-        ascfile.close()
-        if self.header != self.filename:
-            hoff = 0
-        self.parameters['header_offset'] = hoff
-
-    def parse_line(self, line, ascfile):
-        """Parse a line of sdf"""
-
-
-        if 'struct' in line:
-            self.parse_struct(line, ascfile)
-            return
-
-        if "#" in line:
-            self.comments.append(line)
-            return
-
-        spl = lstrip(line.split("="))
-        vtype, vname = lstrip(spl[0].split())
-        vname = vname.strip("[]")
-        vval = spl[-1].strip(";")
-        if vtype == 'parameter':
-            self.parameters[vname] = vval
-            return
-        elif vtype == "char":
-            vtype = "str"
-
-        try:
-            vval = eval("np."+vtype+"(%s)" % vval)
-        except AttributeError:
-            vval = eval("np."+_types[vtype]+"(%s)" % vval)
-
-        self.parameters[vname] = vval
-
-    def parse_struct(self, line, ascfile):
-        assert 'struct' in line
-
-        str_types = []
-        comments = []
-        str_lines = []
-        l = ascfile.readline()
-        while "}" not in l:
-            vtype, vnames = get_struct_vars(l)
-            for v in vnames:
-                str_types.append((v, vtype))
-            l = ascfile.readline()
-        num = l.strip("}[]")
-        num = num.strip("\;\\\n]")
-        if len(num) == 0:
-            # We need to compute the number of records.  The DataStruct will
-            # handle this.
-            num = '-1'
-        num = int(num)
-        struct = DataStruct(str_types, num, self.filename)
-        self.structs.append(struct)
-        return
-
-    def set_offsets(self):
-        running_off = self.parameters['header_offset']
-        for struct in self.structs:
-            struct.set_offset(running_off)
-            running_off += struct.size * struct.itemsize
-        return
-
-    def load_memmaps(self):
-        for struct in self.structs:
-            struct.build_memmap()
-            self.update(struct.data)
-
-
-class SDFIndex(object):
-
-    """docstring for SDFIndex
-
-    This provides an index mechanism into the full SDF Dataset.
-
-    Most useful class methods:
-        get_cell_data(level, cell_iarr, fields)
-        iter_bbox_data(left, right, fields)
-        iter_bbox_data(left, right, fields)
-
-    """
-    def __init__(self, sdfdata, indexdata, level=9):
-        super(SDFIndex, self).__init__()
-        self.sdfdata = sdfdata
-        self.indexdata = indexdata
-        self.level = level
-        self.rmin = None
-        self.rmax = None
-        self.domain_width = None
-        self.domain_buffer = 0
-        self.domain_dims = 0
-        self.domain_active_dims = 0
-        self.masks = {
-            "p" : int("011"*level, 2),
-            "t" : int("101"*level, 2),
-            "r" : int("110"*level, 2),
-            "z" : int("011"*level, 2),
-            "y" : int("101"*level, 2),
-            "x" : int("110"*level, 2),
-            2 : int("011"*level, 2),
-            1 : int("101"*level, 2),
-            0 : int("110"*level, 2),
-        }
-        self.dim_slices = {
-            "p" : slice(0, None, 3),
-            "t" : slice(1, None, 3),
-            "r" : slice(2, None, 3),
-            "z" : slice(0, None, 3),
-            "y" : slice(1, None, 3),
-            "x" : slice(2, None, 3),
-            2 : slice(0, None, 3),
-            1 : slice(1, None, 3),
-            0 : slice(2, None, 3),
-        }
-        self.set_bounds()
-
-    def set_bounds(self):
-        r_0 = self.sdfdata.parameters['R0']
-        DW = 2.0 * r_0
-
-        self.rmin = np.zeros(3)
-        self.rmax = np.zeros(3)
-        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
-        if sorted_rtp:
-            self.rmin[:] = [0.0, 0.0, -np.pi]
-            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
-        else:
-            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
-            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
-            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
-            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
-            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
-            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
-
-        #/* expand root for non-power-of-two */
-        expand_root = 0.0
-        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
-        if ic_Nmesh != 0:
-            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
-            if (f2 != ic_Nmesh):
-                expand_root = 1.0*f2/ic_Nmesh - 1.0;
-            print 'Expanding: ', f2, ic_Nmesh, expand_root
-        self.rmin *= 1.0 + expand_root
-        self.rmax *= 1.0 + expand_root
-        self.domain_width = self.rmax - self.rmin
-        self.domain_dims = 1 << self.level
-        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
-        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
-        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
-
-    def get_key(self, iarr, level=None):
-        if level is None:
-            level = self.level
-        i1, i2, i3 = iarr
-        rep1 = np.binary_repr(i1, width=self.level)
-        rep2 = np.binary_repr(i2, width=self.level)
-        rep3 = np.binary_repr(i3, width=self.level)
-        inter = np.zeros(self.level*3, dtype='c')
-        inter[self.dim_slices[0]] = rep1
-        inter[self.dim_slices[1]] = rep2
-        inter[self.dim_slices[2]] = rep3
-        return int(inter.tostring(), 2)
-
-    def get_key_ijk(self, i1, i2, i3, level=None):
-        return self.get_key(np.array([i1, i2, i3]), level=level)
-
-    def get_slice_key(self, ind, dim='r'):
-        slb = np.binary_repr(ind, width=self.level)
-        expanded = np.array([0]*self.level*3, dtype='c')
-        expanded[self.dim_slices[dim]] = slb
-        return int(expanded.tostring(), 2)
-
-    def get_slice_chunks(self, slice_dim, slice_index):
-        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
-        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
-        offsets = self.indexdata['base'][mask]
-        lengths = self.indexdata['len'][mask]
-        return mask, offsets, lengths
-
-    def get_ibbox_slow(self, ileft, iright):
-        """
-        Given left and right indices, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-        ileft = np.array(ileft)
-        iright = np.array(iright)
-        for i in range(3):
-            left_key = self.get_slice_key(ileft[i], dim=i)
-            right_key= self.get_slice_key(iright[i], dim=i)
-            dim_inds = (self.indexdata['index'] & ~self.masks[i])
-            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
-            del dim_inds
-
-        offsets = self.indexdata['base'][mask]
-        lengths = self.indexdata['len'][mask]
-        return mask, offsets, lengths
-
-    def get_ibbox(self, ileft, iright):
-        """
-        Given left and right cell indices, return the index keys
-        (Morton-ordered) of the cells covering that region.
-        """
-        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-
-        print 'Getting data from ileft to iright:',  ileft, iright
-
-        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
-                           ileft[1]:iright[1]+1,
-                           ileft[2]:iright[2]+1]
-
-        X = X.ravel()
-        Y = Y.ravel()
-        Z = Z.ravel()
-        # Correct For periodicity
-        X[X < self.domain_buffer] += self.domain_active_dims
-        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
-        Y[Y < self.domain_buffer] += self.domain_active_dims
-        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
-        Z[Z < self.domain_buffer] += self.domain_active_dims
-        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
-
-        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
-
-        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
-        indices = indices[indices < self.indexdata['index'].shape[0]]
-        return indices
-
-    def get_bbox(self, left, right):
-        """
-        Given left and right physical coordinates, return the index keys
-        of the cells covering that bounding box.
-        """
-        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
-        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-
-        return self.get_ibbox(ileft, iright)
-
-    def get_data(self, chunk, fields):
-        data = {}
-        for field in fields:
-            data[field] = self.sdfdata[field][chunk]
-        return data
-
-    def iter_data(self, inds, fields):
-        num_inds = len(inds)
-        num_reads = 0
-        print 'Reading %i chunks' % num_inds
-        i = 0
-        while (i < num_inds):
-            ind = inds[i]
-            base = self.indexdata['base'][ind]
-            length = self.indexdata['len'][ind]
-            # Concatenate aligned reads
-            nexti = i+1
-            combined = 0
-            while nexti < len(inds):
-                nextind = inds[nexti]
-                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
-                if base + length == self.indexdata['base'][nextind]:
-                    length += self.indexdata['len'][nextind]
-                    i += 1
-                    nexti += 1
-                    combined += 1
-                else:
-                    break
-
-            chunk = slice(base, base+length)
-            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
-            num_reads += 1
-            data = self.get_data(chunk, fields)
-            yield data
-            del data
-            i += 1
-        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
-
-    def iter_bbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
-        inds = self.get_bbox(left, right)
-        return self.iter_data(inds, fields)
-
-    def iter_ibbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
-        inds = self.get_ibbox(left, right)
-        return self.iter_data(inds, fields)
-
-    def get_contiguous_chunk(self, left_key, right_key, fields):
-        max_key = self.indexdata['index'][-1]
-        if left_key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        base = self.indexdata['base'][left_key]
-        right_key = min(right_key, self.indexdata['index'][-1])
-        length = self.indexdata['base'][right_key] + \
-            self.indexdata['len'][right_key] - base
-        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
-        return self.get_data(slice(base, base + length), fields)
-
-    def iter_slice_data(self, slice_dim, slice_index, fields):
-        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
-        for off, l in zip(offsets, lengths):
-            data = {}
-            chunk = slice(off, off+l)
-            for field in fields:
-                data[field] = self.sdfdata[field][chunk]
-            yield data
-            del data
-
-    def get_key_bounds(self, level, cell_iarr):
-        """
-        Get the left and right index keys, at the finest level, bounding the supplied cell.
-
-        level: int
-            Requested level
-        cell_iarr: array-like, length 3
-            Requested cell from given level.
-
-        Returns:
-            lmax_lk, lmax_rk
-        """
-        shift = self.level-level
-        level_buff = 0
-        level_lk = self.get_key(cell_iarr + level_buff)
-        level_rk = self.get_key(cell_iarr + level_buff) + 1
-        lmax_lk = (level_lk << shift*3)
-        lmax_rk = (((level_rk) << shift*3) -1)
-        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
-        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
-        return lmax_lk, lmax_rk
-
-    def get_cell_data(self, level, cell_iarr, fields):
-        """
-        Get data from requested cell
-
-        This uses the raw cell index, and doesn't account for periodicity or
-        an expanded domain (non-power of 2).
-
-        level: int
-            Requested level
-        cell_iarr: array-like, length 3
-            Requested cell from given level.
-        fields: list
-            Requested fields
-
-        Returns:
-            cell_data: dict
-                Dictionary of field_name, field_data
-        """
-        cell_iarr = np.array(cell_iarr)
-        lk, rk =self.get_key_bounds(level, cell_iarr)
-        return self.get_contiguous_chunk(lk, rk, fields)
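
For orientation while reading the removed SDFIndex class above: get_key builds a
Morton-style key by bit-interleaving the three cell indices, one bit per dimension
per level, with dimension 0 landing in the lowest bit of each 3-bit group. A minimal
standalone sketch of the same interleaving, using the Python 2 / NumPy idioms of this
changeset (morton_key is a hypothetical name, not part of yt):

    import numpy as np

    def morton_key(i1, i2, i3, level=9):
        # Interleave the bits of (i1, i2, i3); matches dim_slices above,
        # where dimension 0 fills the last position of each bit triple.
        rep1 = np.binary_repr(i1, width=level)
        rep2 = np.binary_repr(i2, width=level)
        rep3 = np.binary_repr(i3, width=level)
        inter = np.zeros(level * 3, dtype='c')
        inter[2::3] = rep1
        inter[1::3] = rep2
        inter[0::3] = rep3
        return int(inter.tostring(), 2)

    # morton_key(1, 0, 0) == 1, morton_key(0, 1, 0) == 2, morton_key(0, 0, 1) == 4

This is the key layout that iter_bbox_data and get_contiguous_chunk rely on when they
turn a bounding box into a list of entries in the index file.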

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -649,6 +649,9 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if args[0].startswith("http://"):
+        if not args[0].startswith("http://"):
+            return False
+        hreq = requests.get(args[0] + "/yt_index.json")
+        if hreq.status_code == 200:
             return True
         return False
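
The _is_valid change above tightens the HTTP stream check: a URL is only treated as a
valid dataset if it starts with http:// and actually serves a yt_index.json. Roughly,
as a standalone sketch (looks_like_http_dataset is a hypothetical helper, not yt API):

    import requests

    def looks_like_http_dataset(url):
        # Probe the remote index the same way the frontend now does.
        if not url.startswith("http://"):
            return False
        return requests.get(url + "/yt_index.json").status_code == 200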

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -54,6 +54,7 @@
     )
 
     known_particle_fields = (
+        ("particle_position", ("code_length", [], None)),
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -96,6 +96,7 @@
 
 class StreamParticleIOHandler(BaseIOHandler):
 
+    _vector_fields = ("particle_position", "particle_velocity")
     _dataset_type = "stream_particles"
 
     def __init__(self, pf):
@@ -116,6 +117,19 @@
                               f[ptype, "particle_position_y"],
                               f[ptype, "particle_position_z"])
             
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        # This is allowed to over-estimate.  We probably *will*, too, because
+        # we're going to count *all* of the particles, not just individual
+        # types.
+        count = 0
+        psize = {}
+        for chunk in chunks:
+            for obj in chunk.objs:
+                count += selector.count_octs(obj.oct_handler, obj.domain_id)
+        for ptype in ptf:
+            psize[ptype] = self.pf.n_ref * count / float(obj.nz)
+        return psize
+
     def _read_particle_fields(self, chunks, ptf, selector):
         data_files = set([])
         for chunk in chunks:
@@ -124,8 +138,13 @@
         for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
-                x, y, z = (f[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz')
+                if (ptype, "particle_position") in f:
+                    x = f[ptype, "particle_position"][:,0]
+                    y = f[ptype, "particle_position"][:,1]
+                    z = f[ptype, "particle_position"][:,2]
+                else:
+                    x, y, z = (f[ptype, "particle_position_%s" % ax]
+                               for ax in 'xyz')
                 mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -18,6 +18,10 @@
 from oct_visitors cimport Oct, OctVisitorData, \
     oct_visitor_function
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level
@@ -53,3 +57,13 @@
 cdef class OctreeSubsetSelector(SelectorObject):
     cdef SelectorObject base_selector
     cdef public np.int64_t domain_id
+
+cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,
+                                        np.float64_t dw, bint periodic) nogil:
+    cdef np.float64_t rel = x1 - x2
+    if not periodic: return rel
+    if rel > dw * 0.5:
+        rel -= dw
+    elif rel < -dw * 0.5:
+        rel += dw
+    return rel
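
The new _periodic_dist inline is the standard minimum-image convention: the raw
separation is wrapped back into [-dw/2, dw/2] whenever the axis is periodic. A
plain-Python equivalent, for reference:

    def periodic_dist(x1, x2, dw, periodic=True):
        rel = x1 - x2
        if not periodic:
            return rel
        if rel > 0.5 * dw:
            rel -= dw
        elif rel < -0.5 * dw:
            rel += dw
        return rel

    # periodic_dist(0.9, 0.1, 1.0) gives roughly -0.2 instead of 0.8,
    # i.e. the short way around the periodic box.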

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -36,10 +36,6 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
-ctypedef fused anyfloat:
-    np.float32_t
-    np.float64_t
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box
@@ -335,10 +331,10 @@
         # domain_width is already in code units, and we assume what is fed in
         # is too.
         cdef np.float64_t rel = x1 - x2
-        if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+        if self.periodicity[d]:
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -491,11 +487,12 @@
         cdef int i
         cdef np.float64_t pos[3]
         cdef np.ndarray[np.uint8_t, ndim=1] mask 
-        mask = np.zeros(x.shape[0], dtype='uint8')
+        mask = np.empty(x.shape[0], dtype='uint8')
         _ensure_code(x)
         _ensure_code(y)
         _ensure_code(z)
 
+
         # this is to allow selectors to optimize the point vs
         # 0-radius sphere case.  These two may have different 
         # effects for 0-volume selectors, however (collision 
@@ -517,7 +514,7 @@
                     mask[i] = self.select_sphere(pos, radius)
                     count += mask[i]
         if count == 0: return None
-        return mask.astype("bool")
+        return mask.view("bool")
 
     def __hash__(self):
         return hash(self._hash_vals() + self._base_hash())
@@ -595,12 +592,27 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
+    cdef bint check_box[3]
 
     def __init__(self, dobj):
         for i in range(3):
             self.center[i] = _ensure_code(dobj.center[i])
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        cdef np.float64_t mi = np.finfo("float64").min
+        cdef np.float64_t ma = np.finfo("float64").max
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
+            if self.bbox[i][0] < dobj.pf.domain_left_edge[i]:
+                self.check_box[i] = False
+            elif self.bbox[i][1] > dobj.pf.domain_right_edge[i]:
+                self.check_box[i] = False
+            else:
+                self.check_box[i] = True
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -620,10 +632,15 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
-            dist = self.difference(pos[i], self.center[i], i)
+            if self.check_box[i] and \
+              (pos[i] < self.bbox[i][0] or 
+               pos[i] > self.bbox[i][1]):
+                return 0
+            dist = _periodic_dist(pos[i], self.center[i], self.domain_width[i],
+                                  self.periodicity[i])
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -649,16 +666,22 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
+        for i in range(3):
+            if not self.check_box[i]: continue
+            if right_edge[i] < self.bbox[i][0] or \
+               left_edge[i] > self.bbox[i][1]:
+                return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):
+            # Early terminate
             box_center = (right_edge[i] + left_edge[i])/2.0
             relcenter = self.difference(box_center, self.center[i], i)
             edge = right_edge[i] - left_edge[i]
             closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
             dist += closest*closest
-        if dist <= self.radius2: return 1
-        return 0
+            if dist > self.radius2: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.radius, self.radius2,
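
Two shortcuts are being added to the sphere selector above: an axis-aligned bounding
box built from center +/- radius that cheaply rejects points (only used when the box
stays inside the domain, since otherwise periodic wrapping could matter), and early
termination of the squared-distance sum as soon as it exceeds radius**2. A hedged
pure-Python mirror of the per-point logic, ignoring periodicity (which the Cython
code handles via _periodic_dist):

    def sphere_selects(pos, center, radius, bbox, check_box):
        # bbox[i] = (center[i] - radius, center[i] + radius); check_box[i]
        # is False when that interval crosses the domain edge.
        r2 = radius * radius
        d2 = 0.0
        for i in range(3):
            if check_box[i] and not (bbox[i][0] <= pos[i] <= bbox[i][1]):
                return False            # cheap axis-aligned rejection
            d = pos[i] - center[i]      # the Cython version uses _periodic_dist
            d2 += d * d
            if d2 > r2:
                return False            # early exit: already outside the sphere
        return True

    # sphere_selects((0.3, 0.3, 0.9), (0.25, 0.25, 0.25), 0.1,
    #                [(0.15, 0.35)] * 3, [True] * 3)
    # -> False, rejected by the bbox test on the third axis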

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -116,12 +116,17 @@
     def _read_chunk_data(self, chunk, fields):
         return {}
 
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
+        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+            psize[ptype] += selector.count_points(x, y, z, 0.0)
+        return dict(psize.items())
+
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
         ind = {}
         # We first need a set of masks for each particle type
         ptf = defaultdict(list)        # ON-DISK TO READ
-        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
         fsize = defaultdict(lambda: 0) # COUNT RV
         field_maps = defaultdict(list) # ptypes -> fields
         chunks = list(chunks)
@@ -139,17 +144,10 @@
                 ptf[ftype].append(fname)
                 field_maps[field].append(field)
         # We can't hash chunks, but otherwise this is a neat idea.
-        if 0 and hash(selector) == self._last_selector_id and \
-           all(ptype in self._last_selector_counts for ptype in ptf):
-            psize.update(self._last_selector_counts)
-        else:
-            # Now we have our full listing.
-            # Here, ptype_map means which particles contribute to a given type.
-            # And ptf is the actual fields from disk to read.
-            for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
-                psize[ptype] += selector.count_points(x, y, z, 0.0)
-            self._last_selector_counts = dict(**psize)
-            self._last_selector_id = hash(selector)
+        # Now we have our full listing.
+        # Here, ptype_map means which particles contribute to a given type.
+        # And ptf is the actual fields from disk to read.
+        psize = self._count_particles_chunks(chunks, ptf, selector)
         # Now we allocate
         # ptf, remember, is our mapping of what we want to read
         #for ptype in ptf:
@@ -175,6 +173,10 @@
                 #    field_f, my_ind, my_ind+vals.shape[0], field_r)
                 rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
                 ind[field_f] += vals.shape[0]
+        # Now we need to truncate all our fields, since we allow for
+        # over-estimating.
+        for field_f in ind:
+            rv[field_f] = rv[field_f][:ind[field_f]]
         return rv
 
 class IOHandlerExtracted(BaseIOHandler):
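
The io_handler.py change reorganizes particle selection around an
over-estimate-then-truncate pattern: _count_particles_chunks is allowed to over-count
(the stream frontend, for instance, estimates n_ref particles per selected oct), the
output arrays are allocated at that estimated size, filled as fields are read, and
finally cut back to the number of values actually stored. A minimal sketch of the
pattern under those assumptions (gather_field is a hypothetical name, not the yt API):

    import numpy as np

    def gather_field(chunks, estimate, select):
        # 'estimate' may over-count, so allocate once at the estimated size.
        out = np.empty(estimate, dtype="float64")
        ind = 0
        for vals in chunks:
            keep = vals[select(vals)]
            out[ind:ind + keep.size] = keep
            ind += keep.size
        # Truncate to what was actually kept, mirroring rv[field][:ind[field]].
        return out[:ind]

    gather_field([np.arange(5.0), np.arange(3.0)], estimate=8,
                 select=lambda v: v > 1.0)      # -> array([2., 3., 4., 2.])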

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and are the addressed
recipient of this email.


More information about the yt-svn mailing list