[yt-svn] commit/yt: 6 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Dec 14 11:13:02 PST 2015


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/37cdde21b8a6/
Changeset:   37cdde21b8a6
Branch:      yt
User:        MatthewTurk
Date:        2015-10-31 19:39:22+00:00
Summary:     Removing some unused values.
Affected #:  1 file

diff -r 9a49e5d6da9fc23bc7a2858bb5a304b9c4f5537b -r 37cdde21b8a6ad3a8c410c0144499c86936cb470 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -41,10 +41,6 @@
     cdef int maxn
     cdef int curn
     cdef bint periodicity[3]
-    cdef np.int64_t *doffs
-    cdef np.int64_t *pinds
-    cdef np.int64_t *pcounts
-    cdef np.float64_t *ppos
     # Note that we are preallocating here, so this is *not* threadsafe.
     cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])


https://bitbucket.org/yt_analysis/yt/commits/cba9dfe78ac6/
Changeset:   cba9dfe78ac6
Branch:      yt
User:        MatthewTurk
Date:        2015-10-31 20:00:00+00:00
Summary:     Starting conversion to memoryviews
Affected #:  2 files

diff -r 37cdde21b8a6ad3a8c410c0144499c86936cb470 -r cba9dfe78ac681778f58fd9cf289da2f9f5f5562 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -47,8 +47,8 @@
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t *ppos,
                                np.float64_t **fields, 
-                               np.int64_t *doffs, np.int64_t **nind, 
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind, 
+                               np.int64_t[:] pinds, np.int64_t *pcounts,
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t *oct_left_edges,
@@ -60,8 +60,8 @@
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
                                np.float64_t *ppos,
                                np.float64_t **fields, 
-                               np.int64_t *doffs, np.int64_t **nind, 
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind, 
+                               np.int64_t[:] pinds, np.int64_t *pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
@@ -72,9 +72,9 @@
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
-                            np.int64_t *doffs,
+                            np.int64_t[:] doffs,
                             np.int64_t *pcounts,
-                            np.int64_t *pinds,
+                            np.int64_t[:] pinds,
                             np.float64_t *ppos,
                             np.float64_t cpos[3],
                             np.float64_t* oct_left_edges,

diff -r 37cdde21b8a6ad3a8c410c0144499c86936cb470 -r cba9dfe78ac681778f58fd9cf289da2f9f5f5562 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -80,6 +80,7 @@
         self.nvals = nvals
         self.nfields = nfields
         self.maxn = max_neighbors
+
         self.neighbors = <NeighborList *> malloc(
             sizeof(NeighborList) * self.maxn)
         self.neighbor_reset()
@@ -134,6 +135,7 @@
             pdom_ind = mdom_ind
         cdef int nf, i, j, n
         cdef int dims[3]
+        cdef np.float64_t[:] *field_check
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
         cdef np.float64_t pos[3]
@@ -146,8 +148,8 @@
         cdef Oct *oct
         cdef np.int64_t numpart, offset, local_ind, poff
         cdef np.int64_t moff_p, moff_m
-        cdef np.int64_t *doffs
-        cdef np.int64_t *pinds
+        cdef np.int64_t[:] doffs
+        cdef np.int64_t[:] pinds
         cdef np.int64_t *pcounts
         cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
         cdef np.ndarray[np.int64_t, ndim=2] doff_m
@@ -247,8 +249,8 @@
         # refers to that oct's particles.
         ppos = <np.float64_t *> positions.data
         cart_pos = <np.float64_t *> cart_positions.data
-        doffs = <np.int64_t*> doff.data
-        pinds = <np.int64_t*> pind.data
+        doffs = doff
+        pinds = pind
         pcounts = <np.int64_t*> pcount.data
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
@@ -304,8 +306,8 @@
         cdef Oct **neighbors = NULL
         cdef np.int64_t nneighbors, numpart, offset, local_ind
         cdef np.int64_t moff_p, moff_m, pind0, poff
-        cdef np.int64_t *doffs
-        cdef np.int64_t *pinds
+        cdef np.int64_t[:] doffs
+        cdef np.int64_t[:] pinds
         cdef np.int64_t *pcounts
         cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
@@ -378,8 +380,8 @@
         # refers to that oct's particles.
         ppos = <np.float64_t *> positions.data
         cart_pos = <np.float64_t *> cart_positions.data
-        doffs = <np.int64_t*> doff.data
-        pinds = <np.int64_t*> pind.data
+        doffs = doff
+        pinds = pind
         pcounts = <np.int64_t*> pcount.data
         cdef int maxnei = 0
         cdef int nproc = 0
@@ -527,9 +529,9 @@
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
-                            np.int64_t *doffs,
+                            np.int64_t[:] doffs,
                             np.int64_t *pcounts,
-                            np.int64_t *pinds,
+                            np.int64_t[:] pinds,
                             np.float64_t *ppos,
                             np.float64_t cpos[3],
                             np.float64_t *oct_left_edges,
@@ -587,8 +589,8 @@
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t *ppos,
                                np.float64_t **fields,
-                               np.int64_t *doffs, np.int64_t **nind,
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t [:] doffs, np.int64_t **nind,
+                               np.int64_t [:] pinds, np.int64_t *pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
@@ -629,8 +631,8 @@
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
                                np.float64_t *ppos,
                                np.float64_t **fields,
-                               np.int64_t *doffs, np.int64_t **nind,
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind,
+                               np.int64_t[:] pinds, np.int64_t *pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
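
A minimal sketch, not part of the changeset itself, of the pattern this
conversion introduces: a typed memoryview carries its own shape and accepts a
NumPy array directly, where the old pointer style required casting the array's
data buffer and passing the length separately.  All names here are invented
for illustration.

    # Hypothetical illustration of the pointer -> memoryview conversion.
    cimport numpy as np
    import numpy as np

    cdef void use_pointer(np.int64_t *vals, int n):
        # Old style: the caller must pass the length alongside the pointer.
        cdef int i
        for i in range(n):
            vals[i] += 1

    cdef void use_memoryview(np.int64_t[:] vals):
        # New style: the shape travels with the view, so bounds are known.
        cdef int i
        for i in range(vals.shape[0]):
            vals[i] += 1

    def run():
        cdef np.ndarray[np.int64_t, ndim=1] arr = np.arange(5, dtype="int64")
        use_pointer(<np.int64_t *> arr.data, arr.shape[0])  # old: raw cast
        use_memoryview(arr)                                 # new: no cast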


https://bitbucket.org/yt_analysis/yt/commits/f5b9cba0cd9d/
Changeset:   f5b9cba0cd9d
Branch:      yt
User:        MatthewTurk
Date:        2015-11-01 02:31:21+00:00
Summary:     Function signatures all match
Affected #:  2 files

diff -r cba9dfe78ac681778f58fd9cf289da2f9f5f5562 -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -45,23 +45,23 @@
     cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
-                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields, 
                                np.int64_t[:] doffs, np.int64_t **nind, 
-                               np.int64_t[:] pinds, np.int64_t *pcounts,
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize, np.float64_t *oct_left_edges,
-                               np.float64_t *oct_dds)
+                               int *nsize, np.float64_t[:,:] oct_left_edges,
+                               np.float64_t[:,:] oct_dds)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
                              Oct **oct = ?, int extra_layer = ?)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
-                               np.float64_t *ppos,
+                               np.float64_t[:,:] ppos,
                                np.float64_t **fields, 
                                np.int64_t[:] doffs, np.int64_t **nind, 
-                               np.int64_t[:] pinds, np.int64_t *pcounts,
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
@@ -73,12 +73,12 @@
                             np.int64_t nneighbors,
                             np.int64_t *nind,
                             np.int64_t[:] doffs,
-                            np.int64_t *pcounts,
+                            np.int64_t[:] pcounts,
                             np.int64_t[:] pinds,
-                            np.float64_t *ppos,
+                            np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
-                            np.float64_t* oct_left_edges,
-                            np.float64_t* oct_dds)
+                            np.float64_t[:,:] oct_left_edges,
+                            np.float64_t[:,:] oct_dds)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields)

diff -r cba9dfe78ac681778f58fd9cf289da2f9f5f5562 -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -96,15 +96,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def process_octree(self, OctreeContainer mesh_octree,
-                     np.ndarray[np.int64_t, ndim=1] mdom_ind,
-                     np.ndarray[np.float64_t, ndim=2] positions,
-                     np.ndarray[np.float64_t, ndim=2] oct_positions,
+                     np.int64_t [:] mdom_ind,
+                     np.float64_t[:,:] positions,
+                     np.float64_t[:,:] oct_positions,
                      fields = None, int domain_id = -1,
                      int domain_offset = 0,
                      periodicity = (True, True, True),
                      index_fields = None,
                      OctreeContainer particle_octree = None,
-                     np.ndarray[np.int64_t, ndim=1] pdom_ind = None,
+                     np.int64_t [:] pdom_ind = None,
                      geometry = "cartesian"):
         # This will be a several-step operation.
         #
@@ -139,7 +139,6 @@
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
         cdef np.float64_t pos[3]
-        cdef np.float64_t *ppos
         cdef np.float64_t dds[3]
         cdef np.float64_t **octree_field_pointers
         cdef int nsize = 0
@@ -148,15 +147,12 @@
         cdef Oct *oct
         cdef np.int64_t numpart, offset, local_ind, poff
         cdef np.int64_t moff_p, moff_m
-        cdef np.int64_t[:] doffs
-        cdef np.int64_t[:] pinds
-        cdef np.int64_t *pcounts
-        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
-        cdef np.ndarray[np.int64_t, ndim=2] doff_m
+        cdef np.int64_t[:] pind, doff, pdoms, pcount
+        cdef np.int64_t[:,:] doff_m
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=4] iarr
-        cdef np.ndarray[np.float64_t, ndim=2] cart_positions
-        cdef np.ndarray[np.float64_t, ndim=2] oct_left_edges, oct_dds
+        cdef np.float64_t[:,:] cart_positions
+        cdef np.float64_t[:,:] oct_left_edges, oct_dds
         cdef OctInfo oinfo
         if geometry == "cartesian":
             self.pos_setup = cart_coord_setup
@@ -247,11 +243,6 @@
         #raise RuntimeError
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
-        ppos = <np.float64_t *> positions.data
-        cart_pos = <np.float64_t *> cart_positions.data
-        doffs = doff
-        pinds = pind
-        pcounts = <np.int64_t*> pcount.data
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
         cdef int nproc = 0
@@ -265,10 +256,10 @@
             if offset < 0: continue
             nproc += 1
             self.neighbor_process(
-                dims, moi.left_edge, moi.dds, cart_pos, field_pointers, doffs,
-                &nind, pinds, pcounts, offset, index_field_pointers,
-                particle_octree, domain_id, &nsize, &oct_left_edges[0, 0],
-                &oct_dds[0, 0])
+                dims, moi.left_edge, moi.dds, cart_positions, field_pointers, doff,
+                &nind, pind, pcount, offset, index_field_pointers,
+                particle_octree, domain_id, &nsize, oct_left_edges,
+                oct_dds)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -295,7 +286,6 @@
         cdef int dims[3]
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
-        cdef np.float64_t *ppos
         cdef np.float64_t dds[3]
         cdef np.float64_t pos[3]
         cdef np.float64_t **octree_field_pointers
@@ -306,10 +296,7 @@
         cdef Oct **neighbors = NULL
         cdef np.int64_t nneighbors, numpart, offset, local_ind
         cdef np.int64_t moff_p, moff_m, pind0, poff
-        cdef np.int64_t[:] doffs
-        cdef np.int64_t[:] pinds
-        cdef np.int64_t *pcounts
-        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.int64_t[:] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=2] cart_positions
         if geometry == "cartesian":
@@ -378,11 +365,6 @@
         #raise RuntimeError
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
-        ppos = <np.float64_t *> positions.data
-        cart_pos = <np.float64_t *> cart_positions.data
-        doffs = doff
-        pinds = pind
-        pcounts = <np.int64_t*> pcount.data
         cdef int maxnei = 0
         cdef int nproc = 0
         for i in range(doff.shape[0]):
@@ -394,8 +376,8 @@
                 pind0 = pind[doff[i] + j]
                 for k in range(3):
                     pos[k] = positions[pind0, k]
-                self.neighbor_process_particle(pos, cart_pos, field_pointers,
-                            doffs, &nind, pinds, pcounts, pind0,
+                self.neighbor_process_particle(pos, cart_positions, field_pointers,
+                            doff, &nind, pind, pcount, pind0,
                             NULL, particle_octree, domain_id, &nsize)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
@@ -530,12 +512,12 @@
                             np.int64_t nneighbors,
                             np.int64_t *nind,
                             np.int64_t[:] doffs,
-                            np.int64_t *pcounts,
+                            np.int64_t[:] pcounts,
                             np.int64_t[:] pinds,
-                            np.float64_t *ppos,
+                            np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
-                            np.float64_t *oct_left_edges,
-                            np.float64_t *oct_dds,
+                            np.float64_t[:,:] oct_left_edges,
+                            np.float64_t[:,:] oct_dds,
                             ):
         # We are now given the number of neighbors, the indices into the
         # domains for them, and the number of particles for each.
@@ -547,7 +529,7 @@
             if nind[ni] == -1: continue
             # terminate early if all 8 corners of oct are farther away than
             # most distant currently known neighbor
-            if oct_left_edges != NULL and self.curn == self.maxn:
+            if oct_left_edges != None and self.curn == self.maxn:
                 r2_trunc = self.neighbors[self.curn - 1].r2
                 # iterate over each dimension in the outer loop so we can
                 # consolidate temporary storage
@@ -557,8 +539,8 @@
                 r2 = 0.0
                 for k in range(3):
                     # We start at left edge, then do halfway, then right edge.
-                    ex[0] = oct_left_edges[3*nind[ni] + k]
-                    ex[1] = ex[0] + oct_dds[3*nind[ni] + k]
+                    ex[0] = oct_left_edges[nind[ni], k]
+                    ex[1] = ex[0] + oct_dds[nind[ni], k]
                     # There are three possibilities; we are between, left-of,
                     # or right-of the extrema.  Thanks to
                     # http://stackoverflow.com/questions/5254838/calculating-distance-between-a-point-and-a-rectangular-box-nearest-point
@@ -583,19 +565,19 @@
             for i in range(pc):
                 pn = pinds[offset + i]
                 for j in range(3):
-                    pos[j] = ppos[pn * 3 + j]
+                    pos[j] = ppos[pn, j]
                 self.neighbor_eval(pn, pos, cpos)
 
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
-                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields,
                                np.int64_t [:] doffs, np.int64_t **nind,
-                               np.int64_t [:] pinds, np.int64_t *pcounts,
+                               np.int64_t [:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize, np.float64_t *oct_left_edges,
-                               np.float64_t *oct_dds):
+                               int *nsize, np.float64_t[:,:] oct_left_edges,
+                               np.float64_t[:,:] oct_dds):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -629,10 +611,10 @@
             cpos[0] += dds[0]
 
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
-                               np.float64_t *ppos,
+                               np.float64_t[:,:] ppos,
                                np.float64_t **fields,
                                np.int64_t[:] doffs, np.int64_t **nind,
-                               np.int64_t[:] pinds, np.int64_t *pcounts,
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
@@ -651,7 +633,7 @@
         nneighbors = self.neighbor_search(opos, octree,
                         nind, nsize, nneighbors, domain_id, &oct, 0)
         self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos,
-                           opos, NULL, NULL)
+                           opos, None, None)
         self.process(offset, i, j, k, dim, opos, fields, index_fields)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
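
A hypothetical sketch, with invented names, of the two idioms this changeset
settles on: two-dimensional position arrays become np.float64_t[:,:] views
indexed as ppos[pn, j] rather than by manual stride arithmetic, and optional
buffers are signalled with None instead of NULL.

    cimport numpy as np

    cdef np.float64_t component(np.float64_t[:, :] ppos,
                                np.int64_t pn, int j):
        # The pointer style would have been: ppos[pn * 3 + j]
        return ppos[pn, j]

    cdef bint have_edges(np.float64_t[:, :] oct_left_edges):
        # Memoryview arguments may be None, so the old NULL-sentinel
        # check becomes a None check, as in neighbor_find above.
        return oct_left_edges is not None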


https://bitbucket.org/yt_analysis/yt/commits/76e10575d7fd/
Changeset:   76e10575d7fd
Branch:      yt
User:        MatthewTurk
Date:        2015-11-19 20:42:05+00:00
Summary:     Merging with upstream
Affected #:  290 files

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -32,6 +32,7 @@
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/element_mappings.c
 yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 coding_styleguide.txt
--- /dev/null
+++ b/coding_styleguide.txt
@@ -0,0 +1,101 @@
+Style Guide for Coding in yt
+============================
+
+Coding Style Guide
+------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``.
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
+API Guide
+---------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options make for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ``ds.refine_by``.
+
+    - ``RefineBy`` => ``refine_by``
+    - ``TopGridRank`` => ``dimensionality``
+    - ``TopGridDimensions`` => ``domain_dimensions``
+    - ``InitialTime`` => ``current_time``
+    - ``DomainLeftEdge`` => ``domain_left_edge``
+    - ``DomainRightEdge`` => ``domain_right_edge``
+    - ``CurrentTimeIdentifier`` => ``unique_identifier``
+    - ``CosmologyCurrentRedshift`` => ``current_redshift``
+    - ``ComovingCoordinates`` => ``cosmological_simulation``
+    - ``CosmologyOmegaMatterNow`` => ``omega_matter``
+    - ``CosmologyOmegaLambdaNow`` => ``omega_lambda``
+    - ``CosmologyHubbleConstantNow`` => ``hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!
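
The memory-copying bullet above is easier to see with a concrete illustration;
a small Python sketch of the in-place forms the new styleguide recommends (the
array here is an invented example):

    import numpy as np

    a = np.arange(12, dtype="float64")
    a.shape = (3, 4)      # reshape in place; a = a.reshape(3, 4) rebinds a new object
    np.multiply(a, 3, a)  # scale in place; a = a * 3 allocates a fresh array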

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ConjoinedCapitals, methods and functions are
-   lowercase_with_underscores.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  __metaclass__ is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  if((something) and
-   (something_else)) should be rewritten as if something and something_else.
-   Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do 
-   "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "np.multiply(a, 3, a)".
- * In general, avoid all double-underscore method names: __something is usually
-   unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
-   "SpecialGrid.__init__()".
- * Doc strings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file `doc/docstring_example.txt` for a
-   fiducial example of a docstring.
-
-API Guide
----------
-
- * Do not import "*" from anything other than "yt.funcs".
- * Internally, only import from source files directly -- instead of:
-
-   from yt.visualization.api import ProjectionPlot
-
-   do:
-
-   from yt.visualization.plot_window import ProjectionPlot
-
- * Numpy is to be imported as "np", after a long time of using "na".
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in __init__ and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  def something(a,b,c)
-   should be def something(a, b, c).
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-
- * Avoid Enzo-isms.  This includes but is not limited to:
-   * Hard-coding parameter names that are the same as those in Enzo.  The
-     following translation table should be of some help.  Note that the
-     parameters are now properties on a Dataset subclass: you access them
-     like ds.refine_by .
-     * RefineBy => refine_by
-     * TopGridRank => dimensionality
-     * TopGridDimensions => domain_dimensions
-     * InitialTime => current_time
-     * DomainLeftEdge => domain_left_edge
-     * DomainRightEdge => domain_right_edge
-     * CurrentTimeIdentifier => unique_identifier
-     * CosmologyCurrentRedshift => current_redshift
-     * ComovingCoordinates => cosmological_simulation
-     * CosmologyOmegaMatterNow => omega_matter
-     * CosmologyOmegaLambdaNow => omega_lambda
-     * CosmologyHubbleConstantNow => hubble_constant
-   * Do not assume that the domain runs from 0 .. 1.  This is not true
-     everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -23,7 +23,7 @@
 DEST_SUFFIX="yt-conda"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=1 # Do we do a source install of yt?
+INST_YT_SOURCE=0 # Do we do a source install of yt?
 
 ##################################################################
 #                                                                #
@@ -37,7 +37,7 @@
 # ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
-MINICONDA_VERSION="1.9.1"
+MINICONDA_VERSION="latest"
 YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
 
 function do_exit
@@ -61,12 +61,14 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-function get_ytproject
-{
-    [ -e $1 ] && return
-    echo "Downloading $1 from yt-project.org"
-    ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+# These are needed to prevent pushd and popd from printing to stdout
+
+function pushd () {
+    command pushd "$@" > /dev/null
+}
+
+function popd () {
+    command popd "$@" > /dev/null
 }
 
 function get_ytdata
@@ -101,122 +103,125 @@
 echo "This will install Miniconda from Continuum Analytics, the necessary"
 echo "packages to run yt, and create a self-contained environment for you to"
 echo "use yt.  Additionally, Conda itself provides the ability to install"
-echo "many other packages that can be used for other purposes."
+echo "many other packages that can be used for other purposes using the"
+echo "'conda install' command."
 echo
 MYOS=`uname -s`       # A guess at the OS
-if [ "${MYOS##Darwin}" != "${MYOS}" ]
+if [ $INST_YT_SOURCE -ne 0 ]
 then
-  echo "Looks like you're running on Mac OSX."
-  echo
-  echo "NOTE: you must have the Xcode command line tools installed."
-  echo
-  echo "The instructions for obtaining these tools varies according"
-  echo "to your exact OS version.  On older versions of OS X, you"
-  echo "must register for an account on the apple developer tools"
-  echo "website: https://developer.apple.com/downloads to obtain the"
-  echo "download link."
-  echo
-  echo "We have gathered some additional instructions for each"
-  echo "version of OS X below. If you have trouble installing yt"
-  echo "after following these instructions, don't hesitate to contact"
-  echo "the yt user's e-mail list."
-  echo
-  echo "You can see which version of OSX you are running by clicking"
-  echo "'About This Mac' in the apple menu on the left hand side of"
-  echo "menu bar.  We're assuming that you've installed all operating"
-  echo "system updates; if you have an older version, we suggest"
-  echo "running software update and installing all available updates."
-  echo
-  echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-  echo "Apple developer tools website."
-  echo
-  echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-  echo "developer tools website.  You can either download the"
-  echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-  echo "Software Update to update to XCode 3.2.6 or"
-  echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-  echo "bundle (4.1 GB)."
-  echo
-  echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-  echo "(search for Xcode)."
-  echo "Alternatively, download the Xcode command line tools from"
-  echo "the Apple developer tools website."
-  echo
-  echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
-  echo "(search for Xcode)."
-  echo "Additionally, you will have to manually install the Xcode"
-  echo "command line tools, see:"
-  echo "http://stackoverflow.com/questions/9353444"
-  echo "Alternatively, download the Xcode command line tools from"
-  echo "the Apple developer tools website."
-  echo
-  echo "NOTE: It's possible that the installation will fail, if so,"
-  echo "please set the following environment variables, remove any"
-  echo "broken installation tree, and re-run this script verbatim."
-  echo
-  echo "$ export CC=gcc"
-  echo "$ export CXX=g++"
-  echo
-  MINICONDA_OS="MacOSX-x86_64"
+    if [ "${MYOS##Darwin}" != "${MYOS}" ]
+    then
+        echo "Looks like you're running on Mac OSX."
+        echo
+        echo "NOTE: you must have the Xcode command line tools installed."
+        echo
+        echo "The instructions for obtaining these tools varies according"
+        echo "to your exact OS version.  On older versions of OS X, you"
+        echo "must register for an account on the apple developer tools"
+        echo "website: https://developer.apple.com/downloads to obtain the"
+        echo "download link."
+        echo
+        echo "We have gathered some additional instructions for each"
+        echo "version of OS X below. If you have trouble installing yt"
+        echo "after following these instructions, don't hesitate to contact"
+        echo "the yt user's e-mail list."
+        echo
+        echo "You can see which version of OSX you are running by clicking"
+        echo "'About This Mac' in the apple menu on the left hand side of"
+        echo "menu bar.  We're assuming that you've installed all operating"
+        echo "system updates; if you have an older version, we suggest"
+        echo "running software update and installing all available updates."
+        echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+        echo "Apple developer tools website."
+        echo
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+        echo "developer tools website.  You can either download the"
+        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+        echo "Software Update to update to XCode 3.2.6 or"
+        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+        echo "bundle (4.1 GB)."
+        echo
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+        echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
+        echo
+        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
+        echo "download the appropriate version of Xcode from the"
+        echo "mac app store (search for Xcode)."
+        echo
+        echo "Additionally, you will have to manually install the Xcode"
+        echo "command line tools."
+        echo
+        echo "For OS X 10.8, see:"
+        echo "http://stackoverflow.com/questions/9353444"
+        echo
+        echo "For OS X 10.9 and newer the command line tools can be installed"
+        echo "with the following command:"
+        echo "    xcode-select --install"
+    fi
+    if [ "${MYOS##Linux}" != "${MYOS}" ]
+    then
+        echo "Looks like you're on Linux."
+        echo
+        echo "Please make sure you have the developer tools for your OS "
+        echo "installed."
+        echo
+        if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+        then
+            echo "Looks like you're on an OpenSUSE-compatible machine."
+            echo
+            echo "You need to have these packages installed:"
+            echo
+            echo "  * devel_C_C++"
+            echo "  * libuuid-devel"
+            echo "  * gcc-c++"
+            echo "  * chrpath"
+            echo
+            echo "You can accomplish this by executing:"
+            echo
+            echo "$ sudo zypper install -t pattern devel_C_C++"
+            echo "$ sudo zypper install gcc-c++ libuuid-devel zip"
+            echo "$ sudo zypper install chrpath"
+        fi
+        if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+        then
+            echo "Looks like you're on an Ubuntu-compatible machine."
+            echo
+            echo "You need to have these packages installed:"
+            echo
+            echo "  * libssl-dev"
+            echo "  * build-essential"
+            echo "  * libncurses5"
+            echo "  * libncurses5-dev"
+            echo "  * uuid-dev"
+            echo "  * chrpath"
+            echo
+            echo "You can accomplish this by executing:"
+            echo
+            echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
+            echo
+        fi
+        echo
+        echo "If you are running on a supercomputer or other module-enabled"
+        echo "system, please make sure that the GNU module has been loaded."
+        echo
+    fi
 fi
-if [ "${MYOS##Linux}" != "${MYOS}" ]
+if [ "${MYOS##x86_64}" != "${MYOS}" ]
 then
-  echo "Looks like you're on Linux."
-  echo
-  echo "Please make sure you have the developer tools for your OS installed."
-  echo
-  if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
-  then
-    echo "Looks like you're on an OpenSUSE-compatible machine."
-    echo
-    echo "You need to have these packages installed:"
-    echo
-    echo "  * devel_C_C++"
-    echo "  * libopenssl-devel"
-    echo "  * libuuid-devel"
-    echo "  * zip"
-    echo "  * gcc-c++"
-    echo "  * chrpath"
-    echo
-    echo "You can accomplish this by executing:"
-    echo
-    echo "$ sudo zypper install -t pattern devel_C_C++"
-    echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
-    echo "$ sudo zypper install chrpath"
-  fi
-  if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
-  then
-    echo "Looks like you're on an Ubuntu-compatible machine."
-    echo
-    echo "You need to have these packages installed:"
-    echo
-    echo "  * libssl-dev"
-    echo "  * build-essential"
-    echo "  * libncurses5"
-    echo "  * libncurses5-dev"
-    echo "  * zip"
-    echo "  * uuid-dev"
-    echo "  * chrpath"
-    echo
-    echo "You can accomplish this by executing:"
-    echo
-    echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
-    echo
-  fi
-  echo
-  echo "If you are running on a supercomputer or other module-enabled"
-  echo "system, please make sure that the GNU module has been loaded."
-  echo
-  if [ "${MYOS##x86_64}" != "${MYOS}" ]
-  then
     MINICONDA_OS="Linux-x86_64"
-  elif [ "${MYOS##i386}" != "${MYOS}" ]
-  then
+elif [ "${MYOS##i386}" != "${MYOS}" ]
+then
     MINICONDA_OS="Linux-x86"
-  else
-    echo "Not sure which type of Linux you're on.  Going with x86_64."
+elif [ "${MYOS##Darwin}" != "${MYOS}" ]
+then
+     MINICONDA_OS="MacOSX-x86_64"
+else
+    echo "Not sure which Linux distro you are running."
+    echo "Going with x86_64 architecture."
     MINICONDA_OS="Linux-x86_64"
-  fi
 fi
 echo
 echo "If you'd rather not continue, hit Ctrl-C."
@@ -233,7 +238,7 @@
 if type -P wget &>/dev/null
 then
     echo "Using wget"
-    export GETFILE="wget -nv"
+    export GETFILE="wget -nv -nc"
 else
     echo "Using curl"
     export GETFILE="curl -sSO"
@@ -250,9 +255,6 @@
 
 log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
 
-# I don't think we need OR want this anymore:
-#export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-
 # This we *do* need.
 export PATH=${DEST_DIR}/bin:$PATH
 
@@ -261,51 +263,40 @@
 
 declare -a YT_DEPS
 YT_DEPS+=('python')
-YT_DEPS+=('distribute')
-YT_DEPS+=('libpng')
+YT_DEPS+=('setuptools')
 YT_DEPS+=('numpy')
-YT_DEPS+=('pygments')
-YT_DEPS+=('jinja2')
-YT_DEPS+=('tornado')
-YT_DEPS+=('pyzmq')
+YT_DEPS+=('jupyter')
 YT_DEPS+=('ipython')
 YT_DEPS+=('sphinx')
 YT_DEPS+=('h5py')
 YT_DEPS+=('matplotlib')
 YT_DEPS+=('cython')
 YT_DEPS+=('nose')
+YT_DEPS+=('conda-build')
+YT_DEPS+=('mercurial')
+YT_DEPS+=('sympy')
 
 # Here is our dependency list for yt
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/dev
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/gpl
 log_cmd conda update --yes conda
 
-echo "Current dependencies: ${YT_DEPS[@]}"
 log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
-log_cmd conda install --yes ${YT_DEPS[@]}
-
-echo "Installing mercurial."
-get_ytrecipe mercurial
+for YT_DEP in "${YT_DEPS[@]}"; do
+    echo "Installing $YT_DEP"
+    log_cmd conda install --yes ${YT_DEP}
+done
 
 if [ $INST_YT_SOURCE -eq 0 ]
 then
-  echo "Installing yt as a package."
-  get_ytrecipe yt
+  echo "Installing yt"
+  log_cmd conda install --yes yt
 else
-  # We do a source install.
-  YT_DIR="${DEST_DIR}/src/yt-hg"
-  export PNG_DIR=${DEST_DIR}
-  export FTYPE_DIR=${DEST_DIR}
-  export HDF5_DIR=${DEST_DIR}
-  log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-  pushd ${YT_DIR}
-  log_cmd python setup.py develop
-  popd
-  log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate 
-  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate
-  log_cmd cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh
-  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
+    # We do a source install.
+    echo "Installing yt from source"
+    YT_DIR="${DEST_DIR}/src/yt-hg"
+    log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+    pushd ${YT_DIR}
+    log_cmd python setup.py develop
+    popd
 fi
 
 echo
@@ -314,34 +305,26 @@
 echo
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
-if [ $INST_YT_SOURCE -eq 0 ]
-then
-  echo "You must now modify your PATH variable by prepending:"
-  echo 
-  echo "   $DEST_DIR/bin"
-  echo
-  echo "For example, if you use bash, place something like this at the end"
-  echo "of your ~/.bashrc :"
-  echo
-  echo "   export PATH=$DEST_DIR/bin:$PATH"
-else
-  echo "To run from this new installation, use the activate script for this "
-  echo "environment."
-  echo
-  echo "    $ source $DEST_DIR/bin/activate"
-  echo
-  echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
-  echo "LD_LIBRARY_PATH to match your new yt install.  If you use csh, just"
-  echo "append .csh to the above."
-fi
+echo "You must now modify your PATH variable by prepending:"
+echo 
+echo "   $DEST_DIR/bin"
+echo
+echo "On Bash-style shells you can copy/paste the following command to "
+echo "temporarily activate the yt installtion:"
+echo
+echo "    export PATH=$DEST_DIR/bin:\$PATH"
+echo
+echo "and on csh-style shells:"
+echo
+echo "    setenv PATH $DEST_DIR/bin:\$PATH"
+echo
+echo "You can also update the init file appropriate for your shell to include"
+echo "the same command."
 echo
 echo "To get started with yt, check out the orientation:"
 echo
 echo "    http://yt-project.org/doc/orientation/"
 echo
-echo "or just activate your environment and run 'yt serve' to bring up the"
-echo "yt GUI."
-echo
 echo "For support, see the website and join the mailing list:"
 echo
 echo "    http://yt-project.org/"

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -233,53 +233,61 @@
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-	echo "The instructions for obtaining these tools varies according"
-	echo "to your exact OS version.  On older versions of OS X, you"
-	echo "must register for an account on the apple developer tools"
-	echo "website: https://developer.apple.com/downloads to obtain the"
-	echo "download link."
-	echo
-	echo "We have gathered some additional instructions for each"
-	echo "version of OS X below. If you have trouble installing yt"
-	echo "after following these instructions, don't hesitate to contact"
-	echo "the yt user's e-mail list."
-	echo
-	echo "You can see which version of OSX you are running by clicking"
-	echo "'About This Mac' in the apple menu on the left hand side of"
-	echo "menu bar.  We're assuming that you've installed all operating"
-	echo "system updates; if you have an older version, we suggest"
-	echo "running software update and installing all available updates."
-	echo
+        echo "The instructions for obtaining these tools varies according"
+        echo "to your exact OS version.  On older versions of OS X, you"
+        echo "must register for an account on the apple developer tools"
+        echo "website: https://developer.apple.com/downloads to obtain the"
+        echo "download link."
+        echo
+        echo "We have gathered some additional instructions for each"
+        echo "version of OS X below. If you have trouble installing yt"
+        echo "after following these instructions, don't hesitate to contact"
+        echo "the yt user's e-mail list."
+        echo
+        echo "You can see which version of OSX you are running by clicking"
+        echo "'About This Mac' in the apple menu on the left hand side of"
+        echo "menu bar.  We're assuming that you've installed all operating"
+        echo "system updates; if you have an older version, we suggest"
+        echo "running software update and installing all available updates."
+        echo
         echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-	echo "Apple developer tools website."
+        echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-	echo "developer tools website.  You can either download the"
-	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or"
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-	echo "bundle (4.1 GB)."
+        echo "developer tools website.  You can either download the"
+        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+        echo "Software Update to update to XCode 3.2.6 or"
+        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+        echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-	echo "(search for Xcode)."
+        echo "(search for Xcode)."
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.4, 10.9, and 10.10: download the appropriate version of"
-	echo "Xcode from the mac app store (search for Xcode)."
-    echo
-	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools."
-    echo
-    echo "For OS X 10.8, see:"
-   	echo "http://stackoverflow.com/questions/9353444"
-	echo
-    echo "For OS X 10.9 and 10.10, the command line tools can be installed"
-    echo "with the following command:"
-    echo "    xcode-select --install"
-    echo
-    OSX_VERSION=`sw_vers -productVersion`
-    if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
+        echo "download the appropriate version of Xcode from the"
+        echo "mac app store (search for Xcode)."
+        echo
+        echo "Additionally, you will have to manually install the Xcode"
+        echo "command line tools."
+        echo
+        echo "For OS X 10.8, see:"
+        echo "http://stackoverflow.com/questions/9353444"
+        echo
+        echo "For OS X 10.9 and newer the command line tools can be installed"
+        echo "with the following command:"
+        echo "    xcode-select --install"
+        echo
+        echo "For OS X 10.11, you will additionally need to install the OpenSSL"
+        echo "library using a package manager like homebrew or macports."
+        echo "If you install fails with a message like"
+        echo "    ImportError: cannot import HTTPSHandler"
+        echo "then you do not have the OpenSSL headers available in a location"
+        echo "visible to your C compiler. Consider installing yt using the"
+        echo "get_yt.sh script instead, as that bundles OpenSSL."
+        OSX_VERSION=`sw_vers -productVersion`
+        if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
             MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
@@ -358,17 +366,17 @@
     fi
     if [ $INST_SCIPY -eq 1 ]
     then
-	echo
-	echo "Looks like you've requested that the install script build SciPy."
-	echo
-	echo "If the SciPy build fails, please uncomment one of the the lines"
-	echo "at the top of the install script that sets NUMPY_ARGS, delete"
-	echo "any broken installation tree, and re-run the install script"
-	echo "verbatim."
-	echo
-	echo "If that doesn't work, don't hesitate to ask for help on the yt"
-	echo "user's mailing list."
-	echo
+    echo
+    echo "Looks like you've requested that the install script build SciPy."
+    echo
+    echo "If the SciPy build fails, please uncomment one of the the lines"
+    echo "at the top of the install script that sets NUMPY_ARGS, delete"
+    echo "any broken installation tree, and re-run the install script"
+    echo "verbatim."
+    echo
+    echo "If that doesn't work, don't hesitate to ask for help on the yt"
+    echo "user's mailing list."
+    echo
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -490,9 +498,9 @@
 
 if [ $INST_PY3 -eq 1 ]
 then
-	 PYTHON_EXEC='python3.4'
+     PYTHON_EXEC='python3.4'
 else 
-	 PYTHON_EXEC='python2.7'
+     PYTHON_EXEC='python2.7'
 fi
 
 function do_setup_py
@@ -899,28 +907,28 @@
 else
     if [ ! -e $SCIPY/done ]
     then
-	if [ ! -e BLAS/done ]
-	then
-	    tar xfz blas.tar.gz
-	    echo "Building BLAS"
-	    cd BLAS
-	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
-	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
-	    rm -rf *.o
-	    touch done
-	    cd ..
-	fi
-	if [ ! -e $LAPACK/done ]
-	then
-	    tar xfz $LAPACK.tar.gz
-	    echo "Building LAPACK"
-	    cd $LAPACK/
-	    cp INSTALL/make.inc.gfortran make.inc
-	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
-	    touch done
-	    cd ..
-	fi
+    if [ ! -e BLAS/done ]
+    then
+        tar xfz blas.tar.gz
+        echo "Building BLAS"
+        cd BLAS
+        gfortran -O2 -fPIC -fno-second-underscore -c *.f
+        ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+        ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
+        rm -rf *.o
+        touch done
+        cd ..
+    fi
+    if [ ! -e $LAPACK/done ]
+    then
+        tar xfz $LAPACK.tar.gz
+        echo "Building LAPACK"
+        cd $LAPACK/
+        cp INSTALL/make.inc.gfortran make.inc
+        ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
     export LAPACK=$PWD/$LAPACK/liblapack.a
@@ -1030,7 +1038,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -59,7 +59,7 @@
   from yt.analysis_modules.halo_finding.api import *
 
   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(ds)
+  halo_list = HaloFinder(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,3 +1,5 @@
+.. _photon_simulator:
+
 Constructing Mock X-ray Observations
 ------------------------------------
 
@@ -98,9 +100,8 @@
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
    data package (see the ``xray_data`` `README <xray_data_README.html>`_ 
-   for details on the latter). Make sure that
-   in what follows you specify the full path to the locations of these
-   files.
+   for details on the latter). Make sure that in what follows you 
+   specify the full path to the locations of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could
@@ -197,7 +198,7 @@
 
 .. code:: python
 
-    A = 6000.
+    A = 3000.
     exp_time = 4.0e5
     redshift = 0.05
     cosmo = Cosmology()
@@ -298,7 +299,7 @@
 
 The second option, ``TableAbsorbModel``, takes as input an HDF5 file
 containing two datasets, ``"energy"`` (in keV), and ``"cross_section"``
-(in cm2), and the Galactic column density :math:`N_H`:
+(in :math:`cm^2`), and the Galactic column density :math:`N_H`:
 
 .. code:: python
 
@@ -307,7 +308,7 @@
 Now we're ready to project the photons. First, we choose a line-of-sight
 vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
-specify a ``sky_center`` in RA,DEC on the sky in degrees.
+specify a ``sky_center`` in RA and DEC on the sky in degrees.
 
 Also, we're going to convolve the photons with instrument ``responses``.
 For this, you need a ARF/RMF pair with matching energy bins. This is of
@@ -322,8 +323,8 @@
 
 .. code:: python
 
-    ARF = "chandra_ACIS-S3_onaxis_arf.fits"
-    RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
+    ARF = "acisi_aimpt_cy17.arf"
+    RMF = "acisi_aimpt_cy17.rmf"
     normal = [0.0,0.0,1.0]
     events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
                                      absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
@@ -540,7 +541,7 @@
 
    sphere = ds.sphere("c", (1.0,"Mpc"))
        
-   A = 6000.
+   A = 3000.
    exp_time = 2.0e5
    redshift = 0.05
    cosmo = Cosmology()
@@ -555,7 +556,8 @@
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
-                                    responses=["sim_arf.fits","sim_rmf.fits"], 
+                                    responses=["acisi_aimpt_cy17.arf",
+                                               "acisi_aimpt_cy17.rmf"], 
                                     absorb_model=abs_model,
                                     north_vector=[0.0,1.0,0.0])
 

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -374,6 +374,17 @@
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
 
+Other smoothing kernels besides the cubic spline one are available through a
+keyword argument ``kernel_name`` of the method ``add_smoothed_particle_field``.
+Currently available kernel names include:
+
+* ``cubic``, ``quartic``, and ``quintic`` - spline kernels.
+* ``wendland2``, ``wendland4``, and ``wendland6`` - Wendland kernels.
+
+The added smoothed particle field can be accessed by
+``("deposit", "particletype_kernelname_smoothed_fieldname")`` (except for the
+cubic spline kernel, which obeys the naming scheme given above).
+
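+As a minimal sketch (assuming a dataset with a ``Gas`` particle type and a
+``Temperature`` field, as in the example above; the dataset path is a
+placeholder):
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load("TipsyGalaxy/galaxy.00300")
+   # smooth with the quartic spline kernel instead of the default cubic
+   ds.add_smoothed_particle_field(("Gas", "Temperature"),
+                                  kernel_name="quartic")
+   ad = ds.all_data()
+   print (ad["deposit", "Gas_quartic_smoothed_Temperature"])
+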
 Computing the Nth Nearest Neighbor
 ----------------------------------
 

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -54,10 +54,13 @@
  
 .. code-block:: python
 
-   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.save_as_dataset("my_images.h5", fields=["density","temperature"])
    frb.export_fits("my_images.fits", fields=["density","temperature"],
                    clobber=True, units="kpc")
 
+In the HDF5 case, the created file can be reloaded just like a regular dataset with
+``yt.load`` and will, itself, be a first-class dataset.  For more information on
+this, see :ref:`saving-grid-data-containers`.
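+For example, the HDF5 file written above can be reloaded and queried through
+its ``data`` attribute:
+
+.. code-block:: python
+
+   frb_ds = yt.load("my_images.h5")
+   print (frb_ds.data["density"])
+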
 In the FITS case, there is an option for setting the ``units`` of the coordinate system in
 the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
 

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -20,5 +20,6 @@
    units/index
    filtering
    generating_processed_data
+   saving_data
    time_series_analysis
    parallel_computation

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -457,69 +457,9 @@
 ---------------------------
 
 Often, when operating interactively or via the scripting interface, it is
-convenient to save an object or multiple objects out to disk and then restart
-the calculation later.  For example, this is useful after clump finding 
-(:ref:`clump_finding`), which can be very time consuming.  
-Typically, the save and load operations are used on 3D data objects.  yt
-has a separate set of serialization operations for 2D objects such as
-projections.
-
-yt will save out objects to disk under the presupposition that the
-construction of the objects is the difficult part, rather than the generation
-of the data -- this means that you can save out an object as a description of
-how to recreate it in space, but not the actual data arrays affiliated with
-that object.  The information that is saved includes the dataset off of
-which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a dataset
-from whatever limited information it has in the save file.
-
-You can save objects to an output file using the function 
-:func:`~yt.data_objects.index.save_object`: 
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
-   sp.save_object("sphere_name", "save_file.cpkl")
-
-This will store the object as ``sphere_name`` in the file
-``save_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  
-
-To re-load an object saved this way, you can use the shelve module directly:
-
-.. code-block:: python
-
-   import yt
-   import shelve
-   ds = yt.load("my_data") 
-   saved_fn = shelve.open("save_file.cpkl")
-   ds, sp = saved_fn["sphere_name"]
-
-Additionally, we can store multiple objects in a single shelve file, so we 
-have to call the sphere by name.
-
-For certain data objects such as projections, serialization can be performed
-automatically if ``serialize`` option is set to ``True`` in :ref:`the
-configuration file <configuration-file>` or set directly in the script:
-
-.. code-block:: python
-
-   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"
-
-.. note:: Use serialization with caution. Enabling serialization means that
-   once a projection of a dataset has been created (and stored in the .yt file
-   in the same directory), any subsequent changes to that dataset will be
-   ignored when attempting to create the same projection. So if you take a
-   density projection of your dataset in the 'x' direction, then somehow tweak
-   that dataset significantly, and take the density projection again, yt will
-   default to finding the original projection and 
-   :ref:`not your new one <faq-old-data>`.
-
-.. note:: It's also possible to use the standard :mod:`cPickle` module for
-          loading and storing objects -- so in theory you could even save a
-          list of objects!
-
-This method works for clumps, as well, and the entire clump index will be
-stored and restored upon load.
+convenient to save an object to disk and then restart the calculation later or
+transfer the data from a container to another filesystem.  This can be
+particularly useful when working with extremely large datasets.  Field data
+can be saved to disk in a format that allows for it to be reloaded just like
+a regular dataset.  For information on how to do this, see
+:ref:`saving-data-containers`.

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -501,11 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), with Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative.  It has been found that :func:`parallelHF` needs
-roughly 1 MB of memory per 5,000 particles, although recent work has improved
-this and the memory requirement is now smaller than this. But this is a good
-starting point for beginning to calculate the memory required for halo-finding.
-For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo_finding`.
 
 **Volume Rendering**
 

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/analyzing/saving_data.rst
--- /dev/null
+++ b/doc/source/analyzing/saving_data.rst
@@ -0,0 +1,243 @@
+.. _saving_data:
+
+Saving Reloadable Data
+======================
+
+Most of the data loaded into or generated with yt can be saved to a
+format that can be reloaded as a first-class dataset.  This includes
+the following:
+
+  * geometric data containers (regions, spheres, disks, rays, etc.)
+
+  * grid data containers (covering grids, arbitrary grids, fixed
+    resolution buffers)
+
+  * spatial plots (projections, slices, cutting planes)
+
+  * profiles
+
+  * generic array data
+
+In the case of projections, slices, and profiles, reloaded data can be
+used to remake plots.  For information on this, see :ref:`remaking-plots`.
+
+.. _saving-data-containers:
+
+Geometric Data Containers
+-------------------------
+
+Data from geometric data containers can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+   fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+   print (fn)
+
+This function will return the name of the file to which the dataset
+was saved.  The filename will be a combination of the name of the
+original dataset and the type of data container.  Optionally, a
+specific filename can be given with the ``filename`` keyword.  If no
+fields are given, the fields that have previously been queried will
+be saved.
+
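+As a brief sketch of the ``filename`` keyword (the name here is arbitrary):
+
+.. code-block:: python
+
+   # write to an explicitly chosen file instead of the default name
+   fn = sphere.save_as_dataset(filename="my_sphere.h5",
+                               fields=["density"])
+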
+The newly created dataset can be loaded like all other supported
+data through ``yt.load``.  Once loaded, field data can be accessed
+through the traditional data containers or through the ``data``
+attribute, which will be a data container configured like the
+original data container used to make the dataset.  Grid data is
+accessed by the ``grid`` data type and particle data is accessed
+with the original particle type.  As with the original dataset, grid
+positions and cell sizes are accessible with, for example,
+``("grid", "x")`` and ``("grid", "dx")``.  Particle positions are
+accessible as ``(<particle_type>, "particle_position_x")``.  All original
+simulation parameters are accessible in the ``parameters``
+dictionary, normally associated with all datasets.
+
+.. code-block:: python
+
+   sphere_ds = yt.load("DD0046_sphere.h5")
+
+   # use the original data container
+   print (sphere_ds.data["grid", "density"])
+
+   # create a new data container
+   ad = sphere_ds.all_data()
+
+   # grid data
+   print (ad["grid", "density"])
+   print (ad["grid", "x"])
+   print (ad["grid", "dx"])
+
+   # particle data
+   print (ad["all", "particle_mass"])
+   print (ad["all", "particle_position_x"])
+
+Note that because field data queried from geometric containers is
+returned as unordered 1D arrays, data container datasets are treated,
+effectively, as particle data.  Thus, 3D indexing of grid data from
+these datasets is not possible.
+
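+For example, a quick check of the returned shape makes this concrete:
+
+.. code-block:: python
+
+   # a flat array of selected cells, not an (Nx, Ny, Nz) block
+   print (sphere_ds.data["grid", "density"].shape)
+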
+.. _saving-grid-data-containers:
+
+Grid Data Containers
+--------------------
+
+Data containers that return field data as multidimensional arrays
+can be saved so as to preserve this type of access.  This includes
+covering grids, arbitrary grids, and fixed resolution buffers.
+Saving data from these containers works just as with geometric data
+containers.  Once reloaded, field data can still be accessed through
+geometric data containers.
+
+.. code-block:: python
+
+   cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
+   fn = cg.save_as_dataset(fields=["density", "particle_mass"])
+
+   cg_ds = yt.load(fn)
+   ad = cg_ds.all_data()
+   print (ad["grid", "density"])
+
+Multidimensional indexing of field data is also available through
+the ``data`` attribute.
+
+.. code-block:: python
+
+   print (cg_ds.data["grid", "density"])
+
+Fixed resolution buffers work just the same.
+
+.. code-block:: python
+
+   my_proj = ds.proj("density", "x", weight_field="density")
+   frb = my_proj.to_frb(1.0, (800, 800))
+   fn = frb.save_as_dataset(fields=["density"])
+   frb_ds = yt.load(fn)
+   print (frb_ds.data["density"])
+
+.. _saving-spatial-plots:
+
+Spatial Plots
+-------------
+
+Spatial plots, such as projections, slices, and off-axis slices
+(cutting planes), can also be saved and reloaded.
+
+.. code-block:: python
+
+   proj = ds.proj("density", "x", weight_field="density")
+   proj.save_as_dataset()
+
+Once reloaded, they can be handed to their associated plotting
+functions to make images.
+
+.. code-block:: python
+
+   proj_ds = yt.load("DD0046_proj.h5")
+   p = yt.ProjectionPlot(proj_ds, "x", "density",
+                         weight_field="density")
+   p.save()
+
+.. _saving-profile-data:
+
+Profiles
+--------
+
+Profiles created with :func:`~yt.data_objects.profiles.create_profile`,
+:class:`~yt.visualization.profile_plotter.ProfilePlot`, and
+:class:`~yt.visualization.profile_plotter.PhasePlot` can be saved with
+the :func:`~yt.data_objects.profiles.save_as_dataset` function, which
+works just as above.  Profile datasets are a type of non-spatial grid
+dataset.  Geometric selection is not possible, but data can be
+accessed through the ``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+
+   profile_2d = yt.create_profile(ad, ["density", "temperature"],
+                                  "cell_mass", weight_field=None,
+                                  n_bins=(128, 128))
+   profile_2d.save_as_dataset()
+
+   prof_2d_ds = yt.load("DD0046_Profile2D.h5")
+   print (prof_2d_ds.data["cell_mass"])
+
+The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D
+arrays with ``"x"``, ``"y"``, and ``"z"``.
+
+.. code-block:: python
+
+   print (prof_2d_ds.data["x"])
+
+The bin fields can also be returned with the same shape as the profile
+data by accessing them with their original names.  This allows for
+boolean masking of profile data using the bin fields.
+
+.. code-block:: python
+
+   # density is the x bin field
+   print (prof_2d_ds.data["density"])
+
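+A minimal sketch of such a mask (the threshold value is arbitrary and
+assumes the 2D profile created above):
+
+.. code-block:: python
+
+   # select only bins above a density threshold
+   dense = prof_2d_ds.data["density"] > prof_2d_ds.quan(1e-28, "g/cm**3")
+   print (prof_2d_ds.data["cell_mass"][dense])
+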
+For 1D, 2D, and 3D profile datasets, a fake profile object will be
+constructed by accessing the ``.profile`` attribute.  This is used
+primarily in the case of 1D and 2D profiles to create figures using
+:class:`~yt.visualization.profile_plotter.ProfilePlot` and
+:class:`~yt.visualization.profile_plotter.PhasePlot`.
+
+.. code-block:: python
+
+   p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature",
+                    "cell_mass", weight_field=None)
+   p.save()
+
+.. _saving-array-data:
+
+Generic Array Data
+------------------
+
+Generic arrays can be saved and reloaded as non-spatial data using
+the :func:`~yt.frontends.ytdata.utilities.save_as_dataset` function,
+also available as ``yt.save_as_dataset``.  As with profiles, geometric
+selection is not possible, but the data can be accessed through the
+``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   region = ds.box([0.25]*3, [0.75]*3)
+   sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+   my_data = {}
+   my_data["region_density"] = region["density"]
+   my_data["sphere_density"] = sphere["density"]
+   yt.save_as_dataset(ds, "test_data.h5", my_data)
+
+   array_ds = yt.load("test_data.h5")
+   print (array_ds.data["region_density"])
+   print (array_ds.data["sphere_density"])
+
+Array data can be saved with or without a dataset loaded.  If no
+dataset has been loaded, a fake dataset can be provided as a
+dictionary.
+
+.. notebook-cell::
+
+   import numpy as np
+   import yt
+
+   my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
+              "temperature": yt.YTArray(np.random.random(10), "K")}
+   fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
+   yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
+
+   new_ds = yt.load("random_data.h5")
+   print (new_ds.data["density"])

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,7 +67,7 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3'
+version = '3.3-dev'
 # The full version, including alpha/beta/rc tags.
 release = '3.3-dev'
 

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -4,6 +4,13 @@
 # In this example we will show how to use the AMRKDTree to take a simulation
 # with 8 levels of refinement and only use levels 0-3 to render the dataset.
 
+# Currently this cookbook is flawed in that the data that is covered by the
+# higher resolution data gets masked during the rendering.  This should be
+# fixed by changing either the data source or the code in
+# yt/utilities/amr_kdtree.py where data is being masked for the partitioned
+# grid.  Right now the quick fix is to create a data_collection, but this
+# will only work for patch based simulations that have ds.index.grids.
+
 # We begin by loading up yt, and importing the AMRKDTree
 import numpy as np
 
@@ -12,58 +19,58 @@
 
 # Load up a dataset and define the kdtree
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-kd = AMRKDTree(ds)
+im, sc = yt.volume_render(ds, 'density', fname='v0.png')
+sc.camera.set_width(ds.arr(100, 'kpc'))
+render_source = sc.get_source(0)
+kd = render_source.volume
 
 # Print out specifics of KD Tree
 print("Total volume of all bricks = %i" % kd.count_volume())
 print("Total number of cells = %i" % kd.count_cells())
 
-# Define a camera and take an volume rendering.
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
-                  tf, volume=kd)
-tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
-
-# This rendering is okay, but lets say I'd like to improve it, and I don't want
-# to spend the time rendering the high resolution data.  What we can do is
-# generate a low resolution version of the AMRKDTree and pass that in to the
-# camera.  We do this by specifying a maximum refinement level of 6.
-
-kd_low_res = AMRKDTree(ds, max_level=6)
+new_source = ds.all_data()
+new_source.max_level = 3
+kd_low_res = AMRKDTree(ds, data_source=new_source)
 print(kd_low_res.count_volume())
 print(kd_low_res.count_cells())
 
 # Now we pass this in as the volume to our camera, and render the snapshot
 # again.
 
-cam.volume = kd_low_res
-cam.snapshot("v4.png", clip_ratio=6.0)
+render_source.set_volume(kd_low_res)
+render_source.set_fields('density')
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
 
+tf = render_source.transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-cam.snapshot("v4.png", clip_ratio=6.0)
-
-# That seemed to pick out som interesting structures.  Now let's bump up the
-# opacity.
-
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
+#
+## That seemed to pick out some interesting structures.  Now let's bump up the
+## opacity.
+#
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v3.png", clip_ratio=6.0)
-
-# This looks pretty good, now lets go back to the full resolution AMRKDTree
-
-cam.volume = kd
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
+#
+## This looks pretty good, now lets go back to the full resolution AMRKDTree
+#
+render_source.set_volume(kd)
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # This looks great!

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -3,40 +3,29 @@
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-
-# Set up transfer function
-tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
-tf.add_layers(6, w=0.05)
-
-# Set up camera paramters
-c = [0.5, 0.5, 0.5]  # Center
-L = [1, 1, 1]  # Normal Vector
-W = 1.0  # Width
-Nvec = 512  # Pixels on a side
-
-# Specify a north vector, which helps with rotations.
-north_vector = [0., 0., 1.]
+sc = yt.create_scene(ds)
+cam = sc.camera
+cam.resolution = (512, 512)
+cam.set_width(ds.domain_width/20.0)
 
 # Find the maximum density location, store it in max_c
 v, max_c = ds.find_max('density')
 
-# Initialize the Camera
-cam = ds.camera(c, L, W, (Nvec, Nvec), tf, north_vector=north_vector)
 frame = 0
-
-# Do a rotation over 5 frames
-for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
-    frame += 1
-
 # Move to the maximum density location over 5 frames
-for i, snapshot in enumerate(cam.move_to(max_c, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.iter_move(max_c, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
-for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.iter_zoom(10.0, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
+
+# Do a rotation over 5 frames
+for _ in cam.iter_rotate(np.pi, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    frame += 1

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -196,10 +196,41 @@
 
 In this recipe, we move a camera through a domain and take multiple volume
 rendering snapshots.
-See :ref:`volume_rendering` for more information.
+See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py
 
+Volume Rendering with Custom Camera
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use customized camera properties. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_camera_volume_rendering.py
+
+.. _cookbook-custom-transfer-function:
+
+Volume Rendering with a Custom Transfer Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use a customized transfer function. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_transfer_function_volume_rendering.py
+
+.. _cookbook-sigma_clip:
+
+Volume Rendering with Sigma Clipping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we output several images with different values of ``sigma_clip``
+in order to change the contrast of the resulting image.  See 
+:ref:`sigma_clip` for more information.
+
+.. yt_cookbook:: sigma_clip.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 
@@ -212,6 +243,15 @@
 
 .. yt_cookbook:: zoomin_frames.py
 
+.. _cookbook-various_lens:
+
+Various Lens Types for Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example illustrates the usage and features of the different lenses available for volume rendering.
+
+.. yt_cookbook:: various_lens.py
+
 .. _cookbook-opaque_rendering:
 
 Opaque Volume Rendering
@@ -220,7 +260,7 @@
 This recipe demonstrates how to make semi-opaque volume renderings, but also
 how to step through and try different things to identify the type of volume
 rendering you want.
-See :ref:`volume_rendering` for more information.
+See :ref:`opaque_rendering` for more information.
 
 .. yt_cookbook:: opaque_rendering.py
 
@@ -235,23 +275,27 @@
 
 .. yt_cookbook:: amrkdtree_downsampling.py
 
+.. _cookbook-volume_rendering_annotations:
+
 Volume Rendering with Bounding Box and Overlaid Grids
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to overplot a bounding box on a volume rendering
 as well as overplotting grids representing the level of refinement achieved
 in different regions of the code.
-See :ref:`volume_rendering` for more information.
+See :ref:`volume_rendering_annotations` for more information.
 
 .. yt_cookbook:: rendering_with_box_and_grids.py
 
 Volume Rendering with Annotation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to write the simulation time, show an
 axis triad indicating the direction of the coordinate system, and show
-the transfer function on a volume rendering.
-See :ref:`volume_rendering` for more information.
+the transfer function on a volume rendering.  Please note that this 
+recipe relies on the old volume rendering interface.  While one can
+continue to use this interface, it may be incompatible with some of the
+new developments and the infrastructure described in :ref:`volume_rendering`.
 
 .. yt_cookbook:: vol-annotated.py
 

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/custom_camera_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -0,0 +1,22 @@
+import yt
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Now increase the resolution
+sc.camera.resolution = (1024, 1024)
+
+# Set the camera focus to a position that is offset from the center of
+# the domain
+sc.camera.focus = ds.arr([0.3, 0.3, 0.3], 'unitary')
+
+# Move the camera position to the other side of the dataset
+sc.camera.position = ds.arr([0, 0, 0], 'unitary')
+
+# Save to disk with a custom filename and apply sigma clipping to eliminate
+# very bright pixels, producing an image with better contrast.
+sc.render()
+sc.save('custom.png', sigma_clip=4)

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -0,0 +1,24 @@
+import yt
+import numpy as np
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Modify the transfer function
+
+# First get the render source, in this case the entire domain, with field ('gas','density')
+render_source = sc.get_source(0)
+
+# Clear the transfer function
+render_source.transfer_function.clear()
+
+# Map a range of density values (in log space) to the Reds_r colormap
+render_source.transfer_function.map_to_colormap(
+    np.log10(ds.quan(5.0e-31, 'g/cm**3')),
+    np.log10(ds.quan(1.0e-29, 'g/cm**3')),
+    scale=30.0, colormap='RdBu_r')
+
+sc.save('new_tf.png')

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -2,27 +2,14 @@
 # volume renderings, to pngs with varying backgrounds.
 
 # First we use the simple_volume_rendering.py recipe from above to generate
-# a standard volume rendering.  The only difference is that we use 
-# grey_opacity=True with our TransferFunction, as the colored background 
-# functionality requires images with an opacity between 0 and 1. 
-
-# We have removed all the comments from the volume rendering recipe for 
-# brevity here, but consult the recipe for more details.
+# a standard volume rendering.
 
 import yt
 import numpy as np
 
 ds = yt.load("Enzo_64/DD0043/data0043")
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)), grey_opacity=True)
-tf.add_layers(5, w=0.02, colormap="spectral")
-c = [0.5, 0.5, 0.5]
-L = [0.5, 0.2, 0.7]
-W = 1.0
-Npixels = 512
-cam = ds.camera(c, L, W, Npixels, tf)
-im = cam.snapshot("original.png" % ds, clip_ratio=8.0)
+im, sc = yt.volume_render(ds, 'density')
+im.write_png("original.png", sigma_clip=8.0)
 
 # Our image array can now be transformed to include different background
 # colors.  By default, the background color is black.  The following
@@ -35,10 +22,10 @@
 # None  (0.,0.,0.,0.) <-- Transparent!
 # any rgba list/array: [r,g,b,a], bounded by 0..1
 
-# We include the clip_ratio=8 keyword here to bring out more contrast between
+# We include the sigma_clip=8 keyword here to bring out more contrast between
 # the background and foreground, but it is entirely optional.
 
-im.write_png('black_bg.png', background='black', clip_ratio=8.0)
-im.write_png('white_bg.png', background='white', clip_ratio=8.0)
-im.write_png('green_bg.png', background=[0.,1.,0.,1.], clip_ratio=8.0)
-im.write_png('transparent_bg.png', background=None, clip_ratio=8.0)
+im.write_png('black_bg.png', background='black', sigma_clip=8.0)
+im.write_png('white_bg.png', background='white', sigma_clip=8.0)
+im.write_png('green_bg.png', background=[0.,1.,0.,1.], sigma_clip=8.0)
+im.write_png('transparent_bg.png', background=None, sigma_clip=8.0)

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -44,8 +44,10 @@
    embedded_webm_animation
    gadget_notebook
    owls_notebook
+   ../visualizing/transfer_function_helper
    ../analyzing/analysis_modules/sunyaev_zeldovich
    fits_radio_cubes
    fits_xray_images
    tipsy_notebook
    halo_analysis_example
+   ../visualizing/volume_rendering_tutorial

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -11,7 +11,7 @@
 # objects, you could set it the way you would a cutting plane -- but for this
 # dataset, we'll just choose an off-axis value at random.  This gets normalized
 # automatically.
-L = [0.5, 0.4, 0.7]
+L = [1.0, 0.0, 0.0]
 
 # Our "width" is the width of the image plane as well as the depth.
 # The first element is the left to right width, the second is the
@@ -26,7 +26,7 @@
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
 # slightly lower quality image.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name
 # relating to what our dataset is called.

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/offaxis_projection_colorbar.py
--- a/doc/source/cookbook/offaxis_projection_colorbar.py
+++ b/doc/source/cookbook/offaxis_projection_colorbar.py
@@ -32,7 +32,7 @@
 # Also note that we set the field which we want to project as "density", but
 # really we could use any arbitrary field like "temperature", "metallicity"
 # or whatever.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Image is now an NxN array representing the intensities of the various pixels.
 # And now, we call our direct image saver.  We save the log of the result.

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -3,44 +3,51 @@
 
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# We start by building a transfer function, and initializing a camera.
+# We start by building a default volume rendering scene 
 
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", sigma_clip=6.0)
 
-# Now let's add some isocontours, and take a snapshot.
-
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
+sc.camera.set_width(ds.arr(0.1,'code_length'))
+tf = sc.get_source(0).transfer_function 
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+        alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
+tf = sc.get_source(0).transfer_function 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
 tf.grey_opacity = True
-cam.snapshot("v3.png", clip_ratio=6.0)
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v5.png", clip_ratio=6.0)
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -48,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v6.png", clip_ratio=6.0)
+sc.render()
+sc.save("v6.png", sigma_clip=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-cam.snapshot("v7.png", clip_ratio=6.0)
+sc.render()
+sc.save("v7.png", sigma_clip=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,61 +1,22 @@
 import yt
 import numpy as np
+from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
+sc = yt.create_scene(ds, ('gas','density'))
+sc.get_source(0).transfer_function.grey_opacity = True
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+sc.annotate_domain(ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
-
-# Add three Gaussians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# gist_stern colormap.
-tf.add_layers(3, w=0.02, colormap="gist_stern")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-im = cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
-
-# Add the domain edges, with an alpha blending of 0.3:
-nim = cam.draw_domain(im, alpha=0.3)
-nim.write_png('%s_vr_domain.png' % ds)
-
-# Add the grids, colored by the grid level with the algae colormap
-nim = cam.draw_grids(im, alpha=0.3, cmap='algae')
-nim.write_png('%s_vr_grids.png' % ds)
+sc.annotate_grids(ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
-cam.draw_coordinate_vectors(nim)
-nim.write_png("%s_vr_vectors.png" % ds)
+sc.annotate_axes()
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/sigma_clip.py
--- /dev/null
+++ b/doc/source/cookbook/sigma_clip.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
+
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
+
+# Render and save output images with different levels of sigma clipping.
+# Sigma clipping removes the highest intensity pixels in a volume render, 
+# which affects the overall contrast of the image.
+sc = yt.create_scene(ds, field=('gas', 'density'))
+sc.render()
+sc.save('clip_0.png')
+sc.save('clip_2.png', sigma_clip=2)
+sc.save('clip_4.png', sigma_clip=4)
+sc.save('clip_6.png', sigma_clip=6)

diff -r f5b9cba0cd9d15d7303f4ed881e4f44e42c95f08 -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -1,48 +1,10 @@
 import yt
-import numpy as np
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)))
-
-# Add five Gaussians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# spectral colormap.
-tf.add_layers(5, w=0.02, colormap="spectral")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
+# This will save a file named 'data0043_Render_density.png' to disk.
+im, sc = yt.volume_render(ds, field=('gas', 'density'))

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/2c8190a0bf6e/
Changeset:   2c8190a0bf6e
Branch:      yt
User:        MatthewTurk
Date:        2015-12-11 21:54:43+00:00
Summary:     Turn off initialization checks.
Affected #:  1 file

diff -r 76e10575d7fdd99f509ce9cebda0ce9caa17e771 -r 2c8190a0bf6ef3a20a7953e1bff81f4688c0bc69 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -42,6 +42,7 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
+@cython.initializedcheck(False)
 cdef np.float64_t r2dist(np.float64_t ppos[3],
                          np.float64_t cpos[3],
                          np.float64_t DW[3],
@@ -95,6 +96,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_octree(self, OctreeContainer mesh_octree,
                      np.int64_t [:] mdom_ind,
                      np.float64_t[:,:] positions,
@@ -268,6 +270,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_particles(self, OctreeContainer particle_octree,
                      np.ndarray[np.int64_t, ndim=1] pdom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
@@ -452,6 +455,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_grid(self, gobj,
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None):
@@ -508,6 +512,10 @@
         if self.curn < self.maxn:
             self.curn += 1
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
@@ -568,6 +576,10 @@
                     pos[j] = ppos[pn, j]
                 self.neighbor_eval(pn, pos, cpos)
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields,
@@ -610,6 +622,10 @@
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
                                np.float64_t[:,:] ppos,
                                np.float64_t **fields,
@@ -670,6 +686,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -728,6 +745,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -761,6 +779,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -796,6 +815,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -816,6 +836,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):


https://bitbucket.org/yt_analysis/yt/commits/0447987365b1/
Changeset:   0447987365b1
Branch:      yt
User:        xarthisius
Date:        2015-12-14 19:12:55+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1877)

[WIP] Converting many pointers to memoryviews
Affected #:  2 files

diff -r 1dab4da5db3f6996f4301481a384c8a4fbc79298 -r 0447987365b19d53451bd04609832f070f7362b2 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -41,31 +41,27 @@
     cdef int maxn
     cdef int curn
     cdef bint periodicity[3]
-    cdef np.int64_t *doffs
-    cdef np.int64_t *pinds
-    cdef np.int64_t *pcounts
-    cdef np.float64_t *ppos
     # Note that we are preallocating here, so this is *not* threadsafe.
     cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
-                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields, 
-                               np.int64_t *doffs, np.int64_t **nind, 
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind, 
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize, np.float64_t *oct_left_edges,
-                               np.float64_t *oct_dds)
+                               int *nsize, np.float64_t[:,:] oct_left_edges,
+                               np.float64_t[:,:] oct_dds)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
                              Oct **oct = ?, int extra_layer = ?)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
-                               np.float64_t *ppos,
+                               np.float64_t[:,:] ppos,
                                np.float64_t **fields, 
-                               np.int64_t *doffs, np.int64_t **nind, 
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind, 
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
@@ -76,13 +72,13 @@
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
-                            np.int64_t *doffs,
-                            np.int64_t *pcounts,
-                            np.int64_t *pinds,
-                            np.float64_t *ppos,
+                            np.int64_t[:] doffs,
+                            np.int64_t[:] pcounts,
+                            np.int64_t[:] pinds,
+                            np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
-                            np.float64_t* oct_left_edges,
-                            np.float64_t* oct_dds)
+                            np.float64_t[:,:] oct_left_edges,
+                            np.float64_t[:,:] oct_dds)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields)

diff -r 1dab4da5db3f6996f4301481a384c8a4fbc79298 -r 0447987365b19d53451bd04609832f070f7362b2 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -42,6 +42,7 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
+@cython.initializedcheck(False)
 cdef np.float64_t r2dist(np.float64_t ppos[3],
                          np.float64_t cpos[3],
                          np.float64_t DW[3],
@@ -80,6 +81,7 @@
         self.nvals = nvals
         self.nfields = nfields
         self.maxn = max_neighbors
+
         self.neighbors = <NeighborList *> malloc(
             sizeof(NeighborList) * self.maxn)
         self.neighbor_reset()
@@ -94,16 +96,17 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_octree(self, OctreeContainer mesh_octree,
-                     np.ndarray[np.int64_t, ndim=1] mdom_ind,
-                     np.ndarray[np.float64_t, ndim=2] positions,
-                     np.ndarray[np.float64_t, ndim=2] oct_positions,
+                     np.int64_t [:] mdom_ind,
+                     np.float64_t[:,:] positions,
+                     np.float64_t[:,:] oct_positions,
                      fields = None, int domain_id = -1,
                      int domain_offset = 0,
                      periodicity = (True, True, True),
                      index_fields = None,
                      OctreeContainer particle_octree = None,
-                     np.ndarray[np.int64_t, ndim=1] pdom_ind = None,
+                     np.int64_t [:] pdom_ind = None,
                      geometry = "cartesian"):
         # This will be a several-step operation.
         #
@@ -134,10 +137,10 @@
             pdom_ind = mdom_ind
         cdef int nf, i, j, n
         cdef int dims[3]
+        cdef np.float64_t[:] *field_check
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
         cdef np.float64_t pos[3]
-        cdef np.float64_t *ppos
         cdef np.float64_t dds[3]
         cdef np.float64_t **octree_field_pointers
         cdef int nsize = 0
@@ -146,15 +149,12 @@
         cdef Oct *oct
         cdef np.int64_t numpart, offset, local_ind, poff
         cdef np.int64_t moff_p, moff_m
-        cdef np.int64_t *doffs
-        cdef np.int64_t *pinds
-        cdef np.int64_t *pcounts
-        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
-        cdef np.ndarray[np.int64_t, ndim=2] doff_m
+        cdef np.int64_t[:] pind, doff, pdoms, pcount
+        cdef np.int64_t[:,:] doff_m
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=4] iarr
-        cdef np.ndarray[np.float64_t, ndim=2] cart_positions
-        cdef np.ndarray[np.float64_t, ndim=2] oct_left_edges, oct_dds
+        cdef np.float64_t[:,:] cart_positions
+        cdef np.float64_t[:,:] oct_left_edges, oct_dds
         cdef OctInfo oinfo
         if geometry == "cartesian":
             self.pos_setup = cart_coord_setup
@@ -245,11 +245,6 @@
         #raise RuntimeError
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
-        ppos = <np.float64_t *> positions.data
-        cart_pos = <np.float64_t *> cart_positions.data
-        doffs = <np.int64_t*> doff.data
-        pinds = <np.int64_t*> pind.data
-        pcounts = <np.int64_t*> pcount.data
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
         cdef int nproc = 0
@@ -263,10 +258,10 @@
             if offset < 0: continue
             nproc += 1
             self.neighbor_process(
-                dims, moi.left_edge, moi.dds, cart_pos, field_pointers, doffs,
-                &nind, pinds, pcounts, offset, index_field_pointers,
-                particle_octree, domain_id, &nsize, &oct_left_edges[0, 0],
-                &oct_dds[0, 0])
+                dims, moi.left_edge, moi.dds, cart_positions, field_pointers, doff,
+                &nind, pind, pcount, offset, index_field_pointers,
+                particle_octree, domain_id, &nsize, oct_left_edges,
+                oct_dds)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -275,6 +270,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_particles(self, OctreeContainer particle_octree,
                      np.ndarray[np.int64_t, ndim=1] pdom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
@@ -293,7 +289,6 @@
         cdef int dims[3]
         cdef np.float64_t **field_pointers
         cdef np.float64_t *field_vals
-        cdef np.float64_t *ppos
         cdef np.float64_t dds[3]
         cdef np.float64_t pos[3]
         cdef np.float64_t **octree_field_pointers
@@ -304,10 +299,7 @@
         cdef Oct **neighbors = NULL
         cdef np.int64_t nneighbors, numpart, offset, local_ind
         cdef np.int64_t moff_p, moff_m, pind0, poff
-        cdef np.int64_t *doffs
-        cdef np.int64_t *pinds
-        cdef np.int64_t *pcounts
-        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.int64_t[:] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         cdef np.ndarray[np.float64_t, ndim=2] cart_positions
         if geometry == "cartesian":
@@ -376,11 +368,6 @@
         #raise RuntimeError
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
-        ppos = <np.float64_t *> positions.data
-        cart_pos = <np.float64_t *> cart_positions.data
-        doffs = <np.int64_t*> doff.data
-        pinds = <np.int64_t*> pind.data
-        pcounts = <np.int64_t*> pcount.data
         cdef int maxnei = 0
         cdef int nproc = 0
         for i in range(doff.shape[0]):
@@ -392,8 +379,8 @@
                 pind0 = pind[doff[i] + j]
                 for k in range(3):
                     pos[k] = positions[pind0, k]
-                self.neighbor_process_particle(pos, cart_pos, field_pointers,
-                            doffs, &nind, pinds, pcounts, pind0,
+                self.neighbor_process_particle(pos, cart_positions, field_pointers,
+                            doff, &nind, pind, pcount, pind0,
                             NULL, particle_octree, domain_id, &nsize)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
@@ -468,6 +455,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     def process_grid(self, gobj,
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None):
@@ -524,16 +512,20 @@
         if self.curn < self.maxn:
             self.curn += 1
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
-                            np.int64_t *doffs,
-                            np.int64_t *pcounts,
-                            np.int64_t *pinds,
-                            np.float64_t *ppos,
+                            np.int64_t[:] doffs,
+                            np.int64_t[:] pcounts,
+                            np.int64_t[:] pinds,
+                            np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
-                            np.float64_t *oct_left_edges,
-                            np.float64_t *oct_dds,
+                            np.float64_t[:,:] oct_left_edges,
+                            np.float64_t[:,:] oct_dds,
                             ):
         # We are now given the number of neighbors, the indices into the
         # domains for them, and the number of particles for each.
@@ -545,7 +537,7 @@
             if nind[ni] == -1: continue
             # terminate early if all 8 corners of oct are farther away than
             # most distant currently known neighbor
-            if oct_left_edges != NULL and self.curn == self.maxn:
+            if oct_left_edges != None and self.curn == self.maxn:
                 r2_trunc = self.neighbors[self.curn - 1].r2
                 # iterate over each dimension in the outer loop so we can
                 # consolidate temporary storage
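
With memoryviews, the "no data" sentinel changes from NULL to None: a
memoryview argument may legally be None, and the comparison above (written
!= None here; "is not None" is the more idiomatic spelling) plays the role
the NULL check played for the raw pointer. A small sketch of the guard
(function name illustrative):

    cimport numpy as np

    cdef double first_edge(np.float64_t[:, :] edges):
        # A memoryview argument may be None; guard before indexing,
        # exactly as the pointer version guarded against NULL.
        if edges is None:
            return -1.0  # illustrative "no data" sentinel
        return edges[0, 0]
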
@@ -555,8 +547,8 @@
                 r2 = 0.0
                 for k in range(3):
                     # We start at left edge, then do halfway, then right edge.
-                    ex[0] = oct_left_edges[3*nind[ni] + k]
-                    ex[1] = ex[0] + oct_dds[3*nind[ni] + k]
+                    ex[0] = oct_left_edges[nind[ni], k]
+                    ex[1] = ex[0] + oct_dds[nind[ni], k]
                     # There are three possibilities; we are between, left-of,
                     # or right-of the extrema.  Thanks to
                     # http://stackoverflow.com/questions/5254838/calculating-distance-between-a-point-and-a-rectangular-box-nearest-point
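
The ex[0]/ex[1] loop above implements the linked point-to-box distance: in
each dimension, clamp the query point to the oct's extent and accumulate
the squared shortfall; if that minimum squared distance already exceeds
r2_trunc, the oct cannot hold a closer neighbor and is skipped. A plain
Python sketch of the same computation (names hypothetical):

    def min_dist2_point_box(cpos, left_edge, dds):
        # Minimum squared distance from point cpos to the axis-aligned
        # box [left_edge, left_edge + dds]; zero if the point is inside.
        r2 = 0.0
        for k in range(3):
            lo = left_edge[k]
            hi = lo + dds[k]
            if cpos[k] < lo:
                d = lo - cpos[k]
            elif cpos[k] > hi:
                d = cpos[k] - hi
            else:
                d = 0.0
            r2 += d * d
        return r2
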
@@ -581,19 +573,23 @@
             for i in range(pc):
                 pn = pinds[offset + i]
                 for j in range(3):
-                    pos[j] = ppos[pn * 3 + j]
+                    pos[j] = ppos[pn, j]
                 self.neighbor_eval(pn, pos, cpos)
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
-                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t dds[3], np.float64_t[:,:] ppos,
                                np.float64_t **fields,
-                               np.int64_t *doffs, np.int64_t **nind,
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t [:] doffs, np.int64_t **nind,
+                               np.int64_t [:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize, np.float64_t *oct_left_edges,
-                               np.float64_t *oct_dds):
+                               int *nsize, np.float64_t[:,:] oct_left_edges,
+                               np.float64_t[:,:] oct_dds):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
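
The priority heap mentioned in the comment keeps only the maxn closest
particles seen so far, so each new candidate is compared against the
farthest neighbor currently kept. The commit's implementation is a C-level
NeighborList array; a minimal Python sketch of the same idea with heapq
(a max-heap via negated distances, not the code in this changeset):

    import heapq

    def update_neighbors(heap, maxn, pn, r2):
        # heap holds (-r2, particle index); the root is the *farthest*
        # kept neighbor, so a candidate only has to beat the root.
        if len(heap) < maxn:
            heapq.heappush(heap, (-r2, pn))
        elif r2 < -heap[0][0]:
            heapq.heapreplace(heap, (-r2, pn))
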
@@ -626,11 +622,15 @@
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void neighbor_process_particle(self, np.float64_t cpos[3],
-                               np.float64_t *ppos,
+                               np.float64_t[:,:] ppos,
                                np.float64_t **fields,
-                               np.int64_t *doffs, np.int64_t **nind,
-                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t[:] doffs, np.int64_t **nind,
+                               np.int64_t[:] pinds, np.int64_t[:] pcounts,
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
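
Because ppos is now a two-dimensional memoryview rather than a flat
pointer, the manual stride arithmetic seen in the earlier hunks
(ppos[pn * 3 + j]) becomes plain two-dimensional indexing (ppos[pn, j]);
the view carries its own shape and strides. A short sketch of the
equivalence (standalone, illustrative):

    cimport numpy as np

    cdef np.float64_t get_coord(np.float64_t[:, :] ppos,
                                np.int64_t pn, int j):
        # Row pn, column j -- the view's strides replace the manual
        # pn * 3 + j offset used with the raw pointer.
        return ppos[pn, j]
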
@@ -649,7 +649,7 @@
         nneighbors = self.neighbor_search(opos, octree,
                         nind, nsize, nneighbors, domain_id, &oct, 0)
         self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos,
-                           opos, NULL, NULL)
+                           opos, None, None)
         self.process(offset, i, j, k, dim, opos, fields, index_fields)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
@@ -686,6 +686,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -744,6 +745,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -777,6 +779,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -812,6 +815,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
@@ -832,6 +836,7 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
+    @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because you have the commit notification service enabled
and are the addressed recipient of this email.


