[yt-svn] commit/yt: 14 new changesets

commits-noreply@bitbucket.org
Fri Nov 14 11:32:42 PST 2014


14 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/573d154d8d17/
Changeset:   573d154d8d17
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-08 21:39:20+00:00
Summary:     Draft initial implementation of NN calculator field.
Affected #:  5 files

diff -r 49d4aca8c7e3f8a83057956b39a5840785a0daa5 -r 573d154d8d17f1a71e1b599a9a9bfd4dcb4e6322 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -208,6 +208,46 @@
             vals = np.asfortranarray(vals)
         return vals
 
+    def particle_operation(self, positions, fields = None,
+            index_fields = None, method = None, nneighbors = 64):
+        # Here we perform our particle deposition.
+        positions.convert_to_units("code_length")
+        morton = compute_morton(
+            positions[:,0], positions[:,1], positions[:,2],
+            self.ds.domain_left_edge,
+            self.ds.domain_right_edge)
+        morton.sort()
+        particle_octree = ParticleOctreeContainer([1, 1, 1],
+            self.ds.domain_left_edge,
+            self.ds.domain_right_edge,
+            over_refine = self._oref)
+        particle_octree.n_ref = nneighbors / 2
+        particle_octree.add(morton)
+        particle_octree.finalize()
+        pdom_ind = particle_octree.domain_ind(self.selector)
+        if fields is None: fields = []
+        if index_fields is None: index_fields = []
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        mdom_ind = self.domain_ind
+        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
+        op = cls(nvals, len(fields), nneighbors)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_particles(particle_octree, pdom_ind, positions, 
+            fields, self.domain_id, self._domain_offset, self.ds.periodicity,
+            index_fields, self.ds.geometry)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
     @cell_count_cache
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(dobj.selector, domain_id = self.domain_id,
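
For reference, the Morton keys sorted above come from interleaving the bits of the three integer cell coordinates, so particles that are nearby in space end up nearby in the key ordering and therefore in the same octs. A minimal pure-Python sketch of the idea (illustrative only; yt's compute_morton is a compiled routine and its exact bit layout may differ):

    import numpy as np

    def morton_key(ix, iy, iz, bits=20):
        # Interleave the bits of three non-negative integer coordinates.
        key = 0
        for b in range(bits):
            key |= ((ix >> b) & 1) << (3 * b + 2)
            key |= ((iy >> b) & 1) << (3 * b + 1)
            key |= ((iz >> b) & 1) << (3 * b)
        return key

    def positions_to_keys(pos, left_edge, right_edge, bits=20):
        # Map positions onto a 2**bits integer grid, then build one key each.
        cells = (pos - left_edge) / (right_edge - left_edge) * (1 << bits)
        cells = np.clip(cells.astype("int64"), 0, (1 << bits) - 1)
        return np.array([morton_key(ix, iy, iz) for ix, iy, iz in cells])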

diff -r 49d4aca8c7e3f8a83057956b39a5840785a0daa5 -r 573d154d8d17f1a71e1b599a9a9bfd4dcb4e6322 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -154,6 +154,9 @@
     def smooth(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
 
+    def particle_operation(self, *args, **kwargs):
+        return None
+
     def _read_data(self, field_name):
         self.requested.append(field_name)
         if hasattr(self.ds, "field_info"):

diff -r 49d4aca8c7e3f8a83057956b39a5840785a0daa5 -r 573d154d8d17f1a71e1b599a9a9bfd4dcb4e6322 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -567,3 +567,19 @@
                        units = field_units)
     return [field_name]
 
+def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64):
+    field_name = (ptype, "nearest_neighbor_%s" % (nneighbors))
+    def _nth_neighbor(field, data):
+        pos = data[ptype, coord_name].in_units("code_length")
+        distances = 0.0 * pos[:,0]
+        data.particle_operation(pos, [distances],
+                         method="nth_neighbor",
+                         nneighbors = nneighbors)
+        # Now some quick unit conversions.
+        return distances
+    registry.add_field(field_name, function = _nth_neighbor,
+                       validators = [ValidateSpatial(0)],
+                       particle_type = True,
+                       units = "code_length")
+    return [field_name]
+

diff -r 49d4aca8c7e3f8a83057956b39a5840785a0daa5 -r 573d154d8d17f1a71e1b599a9a9bfd4dcb4e6322 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -57,6 +57,13 @@
                                np.int64_t *nind, np.int64_t *doffs,
                                np.int64_t *pinds, np.int64_t *pcounts,
                                np.int64_t offset, np.float64_t **index_fields)
+    cdef void neighbor_process_particle(self, np.float64_t cpos[3],
+                               np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset,
+                               np.float64_t **index_fields)
     cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
                             np.float64_t cpos[3])
     cdef void neighbor_reset(self)

diff -r 49d4aca8c7e3f8a83057956b39a5840785a0daa5 -r 573d154d8d17f1a71e1b599a9a9bfd4dcb4e6322 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -274,7 +274,145 @@
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
             free(nind)
-        
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_particles(self, OctreeContainer particle_octree,
+                     np.ndarray[np.int64_t, ndim=1] pdom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0,
+                     periodicity = (True, True, True),
+                     index_fields = None,
+                     geometry = "cartesian"):
+        cdef int nf, i, j, dims[3], n
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
+        cdef np.float64_t **octree_field_pointers
+        cdef int nsize = 0
+        cdef np.int64_t *nind = NULL
+        cdef OctInfo moi, poi
+        cdef Oct *oct, **neighbors = NULL
+        cdef np.int64_t nneighbors, numpart, offset, local_ind
+        cdef np.int64_t moff_p, moff_m
+        cdef np.int64_t *doffs, *pinds, *pcounts, poff
+        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        cdef np.ndarray[np.float64_t, ndim=4] iarr
+        cdef np.ndarray[np.float64_t, ndim=2] cart_positions
+        if geometry == "cartesian":
+            self.pos_setup = cart_coord_setup
+            cart_positions = positions
+        elif geometry == "spherical":
+            self.pos_setup = spherical_coord_setup
+            cart_positions = np.empty((positions.shape[0], 3), dtype="float64")
+
+            cart_positions[:,0] = positions[:,0] * \
+                                  np.sin(positions[:,1]) * \
+                                  np.cos(positions[:,2])
+            cart_positions[:,1] = positions[:,0] * \
+                                  np.sin(positions[:,1]) * \
+                                  np.sin(positions[:,2])
+            cart_positions[:,2] = positions[:,0] * \
+                                  np.cos(positions[:,1])
+            periodicity = (False, False, False)
+        else:
+            raise NotImplementedError
+        numpart = positions.shape[0]
+        pcount = np.zeros_like(pdom_ind)
+        doff = np.zeros_like(pdom_ind) - 1
+        moff_p = particle_octree.get_domain_offset(domain_id + domain_offset)
+        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        nf = len(fields)
+        if fields is None:
+            fields = []
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        if index_fields is None:
+            index_fields = []
+        nf = len(index_fields)
+        index_field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            iarr = index_fields[i]
+            index_field_pointers[i] = <np.float64_t *> iarr.data
+        for i in range(3):
+            self.DW[i] = (particle_octree.DRE[i] - particle_octree.DLE[i])
+            self.periodicity[i] = periodicity[i]
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = particle_octree.get(pos)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            # Note that this has to be our local index, not our in-file index.
+            # This is the particle count, which we'll use once we have sorted
+            # the particles to calculate the offsets into each oct's particles.
+            offset = oct.domain_ind - moff_p
+            pcount[offset] += 1
+            pdoms[i] = offset # We store the *actual* offset.
+        # Now we have oct assignments.  Let's sort them.
+        # Note that what we will be providing to our processing functions will
+        # actually be indirectly-sorted fields.  This preserves memory at the
+        # expense of additional pointer lookups.
+        pind = np.argsort(pdoms)
+        pind = np.asarray(pind, dtype='int64', order='C')
+        # So what this means is that we now have all the oct-0 particle indices
+        # in order, then the oct-1, etc etc.
+        # This now gives us the indices to the particles for each domain.
+        for i in range(positions.shape[0]):
+            # This value, poff, is the index of the particle in the *unsorted*
+            # arrays.
+            poff = pind[i] 
+            offset = pdoms[poff] 
+            # If we have yet to assign the starting index to this oct, we do so
+            # now.
+            if doff[offset] < 0: doff[offset] = i
+        #print domain_id, domain_offset, moff_p, moff_m
+        #raise RuntimeError
+        # Now doff is full of offsets to the first entry in the pind that
+        # refers to that oct's particles.
+        ppos = <np.float64_t *> positions.data
+        cart_pos = <np.float64_t *> cart_positions.data
+        doffs = <np.int64_t*> doff.data
+        pinds = <np.int64_t*> pind.data
+        pcounts = <np.int64_t*> pcount.data
+        nsize = 27
+        nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
+        cdef int maxnei = 0
+        cdef int nproc = 0
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = particle_octree.get(pos, &poi)
+            neighbors = particle_octree.neighbors(&poi, &nneighbors, oct)
+            if nneighbors > maxnei:
+                maxnei = nneighbors
+            # Now we have all our neighbors.  And, we should be set for what
+            # else we need to do.
+            if nneighbors > nsize:
+                nind = <np.int64_t *> realloc(
+                    nind, sizeof(np.int64_t)*nneighbors)
+                nsize = nneighbors
+            for j in range(nneighbors):
+                # Particle octree neighbor indices
+                nind[j] = neighbors[j].domain_ind - moff_p
+                for n in range(j):
+                    if nind[j] == nind[n]:
+                        nind[j] = -1
+                    break
+            # This is allocated by the neighbors function, so we deallocate it.
+            free(neighbors)
+            nproc += 1
+            self.neighbor_process_particle(pos, cart_pos, field_pointers,
+                        nneighbors, nind, doffs, pinds, pcounts, i,
+                        index_field_pointers)
+        #print "VISITED", visited.sum(), visited.size,
+        #print 100.0*float(visited.sum())/visited.size
+        if nind != NULL:
+            free(nind)
+
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -402,6 +540,29 @@
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
 
+    cdef void neighbor_process_particle(self, np.float64_t cpos[3],
+                               np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset,
+                               np.float64_t **index_fields):
+        # Note that we assume that fields[0] == smoothing length in the native
+        # units supplied.  We can now iterate over every cell in the block and
+        # every particle to find the nearest.  We will use a priority heap.
+        cdef int i, j, k, ntot, nntot, m, dim[3]
+        cdef np.float64_t opos[3]
+        self.pos_setup(cpos, opos)
+        self.neighbor_find(nneighbors, nind, doffs, pcounts, pinds, ppos, opos)
+        if self.curn <-1*self.maxn:
+            ntot = nntot = 0
+            for m in range(nneighbors):
+                if nind[m] < 0: continue
+                nntot += 1
+                ntot += pcounts[nind[m]]
+            print "SOMETHING WRONG", self.curn, nneighbors, ntot, nntot
+        self.process(offset, i, j, k, dim, opos, fields, index_fields)
+
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
     cdef np.float64_t **fp
     cdef public object vals
@@ -539,3 +700,31 @@
         return
 
 idw_smooth = IDWInterpolationSmooth
+
+cdef class NthNeighborDistanceSmooth(ParticleSmoothOperation):
+
+    def initialize(self):
+        return
+
+    def finalize(self):
+        return
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields,
+                      np.float64_t **index_fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef int n, fi
+        cdef np.float64_t weight, r2, val, hsml, dens, mass, coeff, max_r
+        coeff = 0.0
+        cdef np.int64_t pn
+        # We get back our mass 
+        # rho_i = sum(j = 1 .. n) m_j * W_ij
+        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        # We assume "offset" here is the particle index.
+        fields[0][offset] = max_r
+
+nth_neighbor_smooth = NthNeighborDistanceSmooth
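
The bookkeeping in process_particles above reduces to an indirect sort: assign each particle an oct, argsort those assignments, and record a count and a starting offset per oct. A small NumPy sketch with made-up assignments, in which pdoms, pind, doff and pcount play the same roles as in the Cython code:

    import numpy as np

    # Hypothetical oct assignments for ten particles (the pdoms array built
    # in the loop above); in the real code -1 marks particles outside the
    # current domain.
    pdoms = np.array([2, 0, 1, 2, 0, 1, 1, 2, 0, 2], dtype="int64")

    # Indirect sort: pind[i] is the index, into the *unsorted* particle
    # arrays, of the i-th particle once the particles are grouped by oct.
    pind = np.argsort(pdoms).astype("int64")

    # pcount[o] = number of particles in oct o; doff[o] = first entry of
    # pind belonging to oct o, or -1 if the oct holds no particles.
    n_octs = 3
    pcount = np.bincount(pdoms, minlength=n_octs)
    doff = np.full(n_octs, -1, dtype="int64")
    for i in range(pind.shape[0]):
        o = pdoms[pind[i]]
        if doff[o] < 0:
            doff[o] = i

    # Oct o's particles are then positions[pind[doff[o]:doff[o] + pcount[o]]].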


https://bitbucket.org/yt_analysis/yt/commits/9c33fb91dcb0/
Changeset:   9c33fb91dcb0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-09 12:51:13+00:00
Summary:     Adding fake_particle_ds
Affected #:  1 file

diff -r 573d154d8d17f1a71e1b599a9a9bfd4dcb4e6322 -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -192,6 +192,37 @@
         data.append(gdata)
     return load_amr_grids(data, [32, 32, 32], 1.0)
 
+def fake_particle_ds(
+        fields = ("particle_position_x",
+                  "particle_position_y",
+                  "particle_position_z",
+                  "particle_mass", 
+                  "particle_velocity_x",
+                  "particle_velocity_y",
+                  "particle_velocity_z"),
+        units = ('cm', 'cm', 'cm', 'g', 'cm/s', 'cm/s', 'cm/s'),
+        negative = (False, False, False, False, True, True, True),
+        npart = 16**3, length_unit=1.0):
+    from yt.frontends.stream.api import load_particles
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
+    data = {}
+    for field, offset, u in zip(fields, offsets, units):
+        v = (np.random.random(npart) - offset)
+        data[field] = (v, u)
+    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
+    ds = load_particles(data, 1.0, bbox=bbox)
+    return ds
+
+
+
 def expand_keywords(keywords, full=False):
     """
     expand_keywords is a means for testing all possible keyword
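
A quick way to exercise the new helper, assuming yt resolves the bare field name to the stream frontend's particle type:

    from yt.testing import fake_particle_ds

    ds = fake_particle_ds(npart=8**3)
    dd = ds.all_data()
    print(dd["particle_mass"].shape)   # one entry per particle, here (512,)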


https://bitbucket.org/yt_analysis/yt/commits/de6c15e29d6d/
Changeset:   de6c15e29d6d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-10 15:48:30+00:00
Summary:     Finishing up nearest neighbor.
Affected #:  6 files

diff -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 -r de6c15e29d6d313c07ee196d58823d45592157ee yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -177,7 +177,8 @@
                 self.ds.domain_left_edge,
                 self.ds.domain_right_edge,
                 over_refine = self._oref)
-            particle_octree.n_ref = nneighbors / 2
+            # This should ensure we get everything within on neighbor of home.
+            particle_octree.n_ref = nneighbors * 2
             particle_octree.add(morton)
             particle_octree.finalize()
             pdom_ind = particle_octree.domain_ind(self.selector)
@@ -220,8 +221,8 @@
         particle_octree = ParticleOctreeContainer([1, 1, 1],
             self.ds.domain_left_edge,
             self.ds.domain_right_edge,
-            over_refine = self._oref)
-        particle_octree.n_ref = nneighbors / 2
+            over_refine = 1)
+        particle_octree.n_ref = nneighbors * 2
         particle_octree.add(morton)
         particle_octree.finalize()
         pdom_ind = particle_octree.domain_ind(self.selector)

diff -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 -r de6c15e29d6d313c07ee196d58823d45592157ee yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -71,7 +71,7 @@
                   int max_level = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors,
-                         Oct *o)
+                         Oct *o, bint periodicity[3])
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.

diff -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 -r de6c15e29d6d313c07ee196d58823d45592157ee yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -340,7 +340,8 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o):
+    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o,
+                         bint periodicity[3]):
         cdef Oct* candidate
         nn = 0
         # We are going to do a brute-force search here.
@@ -365,14 +366,23 @@
         my_list = olist = OctList_append(NULL, o)
         for i in range(3):
             npos[0] = (oi.ipos[0] + (1 - i))
-            if npos[0] < 0: npos[0] += ndim[0]
-            if npos[0] >= ndim[0]: npos[0] -= ndim[0]
+            if not periodicity[0] and not \
+               (0 <= npos[0] < ndim[0]):
+                continue
+            elif npos[0] < 0: npos[0] += ndim[0]
+            elif npos[0] >= ndim[0]: npos[0] -= ndim[0]
             for j in range(3):
                 npos[1] = (oi.ipos[1] + (1 - j))
-                if npos[1] < 0: npos[1] += ndim[1]
-                if npos[1] >= ndim[1]: npos[1] -= ndim[1]
+                if not periodicity[1] and not \
+                   (0 <= npos[1] < ndim[1]):
+                    continue
+                elif npos[1] < 0: npos[1] += ndim[1]
+                elif npos[1] >= ndim[1]: npos[1] -= ndim[1]
                 for k in range(3):
                     npos[2] = (oi.ipos[2] + (1 - k))
+                    if not periodicity[2] and not \
+                       (0 <= npos[2] < ndim[2]):
+                        continue
                     if npos[2] < 0: npos[2] += ndim[2]
                     if npos[2] >= ndim[2]: npos[2] -= ndim[2]
                     # Now we have our npos, which we just need to find.
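
The new periodicity argument changes how candidate neighbor positions are handled: along periodic axes an out-of-range position wraps around, while along non-periodic axes it is skipped. A plain-Python sketch of that wrap-or-skip rule (names are illustrative):

    def neighbor_cells(ipos, ndim, periodicity):
        # Yield the 3x3x3 neighborhood of the integer position ipos, wrapping
        # along periodic axes and skipping out-of-bounds cells otherwise.
        for di in (1, 0, -1):
            for dj in (1, 0, -1):
                for dk in (1, 0, -1):
                    npos = [ipos[0] + di, ipos[1] + dj, ipos[2] + dk]
                    keep = True
                    for ax in range(3):
                        if 0 <= npos[ax] < ndim[ax]:
                            continue
                        if not periodicity[ax]:
                            keep = False
                            break
                        npos[ax] %= ndim[ax]
                    if keep:
                        yield tuple(npos)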

diff -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 -r de6c15e29d6d313c07ee196d58823d45592157ee yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -248,7 +248,8 @@
             if offset < 0: continue
             # These will be PARTICLE octree neighbors.
             oct = particle_octree.get(pos, &poi)
-            neighbors = particle_octree.neighbors(&poi, &nneighbors, oct)
+            neighbors = particle_octree.neighbors(&poi, &nneighbors, oct,
+                            self.periodicity)
             if nneighbors > maxnei:
                 maxnei = nneighbors
             # Now we have all our neighbors.  And, we should be set for what
@@ -263,7 +264,6 @@
                 for n in range(j):
                     if nind[j] == nind[n]:
                         nind[j] = -1
-                    break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
             nproc += 1
@@ -286,7 +286,7 @@
                      periodicity = (True, True, True),
                      index_fields = None,
                      geometry = "cartesian"):
-        cdef int nf, i, j, dims[3], n
+        cdef int nf, i, j, k, dims[3], n
         cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
         cdef np.float64_t **octree_field_pointers
         cdef int nsize = 0
@@ -294,7 +294,7 @@
         cdef OctInfo moi, poi
         cdef Oct *oct, **neighbors = NULL
         cdef np.int64_t nneighbors, numpart, offset, local_ind
-        cdef np.int64_t moff_p, moff_m
+        cdef np.int64_t moff_p, moff_m, pind0
         cdef np.int64_t *doffs, *pinds, *pcounts, poff
         cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
@@ -379,14 +379,20 @@
         pinds = <np.int64_t*> pind.data
         pcounts = <np.int64_t*> pcount.data
         nsize = 27
+        cdef int nactual
         nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
         cdef int maxnei = 0
         cdef int nproc = 0
-        for i in range(positions.shape[0]):
+        for i in range(doff.shape[0]):
+            if doff[i] < 0: continue
+            offset = pind[doff[i]]
             for j in range(3):
-                pos[j] = positions[i, j]
+                pos[j] = positions[offset, j]
             oct = particle_octree.get(pos, &poi)
-            neighbors = particle_octree.neighbors(&poi, &nneighbors, oct)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            neighbors = particle_octree.neighbors(&poi, &nneighbors, oct,
+                            self.periodicity)
             if nneighbors > maxnei:
                 maxnei = nneighbors
             # Now we have all our neighbors.  And, we should be set for what
@@ -395,19 +401,23 @@
                 nind = <np.int64_t *> realloc(
                     nind, sizeof(np.int64_t)*nneighbors)
                 nsize = nneighbors
+            nactual = 0
             for j in range(nneighbors):
                 # Particle octree neighbor indices
                 nind[j] = neighbors[j].domain_ind - moff_p
                 for n in range(j):
                     if nind[j] == nind[n]:
                         nind[j] = -1
-                    break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
             nproc += 1
-            self.neighbor_process_particle(pos, cart_pos, field_pointers,
-                        nneighbors, nind, doffs, pinds, pcounts, i,
-                        index_field_pointers)
+            for j in range(pcount[i]):
+                pind0 = pind[doff[i] + j]
+                for k in range(3):
+                    pos[k] = positions[pind0, k]
+                self.neighbor_process_particle(pos, cart_pos, field_pointers,
+                            nneighbors, nind, doffs, pinds, pcounts, pind0,
+                            index_field_pointers)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -435,7 +445,7 @@
     cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
                             np.float64_t cpos[3]):
         cdef NeighborList *cur
-        cdef int i
+        cdef int i, j
         # _c means candidate (what we're evaluating)
         # _o means other (the item in the list)
         cdef np.float64_t r2_c, r2_o
@@ -460,13 +470,14 @@
         # Early terminate
         if r2_c < 0: return
         pn_c = pn
-        for i in range((self.curn - 1), -1, -1):
+        for j in range(1, self.maxn + 1):
+            i = self.maxn - j
             # First we evaluate against i.  If our candidate radius is greater
             # than the one we're inspecting, we quit.
             cur = &self.neighbors[i]
             r2_o = cur.r2
             pn_o = cur.pn
-            if r2_c >= r2_o:
+            if r2_c > r2_o:
                 break
             # Now we know we need to swap them.  First we assign our candidate
             # values to cur.
@@ -551,16 +562,11 @@
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
         cdef int i, j, k, ntot, nntot, m, dim[3]
+        i = j = k = 0
+        dim[0] = dim[1] = dim[2] = 1
         cdef np.float64_t opos[3]
         self.pos_setup(cpos, opos)
         self.neighbor_find(nneighbors, nind, doffs, pcounts, pinds, ppos, opos)
-        if self.curn <-1*self.maxn:
-            ntot = nntot = 0
-            for m in range(nneighbors):
-                if nind[m] < 0: continue
-                nntot += 1
-                ntot += pcounts[nind[m]]
-            print "SOMETHING WRONG", self.curn, nneighbors, ntot, nntot
         self.process(offset, i, j, k, dim, opos, fields, index_fields)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
@@ -715,16 +721,9 @@
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
-        # We have our i, j, k for our cell, as well as the cell position.
-        # We also have a list of neighboring particles with particle numbers.
-        cdef int n, fi
-        cdef np.float64_t weight, r2, val, hsml, dens, mass, coeff, max_r
-        coeff = 0.0
-        cdef np.int64_t pn
-        # We get back our mass 
-        # rho_i = sum(j = 1 .. n) m_j * W_ij
+        cdef np.float64_t max_r
+        # We assume "offset" here is the particle index.
         max_r = sqrt(self.neighbors[self.curn-1].r2)
-        # We assume "offset" here is the particle index.
         fields[0][offset] = max_r
 
 nth_neighbor_smooth = NthNeighborDistanceSmooth
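
The neighbor_eval rework above changes how a candidate is slotted into the fixed-size neighbor list. The invariant it maintains is a list of at most maxn (r2, pn) pairs kept sorted by squared distance, with a candidate bubbling up from the far end; a rough plain-Python sketch (names here are illustrative, not yt's):

    def insert_neighbor(neigh, maxn, pn, r2):
        # neigh is a list of (r2, pn) pairs sorted by r2, capped at maxn.
        if len(neigh) < maxn:
            neigh.append((r2, pn))
            neigh.sort()
            return
        if r2 > neigh[-1][0]:
            return                      # farther than the current worst
        i = maxn - 1
        while i > 0 and r2 < neigh[i - 1][0]:
            neigh[i] = neigh[i - 1]     # shift the farther entry down
            i -= 1
        neigh[i] = (r2, pn)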

diff -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 -r de6c15e29d6d313c07ee196d58823d45592157ee yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -215,6 +215,9 @@
             offsets.append(0.0)
     data = {}
     for field, offset, u in zip(fields, offsets, units):
+        if "position" in field:
+            v = np.random.normal(npart, 0.5, 0.25)
+            np.clip(v, 0.0, 1.0, v)
         v = (np.random.random(npart) - offset)
         data[field] = (v, u)
     bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])

diff -r 9c33fb91dcb00b7c2f7e6661c51318b6b59f1e90 -r de6c15e29d6d313c07ee196d58823d45592157ee yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -576,7 +576,8 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Now we have our primary oct, so we will get its neighbors.
-            neighbors = octree.neighbors(&oi, &nneighbors, oct)
+            neighbors = octree.neighbors(&oi, &nneighbors, oct,
+                                self.periodicity)
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.
             if nneighbors > nsize:


https://bitbucket.org/yt_analysis/yt/commits/91ad5509b199/
Changeset:   91ad5509b199
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-11 13:47:08+00:00
Summary:     Adding neighbor search test file.
Affected #:  1 file

diff -r de6c15e29d6d313c07ee196d58823d45592157ee -r 91ad5509b1995d6d1790dc561f227fb8895b1f0c yt/geometry/tests/test_neighbor_search.py
--- /dev/null
+++ b/yt/geometry/tests/test_neighbor_search.py
@@ -0,0 +1,39 @@
+from yt.fields.particle_fields import \
+    add_nearest_neighbor_field
+from yt.testing import *
+
+def test_neighbor_search():
+    np.random.seed(0x4d3d3d3)
+    ds = fake_particle_ds(npart = 16**3)
+    ds.periodicity = (True, True, True)
+    ds.index
+    fn, = add_nearest_neighbor_field("all", "particle_position", ds)
+    dd = ds.all_data()
+    nearest_neighbors = dd[fn]
+    pos = dd["particle_position"]
+    all_neighbors = np.zeros_like(nearest_neighbors)
+    any_eq = np.zeros(pos.shape[0], dtype='bool')
+    min_in = np.zeros(pos.shape[0], dtype='int64')
+    for i in xrange(pos.shape[0]):
+        dd.set_field_parameter("center", pos[i,:])
+        #radius = dd["particle_radius"]
+        #radius.sort()
+        r2 = (pos[:,0]*pos[:,0])*0
+        for j in range(3):
+            DR = (pos[i,j] - pos[:,j])
+            DRo = DR.copy()
+            DR[DRo >  ds.domain_width[j]/2.0] -= ds.domain_width[j]
+            DR[DRo < -ds.domain_width[j]/2.0] += ds.domain_width[j]
+            r2 += DR*DR
+        radius = np.sqrt(r2)
+        iii = np.argsort(radius)
+        radius.sort()
+        assert(radius[0] == 0.0)
+        all_neighbors[i] = radius[63]
+        any_eq[i] = np.any( np.abs(radius - nearest_neighbors[i]) < 1e-7 )
+        min_in[i] = np.argmin(np.abs(radius - nearest_neighbors[i]))
+        #if i == 34: raise RuntimeError
+        #dd.field_data.pop(("all", "particle_radius"))
+    assert_equal((min_in == 63).sum(), min_in.size)
+    yield assert_equal, (min_in == 63).sum(), min_in.size
+    yield assert_array_almost_equal, nearest_neighbors, all_neighbors
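
The per-axis wrapping inside the loop is the minimum-image convention for distances in a periodic box. The same check as a standalone NumPy sketch (assuming plain arrays, a fully periodic domain, and width as the per-axis domain width):

    import numpy as np

    def periodic_r(p, pos, width):
        # Distance from the point p to every row of pos, wrapping each axis
        # so that no separation exceeds half the domain width.
        dr = pos - p
        dr = np.where(dr >  width / 2.0, dr - width, dr)
        dr = np.where(dr < -width / 2.0, dr + width, dr)
        return np.sqrt((dr * dr).sum(axis=1))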


https://bitbucket.org/yt_analysis/yt/commits/da69e13605c1/
Changeset:   da69e13605c1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-11 14:37:58+00:00
Summary:     Adding docstrings and documentation. Removing unused index_fields args.
Affected #:  3 files

diff -r 91ad5509b1995d6d1790dc561f227fb8895b1f0c -r da69e13605c1e7bdad22e0df0301bb4c2fd07353 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -350,3 +350,35 @@
 ``Temperature`` of the ``Gas`` particle type would be ``("deposit",
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
+
+Computing the Nth Nearest Neighbor
+------------------------------
+
+One particularly useful field that can be created is that of the distance to
+the Nth-nearest neighbor.  This field can then be used as input to smoothing
+operations, in the case when a particular particle type does not have an
+associated smoothing length or other length estimate.
+
+yt defines this field as a plugin, and it can be added like so:
+
+.. code-block:: python
+
+   import yt
+   from yt.fields.particle_fields import \
+     add_nearest_neighbor_field
+
+   ds = yt.load("snapshot_033/snap_033.0.hdf5")
+   fn, = add_nearest_neighbor_field("all", "particle_position", ds)
+
+   dd = ds.all_data()
+   print dd[fn]
+
+Note that ``fn`` here is the "field name" that yt adds.  It will be of the form
+``(ptype, nearest_neighbor_NN)`` where ``NN`` is the integer.  By default this
+is 64, but it can be supplied as the final argument to
+``add_nearest_neighbor_field``.  For the example above, it would be
+``nearest_neighbor_64``.
+
+This can then be used as input to the function
+``add_volume_weighted_smoothed_field``, which can enable smoothing particle
+types that would normally not be smoothed.

diff -r 91ad5509b1995d6d1790dc561f227fb8895b1f0c -r da69e13605c1e7bdad22e0df0301bb4c2fd07353 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -141,6 +141,31 @@
         return self._domain_ind
 
     def deposit(self, positions, fields = None, method = None):
+        r"""Operate on the mesh, in a particle-against-mesh fashion, with
+        exclusively local input.
+
+        This uses the octree indexing system to call a "deposition" operation
+        (defined in yt/geometry/particle_deposit.pyx) that can take input from
+        several particles (local to the mesh) and construct some value on the
+        mesh.  The canonical example is to sum the total mass in a mesh cell
+        and then divide by its volume.
+
+        Parameters
+        ----------
+        positions : array_like (Nx3)
+            The positions of all of the particles to be examined.  A new
+            indexed octree will be constructed on these particles.
+        fields : list of arrays
+            All the necessary fields for computing the particle operation.  For
+            instance, this might include mass, velocity, etc.  
+        method : string
+            This is the "method name" which will be looked up in the
+            `particle_deposit` namespace as `methodname_deposit`.
+
+        Returns
+        -------
+        List of fortran-ordered, mesh-like arrays.
+        """
         # Here we perform our particle deposition.
         if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
@@ -165,6 +190,41 @@
 
     def smooth(self, positions, fields = None, index_fields = None,
                method = None, create_octree = False, nneighbors = 64):
+        r"""Operate on the mesh, in a particle-against-mesh fashion, with
+        non-local input.
+
+        This uses the octree indexing system to call a "smoothing" operation
+        (defined in yt/geometry/particle_smooth.pyx) that can take input from
+        several (non-local) particles and construct some value on the mesh.
+        The canonical example is to conduct a smoothing kernel operation on the
+        mesh.
+
+        Parameters
+        ----------
+        positions : array_like (Nx3)
+            The positions of all of the particles to be examined.  A new
+            indexed octree will be constructed on these particles.
+        fields : list of arrays
+            All the necessary fields for computing the particle operation.  For
+            instance, this might include mass, velocity, etc.  
+        index_fields : list of arrays
+            All of the fields defined on the mesh that may be used as input to
+            the operation.
+        method : string
+            This is the "method name" which will be looked up in the
+            `particle_smooth` namespace as `methodname_smooth`.
+        create_octree : bool
+            Should we construct a new octree for indexing the particles?  In
+            cases where we are applying an operation on a subset of the
+            particles used to construct the mesh octree, this will ensure that
+            we are able to find and identify all relevant particles.
+        nneighbors : int, default 64
+            The number of neighbors to examine during the process.
+
+        Returns
+        -------
+        List of fortran-ordered, mesh-like arrays.
+        """
         # Here we perform our particle deposition.
         positions.convert_to_units("code_length")
         if create_octree:
@@ -177,7 +237,7 @@
                 self.ds.domain_left_edge,
                 self.ds.domain_right_edge,
                 over_refine = self._oref)
-            # This should ensure we get everything within on neighbor of home.
+            # This should ensure we get everything within one neighbor of home.
             particle_octree.n_ref = nneighbors * 2
             particle_octree.add(morton)
             particle_octree.finalize()
@@ -210,7 +270,39 @@
         return vals
 
     def particle_operation(self, positions, fields = None,
-            index_fields = None, method = None, nneighbors = 64):
+            method = None, nneighbors = 64):
+        r"""Operate on particles, in a particle-against-particle fashion.
+
+        This uses the octree indexing system to call a "smoothing" operation
+        (defined in yt/geometry/particle_smooth.pyx) that expects to be called
+        in a particle-by-particle fashion.  For instance, the canonical example
+        of this would be to compute the Nth nearest neighbor, or to compute the
+        density for a given particle based on some kernel operation.
+
+        Many of the arguments to this are identical to those used in the smooth
+        and deposit functions.  Note that the `fields` argument must not be
+        empty, as these fields will be modified in place.
+
+        Parameters
+        ----------
+        positions : array_like (Nx3)
+            The positions of all of the particles to be examined.  A new
+            indexed octree will be constructed on these particles.
+        fields : list of arrays
+            All the necessary fields for computing the particle operation.  For
+            instance, this might include mass, velocity, etc.  One of these
+            will likely be modified in place.
+        method : string
+            This is the "method name" which will be looked up in the
+            `particle_smooth` namespace as `methodname_smooth`.
+        nneighbors : int, default 64
+            The number of neighbors to examine during the process.
+
+        Returns
+        -------
+        Nothing.
+
+        """
         # Here we perform our particle deposition.
         positions.convert_to_units("code_length")
         morton = compute_morton(
@@ -227,7 +319,6 @@
         particle_octree.finalize()
         pdom_ind = particle_octree.domain_ind(self.selector)
         if fields is None: fields = []
-        if index_fields is None: index_fields = []
         cls = getattr(particle_smooth, "%s_smooth" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
@@ -240,7 +331,7 @@
             positions.shape[0], nvals[-1])
         op.process_particles(particle_octree, pdom_ind, positions, 
             fields, self.domain_id, self._domain_offset, self.ds.periodicity,
-            index_fields, self.ds.geometry)
+            self.ds.geometry)
         vals = op.finalize()
         if vals is None: return
         if isinstance(vals, list):

diff -r 91ad5509b1995d6d1790dc561f227fb8895b1f0c -r da69e13605c1e7bdad22e0df0301bb4c2fd07353 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -284,7 +284,6 @@
                      fields = None, int domain_id = -1,
                      int domain_offset = 0,
                      periodicity = (True, True, True),
-                     index_fields = None,
                      geometry = "cartesian"):
         cdef int nf, i, j, k, dims[3], n
         cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
@@ -298,7 +297,6 @@
         cdef np.int64_t *doffs, *pinds, *pcounts, poff
         cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
-        cdef np.ndarray[np.float64_t, ndim=4] iarr
         cdef np.ndarray[np.float64_t, ndim=2] cart_positions
         if geometry == "cartesian":
             self.pos_setup = cart_coord_setup
@@ -330,13 +328,6 @@
         for i in range(nf):
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
-        if index_fields is None:
-            index_fields = []
-        nf = len(index_fields)
-        index_field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        for i in range(nf):
-            iarr = index_fields[i]
-            index_field_pointers[i] = <np.float64_t *> iarr.data
         for i in range(3):
             self.DW[i] = (particle_octree.DRE[i] - particle_octree.DLE[i])
             self.periodicity[i] = periodicity[i]
@@ -417,7 +408,7 @@
                     pos[k] = positions[pind0, k]
                 self.neighbor_process_particle(pos, cart_pos, field_pointers,
                             nneighbors, nind, doffs, pinds, pcounts, pind0,
-                            index_field_pointers)
+                            NULL)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:


https://bitbucket.org/yt_analysis/yt/commits/a4e019a10dc6/
Changeset:   a4e019a10dc6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-11 15:34:40+00:00
Summary:     Removing duplicate assert
Affected #:  1 file

diff -r da69e13605c1e7bdad22e0df0301bb4c2fd07353 -r a4e019a10dc69d7fa438c68b041c379c1402aa63 yt/geometry/tests/test_neighbor_search.py
--- a/yt/geometry/tests/test_neighbor_search.py
+++ b/yt/geometry/tests/test_neighbor_search.py
@@ -34,6 +34,5 @@
         min_in[i] = np.argmin(np.abs(radius - nearest_neighbors[i]))
         #if i == 34: raise RuntimeError
         #dd.field_data.pop(("all", "particle_radius"))
-    assert_equal((min_in == 63).sum(), min_in.size)
     yield assert_equal, (min_in == 63).sum(), min_in.size
     yield assert_array_almost_equal, nearest_neighbors, all_neighbors


https://bitbucket.org/yt_analysis/yt/commits/1d017e9724f8/
Changeset:   1d017e9724f8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-11 15:42:27+00:00
Summary:     First attempt at a density smooth
Affected #:  2 files

diff -r a4e019a10dc69d7fa438c68b041c379c1402aa63 -r 1d017e9724f8794d2db0b805d620096486aed802 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -583,3 +583,24 @@
                        units = "code_length")
     return [field_name]
 
+def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64):
+    field_name = (ptype, "smoothed_density")
+    field_units = registry[ptype, mass_name].units
+    def _nth_neighbor(field, data):
+        pos = data[ptype, coord_name].in_units("code_length")
+        mass = data[ptype, mass_name].in_units(field_units)
+        densities = mass * 0.0
+        data.particle_operation(pos, [mass, densities],
+                         method="s",
+                         nneighbors = nneighbors)
+        ones = pos.prod(axis=1) # Get us in code_length**3
+        ones[:] = 1.0
+        densities /= ones
+        # Now some quick unit conversions.
+        return densities
+    registry.add_field(field_name, function = _nth_neighbor,
+                       validators = [ValidateSpatial(0)],
+                       particle_type = True,
+                       units = "code_length")
+    return [field_name]
+

diff -r a4e019a10dc69d7fa438c68b041c379c1402aa63 -r 1d017e9724f8794d2db0b805d620096486aed802 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -718,3 +718,29 @@
         fields[0][offset] = max_r
 
 nth_neighbor_smooth = NthNeighborDistanceSmooth
+
+cdef class SmoothedDensityEstimate(ParticleSmoothOperation):
+    def initialize(self):
+        return
+
+    def finalize(self):
+        return
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields,
+                      np.float64_t **index_fields):
+        cdef np.float64_t r2, hsml, dens, mass
+        cdef int pn
+        # We assume "offset" here is the particle index.
+        hsml = sqrt(self.neighbors[self.curn-1].r2)
+        dens = 0.0
+        for pn in range(self.curn):
+            mass = fields[0][self.neighbors[pn].pn]
+            r2 = self.neighbors[pn].r2
+            dens += mass * sph_kernel(sqrt(r2) / hsml)
+        fields[1][offset] = dens
+
+density_smooth = SmoothedDensityEstimate
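
The estimate in SmoothedDensityEstimate.process is the standard gather form rho_i = sum_j m_j W(r_ij / h_i), with h_i taken as the distance to the farthest gathered neighbor. A plain-NumPy sketch of that sum; the cubic-spline kernel here is a stand-in for yt's sph_kernel, and the committed code leaves unit normalization to the field definition:

    import numpy as np

    def sph_density(r2, mass, hsml):
        # r2: squared distances to the gathered neighbors; mass: their
        # masses; hsml: distance to the farthest neighbor.
        def kernel(q):
            w = np.zeros_like(q)
            inner = q < 0.5
            outer = (q >= 0.5) & (q < 1.0)
            w[inner] = 1.0 - 6.0 * q[inner] ** 2 + 6.0 * q[inner] ** 3
            w[outer] = 2.0 * (1.0 - q[outer]) ** 3
            return (8.0 / np.pi) * w / hsml ** 3
        return np.sum(mass * kernel(np.sqrt(r2) / hsml))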


https://bitbucket.org/yt_analysis/yt/commits/279a73e539de/
Changeset:   279a73e539de
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-11 15:42:36+00:00
Summary:     Merging from gadget fix
Affected #:  19 files

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -16,7 +16,7 @@
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -20,7 +20,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | ARTIO                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -62,7 +62,7 @@
   units.  The full field name will be will be ``(code_name, field_name)``. See
   :ref:`fields`.
 * Particle fields on-disk will also be in code units, and will be named
-  ``(particle_type, FieldName)``.  If there is only one particle type in the
+  ``(particle_type, field_name)``.  If there is only one particle type in the
   output file, all particles will use ``io`` as the particle type. See 
   :ref:`fields`.
 * The objects we used to refer to as "parameter files" we now refer to as a
@@ -160,7 +160,7 @@
 It's now possible to import all yt functionality using ``import yt``. Rather
 than using ``from yt.mods import *``, we suggest using ``import yt`` in new
 scripts.  Most commonly used yt functionality is attached to the ``yt`` module.
-Load a dataset with ``yt.load()``, create a phase plot using ``yt.PhasePlot,
+Load a dataset with ``yt.load()``, create a phase plot using ``yt.PhasePlot``,
 and much more, see :ref:`the api docs api-reference` to learn more about what's
 in the ``yt`` namespace, or just use tab completion in IPython: ``yt.<tab>``.
 
@@ -224,7 +224,7 @@
 The hierarchy object (``pf.h``) is now referred to as an index (``ds.index``).
 It is no longer necessary to directly refer to the ``index`` as often, since
 data objects are now attached to the to the ``dataset`` object.  Before, you
-would say ``ph.f.sphere()``, now you can say ``ds.sphere()``.
+would say ``pf.h.sphere()``, now you can say ``ds.sphere()``.
 
 New derived quantities interface
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -278,7 +278,7 @@
 Nearly all internal objects have been renamed.  Typically this means either
 removing ``AMR`` from the prefix or replacing it with ``YT``.  All names of
 objects remain the same for the purposes of selecting data and creating them;
-i.e., ``sphere`` objects are still called ``sphere`` - you can access create one
+i.e., ``sphere`` objects are still called ``sphere`` - you can access or create one
 via ``ds.sphere``.  For a detailed description and index see 
 :ref:`available-objects`.
 

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b setup.py
--- a/setup.py
+++ b/setup.py
@@ -118,7 +118,7 @@
 # End snippet
 ######
 
-VERSION = "3.0"
+VERSION = "3.1dev"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -72,7 +72,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-__version__ = "3.0-dev"
+__version__ = "3.1-dev"
 
 # First module imports
 import numpy as np # For modern purposes

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -225,13 +225,13 @@
 
     # accumulate, if necessary
     if accumulation:
-        used = my_profile.used        
+        used = my_profile.used
         for field in my_profile.field_data:
             if weight_field is None:
                 my_profile.field_data[field][used] = \
                     np.cumsum(my_profile.field_data[field][used])
             else:
-                my_weight = my_profile.weight[:, 0]
+                my_weight = my_profile.weight
                 my_profile.field_data[field][used] = \
                   np.cumsum(my_profile.field_data[field][used] * my_weight[used]) / \
                   np.cumsum(my_weight[used])

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -27,7 +27,7 @@
 from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
-from yt.funcs import mylog
+from yt.funcs import mylog, ensure_dir_exists
 from yt.utilities.performance_counters import \
     time_function, \
     yt_counters

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -233,7 +233,6 @@
             fi += 1
         pi += npart
     num_p[0] = local_parts
-    del ds._instantiated_hierarchy
     del ds
 
 cdef class RockstarInterface:

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -336,8 +336,8 @@
                                   registry=self.ds.unit_registry)
             if self.weight_field is None and not self._sum_only:
                 u_obj = Unit(units, registry=self.ds.unit_registry)
-                if (u_obj.is_code_unit and not u_obj.is_dimensionless) and \
-                  input_units != units or self.ds.no_cgs_equiv_length:
+                if ((u_obj.is_code_unit or self.ds.no_cgs_equiv_length) and
+                    not u_obj.is_dimensionless) and input_units != units:
                     final_unit = "(%s) * code_length" % units
                     self[field].convert_to_units(final_unit)
         for i in data.keys(): self[i] = data.pop(i)

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -107,7 +107,7 @@
         self.directory = os.path.dirname(self.dataset.filename)
         self.dataset_type = dataset_type
         # for now, the index file is the dataset!
-        self.index_filename = self.dataset.filename
+        self.index_filename = os.path.join(os.getcwd(), self.dataset.filename)
         #self.directory = os.path.dirname(self.index_filename)
         self._fhandle = file(self.index_filename,'rb')
         GridIndex.__init__(self, ds, dataset_type)
@@ -366,7 +366,7 @@
         # Unfortunately we now have to mandate that the index gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
-        self.h
+        self.index
 
     def _set_code_unit_attributes(self):
         """
@@ -458,14 +458,13 @@
             self.hubble_constant = self.cosmological_simulation = 0.0
         self.parameters['Time'] = self.current_time # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
-        if self.specified_parameters.has_key("gamma") :
+        if self.specified_parameters.has_key("gamma"):
             self.parameters["Gamma"] = self.specified_parameters["gamma"]
-        else :
+        else:
             self.parameters["Gamma"] = 5./3. 
         self.geometry = self.specified_parameters.get("geometry", "cartesian")
         self._handle.close()
 
-
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -25,6 +25,11 @@
 erg_units = "code_mass * (code_length/code_time)**2"
 rho_units = "code_mass / code_length**3"
 
+def velocity_field(comp):
+    def _velocity(field, data):
+        return data["athena", "momentum_%s" % comp]/data["athena","density"]
+    return _velocity
+
 class AthenaFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", ("code_mass/code_length**3", ["density"], None)),
@@ -41,19 +46,17 @@
     def setup_fluid_fields(self):
         # Add velocity fields
         for comp in "xyz":
-            vel_field = ("athena", "velocity_%s" % (comp))
-            mom_field = ("athena", "momentum_%s" % (comp))
+            vel_field = ("athena", "velocity_%s" % comp)
+            mom_field = ("athena", "momentum_%s" % comp)
             if vel_field in self.field_list:
                 self.add_output_field(vel_field, units="code_length/code_time")
-                self.alias(("gas","velocity_%s" % (comp)), vel_field,
+                self.alias(("gas","velocity_%s" % comp), vel_field,
                            units="cm/s")
             elif mom_field in self.field_list:
                 self.add_output_field(mom_field,
-                                      units="code_mass*code_length/code_time")
-                f = lambda data: data["athena","momentum_%s" % (comp)] / \
-                                 data["athena","density"]
-                self.add_field(("gas","velocity_%s" % (comp)),
-                               function=f, units = "cm/s")
+                                      units="code_mass/code_time/code_length**2")
+                self.add_field(("gas","velocity_%s" % comp),
+                               function=velocity_field(comp), units = "cm/s")
         # Add pressure, energy, and temperature fields
         def ekin1(data):
             return 0.5*(data["athena","momentum_x"]**2 +
@@ -96,6 +99,8 @@
                            function=_total_energy,
                            units="erg/g")
         elif ("athena","total_energy") in self.field_list:
+            self.add_output_field(("athena","total_energy"),
+                                  units=pres_units)
             def _pressure(field, data):
                 return eint_from_etot(data)*(data.ds.gamma-1.0)
             self.add_field(("gas","pressure"), function=_pressure,
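
For readers following along, the change in this file replaces a lambda defined inside the "for comp in 'xyz'" loop with a module-level velocity_field(comp) factory. A closure created in a loop looks up the loop variable when it is called, not when it is defined, so all three derived velocity fields would end up dividing by the z momentum; the new _velocity also takes the (field, data) signature used by the other field definitions in this file, whereas the old lambda only took data. A minimal sketch of the pitfall in plain Python (no yt, made-up field values, single-argument callbacks for brevity):

    def make_fields_with_lambda():
        fields = {}
        for comp in "xyz":
            # Bug: 'comp' is resolved when the lambda is called, after the
            # loop has finished, so every entry divides by the z momentum.
            fields["velocity_%s" % comp] = (
                lambda data: data["momentum_%s" % comp] / data["density"])
        return fields

    def velocity_field(comp):
        # Factory: each call creates a new scope that pins down its own 'comp'.
        def _velocity(data):
            return data["momentum_%s" % comp] / data["density"]
        return _velocity

    def make_fields_with_factory():
        return dict(("velocity_%s" % comp, velocity_field(comp)) for comp in "xyz")

    data = {"density": 2.0, "momentum_x": 2.0, "momentum_y": 4.0, "momentum_z": 8.0}
    buggy = make_fields_with_lambda()
    fixed = make_fields_with_factory()
    print([buggy["velocity_%s" % c](data) for c in "xyz"])  # [4.0, 4.0, 4.0]
    print([fixed["velocity_%s" % c](data) for c in "xyz"])  # [1.0, 2.0, 4.0]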

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/frontends/athena/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -0,0 +1,59 @@
+"""
+Athena frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.athena.api import AthenaDataset
+
+_fields_cloud = ("scalar[0]", "density", "total_energy")
+
+cloud = "ShockCloud/id0/Cloud.0050.vtk"
+@requires_ds(cloud)
+def test_cloud():
+    ds = data_dir_load(cloud)
+    yield assert_equal, str(ds), "Cloud.0050"
+    for test in small_patch_amr(cloud, _fields_cloud):
+        test_cloud.__name__ = test.description
+        yield test
+
+_fields_blast = ("temperature", "density", "velocity_magnitude")
+
+blast = "MHDBlast/id0/Blast.0100.vtk"
+@requires_ds(blast)
+def test_blast():
+    ds = data_dir_load(blast)
+    yield assert_equal, str(ds), "Blast.0100"
+    for test in small_patch_amr(blast, _fields_blast):
+        test_blast.__name__ = test.description
+        yield test
+
+parameters_stripping = {"time_unit":3.086e14,
+                        "length_unit":8.0236e22,
+                        "mass_unit":9.999e-30*8.0236e22**3}
+
+_fields_stripping = ("temperature", "density", "specific_scalar[0]")
+
+stripping = "RamPressureStripping/id0/rps.0062.vtk"
+@requires_ds(stripping, big_data=True)
+def test_stripping():
+    ds = data_dir_load(stripping, kwargs={"parameters":parameters_stripping})
+    yield assert_equal, str(ds), "rps.0062"
+    for test in small_patch_amr(stripping, _fields_stripping):
+        test_stripping.__name__ = test.description
+        yield test

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -35,17 +35,25 @@
     ("unused", BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12, "c")
 )
 
-halo_dt = np.dtype([
+# Note the final field here, which is a field for min/max format revision in
+# which the field appears.
+
+KNOWN_REVISIONS=[0, 1]
+
+halo_dt = [
     ('particle_identifier', np.int64),
     ('particle_position_x', np.float32),
     ('particle_position_y', np.float32),
     ('particle_position_z', np.float32),
+    ('particle_mposition_x', np.float32, (0, 0)),
+    ('particle_mposition_y', np.float32, (0, 0)),
+    ('particle_mposition_z', np.float32, (0, 0)),
     ('particle_velocity_x', np.float32),
     ('particle_velocity_y', np.float32),
     ('particle_velocity_z', np.float32),
-    ('particle_corevel_x', np.float32),
-    ('particle_corevel_y', np.float32),
-    ('particle_corevel_z', np.float32),
+    ('particle_corevel_x', np.float32, (1, 100)),
+    ('particle_corevel_y', np.float32, (1, 100)),
+    ('particle_corevel_z', np.float32, (1, 100)),
     ('particle_bulkvel_x', np.float32),
     ('particle_bulkvel_y', np.float32),
     ('particle_bulkvel_z', np.float32),
@@ -75,15 +83,15 @@
     ('Ax', np.float32),
     ('Ay', np.float32),
     ('Az', np.float32),
-    ('b_to_a2', np.float32),
-    ('c_to_a2', np.float32),
-    ('A2x', np.float32),
-    ('A2y', np.float32),
-    ('A2z', np.float32),
+    ('b_to_a2', np.float32, (1, 100)),
+    ('c_to_a2', np.float32, (1, 100)),
+    ('A2x', np.float32, (1, 100)),
+    ('A2y', np.float32, (1, 100)),
+    ('A2z', np.float32, (1, 100)),
     ('bullock_spin', np.float32),
     ('kin_to_pot', np.float32),
-    ('m_pe_b', np.float32),
-    ('m_pe_d', np.float32),
+    ('m_pe_b', np.float32, (1, 100)),
+    ('m_pe_d', np.float32, (1, 100)),
     ('num_p', np.int64),
     ('num_child_particles', np.int64),
     ('p_start', np.int64),
@@ -93,7 +101,20 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-], align=True)
+]
+
+halo_dts = {}
+
+for rev in KNOWN_REVISIONS:
+    halo_dts[rev] = []
+    for item in halo_dt:
+        if len(item) == 2:
+            halo_dts[rev].append(item)
+        else:
+            mi, ma = item[2]
+            if (mi <= rev) and (rev <= ma):
+                halo_dts[rev].append(item[:2])
+    halo_dts[rev] = np.dtype(halo_dts[rev], align=True)
 
 particle_dt = np.dtype([
     ('particle_identifier', np.int64),
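
For context, the loop added at the bottom of this hunk builds one numpy dtype per known format revision: two-element entries are unconditional, while three-element entries carry a (min, max) revision range and are kept only when the revision falls inside it. A self-contained sketch with a made-up subset of the fields:

    import numpy as np

    KNOWN_REVISIONS = [0, 1]

    halo_dt = [
        ('particle_identifier', np.int64),
        ('particle_position_x', np.float32),
        ('particle_mposition_x', np.float32, (0, 0)),   # revision 0 only
        ('particle_corevel_x',  np.float32, (1, 100)),  # revision 1 and later
    ]

    halo_dts = {}
    for rev in KNOWN_REVISIONS:
        fields = []
        for item in halo_dt:
            if len(item) == 2:
                fields.append(item)          # present in every revision
            else:
                mi, ma = item[2]
                if mi <= rev <= ma:
                    fields.append(item[:2])  # strip the revision tuple
        halo_dts[rev] = np.dtype(fields, align=True)

    print(halo_dts[0].names)  # (..., 'particle_mposition_x')
    print(halo_dts[1].names)  # (..., 'particle_corevel_x')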

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -24,7 +24,7 @@
     BaseIOHandler
 
 import yt.utilities.fortran_utils as fpu
-from .definitions import halo_dt
+from .definitions import halo_dts
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
@@ -32,6 +32,10 @@
 class IOHandlerRockstarBinary(BaseIOHandler):
     _dataset_type = "rockstar_binary"
 
+    def __init__(self, *args, **kwargs):
+        super(IOHandlerRockstarBinary, self).__init__(*args, **kwargs)
+        self._halo_dt = halo_dts[self.ds.parameters['format_revision']]
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -45,11 +49,12 @@
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
+        
         for data_file in sorted(data_files):
             pcount = data_file.header['num_halos']
             with open(data_file.filename, "rb") as f:
                 f.seek(data_file._position_offset, os.SEEK_SET)
-                halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+                halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
                 x = halos['particle_position_x'].astype("float64")
                 y = halos['particle_position_y'].astype("float64")
                 z = halos['particle_position_z'].astype("float64")
@@ -70,7 +75,7 @@
             with open(data_file.filename, "rb") as f:
                 for ptype, field_list in sorted(ptf.items()):
                     f.seek(data_file._position_offset, os.SEEK_SET)
-                    halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+                    halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
                     x = halos['particle_position_x'].astype("float64")
                     y = halos['particle_position_y'].astype("float64")
                     z = halos['particle_position_z'].astype("float64")
@@ -89,7 +94,7 @@
         ind = 0
         with open(data_file.filename, "rb") as f:
             f.seek(data_file._position_offset, os.SEEK_SET)
-            halos = np.fromfile(f, dtype=halo_dt, count = pcount)
+            halos = np.fromfile(f, dtype=self._halo_dt, count = pcount)
             pos = np.empty((halos.size, 3), dtype="float64")
             # These positions are in Mpc, *not* "code" units
             pos = data_file.ds.arr(pos, "code_length")
@@ -121,6 +126,6 @@
         return {'halos': data_file.header['num_halos']}
 
     def _identify_fields(self, data_file):
-        fields = [("halos", f) for f in halo_dt.fields if
+        fields = [("halos", f) for f in self._halo_dt.fields if
                   "padding" not in f]
         return fields, {}
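
With the dtype now keyed on the catalog's format revision, the np.fromfile calls above simply seek past the header and read pcount fixed-size records with that dtype. A rough, self-contained illustration (tiny made-up dtype, fake file, and 16-byte header; not the real Rockstar layout):

    import numpy as np

    # Hypothetical two-field dtype standing in for halo_dts[format_revision].
    halo_dt = np.dtype([('particle_identifier', np.int64),
                        ('particle_position_x', np.float32)], align=True)

    # Write a tiny fake catalog: a 16-byte header followed by two halo records.
    header = b'\x00' * 16
    halos_out = np.zeros(2, dtype=halo_dt)
    halos_out['particle_identifier'] = [0, 1]
    halos_out['particle_position_x'] = [12.5, 37.0]
    with open("fake_halos.bin", "wb") as f:
        f.write(header)
        halos_out.tofile(f)

    # Read it back the same way _read_particle_coords/_read_particle_fields do:
    # seek past the header, then pull the records with the revision's dtype.
    with open("fake_halos.bin", "rb") as f:
        f.seek(len(header))
        halos = np.fromfile(f, dtype=halo_dt, count=2)
    x = halos['particle_position_x'].astype("float64")
    print(x)   # [ 12.5  37. ]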

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -344,21 +344,26 @@
                         for ptype in self._ptypes):
                 continue
             pos += 4
+            any_ptypes = False
             for ptype in self._ptypes:
                 if field == "Mass" and ptype not in self.var_mass:
                     continue
                 if (ptype, field) not in field_list:
                     continue
                 offsets[(ptype, field)] = pos
+                any_ptypes = True
                 if field in self._vector_fields:
                     pos += 3 * pcount[ptype] * fs
                 else:
                     pos += pcount[ptype] * fs
             pos += 4
+            if not any_ptypes: pos -= 8
         if file_size is not None:
             if file_size != pos:
                 mylog.warning("Your Gadget-2 file may have extra " +
-                              "columns or different precision!")
+                              "columns or different precision!" +
+                              " (%s file vs %s computed)",
+                              file_size, pos)
         return offsets
 
     def _identify_fields(self, domain):
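
The any_ptypes bookkeeping above handles Gadget-2 blocks that none of the requested particle types actually store: each block is bracketed by 4-byte Fortran record markers, and if no particle type contributed to a block it is absent from the file, so the 8 marker bytes must be backed out. A stripped-down sketch of that offset arithmetic (scalar fields only, ignoring var_mass and vector fields):

    def compute_offsets(blocks, pcount, field_list, fs=4):
        # blocks: ordered field names; pcount: particles per ptype;
        # field_list: (ptype, field) pairs actually present on disk.
        offsets = {}
        pos = 0
        for field in blocks:
            pos += 4                          # leading record marker
            any_ptypes = False
            for ptype in sorted(pcount):
                if (ptype, field) not in field_list:
                    continue
                offsets[(ptype, field)] = pos
                any_ptypes = True
                pos += pcount[ptype] * fs
            pos += 4                          # trailing record marker
            if not any_ptypes:
                pos -= 8                      # block not in the file at all
        return offsets, pos

    offsets, end = compute_offsets(
        blocks=["InternalEnergy", "StarFormationRate"],
        pcount={"Gas": 3, "Halo": 2},
        field_list=[("Gas", "InternalEnergy")])
    print(offsets)   # {('Gas', 'InternalEnergy'): 4}
    print(end)       # 20 = 4 + 3*4 + 4; the absent SFR block adds nothing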

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -758,7 +758,7 @@
 
 def big_patch_amr(ds_fn, fields, input_center="max", input_weight="density"):
     if not can_run_ds(ds_fn): return
-    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    dso = [ None, ("sphere", (input_center, (0.1, 'unitary')))]
     yield GridHierarchyTest(ds_fn)
     yield ParentageRelationshipsTest(ds_fn)
     for field in fields:

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -252,7 +252,7 @@
 
 axis_wcs = [[1,2],[0,2],[0,1]]
 
-def construct_image(data_source):
+def construct_image(data_source, center=None):
     ds = data_source.ds
     axis = data_source.axis
     if hasattr(ds, "wcs"):
@@ -266,11 +266,14 @@
     else:
         # This is some other kind of dataset
         unit = ds.get_smallest_appropriate_unit(ds.domain_width.max())
+        if center is None:
+            crval = [0.0,0.0]
+        else:
+            crval = [(ds.domain_center-center)[idx].in_units(unit) for idx in axis_wcs[axis]]
         dx = ds.index.get_smallest_dx()
         nx, ny = (ds.domain_width[axis_wcs[axis]]/dx).ndarray_view().astype("int")
         crpix = [0.5*(nx+1), 0.5*(ny+1)]
         cdelt = [dx.in_units(unit)]*2
-        crval = [ds.domain_center[idx].in_units(unit) for idx in axis_wcs[axis]]
         cunit = [unit]*2
         ctype = ["LINEAR"]*2
     frb = data_source.to_frb((1.0,"unitary"), (nx,ny))
@@ -295,7 +298,7 @@
     fields : string or list of strings
         The fields to slice
     center : A sequence floats, a string, or a tuple.
-         The coordinate of the center of the image. If set to 'c', 'center' or
+         The coordinate of the origin of the image. If set to 'c', 'center' or
          left blank, the plot is centered on the middle of the domain. If set to
          'max' or 'm', the center will be located at the maximum of the
          ('gas', 'density') field. Units can be specified by passing in center
@@ -308,7 +311,7 @@
         axis = fix_axis(axis, ds)
         center = get_sanitized_center(center, ds)
         slc = ds.slice(axis, center[axis], **kwargs)
-        w, frb = construct_image(slc)
+        w, frb = construct_image(slc, center=center)
         super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
         for i, field in enumerate(fields):
             self[i].header["bunit"] = str(frb[field].units)
@@ -327,12 +330,21 @@
         The fields to project
     weight_field : string
         The field used to weight the projection.
+    center : A sequence floats, a string, or a tuple.
+        The coordinate of the origin of the image. If set to 'c', 'center' or
+        left blank, the plot is centered on the middle of the domain. If set to
+        'max' or 'm', the center will be located at the maximum of the
+        ('gas', 'density') field. Units can be specified by passing in center
+        as a tuple containing a coordinate and string unit name or by passing
+        in a YTArray.  If a list or unitless array is supplied, code units are
+        assumed.
     """
-    def __init__(self, ds, axis, fields, weight_field=None, **kwargs):
+    def __init__(self, ds, axis, fields, center="c", weight_field=None, **kwargs):
         fields = ensure_list(fields)
         axis = fix_axis(axis, ds)
+        center = get_sanitized_center(center, ds)
         prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)
-        w, frb = construct_image(prj)
+        w, frb = construct_image(prj, center=center)
         super(FITSProjection, self).__init__(frb, fields=fields, wcs=w)
         for i, field in enumerate(fields):
             self[i].header["bunit"] = str(frb[field].units)
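
The crval change in construct_image above makes the FITS reference value the displacement of the domain center from the requested image center along the two image axes, rather than the absolute domain-center coordinate, so off-center slices and projections get coordinates relative to the chosen center. A plain-number sketch of the bookkeeping (hypothetical sizes and centers, no yt or WCS objects):

    domain_center = [50.0, 50.0, 50.0]       # kpc, hypothetical
    center = [42.0, 50.0, 61.0]              # requested image center, hypothetical
    axis = 2                                 # slicing along z
    axis_wcs = [[1, 2], [0, 2], [0, 1]]

    nx = ny = 128
    dx = 0.5                                 # smallest cell size, kpc
    crpix = [0.5 * (nx + 1), 0.5 * (ny + 1)] # reference pixel: image center
    cdelt = [dx, dx]
    crval = [domain_center[idx] - center[idx] for idx in axis_wcs[axis]]
    print(crpix, cdelt, crval)               # [64.5, 64.5] [0.5, 0.5] [8.0, 0.0]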

diff -r 1d017e9724f8794d2db0b805d620096486aed802 -r 279a73e539de50e97d35c67a67103e466531839b yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -280,6 +280,7 @@
         name = old_object._type_name
         kwargs = dict((n, getattr(old_object, n))
                       for n in old_object._con_args)
+        kwargs['center'] = getattr(old_object, 'center', None)
         if data_source is not None:
             if name != "proj":
                 raise RuntimeError("The data_source keyword argument "
@@ -289,6 +290,12 @@
         self.ds = new_ds
         self.data_source = new_object
         self._data_valid = self._plot_valid = False
+        for d in 'xyz':
+            lim_name = d+'lim'
+            if hasattr(self, lim_name):
+                lim = getattr(self, lim_name)
+                lim = tuple(new_ds.quan(l.value, str(l.units)) for l in lim)
+                setattr(self, lim_name, lim)
         self._recreate_frb()
         self._setup_plots()
 


https://bitbucket.org/yt_analysis/yt/commits/4e731feebccd/
Changeset:   4e731feebccd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-08-11 17:15:51+00:00
Summary:     A few modifications, adding smoothed density estimate.
Affected #:  2 files

diff -r 279a73e539de50e97d35c67a67103e466531839b -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -588,10 +588,10 @@
     field_units = registry[ptype, mass_name].units
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name].in_units("code_length")
-        mass = data[ptype, mass_name].in_units(field_units)
+        mass = data[ptype, mass_name].in_units("g")
         densities = mass * 0.0
         data.particle_operation(pos, [mass, densities],
-                         method="s",
+                         method="density",
                          nneighbors = nneighbors)
         ones = pos.prod(axis=1) # Get us in code_length**3
         ones[:] = 1.0
@@ -601,6 +601,6 @@
     registry.add_field(field_name, function = _nth_neighbor,
                        validators = [ValidateSpatial(0)],
                        particle_type = True,
-                       units = "code_length")
+                       units = "g/cm**3")
     return [field_name]
 

diff -r 279a73e539de50e97d35c67a67103e466531839b -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -732,15 +732,18 @@
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
                       np.float64_t **index_fields):
-        cdef np.float64_t r2, hsml, dens, mass
+        cdef np.float64_t r2, hsml, dens, mass, weight, lw
         cdef int pn
         # We assume "offset" here is the particle index.
         hsml = sqrt(self.neighbors[self.curn-1].r2)
         dens = 0.0
+        weight = 0.0
         for pn in range(self.curn):
             mass = fields[0][self.neighbors[pn].pn]
             r2 = self.neighbors[pn].r2
-            dens += mass * sph_kernel(sqrt(r2) / hsml)
-        fields[1][offset] = dens
+            lw = sph_kernel(sqrt(r2) / hsml)
+            dens += mass * lw
+        weight = (4.0/3.0) * 3.1415926 * hsml**3
+        fields[1][offset] = dens/weight
 
 density_smooth = SmoothedDensityEstimate
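
The process() method above estimates a density at each particle by summing mass times a kernel weight over the nneighbors nearest neighbors and dividing by the volume of the smoothing sphere, (4/3)*pi*hsml**3, where hsml is the distance to the furthest neighbor. A NumPy transcription of that per-particle loop (the cubic-spline shape below is a stand-in for yt's sph_kernel; distances and masses are made up):

    import numpy as np

    def cubic_spline(q):
        # Dimensionless cubic-spline shape, q = r / hsml in [0, 1].
        w = np.zeros_like(q)
        m = q <= 0.5
        w[m] = 1.0 - 6.0 * q[m]**2 + 6.0 * q[m]**3
        w[~m] = 2.0 * (1.0 - q[~m])**3
        return w

    def smoothed_density(r2, mass):
        # r2: squared distances to the nearest neighbors (sorted ascending),
        # mass: their masses.  hsml is the distance to the furthest neighbor.
        hsml = np.sqrt(r2[-1])
        lw = cubic_spline(np.sqrt(r2) / hsml)
        dens = np.sum(mass * lw)
        weight = (4.0 / 3.0) * np.pi * hsml**3
        return dens / weight

    rng = np.random.RandomState(0)
    r2 = np.sort(rng.uniform(0.0, 1.0, 64))    # code_length**2, hypothetical
    mass = np.ones(64) * 1.0e-4                # code_mass, hypothetical
    print(smoothed_density(r2, mass))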


https://bitbucket.org/yt_analysis/yt/commits/398b2ee2dac8/
Changeset:   398b2ee2dac8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-10-22 23:56:00+00:00
Summary:     Merging, with a fair amount of refactoring.
Affected #:  230 files

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -186,9 +186,20 @@
     this_f = getattr(frontends_module, frontend)
     field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
     dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
     if frontend == "sph":
         field_info_names = \
           ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
     for dset_name, fi_name in zip(dataset_names, field_info_names):
         fi = getattr(this_f, fi_name)
         nfields = 0

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -580,56 +595,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +666,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -729,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -932,7 +944,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -975,8 +986,11 @@
 
 if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
-    echo "Installing pure-python readline"
-    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
+    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    then
+        echo "Installing pure-python readline"
+        ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
+    fi
 fi
 
 if [ $INST_ENZO -eq 1 ]
@@ -1026,7 +1040,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -39,6 +39,13 @@
         padding-top: 10px;
         padding-bottom: 10px;
     }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
 }
 
 /* 

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -291,7 +291,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], method=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -304,4 +304,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -72,6 +72,8 @@
 * Quantities
 * Callbacks
 
+A list of all available filters, quantities, and callbacks can be found in 
+:ref:`halo_analysis_ref`.  
 All interaction with this analysis can be performed by importing from 
 halo_analysis.
 
@@ -129,7 +131,14 @@
 are center_of_mass and bulk_velocity. Their definitions are available in 
 ``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
 
@@ -154,6 +163,18 @@
    # ... Later on in your script
    hc.add_quantity("my_quantity") 
 
+This quantity will then be accessible for functions called later via the 
+*quantities* dictionary that is associated with the halo object.
+
+.. code-block:: python
+
+   def my_new_function(halo):
+       print halo.quantities["my_quantity"]
+   add_callback("print_quantity", my_new_function)
+
+   # ... Anywhere after "my_quantity" has been called
+   hc.add_callback("print_quantity")
+
 Callbacks
 ^^^^^^^^^
 
@@ -171,10 +192,10 @@
    hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may 
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
 be added by using the syntax shown below. If you think that your 
 callback may be of use to the general community, add it to 
-halo_callbacks.py and issue a pull request
+halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
 

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
   mass. In simulations where the highest-resolution particles all have the 
   same mass (ie: zoom-in grid based simulations), one can set up a particle
   filter to select the lowest mass particles and perform the halo finding
-  only on those.
+  only on those.  See the this cookbook recipe for an example: 
+  :ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -52,7 +52,7 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    hc.create()
-   ad = hc.all_data()
+   ad = hc.halos_ds.all_data()
    masses = ad['particle_mass'][:]
 
 

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -225,12 +225,12 @@
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
-    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
+    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
       (although it can be a subset of that volume specified by a data object
       with the ``data_source`` keyword).  Alternatively, one can specify 
-      a weight_field and different ``style`` values to change the nature
+      a weight_field and different ``method`` values to change the nature
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
@@ -263,7 +263,7 @@
 
    ds = load("my_data")
    sp = ds.sphere('c', (10, 'kpc'))
-   print ad.quantities.angular_momentum_vector()
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d705a81671d5692ed6691b3402115edbe9c98af815af5bb160ddf551bf02c76"
+  "signature": "sha256:427da1e1d02deb543246218dc8cce991268b518b25cfdd5944a4a436695f874b"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -40,11 +40,13 @@
      "source": [
       "We will filter these into young stars and old stars by masking on the ('Stars', 'creation_time') field. \n",
       "\n",
-      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The second argument is a yt data container and is usually the only one used in a filter definition.\n",
+      "In order to do this, we first make a function which applies our desired cut.  This function must accept two arguments: `pfilter` and `data`.  The first argument is a `ParticleFilter` object that contains metadata about the filter its self.  The second argument is a yt data container.\n",
       "\n",
-      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages.\n",
+      "Let's call \"young\" stars only those stars with ages less 5 million years.  Since Tipsy assigns a very large `creation_time` for stars in the initial conditions, we need to also exclude stars with negative ages. \n",
       "\n",
-      "Old stars either formed dynamically in the simulation (ages greater than 5 Myr) or were present in the initial conditions (negative ages)."
+      "Conversely, let's define \"old\" stars as those stars formed dynamically in the simulation with ages greater than 5 Myr.  We also include stars with negative ages, since these stars were included in the simulation initial conditions.\n",
+      "\n",
+      "We make use of `pfilter.filtered_type` so that the filter definition will use the same particle type as the one specified in the call to `add_particle_filter` below.  This makes the filter definition usable for arbitrary particle types.  Since we're only filtering the `\"Stars\"` particle type in this example, we could have also replaced `pfilter.filtered_type` with `\"Stars\"` and gotten the same result."
      ]
     },
     {
@@ -52,12 +54,12 @@
      "collapsed": false,
      "input": [
       "def young_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_and(age.in_units('Myr') <= 5, age >= 0)\n",
       "    return filter\n",
       "\n",
       "def old_stars(pfilter, data):\n",
-      "    age = data.ds.current_time - data[\"Stars\", \"creation_time\"]\n",
+      "    age = data.ds.current_time - data[pfilter.filtered_type, \"creation_time\"]\n",
       "    filter = np.logical_or(age.in_units('Myr') >= 5, age < 0)\n",
       "    return filter"
      ],
@@ -140,4 +142,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -37,7 +37,7 @@
 .. note::
 
    The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
+   http://yt-project.org/data.  See :ref:`quickstart-introduction` for more
    details.
 
 Let us know if you would like to contribute other example notebooks, or have

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Welcome to the yt bootcamp!\n",
-      "\n",
-      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
-      "\n",
-      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
-      "\n",
-      "## Acquiring the datasets for this tutorial\n",
-      "\n",
-      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
-      "\n",
-      "## What's Next?\n",
-      "\n",
-      "The Notebooks are meant to be explored in this order:\n",
-      "\n",
-      "1. Introduction\n",
-      "2. Data Inspection (IsolatedGalaxy dataset)\n",
-      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
-      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
-      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
-      "6. Volume Rendering (IsolatedGalaxy dataset)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "download_datasets = False\n",
-      "if download_datasets:\n",
-      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
-      "    print \"Got enzo_tiny_cosmology\"\n",
-      "    !tar xf enzo_tiny_cosmology.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
-      "    print \"Got Enzo_64\"\n",
-      "    !tar xf Enzo_64.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
-      "    print \"Got IsolatedGalaxy\"\n",
-      "    !tar xf IsolatedGalaxy.tar\n",
-      "    \n",
-      "    print \"All done!\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ /dev/null
@@ -1,384 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Starting Out and Loading Data\n",
-      "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Fields and Facts\n",
-      "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.print_stats()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also tell you the fields it found on disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, all of the fields it thinks it knows how to generate:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.derived_field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt stores information about the domain of the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also convert this into various units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width.in_units(\"kpc\")\n",
-      "print ds.domain_width.in_units(\"au\")\n",
-      "print ds.domain_width.in_units(\"mile\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Mesh Structure\n",
-      "\n",
-      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grid_left_edge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grids[1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g = ds.index.grids[1]\n",
-      "print g"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Grids have dimensions, extents, level, and even a list of Child grids."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.ActiveDimensions"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.LeftEdge, g.RightEdge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Level"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Children"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Advanced Grid Inspection\n",
-      "\n",
-      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
-      "\n",
-      "*This section can be skipped!*"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gs = ds.index.select_grids(ds.index.max_level)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g2 = gs[0]\n",
-      "print g2\n",
-      "print g2.Parent\n",
-      "print g2.get_global_startindex()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print g2[\"density\"][:,:,0]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (g2.Parent.child_mask == 0).sum() * 8\n",
-      "print g2.ActiveDimensions.prod()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "for f in ds.field_list:\n",
-      "    fv = g[f]\n",
-      "    if fv.size == 0: continue\n",
-      "    print f, fv.min(), fv.max()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Examining Data in Regions\n",
-      "\n",
-      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
-      "\n",
-      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10, 'kpc'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.total_mass()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ /dev/null
@@ -1,275 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Simple Visualizations of Data\n",
-      "\n",
-      "Just like in our first notebook, we have to load yt and then some data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "For this notebook, we'll load up a cosmology dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", ds.current_redshift"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
-      "\n",
-      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
-      "\n",
-      "Now we'll zoom and pan a bit."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((0.1, 0.0))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((-0.25, -0.5))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(0.1)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the colormap on a field-by-field basis."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.set_cmap(\"temperature\", \"hot\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "v, c = ds.find_max(\"density\")\n",
-      "p.set_center((c[0], c[1]))\n",
-      "p.zoom(10)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
-      "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
-      "s.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the logging of various fields:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.set_log(\"velocity_magnitude\", True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.annotate_velocity()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Contours can also be overlaid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
-      "s.annotate_contour(\"temperature\")\n",
-      "s.zoom(2.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we can save out to the file system."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.save()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ /dev/null
@@ -1,382 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Data Objects and Time Series Data\n",
-      "\n",
-      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from matplotlib import pylab\n",
-      "from yt.analysis_modules.halo_finding.api import HaloFinder"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Time Series Data\n",
-      "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
-      "\n",
-      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 1: Simple Time Series\n",
-      "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "rho_ex = []\n",
-      "times = []\n",
-      "for ds in ts:\n",
-      "    dd = ds.all_data()\n",
-      "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
-      "rho_ex = np.array(rho_ex)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the minimum and the maximum:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
-      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
-      "pylab.xlabel(\"Time (Gyr)\")\n",
-      "pylab.legend()\n",
-      "pylab.ylim(1e-32, 1e-21)\n",
-      "pylab.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 2: Advanced Time Series\n",
-      "\n",
-      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
-      "\n",
-      "This actually touches a lot of different pieces of machinery in yt.  For every dataset, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units import Msun\n",
-      "\n",
-      "mass = []\n",
-      "zs = []\n",
-      "for ds in ts:\n",
-      "    halos = HaloFinder(ds)\n",
-      "    dd = ds.all_data()\n",
-      "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0*Msun\n",
-      "    for halo in halos:\n",
-      "        sp = halo.get_sphere()\n",
-      "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(ds.current_redshift)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now let's plot them!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogx(zs, mass, '-xb')\n",
-      "pylab.xlabel(\"Redshift\")\n",
-      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
-      "pylab.xlim(max(zs), min(zs))\n",
-      "pylab.ylim(-0.01, .18)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Data Objects\n",
-      "\n",
-      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
-      "\n",
-      "### Ray Queries\n",
-      "\n",
-      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
-      "\n",
-      "To create a ray, we specify the start and end points.\n",
-      "\n",
-      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"dts\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"t\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"x\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Slice Queries\n",
-      "\n",
-      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = ds.find_max(\"density\")\n",
-      "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"]\n",
-      "print sl[\"index\", \"z\"]\n",
-      "print sl[\"pdx\"]\n",
-      "print sl[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
-      "print frb[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.display import Image\n",
-      "Image(filename = \"temp.png\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Off-Axis Slices\n",
-      "\n",
-      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
-      "\n",
-      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Once we have our plot window from our cutting plane, we can show it here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pw.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can, as noted above, do the same with our slice:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pws = sl.to_pw(fields=[\"density\"])\n",
-      "#pws.show()\n",
-      "print pws.plots.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Covering Grids\n",
-      "\n",
-      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
-      "\n",
-      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
-      "\n",
-      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print cg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print scg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ /dev/null
@@ -1,254 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Derived Fields and Profiles\n",
-      "\n",
-      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt import derived_field\n",
-      "from matplotlib import pylab"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Derived Fields\n",
-      "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n",
-      "def _dinos(field, data):\n",
-      "    return data[\"temperature\"] * data[\"velocity_magnitude\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = ds.all_data()\n",
-      "print dd.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.extrema(\"dinosaurs\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can do the same for the average quantities as well."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## A Few Other Quantities\n",
-      "\n",
-      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
-      "bv = sp.quantities.bulk_velocity()\n",
-      "L = sp.quantities.angular_momentum_vector()\n",
-      "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv\n",
-      "print L\n",
-      "print rho_min, rho_max"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Profiles\n",
-      "\n",
-      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
-      "\n",
-      "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
-      "\n",
-      "This first set of commands manually creates a profile object the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`.  We then plot it in a loglog plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
-      "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Temperature $(K)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the `dinosaurs` field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
-      "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Cell mass $(M_\\odot)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
-      "prof.set_unit('cell_mass', 'Msun')\n",
-      "prof.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Field Parameters\n",
-      "\n",
-      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
-      "bv = sp_small.quantities.bulk_velocity()\n",
-      "\n",
-      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
-      "rv1 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "sp.clear_data()\n",
-      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
-      "rv2 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "print bv\n",
-      "print rv1\n",
-      "print rv2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ /dev/null
@@ -1,96 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# A Brief Demo of Volume Rendering\n",
-      "\n",
-      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
-      "\n",
-      "We then add on \"layers\" like an onion.  This function can accept a width (here specified) in data units, and also a color map.  Here we add on four layers.\n",
-      "\n",
-      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -24))\n",
-      "tf.add_layers(4, w=0.01)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
-      "cam.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cam.show(clip_ratio=4)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -25))\n",
-      "tf.add_layers(4, w=0.03)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/data_inspection.rst
--- a/doc/source/bootcamp/data_inspection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _data_inspection:
-
-Data Inspection
----------------
-
-.. notebook:: 2)_Data_Inspection.ipynb

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/data_objects_and_time_series.rst
--- a/doc/source/bootcamp/data_objects_and_time_series.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Data Objects and Time Series
-----------------------------
-
-.. notebook:: 4)_Data_Objects_and_Time_Series.ipynb

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/derived_fields_and_profiles.rst
--- a/doc/source/bootcamp/derived_fields_and_profiles.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Derived Fields and Profiles
----------------------------
-
-.. notebook:: 5)_Derived_Fields_and_Profiles.ipynb

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/index.rst
--- a/doc/source/bootcamp/index.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _bootcamp:
-
-yt Bootcamp
-===========
-
-The bootcamp is a series of worked examples of how to use much of the
-functionality of yt.  These are simple, short introductions to give you a taste
-of what the code can do and are not meant to be detailed walkthroughs.
-
-There are two ways in which you can go through the bootcamp: interactively and 
-non-interactively.  We recommend the interactive method, but if you're pressed
-for time, you can non-interactively go through the linked pages below and view the
-worked examples.
-
-To execute the bootcamp interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
-
-.. code-block:: bash
-
-   hg clone https://bitbucket.org/yt_analysis/yt
-
-Now start the IPython notebook from within the repository:
-
-.. code-block:: bash
-
-   cd yt/doc/source/bootcamp
-   yt notebook
-
-This command will give you information about the notebook server and how to
-access it.  You will basically just pick a password (for security reasons) and then 
-redirect your web browser to point to the notebook server.
-Once you have done so, choose "Introduction" from the list of
-notebooks, which includes an introduction and information about how to download
-the sample data.
-
-.. warning:: The pre-filled out notebooks are *far* less fun than running them
-             yourselves!  Check out the repo and give it a try.
-
-Here are the notebooks, which have been filled in for inspection:
-
-.. toctree::
-   :maxdepth: 1
-
-   introduction
-   data_inspection
-   simple_visualization
-   data_objects_and_time_series
-   derived_fields_and_profiles
-   volume_rendering
-
-.. note::
-
-   The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
-   details.
-
-Let us know if you would like to contribute other example notebooks, or have
-any suggestions for how these can be improved.

diff -r 4e731feebccde8da90fde746dd8c12881f3c0ad0 -r 398b2ee2dac82ac72e99406a36d368938d403f5b doc/source/bootcamp/introduction.rst
--- a/doc/source/bootcamp/introduction.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _bootcamp-introduction:
-
-Introduction
-------------
-
-.. notebook:: 1)_Introduction.ipynb

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b1bf40415421/
Changeset:   b1bf40415421
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-11-10 22:27:21+00:00
Summary:     Responding to comments.
Affected #:  5 files

diff -r 398b2ee2dac82ac72e99406a36d368938d403f5b -r b1bf404154210b70892a5155980ea9d4533ae976 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -352,7 +352,7 @@
 field ``("gas", "temperature")`` for convenience.
 
 Computing the Nth Nearest Neighbor
-------------------------------
+----------------------------------
 
 One particularly useful field that can be created is that of the distance to
 the Nth-nearest neighbor.  This field can then be used as input to smoothing
@@ -374,8 +374,8 @@
    print dd[fn]
 
 Note that ``fn`` here is the "field name" that yt adds.  It will be of the form
-``(ptype, nearest_neighbor_NN)`` where ``NN`` is the integer.  By default this
-is 64, but it can be supplied as the final argument to
+``(ptype, nearest_neighbor_distance_NN)`` where ``NN`` is the integer.  By
+default this is 64, but it can be supplied as the final argument to
 ``add_nearest_neighbor_field``.  For the example above, it would be
 ``nearest_neighbor_distance_64``.
 

diff -r 398b2ee2dac82ac72e99406a36d368938d403f5b -r b1bf404154210b70892a5155980ea9d4533ae976 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -335,6 +335,9 @@
         dt, t = dobj.selector.get_dt(self)
         return dt, t
 
+    def smooth(self, *args, **kwargs):
+        raise NotImplementedError
+
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
         cls = getattr(particle_deposit, "deposit_%s" % method, None)

diff -r 398b2ee2dac82ac72e99406a36d368938d403f5b -r b1bf404154210b70892a5155980ea9d4533ae976 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -160,7 +160,9 @@
             instance, this might include mass, velocity, etc.  
         method : string
             This is the "method name" which will be looked up in the
-            `particle_deposit` namespace as `methodname_deposit`.
+            `particle_deposit` namespace as `methodname_deposit`.  Current
+            methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
+            `weighted_mean`, `mesh_id`, and `nearest`.
 
         Returns
         -------
@@ -212,7 +214,9 @@
             the operation.
         method : string
             This is the "method name" which will be looked up in the
-            `particle_smooth` namespace as `methodname_smooth`.
+            `particle_smooth` namespace as `methodname_smooth`.  Current
+            methods include `volume_weighted`, `nearest`, `idw`,
+            `nth_neighbor`, and `density`.
         create_octree : bool
             Should we construct a new octree for indexing the particles?  In
             cases where we are applying an operation on a subset of the

diff -r 398b2ee2dac82ac72e99406a36d368938d403f5b -r b1bf404154210b70892a5155980ea9d4533ae976 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -575,7 +575,7 @@
     return [field_name]
 
 def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64):
-    field_name = (ptype, "nearest_neighbor_%s" % (nneighbors))
+    field_name = (ptype, "nearest_neighbor_distance_%s" % (nneighbors))
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name].in_units("code_length")
         distances = 0.0 * pos[:,0]

diff -r 398b2ee2dac82ac72e99406a36d368938d403f5b -r b1bf404154210b70892a5155980ea9d4533ae976 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -263,6 +263,13 @@
                      int domain_offset = 0,
                      periodicity = (True, True, True),
                      geometry = "cartesian"):
+        # The other functions in this base class process particles in a way
+        # that results in a modification to the *mesh*.  This function is
+        # designed to process neighboring particles in such a way that a new
+        # *particle* field is defined -- this means that new particle
+        # attributes (*not* mesh attributes) can be created that rely on the
+        # values of nearby particles.  For instance, a smoothing kernel, or a
+        # nearest-neighbor field.
         cdef int nf, i, j, k, dims[3], n
         cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
         cdef np.float64_t **octree_field_pointers
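
The comment block added above draws the distinction between smoothing
operations that fill in *mesh* values and this new path, which produces a
per-*particle* quantity from each particle's neighbors.  As a rough,
brute-force illustration of the kind of quantity the ``nth_neighbor`` method
produces (the helper below is purely illustrative and not part of this
changeset; the real implementation uses the octree neighbor search and never
builds the full N-by-N distance matrix):

    import numpy as np

    def nth_neighbor_distance_brute(positions, nneighbors=64):
        # Brute-force O(N**2) reference: for every particle, the distance to
        # its nneighbors-th nearest neighbor.  Row i of "dists" holds particle
        # i's distances to all particles, including itself at distance zero.
        diffs = positions[:, None, :] - positions[None, :, :]
        dists = np.sqrt((diffs ** 2).sum(axis=-1))
        dists.sort(axis=1)
        k = min(nneighbors, positions.shape[0] - 1)
        return dists[:, k]

    # Toy usage on random positions.
    pos = np.random.random((256, 3))
    print(nth_neighbor_distance_brute(pos, nneighbors=8))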


https://bitbucket.org/yt_analysis/yt/commits/76da17d4fec1/
Changeset:   76da17d4fec1
Branch:      yt
User:        MatthewTurk
Date:        2014-11-10 22:34:48+00:00
Summary:     Merging from the N-th neighbor PR.
Affected #:  12 files

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -350,3 +350,35 @@
 ``Temperature`` of the ``Gas`` particle type would be ``("deposit",
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
+
+Computing the Nth Nearest Neighbor
+----------------------------------
+
+One particularly useful field that can be created is that of the distance to
+the Nth-nearest neighbor.  This field can then be used as input to smoothing
+operations, in the case when a particular particle type does not have an
+associated smoothing length or other length estimate.
+
+yt defines this field as a plugin, and it can be added like so:
+
+.. code-block:: python
+
+   import yt
+   from yt.fields.particle_fields import \
+     add_nearest_neighbor_field
+
+   ds = yt.load("snapshot_033/snap_033.0.hdf5")
+   fn, = add_nearest_neighbor_field("all", "particle_position", ds)
+
+   dd = ds.all_data()
+   print dd[fn]
+
+Note that ``fn`` here is the "field name" that yt adds.  It will be of the form
+``(ptype, nearest_neighbor_distance_NN)`` where ``NN`` is the integer.  By
+default this is 64, but it can be supplied as the final argument to
+``add_nearest_neighbor_field``.  For the example above, it would be
+``nearest_neighbor_distance_64``.
+
+This can then be used as input to the function
+``add_volume_weighted_smoothed_field``, which can enable smoothing particle
+types that would normally not be smoothed.
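
As the added documentation notes, the neighbor count defaults to 64 but can be
passed explicitly as the final argument.  A short sketch of that (same sample
snapshot as the doc example above; the value of 32 is arbitrary and only meant
to show how the resulting field name encodes the neighbor count):

    import yt
    from yt.fields.particle_fields import add_nearest_neighbor_field

    ds = yt.load("snapshot_033/snap_033.0.hdf5")
    # Non-default neighbor count; the returned field name encodes it.
    fn, = add_nearest_neighbor_field("all", "particle_position", ds,
                                     nneighbors=32)
    print(fn)   # ("all", "nearest_neighbor_distance_32")

    dd = ds.all_data()
    print(dd[fn])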

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -335,6 +335,9 @@
         dt, t = dobj.selector.get_dt(self)
         return dt, t
 
+    def smooth(self, *args, **kwargs):
+        raise NotImplementedError
+
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
         cls = getattr(particle_deposit, "deposit_%s" % method, None)

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -141,6 +141,33 @@
         return self._domain_ind
 
     def deposit(self, positions, fields = None, method = None):
+        r"""Operate on the mesh, in a particle-against-mesh fashion, with
+        exclusively local input.
+
+        This uses the octree indexing system to call a "deposition" operation
+        (defined in yt/geometry/particle_deposit.pyx) that can take input from
+        several particles (local to the mesh) and construct some value on the
+        mesh.  The canonical example is to sum the total mass in a mesh cell
+        and then divide by its volume.
+
+        Parameters
+        ----------
+        positions : array_like (Nx3)
+            The positions of all of the particles to be examined.  A new
+            indexed octree will be constructed on these particles.
+        fields : list of arrays
+            All the necessary fields for computing the particle operation.  For
+            instance, this might include mass, velocity, etc.  
+        method : string
+            This is the "method name" which will be looked up in the
+            `particle_deposit` namespace as `methodname_deposit`.  Current
+            methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
+            `weighted_mean`, `mesh_id`, and `nearest`.
+
+        Returns
+        -------
+        List of fortran-ordered, mesh-like arrays.
+        """
         # Here we perform our particle deposition.
         if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
@@ -165,6 +192,43 @@
 
     def smooth(self, positions, fields = None, index_fields = None,
                method = None, create_octree = False, nneighbors = 64):
+        r"""Operate on the mesh, in a particle-against-mesh fashion, with
+        non-local input.
+
+        This uses the octree indexing system to call a "smoothing" operation
+        (defined in yt/geometry/particle_smooth.pyx) that can take input from
+        several (non-local) particles and construct some value on the mesh.
+        The canonical example is to conduct a smoothing kernel operation on the
+        mesh.
+
+        Parameters
+        ----------
+        positions : array_like (Nx3)
+            The positions of all of the particles to be examined.  A new
+            indexed octree will be constructed on these particles.
+        fields : list of arrays
+            All the necessary fields for computing the particle operation.  For
+            instance, this might include mass, velocity, etc.  
+        index_fields : list of arrays
+            All of the fields defined on the mesh that may be used as input to
+            the operation.
+        method : string
+            This is the "method name" which will be looked up in the
+            `particle_smooth` namespace as `methodname_smooth`.  Current
+            methods include `volume_weighted`, `nearest`, `idw`,
+            `nth_neighbor`, and `density`.
+        create_octree : bool
+            Should we construct a new octree for indexing the particles?  In
+            cases where we are applying an operation on a subset of the
+            particles used to construct the mesh octree, this will ensure that
+            we are able to find and identify all relevant particles.
+        nneighbors : int, default 64
+            The number of neighbors to examine during the process.
+
+        Returns
+        -------
+        List of fortran-ordered, mesh-like arrays.
+        """
         # Here we perform our particle deposition.
         positions.convert_to_units("code_length")
         if create_octree:
@@ -177,7 +241,8 @@
                 self.ds.domain_left_edge,
                 self.ds.domain_right_edge,
                 over_refine = self._oref)
-            particle_octree.n_ref = nneighbors
+            # This should ensure we get everything within one neighbor of home.
+            particle_octree.n_ref = nneighbors * 2
             particle_octree.add(morton)
             particle_octree.finalize()
             pdom_ind = particle_octree.domain_ind(self.selector)
@@ -208,6 +273,77 @@
             vals = np.asfortranarray(vals)
         return vals
 
+    def particle_operation(self, positions, fields = None,
+            method = None, nneighbors = 64):
+        r"""Operate on particles, in a particle-against-particle fashion.
+
+        This uses the octree indexing system to call a "smoothing" operation
+        (defined in yt/geometry/particle_smooth.pyx) that expects to be called
+        in a particle-by-particle fashion.  For instance, the canonical example
+        of this would be to compute the Nth nearest neighbor, or to compute the
+        density for a given particle based on some kernel operation.
+
+        Many of the arguments to this are identical to those used in the smooth
+        and deposit functions.  Note that the `fields` argument must not be
+        empty, as these fields will be modified in place.
+
+        Parameters
+        ----------
+        positions : array_like (Nx3)
+            The positions of all of the particles to be examined.  A new
+            indexed octree will be constructed on these particles.
+        fields : list of arrays
+            All the necessary fields for computing the particle operation.  For
+            instance, this might include mass, velocity, etc.  One of these
+            will likely be modified in place.
+        method : string
+            This is the "method name" which will be looked up in the
+            `particle_smooth` namespace as `methodname_smooth`.
+        nneighbors : int, default 64
+            The number of neighbors to examine during the process.
+
+        Returns
+        -------
+        Nothing.
+
+        """
+        # Here we perform our particle deposition.
+        positions.convert_to_units("code_length")
+        morton = compute_morton(
+            positions[:,0], positions[:,1], positions[:,2],
+            self.ds.domain_left_edge,
+            self.ds.domain_right_edge)
+        morton.sort()
+        particle_octree = ParticleOctreeContainer([1, 1, 1],
+            self.ds.domain_left_edge,
+            self.ds.domain_right_edge,
+            over_refine = 1)
+        particle_octree.n_ref = nneighbors * 2
+        particle_octree.add(morton)
+        particle_octree.finalize()
+        pdom_ind = particle_octree.domain_ind(self.selector)
+        if fields is None: fields = []
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        mdom_ind = self.domain_ind
+        nvals = (nz, nz, nz, (mdom_ind >= 0).sum())
+        op = cls(nvals, len(fields), nneighbors)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_particles(particle_octree, pdom_ind, positions, 
+            fields, self.domain_id, self._domain_offset, self.ds.periodicity,
+            self.ds.geometry)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
     @cell_count_cache
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(dobj.selector, domain_id = self.domain_id,
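
The three docstrings added above describe the low-level entry points:
``deposit`` (particle-against-mesh with local input), ``smooth``
(particle-against-mesh with non-local input) and the new ``particle_operation``
(particle-against-particle).  In practice these are usually reached through
derived fields rather than called directly; the existing ``("deposit", ...)``
field namespace, for example, is built on ``deposit`` with the ``count`` and
``sum`` methods listed in its docstring.  A minimal sketch of that indirect
path (the sample dataset is the IsolatedGalaxy output used in the bootcamp
notebooks removed earlier in this series):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()

    # Both fields below are computed by calling deposit() on each spatial
    # chunk: "all_count" uses the "count" method, while "all_density" sums
    # particle mass into each cell and divides by the cell volume -- the
    # "canonical example" from the docstring above.
    print(ad["deposit", "all_count"])
    print(ad["deposit", "all_density"])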

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -158,6 +158,9 @@
     def smooth(self, *args, **kwargs):
         return np.random.random((self.nd, self.nd, self.nd))
 
+    def particle_operation(self, *args, **kwargs):
+        return None
+
     def _read_data(self, field_name):
         self.requested.append(field_name)
         if hasattr(self.ds, "field_info"):

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -574,3 +574,40 @@
                        units = field_units)
     return [field_name]
 
+def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors = 64):
+    field_name = (ptype, "nearest_neighbor_distance_%s" % (nneighbors))
+    def _nth_neighbor(field, data):
+        pos = data[ptype, coord_name].in_units("code_length")
+        distances = 0.0 * pos[:,0]
+        data.particle_operation(pos, [distances],
+                         method="nth_neighbor",
+                         nneighbors = nneighbors)
+        # Now some quick unit conversions.
+        return distances
+    registry.add_field(field_name, function = _nth_neighbor,
+                       validators = [ValidateSpatial(0)],
+                       particle_type = True,
+                       units = "code_length")
+    return [field_name]
+
+def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64):
+    field_name = (ptype, "smoothed_density")
+    field_units = registry[ptype, mass_name].units
+    def _nth_neighbor(field, data):
+        pos = data[ptype, coord_name].in_units("code_length")
+        mass = data[ptype, mass_name].in_units("g")
+        densities = mass * 0.0
+        data.particle_operation(pos, [mass, densities],
+                         method="density",
+                         nneighbors = nneighbors)
+        ones = pos.prod(axis=1) # Get us in code_length**3
+        ones[:] = 1.0
+        densities /= ones
+        # Now some quick unit conversions.
+        return densities
+    registry.add_field(field_name, function = _nth_neighbor,
+                       validators = [ValidateSpatial(0)],
+                       particle_type = True,
+                       units = "g/cm**3")
+    return [field_name]
+
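
``add_density_kernel`` is added here alongside ``add_nearest_neighbor_field``
but is not covered by the fields.rst addition above.  A rough registration
sketch based on its signature (the Gadget snapshot and the ``PartType0``
particle type are placeholders; following the doc example for
``add_nearest_neighbor_field``, the dataset itself is passed as the registry,
though ``ds.field_info`` may be the more appropriate object -- that detail is
an assumption, not something settled by this changeset):

    import yt
    from yt.fields.particle_fields import add_density_kernel

    ds = yt.load("snapshot_033/snap_033.0.hdf5")
    # Register a kernel-smoothed density for gas particles; the field name is
    # fixed to (ptype, "smoothed_density") by the implementation above.
    fn, = add_density_kernel("PartType0", "particle_position",
                             "particle_mass", ds, nneighbors=64)
    print(fn)   # ("PartType0", "smoothed_density")

    dd = ds.all_data()
    print(dd[fn])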

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -71,7 +71,7 @@
                   int max_level = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors,
-                         Oct *o)
+                         Oct *o, bint periodicity[3])
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -334,7 +334,8 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o):
+    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o,
+                         bint periodicity[3]):
         cdef Oct* candidate
         nn = 0
         # We are going to do a brute-force search here.
@@ -359,14 +360,23 @@
         my_list = olist = OctList_append(NULL, o)
         for i in range(3):
             npos[0] = (oi.ipos[0] + (1 - i))
-            if npos[0] < 0: npos[0] += ndim[0]
-            if npos[0] >= ndim[0]: npos[0] -= ndim[0]
+            if not periodicity[0] and not \
+               (0 <= npos[0] < ndim[0]):
+                continue
+            elif npos[0] < 0: npos[0] += ndim[0]
+            elif npos[0] >= ndim[0]: npos[0] -= ndim[0]
             for j in range(3):
                 npos[1] = (oi.ipos[1] + (1 - j))
-                if npos[1] < 0: npos[1] += ndim[1]
-                if npos[1] >= ndim[1]: npos[1] -= ndim[1]
+                if not periodicity[1] and not \
+                   (0 <= npos[1] < ndim[1]):
+                    continue
+                elif npos[1] < 0: npos[1] += ndim[1]
+                elif npos[1] >= ndim[1]: npos[1] -= ndim[1]
                 for k in range(3):
                     npos[2] = (oi.ipos[2] + (1 - k))
+                    if not periodicity[2] and not \
+                       (0 <= npos[2] < ndim[2]):
+                        continue
                     if npos[2] < 0: npos[2] += ndim[2]
                     if npos[2] >= ndim[2]: npos[2] -= ndim[2]
                     # Now we have our npos, which we just need to find.
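
The change above makes the brute-force oct neighbor search respect
non-periodic boundaries: along a periodic axis an out-of-range neighbor index
wraps back into the domain, while along a non-periodic axis the candidate is
simply skipped.  A pure-Python sketch of that rule (illustrative only; the
real code applies it to oct indices inside the Cython loops above):

    def neighbor_positions(ipos, ndim, periodicity):
        # Enumerate the 3x3x3 block of neighbor indices around ipos, wrapping
        # along periodic axes and dropping candidates that fall off the edge
        # of a non-periodic axis.
        out = []
        for di in (1, 0, -1):
            for dj in (1, 0, -1):
                for dk in (1, 0, -1):
                    npos = [ipos[0] + di, ipos[1] + dj, ipos[2] + dk]
                    keep = True
                    for ax in range(3):
                        if 0 <= npos[ax] < ndim[ax]:
                            continue
                        if not periodicity[ax]:
                            keep = False      # no neighbor past a hard boundary
                            break
                        npos[ax] %= ndim[ax]  # wrap across a periodic boundary
                    if keep:
                        out.append(tuple(npos))
        return out

    # With x non-periodic, candidates at x == -1 are dropped when ipos has x == 0.
    print(neighbor_positions((0, 3, 2), (4, 4, 4), (False, True, True)))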

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -62,6 +62,15 @@
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, Oct **oct = ?)
+    cdef void neighbor_process_particle(self, np.float64_t cpos[3],
+                               np.float64_t *ppos,
+                               np.float64_t **fields, 
+                               np.int64_t *doffs, np.int64_t **nind, 
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset,
+                               np.float64_t **index_fields,
+                               OctreeContainer octree, np.int64_t domain_id,
+                               int *nsize)
     cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
                             np.float64_t cpos[3])
     cdef void neighbor_reset(self)

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -253,6 +253,126 @@
         if nind != NULL:
             free(nind)
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_particles(self, OctreeContainer particle_octree,
+                     np.ndarray[np.int64_t, ndim=1] pdom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0,
+                     periodicity = (True, True, True),
+                     geometry = "cartesian"):
+        # The other functions in this base class process particles in a way
+        # that results in a modification to the *mesh*.  This function is
+        # designed to process neighboring particles in such a way that a new
+        # *particle* field is defined -- this means that new particle
+        # attributes (*not* mesh attributes) can be created that rely on the
+        # values of nearby particles.  For instance, a smoothing kernel, or a
+        # nearest-neighbor field.
+        cdef int nf, i, j, k, dims[3], n
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
+        cdef np.float64_t **octree_field_pointers
+        cdef int nsize = 0
+        cdef np.int64_t *nind = NULL
+        cdef OctInfo moi, poi
+        cdef Oct *oct, **neighbors = NULL
+        cdef np.int64_t nneighbors, numpart, offset, local_ind
+        cdef np.int64_t moff_p, moff_m, pind0
+        cdef np.int64_t *doffs, *pinds, *pcounts, poff
+        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        cdef np.ndarray[np.float64_t, ndim=2] cart_positions
+        if geometry == "cartesian":
+            self.pos_setup = cart_coord_setup
+            cart_positions = positions
+        elif geometry == "spherical":
+            self.pos_setup = spherical_coord_setup
+            cart_positions = np.empty((positions.shape[0], 3), dtype="float64")
+
+            cart_positions[:,0] = positions[:,0] * \
+                                  np.sin(positions[:,1]) * \
+                                  np.cos(positions[:,2])
+            cart_positions[:,1] = positions[:,0] * \
+                                  np.sin(positions[:,1]) * \
+                                  np.sin(positions[:,2])
+            cart_positions[:,2] = positions[:,0] * \
+                                  np.cos(positions[:,1])
+            periodicity = (False, False, False)
+        else:
+            raise NotImplementedError
+        numpart = positions.shape[0]
+        pcount = np.zeros_like(pdom_ind)
+        doff = np.zeros_like(pdom_ind) - 1
+        moff_p = particle_octree.get_domain_offset(domain_id + domain_offset)
+        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        if fields is None:
+            fields = []
+        nf = len(fields)
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        for i in range(3):
+            self.DW[i] = (particle_octree.DRE[i] - particle_octree.DLE[i])
+            self.periodicity[i] = periodicity[i]
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = particle_octree.get(pos)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            # Note that this has to be our local index, not our in-file index.
+            # This is the particle count, which we'll use once we have sorted
+            # the particles to calculate the offsets into each oct's particles.
+            offset = oct.domain_ind - moff_p
+            pcount[offset] += 1
+            pdoms[i] = offset # We store the *actual* offset.
+        # Now we have oct assignments.  Let's sort them.
+        # Note that what we will be providing to our processing functions will
+        # actually be indirectly-sorted fields.  This preserves memory at the
+        # expense of additional pointer lookups.
+        pind = np.argsort(pdoms)
+        pind = np.asarray(pind, dtype='int64', order='C')
+        # We now have all the oct-0 particle indices in order, then the
+        # oct-1 indices, and so on.
+        # This now gives us the indices to the particles for each domain.
+        for i in range(positions.shape[0]):
+            # This value, poff, is the index of the particle in the *unsorted*
+            # arrays.
+            poff = pind[i] 
+            offset = pdoms[poff] 
+            # If we have yet to assign the starting index to this oct, we do so
+            # now.
+            if doff[offset] < 0: doff[offset] = i
+        #print domain_id, domain_offset, moff_p, moff_m
+        #raise RuntimeError
+        # Now doff is full of offsets to the first entry in the pind that
+        # refers to that oct's particles.
+        ppos = <np.float64_t *> positions.data
+        cart_pos = <np.float64_t *> cart_positions.data
+        doffs = <np.int64_t*> doff.data
+        pinds = <np.int64_t*> pind.data
+        pcounts = <np.int64_t*> pcount.data
+        cdef int maxnei = 0
+        cdef int nproc = 0
+        for i in range(doff.shape[0]):
+            if doff[i] < 0: continue
+            offset = pind[doff[i]]
+            for j in range(3):
+                pos[j] = positions[offset, j]
+            for j in range(pcount[i]):
+                pind0 = pind[doff[i] + j]
+                for k in range(3):
+                    pos[k] = positions[pind0, k]
+                self.neighbor_process_particle(pos, cart_pos, field_pointers,
+                            doffs, &nind, pinds, pcounts, pind0,
+                            NULL, particle_octree, domain_id, &nsize)
+        #print "VISITED", visited.sum(), visited.size,
+        #print 100.0*float(visited.sum())/visited.size
+        if nind != NULL:
+            free(nind)
+
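
The bookkeeping above (tag each particle with its oct, argsort the tags, then record the first sorted index for each oct) is a plain argsort-plus-offsets bucketing. A minimal NumPy sketch of the same pattern, with made-up sample values rather than anything taken from this changeset, is:

    import numpy as np

    # Illustrative bucket ids (oct indices) for six particles; the -1 "skip"
    # case used in process_particles is omitted for brevity.
    pdoms = np.array([2, 0, 2, 1, 0, 2], dtype="int64")
    pcount = np.bincount(pdoms, minlength=3)        # particles per oct
    pind = np.argsort(pdoms).astype("int64")        # indirect sort by oct
    doff = np.full(pcount.shape[0], -1, dtype="int64")
    for i, p in enumerate(pind):
        if doff[pdoms[p]] < 0:                      # first sorted index for this oct
            doff[pdoms[p]] = i

    # The particles that live in oct b are indexed by pind[doff[b]:doff[b] + pcount[b]].
    for b in range(pcount.shape[0]):
        print(b, pind[doff[b]:doff[b] + pcount[b]])
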
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id,
@@ -268,7 +388,7 @@
         if nind[0] == NULL:
             nsize[0] = 27
             nind[0] = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize[0])
-        neighbors = octree.neighbors(&oi, &nneighbors, ooct)
+        neighbors = octree.neighbors(&oi, &nneighbors, ooct, self.periodicity)
         # Now we have all our neighbors.  And, we should be set for what
         # else we need to do.
         if nneighbors > nsize[0]:
@@ -309,7 +429,7 @@
     cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
                             np.float64_t cpos[3]):
         cdef NeighborList *cur
-        cdef int i
+        cdef int i, j
         # _c means candidate (what we're evaluating)
         # _o means other (the item in the list)
         cdef np.float64_t r2_c, r2_o
@@ -334,13 +454,14 @@
         # Early terminate
         if r2_c < 0: return
         pn_c = pn
-        for i in range((self.curn - 1), -1, -1):
+        for j in range(1, self.maxn + 1):
+            i = self.maxn - j
             # First we evaluate against i.  If our candidate radius is greater
             # than the one we're inspecting, we quit.
             cur = &self.neighbors[i]
             r2_o = cur.r2
             pn_o = cur.pn
-            if r2_c >= r2_o:
+            if r2_c > r2_o:
                 break
             # Now we know we need to swap them.  First we assign our candidate
             # values to cur.
@@ -419,6 +540,30 @@
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
 
+    cdef void neighbor_process_particle(self, np.float64_t cpos[3],
+                               np.float64_t *ppos,
+                               np.float64_t **fields,
+                               np.int64_t *doffs, np.int64_t **nind,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset,
+                               np.float64_t **index_fields,
+                               OctreeContainer octree, 
+                               np.int64_t domain_id, int *nsize):
+        # Here cpos is the position of a single particle.  We convert it with
+        # pos_setup, gather its nearest neighbors from the particle octree,
+        # and hand the neighbor list off to process() to fill in the new
+        # particle field.
+        cdef int i, j, k, ntot, nntot, m, dim[3]
+        cdef Oct *oct = NULL
+        cdef np.int64_t nneighbors = 0
+        i = j = k = 0
+        dim[0] = dim[1] = dim[2] = 1
+        cdef np.float64_t opos[3]
+        self.pos_setup(cpos, opos)
+        nneighbors = self.neighbor_search(opos, octree,
+                        nind, nsize, nneighbors, domain_id, &oct)
+        self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos, opos)
+        self.process(offset, i, j, k, dim, opos, fields, index_fields)
+
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
     cdef np.float64_t **fp
     cdef public object vals
@@ -556,3 +701,53 @@
         return
 
 idw_smooth = IDWInterpolationSmooth
+
+cdef class NthNeighborDistanceSmooth(ParticleSmoothOperation):
+
+    def initialize(self):
+        return
+
+    def finalize(self):
+        return
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields,
+                      np.float64_t **index_fields):
+        cdef np.float64_t max_r
+        # We assume "offset" here is the particle index.
+        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        fields[0][offset] = max_r
+
+nth_neighbor_smooth = NthNeighborDistanceSmooth
+
+cdef class SmoothedDensityEstimate(ParticleSmoothOperation):
+    def initialize(self):
+        return
+
+    def finalize(self):
+        return
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields,
+                      np.float64_t **index_fields):
+        cdef np.float64_t r2, hsml, dens, mass, weight, lw
+        cdef int pn
+        # We assume "offset" here is the particle index.
+        hsml = sqrt(self.neighbors[self.curn-1].r2)
+        dens = 0.0
+        weight = 0.0
+        for pn in range(self.curn):
+            mass = fields[0][self.neighbors[pn].pn]
+            r2 = self.neighbors[pn].r2
+            lw = sph_kernel(sqrt(r2) / hsml)
+            dens += mass * lw
+        weight = (4.0/3.0) * 3.1415926 * hsml**3
+        fields[1][offset] = dens/weight
+
+density_smooth = SmoothedDensityEstimate
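
The quantity computed by SmoothedDensityEstimate above is a gather-style SPH density estimate: sum the kernel-weighted neighbor masses and divide by the kernel volume (4/3)*pi*hsml**3, where hsml is the distance to the furthest of the gathered neighbors. A rough standalone sketch, with a cubic-spline shape standing in for sph_kernel (an assumption about that helper, not its actual definition), might look like:

    import numpy as np

    def cubic_spline(q):
        # Cubic-spline kernel shape; assumed stand-in for sph_kernel.
        q = np.asarray(q, dtype="float64")
        w = np.zeros_like(q)
        inner = q <= 0.5
        outer = (q > 0.5) & (q <= 1.0)
        w[inner] = 1.0 - 6.0 * q[inner]**2 + 6.0 * q[inner]**3
        w[outer] = 2.0 * (1.0 - q[outer])**3
        return w

    def smoothed_density(masses, r2):
        # masses: neighbor particle masses; r2: squared distances to them.
        hsml = np.sqrt(r2.max())                  # distance to furthest neighbor
        weights = cubic_spline(np.sqrt(r2) / hsml)
        volume = (4.0 / 3.0) * np.pi * hsml**3    # kernel volume
        return (masses * weights).sum() / volume
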

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/geometry/tests/test_neighbor_search.py
--- /dev/null
+++ b/yt/geometry/tests/test_neighbor_search.py
@@ -0,0 +1,38 @@
+from yt.fields.particle_fields import \
+    add_nearest_neighbor_field
+from yt.testing import *
+
+def test_neighbor_search():
+    np.random.seed(0x4d3d3d3)
+    ds = fake_particle_ds(npart = 16**3)
+    ds.periodicity = (True, True, True)
+    ds.index
+    fn, = add_nearest_neighbor_field("all", "particle_position", ds)
+    dd = ds.all_data()
+    nearest_neighbors = dd[fn]
+    pos = dd["particle_position"]
+    all_neighbors = np.zeros_like(nearest_neighbors)
+    any_eq = np.zeros(pos.shape[0], dtype='bool')
+    min_in = np.zeros(pos.shape[0], dtype='int64')
+    for i in xrange(pos.shape[0]):
+        dd.set_field_parameter("center", pos[i,:])
+        #radius = dd["particle_radius"]
+        #radius.sort()
+        r2 = (pos[:,0]*pos[:,0])*0
+        for j in range(3):
+            DR = (pos[i,j] - pos[:,j])
+            DRo = DR.copy()
+            DR[DRo >  ds.domain_width[j]/2.0] -= ds.domain_width[j]
+            DR[DRo < -ds.domain_width[j]/2.0] += ds.domain_width[j]
+            r2 += DR*DR
+        radius = np.sqrt(r2)
+        iii = np.argsort(radius)
+        radius.sort()
+        assert(radius[0] == 0.0)
+        all_neighbors[i] = radius[63]
+        any_eq[i] = np.any( np.abs(radius - nearest_neighbors[i]) < 1e-7 )
+        min_in[i] = np.argmin(np.abs(radius - nearest_neighbors[i]))
+        #if i == 34: raise RuntimeError
+        #dd.field_data.pop(("all", "particle_radius"))
+    yield assert_equal, (min_in == 63).sum(), min_in.size
+    yield assert_array_almost_equal, nearest_neighbors, all_neighbors
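
The per-axis wrapping in the loop above is the minimum-image convention for distances in a periodic box; isolated into a helper (names here are illustrative, not part of the test), it might read:

    import numpy as np

    def periodic_radius(center, positions, domain_width):
        # Minimum-image distance from `center` to every row of `positions`,
        # assuming a fully periodic box of size `domain_width` per axis.
        dr = positions - center
        dr = np.where(dr > domain_width / 2.0, dr - domain_width, dr)
        dr = np.where(dr < -domain_width / 2.0, dr + domain_width, dr)
        return np.sqrt((dr * dr).sum(axis=1))
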

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -193,6 +193,40 @@
         data.append(gdata)
     return load_amr_grids(data, [32, 32, 32], 1.0)
 
+def fake_particle_ds(
+        fields = ("particle_position_x",
+                  "particle_position_y",
+                  "particle_position_z",
+                  "particle_mass", 
+                  "particle_velocity_x",
+                  "particle_velocity_y",
+                  "particle_velocity_z"),
+        units = ('cm', 'cm', 'cm', 'g', 'cm/s', 'cm/s', 'cm/s'),
+        negative = (False, False, False, False, True, True, True),
+        npart = 16**3, length_unit=1.0):
+    from yt.frontends.stream.api import load_particles
+    if not iterable(negative):
+        negative = [negative for f in fields]
+    assert(len(fields) == len(negative))
+    offsets = []
+    for n in negative:
+        if n:
+            offsets.append(0.5)
+        else:
+            offsets.append(0.0)
+    data = {}
+    for field, offset, u in zip(fields, offsets, units):
+        if "position" in field:
+            v = np.random.normal(loc=0.5, scale=0.25, size=npart)
+            np.clip(v, 0.0, 1.0, v)
+        else:
+            v = (np.random.random(npart) - offset)
+        data[field] = (v, u)
+    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
+    ds = load_particles(data, 1.0, bbox=bbox)
+    return ds
+
+
+
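
A quick usage sketch for the new helper; the field access shown is ordinary yt usage, illustrative rather than prescribed by this changeset:

    from yt.testing import fake_particle_ds

    ds = fake_particle_ds(npart=8**3)          # 512 random particles in a unit box
    ad = ds.all_data()
    print(ad["particle_mass"].shape)           # (512,)
    print(float(ad["particle_position_x"].max()))  # positions lie within [0, 1]
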
 def expand_keywords(keywords, full=False):
     """
     expand_keywords is a means for testing all possible keyword

diff -r feba284df7fea6ac46d0329b9ed2a0bfde6487a2 -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -576,7 +576,8 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Now we have our primary oct, so we will get its neighbors.
-            neighbors = octree.neighbors(&oi, &nneighbors, oct)
+            neighbors = octree.neighbors(&oi, &nneighbors, oct,
+                                self.periodicity)
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.
             if nneighbors > nsize:


https://bitbucket.org/yt_analysis/yt/commits/a11304472c94/
Changeset:   a11304472c94
Branch:      yt
User:        MatthewTurk
Date:        2014-11-10 23:40:03+00:00
Summary:     Adding in not-implemented particle_operation
Affected #:  1 file

diff -r 76da17d4fec15eb2115904c4670deb2c4dbfbce6 -r a11304472c9417432d83266ef98b38c12dac2298 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -338,6 +338,9 @@
     def smooth(self, *args, **kwargs):
         raise NotImplementedError
 
+    def particle_operation(self, *args, **kwargs):
+        raise NotImplementedError
+
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
         cls = getattr(particle_deposit, "deposit_%s" % method, None)


https://bitbucket.org/yt_analysis/yt/commits/a4af5c5f68c4/
Changeset:   a4af5c5f68c4
Branch:      yt
User:        MatthewTurk
Date:        2014-11-14 19:32:04+00:00
Summary:     Merging
Affected #:  2 files

diff -r a11304472c9417432d83266ef98b38c12dac2298 -r a4af5c5f68c4daacd7fd48140bee6876918c4b20 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -630,12 +630,30 @@
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
             try:
-                param, sep, vals = [v.rstrip() for v in line.partition(' ')]
-                #param, sep, vals = map(rstrip,line.partition(' '))
+                param, sep, vals = line.partition('=')
+                if not sep:
+                    # No = sign present, so split by space instead
+                    param, sep, vals = line.partition(' ')
+                param = param.strip()
+                vals = vals.strip()
+                if not param:  # skip blank lines
+                    continue
+                if param[0] == '#':  # skip comment lines
+                    continue
+                if param[0] == '[':  # skip stanza headers
+                    continue
+                vals = vals.partition("#")[0] # strip trailing comments
+                try:
+                    self.parameters[param] = np.int64(vals)
+                except ValueError:
+                    try:
+                        self.parameters[param] = np.float64(vals)
+                    except ValueError:
+                        self.parameters[param] = vals
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
             if param == "GAMMA":
-                self.gamma = vals
+                self.gamma = np.float64(vals)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
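
The coercion cascade used above, try an integer, then a float, then fall back to the raw string, can be read in isolation as a small helper; a hedged sketch with a hypothetical function name:

    import numpy as np

    def coerce_parameter(value):
        # Mirror of the int -> float -> string fallback used when reading
        # the Chombo inputs file: prefer the narrowest type that parses.
        value = value.partition("#")[0].strip()   # strip trailing comments
        try:
            return np.int64(value)
        except ValueError:
            try:
                return np.float64(value)
            except ValueError:
                return value

    # coerce_parameter("128") -> 128
    # coerce_parameter("0.3 # cfl") -> 0.3
    # coerce_parameter("plt") -> "plt"
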

diff -r a11304472c9417432d83266ef98b38c12dac2298 -r a4af5c5f68c4daacd7fd48140bee6876918c4b20 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -24,10 +24,9 @@
 from yt.frontends.boxlib.fields import \
     rho_units, \
     mom_units, \
-    eden_units, \
-    _thermal_energy_density, \
-    _thermal_energy, \
-    _temperature
+    eden_units
+
+from yt.utilities.exceptions import YTFieldNotFound
 
 rho_units = "code_mass / code_length**3"
 mom_units = "code_mass / (code_time * code_length**2)"
@@ -82,20 +81,70 @@
     )
 
     def setup_fluid_fields(self):
+        def _thermal_energy_density(field, data):
+            try:
+                return data['energy-density'] - data['kinetic_energy_density'] - \
+                    data["magnetic_energy_density"]
+            except YTFieldNotFound:
+                return data['energy-density'] - data['kinetic_energy_density']
+
+        def _thermal_energy(field, data):
+            return data['thermal_energy_density']/data['density']
+
+        def _magnetic_energy_density(field, data):
+            ret = data["X-magnfield"]**2
+            if data.ds.dimensionality > 1:
+                ret = ret + data["Y-magnfield"]**2
+            if data.ds.dimensionality > 2:
+                ret = ret + data["Z-magnfield"]**2
+            return ret/2.0
+
+        def _magnetic_energy(field, data):
+            return data['magnetic_energy_density']/data['density']
+
+        def _kinetic_energy_density(field, data):
+            p2 = data['X-momentum']**2
+            if data.ds.dimensionality > 1:
+                p2 = p2 + data["Y-momentum"]**2
+            if data.ds.dimensionality > 2:
+                p2 = p2 + data["Z-momentum"]**2
+            return 0.5 * p2/data['density']
+
+        def _kinetic_energy(field, data):
+            return data['kinetic_energy_density']/data['density']
+
+        def _temperature(field, data):
+            c_v = data.ds.quan(data.ds.parameters['radiation.const_cv'], 
+                               'erg/g/K')
+            return (data["thermal_energy"]/c_v)
+
         def _get_vel(axis):
             def velocity(field, data):
                 return data["momentum_%s" % ax]/data["density"]
             return velocity
+
         for ax in 'xyz':
-            self.add_field("velocity_%s" % ax, function = _get_vel(ax),
+            self.add_field(("gas", "velocity_%s" % ax), function = _get_vel(ax),
                            units = "cm/s")
-        self.add_field("thermal_energy",
+        self.add_field(("gas", "thermal_energy"),
                        function = _thermal_energy,
                        units = "erg/g")
-        self.add_field("thermal_energy_density",
+        self.add_field(("gas", "thermal_energy_density"),
                        function = _thermal_energy_density,
                        units = "erg/cm**3")
-        self.add_field("temperature", function=_temperature,
+        self.add_field(("gas", "kinetic_energy"),
+                       function = _kinetic_energy,
+                       units = "erg/g")
+        self.add_field(("gas", "kinetic_energy_density"),
+                       function = _kinetic_energy_density,
+                       units = "erg/cm**3")
+        self.add_field(("gas", "magnetic_energy"),
+                       function = _magnetic_energy,
+                       units = "erg/g")
+        self.add_field(("gas", "magnetic_energy_density"),
+                       function = _magnetic_energy_density,
+                       units = "erg/cm**3")
+        self.add_field(("gas", "temperature"), function=_temperature,
                        units="K")

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


