[yt-svn] commit/yt: 7 new changesets

commits-noreply@bitbucket.org
Wed Aug 24 11:16:01 PDT 2016


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/117919879ade/
Changeset:   117919879ade
Branch:      yt
User:        MatthewTurk
Date:        2016-07-08 21:31:50+00:00
Summary:     Starting to split distance_queue into its own module
Affected #:  5 files

diff -r b264176ae1527a32714dbd782c7f5a3e79dc993a -r 117919879ade1f85feb9f7feecbb66b74d062fb8 setup.py
--- a/setup.py
+++ b/setup.py
@@ -186,7 +186,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools"
+    "amr_kdtools", "distance_queue"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(

diff -r b264176ae1527a32714dbd782c7f5a3e79dc993a -r 117919879ade1f85feb9f7feecbb66b74d062fb8 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -23,15 +23,12 @@
 from yt.utilities.lib.fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport kernel_func, get_kernel_func, gind
+from yt.utilities.lib.distance_queue cimport NeighborList, Neighbor_compare, \
+    r2dist
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
 
-cdef struct NeighborList
-cdef struct NeighborList:
-    np.int64_t pn       # Particle number
-    np.float64_t r2     # radius**2
-
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef kernel_func sph_kernel

diff -r b264176ae1527a32714dbd782c7f5a3e79dc993a -r 117919879ade1f85feb9f7feecbb66b74d062fb8 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -24,44 +24,6 @@
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, OctInfo
 
-cdef int Neighbor_compare(void *on1, void *on2) nogil:
-    cdef NeighborList *n1
-    cdef NeighborList *n2
-    n1 = <NeighborList *> on1
-    n2 = <NeighborList *> on2
-    # Note that we set this up so that "greatest" evaluates to the *end* of the
-    # list, so we can do standard radius comparisons.
-    if n1.r2 < n2.r2:
-        return -1
-    elif n1.r2 == n2.r2:
-        return 0
-    else:
-        return 1
-
-@cython.cdivision(True)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.initializedcheck(False)
-cdef np.float64_t r2dist(np.float64_t ppos[3],
-                         np.float64_t cpos[3],
-                         np.float64_t DW[3],
-                         bint periodicity[3],
-                         np.float64_t max_dist2):
-    cdef int i
-    cdef np.float64_t r2, DR
-    r2 = 0.0
-    for i in range(3):
-        DR = (ppos[i] - cpos[i])
-        if not periodicity[i]:
-            pass
-        elif (DR > DW[i]/2.0):
-            DR -= DW[i]
-        elif (DR < -DW[i]/2.0):
-            DR += DW[i]
-        r2 += DR * DR
-        if max_dist2 >= 0.0 and r2 > max_dist2:
-            return -1.0
-    return r2
 
 cdef void spherical_coord_setup(np.float64_t ipos[3], np.float64_t opos[3]):
     opos[0] = ipos[0] * sin(ipos[1]) * cos(ipos[2])

diff -r b264176ae1527a32714dbd782c7f5a3e79dc993a -r 117919879ade1f85feb9f7feecbb66b74d062fb8 yt/utilities/lib/distance_queue.pxd
--- /dev/null
+++ b/yt/utilities/lib/distance_queue.pxd
@@ -0,0 +1,30 @@
+"""
+A queue for evaluating distances to discrete points
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport cython
+cimport numpy as np
+import numpy as np
+
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2)

diff -r b264176ae1527a32714dbd782c7f5a3e79dc993a -r 117919879ade1f85feb9f7feecbb66b74d062fb8 yt/utilities/lib/distance_queue.pyx
--- /dev/null
+++ b/yt/utilities/lib/distance_queue.pyx
@@ -0,0 +1,57 @@
+"""
+Distance queue implementation
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+cimport numpy as np
+import numpy as np
+cimport cython
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1
+    cdef NeighborList *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.initializedcheck(False)
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if max_dist2 >= 0.0 and r2 > max_dist2:
+            return -1.0
+    return r2
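
For readers less at home in Cython, the body of r2dist translates
directly to plain Python.  A minimal sketch (the function name is
illustrative, not part of the new module):

    def r2dist_py(ppos, cpos, DW, periodicity, max_dist2=-1.0):
        # Squared separation under the minimum-image convention: each
        # periodic component is wrapped into [-DW/2, DW/2] before summing.
        r2 = 0.0
        for i in range(3):
            DR = ppos[i] - cpos[i]
            if periodicity[i]:
                if DR > DW[i] / 2.0:
                    DR -= DW[i]
                elif DR < -DW[i] / 2.0:
                    DR += DW[i]
            r2 += DR * DR
            # Bail out early once the running sum exceeds the cutoff.
            if max_dist2 >= 0.0 and r2 > max_dist2:
                return -1.0
        return r2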


https://bitbucket.org/yt_analysis/yt/commits/59e3d0bacc72/
Changeset:   59e3d0bacc72
Branch:      yt
User:        MatthewTurk
Date:        2016-07-08 21:47:32+00:00
Summary:     Moving functionality to distance_queue
Affected #:  2 files

diff -r 117919879ade1f85feb9f7feecbb66b74d062fb8 -r 59e3d0bacc72a01f4b7848a6cc22de8910f29a82 yt/utilities/lib/distance_queue.pxd
--- a/yt/utilities/lib/distance_queue.pxd
+++ b/yt/utilities/lib/distance_queue.pxd
@@ -17,6 +17,8 @@
 cimport cython
 cimport numpy as np
 import numpy as np
+from libc.stdlib cimport malloc, free
+from libc.string cimport memmove
 
 cdef struct NeighborList:
     np.int64_t pn       # Particle number
@@ -28,3 +30,14 @@
                          np.float64_t DW[3],
                          bint periodicity[3],
                          np.float64_t max_dist2)
+
+cdef class DistanceQueue:
+    cdef int maxn
+    cdef int curn
+    cdef np.float64_t DW[3]
+    cdef bint periodicity[3]
+    cdef NeighborList* neighbors # flat array
+    cdef void setup(self, np.float64_t DW[3], bint periodicity[3])
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)

diff -r 117919879ade1f85feb9f7feecbb66b74d062fb8 -r 59e3d0bacc72a01f4b7848a6cc22de8910f29a82 yt/utilities/lib/distance_queue.pyx
--- a/yt/utilities/lib/distance_queue.pyx
+++ b/yt/utilities/lib/distance_queue.pyx
@@ -55,3 +55,69 @@
         if max_dist2 >= 0.0 and r2 > max_dist2:
             return -1.0
     return r2
+
+cdef class DistanceQueue:
+    def __cinit__(self, int maxn):
+        cdef int i
+        self.maxn = maxn
+        self.curn = 0
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
+        for i in range(3):
+            self.DW[i] = 0
+            self.periodicit[i] = False
+
+    cdef void setup(self, np.float64_t DW[3], bint periodicity[3]):
+        for i in range(3):
+            self.DW[i] = DW[i]
+            self.periodicity[i] = periodicity[i]
+
+    def __dealloc__(self):
+        free(self.neighbors)
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        # Here's a python+numpy simulator of this:
+        # http://paste.yt-project.org/show/5445/
+        cdef int i, di
+        cdef np.float64_t r2, r2_trunc
+        if self.curn == self.maxn:
+            # Truncate calculation if it's bigger than this in any dimension
+            r2_trunc = self.neighbors[self.curn - 1].r2
+        else:
+            # Don't truncate our calculation
+            r2_trunc = -1
+        r2 = r2dist(ppos, cpos, self.DW, self.periodicity, r2_trunc)
+        if r2 == -1:
+            return
+        if self.curn == 0:
+            self.neighbors[0].r2 = r2
+            self.neighbors[0].pn = pn
+            self.curn += 1
+            return
+        # Now insert in a sorted way
+        di = -1
+        for i in range(self.curn - 1, -1, -1):
+            # We are checking if i is less than us, to see if we should insert
+            # to the right (i.e., i+1).
+            if self.neighbors[i].r2 < r2:
+                di = i
+                break
+        # The outermost one is already too small.
+        if di == self.maxn - 1:
+            return
+        if (self.maxn - (di + 2)) > 0:
+            memmove(<void *> (self.neighbors + di + 2),
+                    <void *> (self.neighbors + di + 1),
+                    sizeof(NeighborList) * (self.maxn - (di + 2)))
+        self.neighbors[di + 1].r2 = r2
+        self.neighbors[di + 1].pn = pn
+        if self.curn < self.maxn:
+            self.curn += 1
+
+    cdef void neighbor_reset(self):
+        for i in range(self.maxn):
+            self.neighbors[i].r2 = 1e300
+            self.neighbors[i].pn = -1
+        self.curn = 0
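
The memmove block in neighbor_eval above implements a fixed-capacity
insertion sort keyed on r2 (the code comment links a python+numpy
simulator of it).  The same idea as a pure-Python sketch, with
illustrative names:

    import bisect

    def neighbor_insert(neighbors, maxn, pn, r2):
        # `neighbors` is a list of (r2, pn) tuples kept sorted ascending,
        # never longer than maxn entries.
        if len(neighbors) == maxn and r2 >= neighbors[-1][0]:
            return  # farther than the current outermost neighbor: discard
        bisect.insort(neighbors, (r2, pn))
        if len(neighbors) > maxn:
            neighbors.pop()  # evict the new outermost entry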


https://bitbucket.org/yt_analysis/yt/commits/2cad0bd1cfcd/
Changeset:   2cad0bd1cfcd
Branch:      yt
User:        MatthewTurk
Date:        2016-07-08 21:55:53+00:00
Summary:     Finish adding functionality to distance queue
Affected #:  2 files

diff -r 59e3d0bacc72a01f4b7848a6cc22de8910f29a82 -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 yt/utilities/lib/distance_queue.pxd
--- a/yt/utilities/lib/distance_queue.pxd
+++ b/yt/utilities/lib/distance_queue.pxd
@@ -37,7 +37,7 @@
     cdef np.float64_t DW[3]
     cdef bint periodicity[3]
     cdef NeighborList* neighbors # flat array
-    cdef void setup(self, np.float64_t DW[3], bint periodicity[3])
+    cdef void _setup(self, np.float64_t DW[3], bint periodicity[3])
     cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
                             np.float64_t cpos[3])
     cdef void neighbor_reset(self)

diff -r 59e3d0bacc72a01f4b7848a6cc22de8910f29a82 -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 yt/utilities/lib/distance_queue.pyx
--- a/yt/utilities/lib/distance_queue.pyx
+++ b/yt/utilities/lib/distance_queue.pyx
@@ -66,9 +66,14 @@
         self.neighbor_reset()
         for i in range(3):
             self.DW[i] = 0
-            self.periodicit[i] = False
+            self.periodicity[i] = False
 
-    cdef void setup(self, np.float64_t DW[3], bint periodicity[3]):
+    cdef void _setup(self, np.float64_t DW[3], bint periodicity[3]):
+        for i in range(3):
+            self.DW[i] = DW[i]
+            self.periodicity[i] = periodicity[i]
+
+    def setup(self, np.float64_t[:] DW, periodicity):
         for i in range(3):
             self.DW[i] = DW[i]
             self.periodicity[i] = periodicity[i]
@@ -121,3 +126,18 @@
             self.neighbors[i].r2 = 1e300
             self.neighbors[i].pn = -1
         self.curn = 0
+
+    def find_nearest(self, np.float64_t[:] center, np.float64_t[:,:] points):
+        cdef int i, j
+        cdef np.float64_t ppos[3], cpos[3]
+        self.neighbor_reset()
+        for i in range(3):
+            cpos[i] = center[i]
+        for j in range(points.shape[0]):
+            for i in range(3):
+                ppos[i] = points[j,i]
+            self.neighbor_eval(j, ppos, cpos)
+        rv = np.empty(self.curn, dtype="int64")
+        for i in range(self.curn):
+            rv[i] = self.neighbors[i].pn
+        return rv
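
With setup() and find_nearest() now exposed at the Python level, the
queue can be exercised directly once the extension is compiled.  A usage
sketch (values are illustrative):

    import numpy as np
    from yt.utilities.lib.distance_queue import DistanceQueue

    points = np.random.random((100, 3))       # candidate positions, float64
    center = np.array([0.5, 0.5, 0.5])

    dq = DistanceQueue(8)                     # keep the 8 nearest neighbors
    dq.setup(np.ones(3), [True, True, True])  # unit-width, fully periodic box
    nearest = dq.find_nearest(center, points)
    # `nearest` holds the indices of the 8 closest points, nearest first.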


https://bitbucket.org/yt_analysis/yt/commits/c8140fbbef25/
Changeset:   c8140fbbef25
Branch:      yt
User:        MatthewTurk
Date:        2016-08-08 20:12:00+00:00
Summary:     Merging with upstream
Affected #:  186 files

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,9 +37,11 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
 yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_construction.cpp
 yt/utilities/lib/mesh_intersection.cpp
@@ -49,6 +51,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
 yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
@@ -62,6 +65,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
 syntax: glob
 *.pyc
 *.pyd

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,4 +5160,38 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
+882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
+a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
+41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
+3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
+bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
+34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
+2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4
+21c0314cee16242b6685e42a74d16f7a993c9a88 yt-2.5.5
+053487f48672b8fd5c43af992e92bc2f2499f31f yt-2.6
+d43ff9d8e20f2d2b8f31f4189141d2521deb341b yt-2.6.1
+f1e22ef9f3a225f818c43262e6ce9644e05ffa21 yt-2.6.2
+816186f16396a16853810ac9ebcde5057d8d5b1a yt-2.6.3
 f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+73a9f749157260c8949f05c07715305aafa06408 yt-3.0.0
+0cf350f11a551f5a5b4039a70e9ff6d98342d1da yt-3.0.1
+511887af4c995a78fe606e58ce8162c88380ecdc yt-3.0.2
+fd7cdc4836188a3badf81adb477bcc1b9632e485 yt-3.1.0
+28733726b2a751e774c8b7ae46121aa57fd1060f yt-3.2
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+f7ca21c7b3fdf25d2ccab139849ae457597cfd5c yt-3.2.1
+a7896583c06585be66de8404d76ad5bc3d2caa9a yt-3.2.2
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
+7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
+9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command, to make sure you are at
+the "yt" branch, and not other ones like "stable" (this will be important
+later when you want to submit your pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c@my.fsu.edu)
                 John Forbes (jforbes@ucolick.org)
                 Adam Ginsburg (keflavich@gmail.com)
+                Austin Gilbert (augilbert4@gmail.com)
                 Sam Geen (samgeen@gmail.com)
                 Nathan Goldbaum (goldbaum@ucolick.org)
                 William Gray (graywilliamj@gmail.com)

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
+include README* CREDITS COPYING.txt CITATION  setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -115,10 +116,31 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
+    if [ -n "${PYTHONPATH}" ]
+    then
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        echo
+        echo "The PYTHONPATH environment variable is set to:"
+        echo
+        echo "    $PYTHONPATH"
+        echo
+        echo "If dependencies of yt (numpy, scipy, matplotlib) are installed"
+        echo "to this path, this may cause issues. Exit the install script"
+        echo "with Ctrl-C and unset PYTHONPATH if you are unsure."
+        echo "Hit enter to continue."
+        echo
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        read -p "[hit enter]"
+    fi
 else
     if [ $INST_YT_SOURCE -eq 0 ]
     then
@@ -466,21 +488,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -492,8 +512,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -510,6 +530,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -524,11 +555,11 @@
 echo
 
 printf "%-18s = %s so I " "INST_CONDA" "${INST_CONDA}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_CONDA}
 echo "be installing a conda-based python environment"
 
 printf "%-18s = %s so I " "INST_YT_SOURCE" "${INST_YT_SOURCE}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_YT_SOURCE}
 echo "be compiling yt from source"
 
 printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}"
@@ -539,9 +570,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -744,6 +775,12 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function test_install
+{
+    echo "Testing that yt can be imported"
+    ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import yt" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -1238,6 +1275,8 @@
     ( cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh 2>&1 ) 1>> ${LOG_FILE}
     sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
 
+    test_install
+
     function print_afterword
     {
         echo
@@ -1385,7 +1424,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1399,14 +1438,21 @@
         log_cmd conda install --yes ${YT_DEP}
     done
 
+    if [ $INST_PY3 -eq 1 ]
+    then
+        echo "Installing mercurial"
+        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
+    fi
+
     log_cmd pip install python-hglib
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1453,22 +1499,15 @@
         fi
     fi
 
-    if [ $INST_PY3 -eq 1 ]
-    then
-        echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
-        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
-    fi
-
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install --yes yt
+        log_cmd conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi
@@ -1478,10 +1517,12 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE}
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 
+    test_install
+
     echo
     echo
     echo "========================================================================"

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,10 +4,9 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso@physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the yt source code,
-   so the program may be bug ridden.  Please send yt-dev an email and
-   address to Geoffrey So if you discover something wrong with this
-   portion of the code.
+.. warning:: This functionality is currently broken and needs to
+   be updated to make use of the :ref:`halo_catalog` framework.
+   Anyone interested in doing so should contact the yt-dev list.
 
 Purpose
 -------

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -3,14 +3,16 @@
 Halo Analysis
 =============
 
-Using halo catalogs, understanding the different halo finding methods,
-and using the halo mass function.
+This section covers halo finding, performing extra analysis on halos,
+and the halo mass function calculator.  If you already have halo
+catalogs and simply want to load them into yt, see
+:ref:`halo-catalog-data`.
 
 .. toctree::
    :maxdepth: 2
 
+   halo_catalogs
+   halo_mass_function
    halo_transition
-   halo_catalogs
-   halo_finders
-   halo_mass_function
    halo_merger_tree
+   ellipsoid_analysis

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,42 @@
 .. _halo_catalog:
 
-Halo Catalogs
-=============
+Halo Finding and Analysis
+=========================
 
-Creating Halo Catalogs
-----------------------
+In yt-3.x, halo finding and analysis are combined into a single
+framework called the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+This framework is substantially different from the halo analysis
+machinery available in yt-2.x and is entirely backward incompatible.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.x, see :ref:`halo-transition`.
 
-In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought
-together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is
-entirely backward incompatible.
-For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo-transition`.
+.. _halo_catalog_finding:
 
-A catalog of halos can be created from any initial dataset given to halo
-catalog through data_ds. These halos can be found using friends-of-friends,
-HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
-For more details on the relative differences between these halo finders see
-:ref:`halo_finding`.
+Halo Finding
+------------
 
-The class which holds all of the halo information is the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+If you already have a halo catalog, either produced by one of the methods
+below or in a format described in :ref:`halo-catalog-data`, and want to
+perform further analysis, skip to :ref:`halo_catalog_analysis`.
+
+Three halo finding methods exist within yt.  These are:
+
+* :ref:`fof_finding`: a basic friends-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+  <http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
+  <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
+  scales well and does substructure finding (`Behroozi et al.
+  2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
+
+Halo finding is performed through the creation of a
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+object.  The dataset on which halo finding is to be performed should
+be loaded and given to the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+along with the ``finder_method`` keyword to specify the method to be
+used.
 
 .. code-block:: python
 
@@ -31,28 +45,195 @@
 
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
 
-A halo catalog may also be created from already run rockstar outputs.
-This method is not implemented for previously run friends-of-friends or
-HOP finders. Even though rockstar creates one file per processor,
-specifying any one file allows the full catalog to be loaded. Here we
-only specify the file output by the processor with ID 0. Note that the
-argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
+The ``finder_method`` options should be given as "fof", "hop", or
+"rockstar".  Each of these methods has their own set of keyword
+arguments to control functionality.  These can specified in the form
+of a dictinoary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_ds=halos_ds)
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-Although supplying only the binary output of the rockstar halo finder
-is sufficient for creating a halo catalog, it is not possible to find
-any new information about the identified halos. To associate the halos
-with the dataset from which they were found, supply arguments to both
-halos_ds and data_ds.
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={"ptype": "stars",
+                                   "padding": 0.02})
+   hc.create()
+
+For a full list of keywords for each halo finder, see
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
+and
+:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+
+.. _fof_finding:
+
+FOF
+^^^
+
+This is a basic friends-of-friends algorithm.  See
+`Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_ for more
+details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+
+.. _hop_finding:
+
+HOP
+^^^
+
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
+groups in this fashion:
+
+#. Estimates the local density at each particle using a
+   smoothing kernel.
+
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
+   its own densest neighbor is the end of the chain.
+
+#. All chains that share the same densest particle are
+   grouped together.
+
+#. Groups are included, linked together, or discarded
+   depending on the user-supplied over density
+   threshold parameter. The default is 160.0.
+
+See the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
+.. _rockstar_finding:
+
+Rockstar
+^^^^^^^^
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and noise-
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
+
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
+simulations, but does make it less useful for finding halos from the stellar
+mass. In simulations where the highest-resolution particles all have the
+same mass (i.e., zoom-in grid-based simulations), one can set up a particle
+filter to select the lowest mass particles and perform the halo finding
+only on those.  See this cookbook recipe for an example:
+:ref:`cookbook-rockstar-nested-grid`.
+
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+The script above configures the Halo finder, launches a server process which
+disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the
+halo catalog through the ``finder_kwargs`` argument:
+
+* ``dm_type``, the index of the dark matter particle. Default is 1.
+* ``outbase``, This is where the out*list files that Rockstar makes should be
+  placed. Default is 'rockstar_halos'.
+* ``num_readers``, the number of reader tasks (which are idle most of the
+  time.) Default is 1.
+* ``num_writers``, the number of writer tasks (which are fed particles and
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
+  configured from the number of available MPI tasks.
+* ``force_res``, the resolution that Rockstar uses for various calculations
+  and smoothing lengths. This is in units of Mpc/h.
+  If no value is provided, this parameter is automatically set to
+  the width of the smallest grid element in the simulation from the
+  last data snapshot (i.e. the one where time has evolved the
+  longest) in the time series:
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
+* ``total_particles``, if supplied, this is a pre-calculated
+  total number of dark matter
+  particles present in the simulation. For example, this is useful
+  when analyzing a series of snapshots where the number of dark
+  matter particles should not change and this will save some disk
+  access time. If left unspecified, it will
+  be calculated automatically. Default: ``None``.
+* ``dm_only``, if set to ``True``, it will be assumed that there are
+  only dark matter particles present in the simulation.
+  This option does not modify the halos found by Rockstar, however
+  this option can save disk access time if there are no star particles
+  (or other non-dark matter particles) in the simulation. Default: ``False``.
+
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+.. _rockstar-installation:
+
+Installing Rockstar
+"""""""""""""""""""
+
+Because of changes in the Rockstar API over time, yt only currently works with
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.
+
+.. _halo_catalog_analysis:
+
+Extra Halo Analysis
+-------------------
+
+As a reminder, all halo catalogs created by the methods outlined in
+:ref:`halo_catalog_finding` as well as those in the formats discussed in
+:ref:`halo-catalog-data` can be loaded in to yt as first-class datasets.
+Once a halo catalog has been created, further analysis can be performed
+by providing both the halo catalog and the original simulation dataset to
+the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
@@ -60,24 +241,28 @@
 associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
-Analysis Using Halo Catalogs
-----------------------------
-
-Analysis is done by adding actions to the
+The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+allows the user to create a pipeline of analysis actions that will be
+performed on all halos in the existing catalog.  The analysis can be
+performed in parallel with separate processors or groups of processors
+being allocated to perform the entire pipeline on individual halos.
+The pipeline is set up by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
 
-* Filters
-* Quantities
-* Callbacks
-* Recipes
+* :ref:`halo_catalog_filters`
+* :ref:`halo_catalog_quantities`
+* :ref:`halo_catalog_callbacks`
+* :ref:`halo_catalog_recipes`
 
 A list of all available filters, quantities, and callbacks can be found in
 :ref:`halo_analysis_ref`.
 All interaction with this analysis can be performed by importing from
 halo_analysis.
 
+.. _halo_catalog_filters:
+
 Filters
 ^^^^^^^
 
@@ -118,6 +303,8 @@
    # ... Later on in your script
    hc.add_filter("my_filter")
 
+.. _halo_catalog_quantities:
+
 Quantities
 ^^^^^^^^^^
 
@@ -176,6 +363,8 @@
    # ... Anywhere after "my_quantity" has been called
    hc.add_callback("print_quantity")
 
+.. _halo_catalog_callbacks:
+
 Callbacks
 ^^^^^^^^^
 
@@ -214,6 +403,8 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+.. _halo_catalog_recipes:
+
 Recipes
 ^^^^^^^
 
@@ -258,8 +449,8 @@
 object as the first argument, recipe functions should take a ``HaloCatalog``
 object as the first argument.
 
-Running Analysis
-----------------
+Running the Pipeline
+--------------------
 
 After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
@@ -290,7 +481,7 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to load. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
@@ -303,8 +494,8 @@
                    filename="virial_profiles")
    hc.load()
 
-Worked Example of Halo Catalog in Action
-----------------------------------------
+Halo Catalog in Action
+----------------------
 
 For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.
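
Taken together, the finding and analysis pieces documented above chain
on a single HaloCatalog.  A short sketch of one such pipeline, using the
stock quantity_value filter (dataset path as in the examples above):

    import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    # Keep only halos above 1e13 Msun, then run the full pipeline.
    hc.add_filter('quantity_value', 'particle_mass', '>', 1e13, 'Msun')
    hc.create()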

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. _halo_finding:
-
-Halo Finding
-============
-
-There are three methods of finding particle haloes in yt. The
-default method is called HOP, a method described
-in `Eisenstein and Hut (1998)
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
-friends-of-friends (e.g. `Efstathiou et al. (1985)
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
-finder is also implemented. Finally Rockstar (`Behroozi et a.
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
-a 6D-phase space halo finder developed by Peter Behroozi that
-excels in finding subhalos and substrcture, but does not allow
-multiple particle masses.
-
-.. _hop:
-
-HOP
----
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
-   smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
-   particle to its densest neighbor. A particle which is
-   its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
-   grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied over density
-   threshold parameter. The default is 160.0.
-
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
-documentation.
-
-.. _fof:
-
-FOF
----
-
-A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
-documentation.
-
-.. _rockstar:
-
-Rockstar Halo Finding
----------------------
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the
-same mass (ie: zoom-in grid based simulations), one can set up a particle
-filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
-
-To run the Rockstar Halo finding, you must launch python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
-   connected by an Infiniband network can be problematic. Therefore, for now
-   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
-   using this flag: ``--mca btl ^openib``.
-   For example, here is how Rockstar might be called using 24 cores:
-   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-The script above configures the Halo finder, launches a server process which
-disseminates run information and coordinates writer-reader processes.
-Afterwards, it launches reader and writer tasks, filling the available MPI
-slots, which alternately read particle information and analyze for halo
-content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument:
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, This is where the out*list files that Rockstar makes should be
-  placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time.) Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1.
-  If left undefined, the above options are automatically
-  configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
-  and smoothing lengths. This is in units of Mpc/h.
-  If no value is provided, this parameter is automatically set to
-  the width of the smallest grid element in the simulation from the
-  last data snapshot (i.e. the one where time has evolved the
-  longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
-  total number of dark matter
-  particles present in the simulation. For example, this is useful
-  when analyzing a series of snapshots where the number of dark
-  matter particles should not change and this will save some disk
-  access time. If left unspecified, it will
-  be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
-  only dark matter particles present in the simulation.
-  This option does not modify the halos found by Rockstar, however
-  this option can save disk access time if there are no star particles
-  (or other non-dark matter particles) in the simulation. Default: ``False``.
-
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-For more information, see the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
-
-.. _parallel-hop-and-fof:
-
-Parallel HOP and FOF
---------------------
-
-Both the HOP and FoF halo finders can run in parallel using simple
-spatial decomposition. In order to run them in parallel it is helpful
-to understand how it works. Below in the first plot (i) is a simplified
-depiction of three haloes labeled 1,2 and 3:
-
-.. image:: _images/ParallelHaloFinder.png
-   :width: 500
-
-Halo 3 is twice reflected around the periodic boundary conditions.
-
-In (ii), the volume has been sub-divided into four equal subregions,
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in
-two different subregions, C and D, and that halo 3 is now in three,
-A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split
-up into multiple haloes, which is incorrect. The solution is to give
-each subregion padding to oversample into neighboring regions.
-
-In (iii), subregion C has oversampled into the other three regions,
-with the periodic boundary conditions taken into account, shown by
-dot-dashed lines. The other subregions oversample in a similar way.
-
-The halo finder is then run on each padded subregion independently
-and simultaneously. By oversampling like this, haloes 2 and 3 will
-both be enclosed fully in at least one subregion and identified
-completely.
-
-Haloes identified with centers of mass inside the padded part of a
-subregion are thrown out, eliminating the problem of halo duplication.
-The centers for the three haloes are shown with stars. Halo 1 will
-belong to subregion A, 2 to C and 3 to B.
-
-To run with parallel halo finding, you must supply a value for
-padding in the finder_kwargs argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. This parameter is how
-much padding is added to each of the six sides of a subregion.
-This value should be 2x-3x larger than the largest expected halo
-in the simulation. It is unlikely, of course, that the largest
-object in the simulation will be on a subregion boundary, but there
-is no way of knowing before the halo finder is run.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import *
-  ds = yt.load("data0001")
-
-  hc = HaloCatalog(data_ds = ds, finder_method = 'hop', finder_kwargs={'padding':0.02})
-  # --or--
-  hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
-
-In general, a little bit of padding goes a long way, and too much
-just slows down the analysis and doesn't improve the answer (but
-doesn't change it).  It may be worth your time to run the parallel
-halo finder at a few paddings to find the right amount, especially
-if you're analyzing many similar datasets.
-
-.. _rockstar-installation:
-
-Rockstar Installation
----------------------
-
-Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy.  The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-    cd $YT_DEST
-    cd ..
-    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
-    bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.  You should now
-be able to use Rockstar and yt together.

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,11 +1,12 @@
 .. _halo-transition:
 
-Getting up to Speed with Halo Analysis in yt-3.0
-================================================
+Transitioning From yt-2 to yt-3
+===============================
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure.
+the new halo catalog infrastructure.  If you're starting
+from scratch, see :ref:`halo_catalog`.
 
 Finding Halos
 -------------

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -19,4 +19,3 @@
    two_point_functions
    clump_finding
    particle_trajectories
-   ellipsoid_analysis

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s@skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived is added using
-``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : int
-    The resolution of column density calculation as performed by
-    HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer
-    This number controls the gridding of the HEALPix projection onto
-    the spherical surfaces. Higher numbers mean higher quality.
-    Default = 800j.
-

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -131,6 +131,16 @@
 
    ds.r[:,-180:0,:]
 
+If you specify a single slice, it will be repeated along all three dimensions.
+For instance, this will give all data::
+
+   ds.r[:]
+
+And this will select a box running from 0.4 to 0.6 along all three
+dimensions::
+
+   ds.r[0.4:0.6]
+
 Selecting Fixed Resolution Regions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,9 +67,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3-dev'
+version = '3.4-dev'
 # The full version, including alpha/beta/rc tags.
-release = '3.3-dev'
+release = '3.4-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -38,7 +38,7 @@
 # again.
 
 render_source.set_volume(kd_low_res)
-render_source.set_fields('density')
+render_source.set_field('density')
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,6 +56,16 @@
 
 .. yt_cookbook:: simulation_analysis.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 
 .. _cookbook-time-series-analysis:
 
@@ -93,16 +103,6 @@
 
 .. yt_cookbook:: hse_field.py
 
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/colormaps.py
--- a/doc/source/cookbook/colormaps.py
+++ b/doc/source/cookbook/colormaps.py
@@ -7,11 +7,11 @@
 p = yt.ProjectionPlot(ds, "z", "density", width=(100, 'kpc'))
 p.save()
 
-# Change the colormap to 'jet' and save again.  We must specify
+# Change the colormap to 'dusk' and save again.  We must specify
 # a different filename here or it will save it over the top of
 # our first projection.
-p.set_cmap(field="density", cmap='jet')
-p.save('proj_with_jet_cmap.png')
+p.set_cmap(field="density", cmap='dusk')
+p.save('proj_with_dusk_cmap.png')
 
 # Change the colormap to 'hot' and save again.
 p.set_cmap(field="density", cmap='hot')

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast.
+# For annotate_domain, the fourth color value is alpha.
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -8,9 +8,12 @@
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
-# Trajectory should be given as (r, theta, phi)
-lr.make_light_ray(start_position=[0., 0., 0.],
-                  end_position=[1., 1., 1.],
+# These positions can be defined as xyz coordinates,
+# but here we just use the two opposite corners of the
+# simulation box.  Alternatively, a trajectory can
+# be given as (r, theta, phi).
+lr.make_light_ray(start_position=ds.domain_left_edge,
+                  end_position=ds.domain_right_edge,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=['temperature', 'density'])

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
 
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
 
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
 
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
 
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
 
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
 
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/developing/extensions.rst
--- /dev/null
+++ b/doc/source/developing/extensions.rst
@@ -0,0 +1,54 @@
+.. _extensions:
+
+Extension Packages
+==================
+
+.. note:: For some additional discussion, see `YTEP-0029
+          <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
+          this plan was designed.
+
+As of version 3.3 of yt, we have put into place new methods for easing the
+process of developing "extensions" to yt.  Extensions might be analysis
+packages, visualization tools, or other software projects that use yt as a base
+engine but that are versioned, developed and distributed separately.  This
+brings with it the advantage of retaining control over the versioning,
+contribution guidelines, scope, etc, while also providing a mechanism for
+disseminating information about it, and potentially a method of interacting
+with other extensions.
+
+We have created a few pieces of infrastructure for developing extensions,
+making them discoverable, and distributing them to collaborators.
+
+If you have a module you would like to retain some external control over, or
+that you don't feel would fit into yt, we encourage you to build it as an
+extension module and distribute and version it independently.
+
+Hooks for Extensions
+--------------------
+
+Starting with version 3.3 of yt, any package named with the prefix ``yt_`` is
+importable from the namespace ``yt.extensions``.  For instance, the
+``yt_interaction`` package ( https://bitbucket.org/data-exp-lab/yt_interaction
+) is importable as ``yt.extensions.interaction``.
+
+In subsequent versions, we plan to include in yt a catalog of known extensions
+and where to find them; this will put discoverability directly into the code
+base.
+
+Extension Template
+------------------
+
+A template for starting an extension module (or converting an existing set of
+code to an extension module) can be found at
+https://bitbucket.org/yt_analysis/yt_extension_template .
+
+To get started, download a zipfile of the template (
+https://bitbucket.org/yt_analysis/yt_extension_template/get/tip.zip ) and
+follow the directions in ``README.md`` to modify the metadata.
+
+Distributing Extensions
+-----------------------
+
+We encourage you to version on your choice of hosting platform (Bitbucket,
+GitHub, etc), and to distribute your extension widely.  We are presently
+working on deploying a method for listing extension modules on the yt webpage.
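
A quick sketch of the import hook described above; yt_interaction is
simply the example package named in the docs and must be installed
separately for this to work:

    # Once a package named with the yt_ prefix is installed, these two
    # imports are roughly equivalent:
    import yt_interaction
    import yt.extensions.interaction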

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -19,6 +19,7 @@
    developing
    building_the_docs
    testing
+   extensions
    debugdrive
    releasing
    creating_datatypes

diff -r 2cad0bd1cfcdd226aeedc640b7574b488c735263 -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -103,7 +103,7 @@
    accept no arguments. The test function should do some work that tests some
    functionality and should also verify that the results are correct using
    assert statements or functions.  
-# Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
    ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
    captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
@@ -487,7 +487,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require a definition of which set of tests constitutes a given
 answer. Configuration for the integration server is stored in
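
As a small illustration of the yield-style tests described in this
file, a sketch using yt's own assert_equal helper (the test name and
values here are made up):

    from yt.testing import assert_equal

    def test_arithmetic():
        # nose collects each yielded tuple as a separate test:
        # the callable comes first, followed by its arguments
        yield assert_equal, 1.0, 1.0
        yield assert_equal, 2.0 + 2.0, 4.0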

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/090dbc4bcf53/
Changeset:   090dbc4bcf53
Branch:      yt
User:        MatthewTurk
Date:        2016-08-08 20:41:37+00:00
Summary:     Removing functions that are now in DistanceQueue
Affected #:  2 files

diff -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 -r 090dbc4bcf534386e70613dc4fba5f14c1ca9ab0 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -24,7 +24,7 @@
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport kernel_func, get_kernel_func, gind
 from yt.utilities.lib.distance_queue cimport NeighborList, Neighbor_compare, \
-    r2dist
+    r2dist, DistanceQueue
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
@@ -36,10 +36,8 @@
     cdef np.float64_t DW[3]
     cdef int nfields
     cdef int maxn
-    cdef int curn
     cdef bint periodicity[3]
     # Note that we are preallocating here, so this is *not* threadsafe.
-    cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t[:,:] ppos,
@@ -49,7 +47,7 @@
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds)
+                               np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
@@ -62,10 +60,7 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize)
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3])
-    cdef void neighbor_reset(self)
+                               int *nsize, DistanceQueue dq)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
@@ -75,7 +70,7 @@
                             np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
-                            np.float64_t[:,:] oct_dds)
+                            np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields)
+                      np.float64_t **index_fields, DistanceQueue dq)

diff -r c8140fbbef25f2dc6ac4e21e71fb9cc1045a06a1 -r 090dbc4bcf534386e70613dc4fba5f14c1ca9ab0 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -42,10 +42,6 @@
         self.nvals = nvals
         self.nfields = nfields
         self.maxn = max_neighbors
-
-        self.neighbors = <NeighborList *> malloc(
-            sizeof(NeighborList) * self.maxn)
-        self.neighbor_reset()
         self.sph_kernel = get_kernel_func(kernel_name)
 
     def initialize(self, *args):
@@ -209,6 +205,9 @@
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
         cdef int nproc = 0
+        # This should be thread-private if we ever go to OpenMP
+        cdef DistanceQueue dist_queue = DistanceQueue(self.maxn)
+        dist_queue._setup(self.DW, self.periodicity)
         for i in range(oct_positions.shape[0]):
             for j in range(3):
                 pos[j] = oct_positions[i, j]
@@ -222,7 +221,7 @@
                 dims, moi.left_edge, moi.dds, cart_positions, field_pointers, doff,
                 &nind, pind, pcount, offset, index_field_pointers,
                 particle_octree, domain_id, &nsize, oct_left_edges,
-                oct_dds)
+                oct_dds, dist_queue)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -331,6 +330,9 @@
         # refers to that oct's particles.
         cdef int maxnei = 0
         cdef int nproc = 0
+        # This should be thread-private if we ever go to OpenMP
+        cdef DistanceQueue dist_queue = DistanceQueue(self.maxn)
+        dist_queue._setup(self.DW, self.periodicity)
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -342,7 +344,8 @@
                     pos[k] = positions[pind0, k]
                 self.neighbor_process_particle(pos, cart_positions, field_pointers,
                             doff, &nind, pind, pcount, pind0,
-                            NULL, particle_octree, domain_id, &nsize)
+                            NULL, particle_octree, domain_id, &nsize,
+                            dist_queue)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -425,55 +428,9 @@
 
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **ifields):
+                      np.float64_t **ifields, DistanceQueue dq):
         raise NotImplementedError
 
-    cdef void neighbor_reset(self):
-        self.curn = 0
-        for i in range(self.maxn):
-            self.neighbors[i].pn = -1
-            self.neighbors[i].r2 = 1e300
-
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3]):
-        # Here's a python+numpy simulator of this:
-        # http://paste.yt-project.org/show/5445/
-        cdef int i, di
-        cdef np.float64_t r2, r2_trunc
-        if self.curn == self.maxn:
-            # Truncate calculation if it's bigger than this in any dimension
-            r2_trunc = self.neighbors[self.curn - 1].r2
-        else:
-            # Don't truncate our calculation
-            r2_trunc = -1
-        r2 = r2dist(ppos, cpos, self.DW, self.periodicity, r2_trunc)
-        if r2 == -1:
-            return
-        if self.curn == 0:
-            self.neighbors[0].r2 = r2
-            self.neighbors[0].pn = pn
-            self.curn += 1
-            return
-        # Now insert in a sorted way
-        di = -1
-        for i in range(self.curn - 1, -1, -1):
-            # We are checking if i is less than us, to see if we should insert
-            # to the right (i.e., i+1).
-            if self.neighbors[i].r2 < r2:
-                di = i
-                break
-        # The outermost one is already too small.
-        if di == self.maxn - 1:
-            return
-        if (self.maxn - (di + 2)) > 0:
-            memmove(<void *> (self.neighbors + di + 2),
-                    <void *> (self.neighbors + di + 1),
-                    sizeof(NeighborList) * (self.maxn - (di + 2)))
-        self.neighbors[di + 1].r2 = r2
-        self.neighbors[di + 1].pn = pn
-        if self.curn < self.maxn:
-            self.curn += 1
-
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -488,6 +445,7 @@
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
                             np.float64_t[:,:] oct_dds,
+                            DistanceQueue dq
                             ):
         # We are now given the number of neighbors, the indices into the
         # domains for them, and the number of particles for each.
@@ -497,13 +455,13 @@
         cdef np.float64_t ex[2] 
         cdef np.float64_t DR[2]
         cdef np.float64_t cp, r2_trunc, r2, dist
-        self.neighbor_reset()
+        dq.neighbor_reset()
         for ni in range(nneighbors):
             if nind[ni] == -1: continue
             # terminate early if all 8 corners of oct are farther away than
             # most distant currently known neighbor
-            if oct_left_edges != None and self.curn == self.maxn:
-                r2_trunc = self.neighbors[self.curn - 1].r2
+            if oct_left_edges != None and dq.curn == dq.maxn:
+                r2_trunc = dq.neighbors[dq.curn - 1].r2
                 # iterate over each dimension in the outer loop so we can
                 # consolidate temporary storage
                 # What this next bit does is figure out which component is the
@@ -539,7 +497,7 @@
                 pn = pinds[offset + i]
                 for j in range(3):
                     pos[j] = ppos[pn, j]
-                self.neighbor_eval(pn, pos, cpos)
+                dq.neighbor_eval(pn, pos, cpos)
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
@@ -554,7 +512,8 @@
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds):
+                               np.float64_t[:,:] oct_dds,
+                               DistanceQueue dq):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -572,17 +531,18 @@
                     nneighbors = self.neighbor_search(opos, octree,
                                     nind, nsize, nneighbors, domain_id, &oct, 0)
                     self.neighbor_find(nneighbors, nind[0], doffs, pcounts,
-                                       pinds, ppos, opos, oct_left_edges, oct_dds)
+                                       pinds, ppos, opos, oct_left_edges,
+                                       oct_dds, dq)
                     # Now we have all our neighbors in our neighbor list.
-                    if self.curn <-1*self.maxn:
+                    if dq.curn <-1*dq.maxn:
                         ntot = nntot = 0
                         for m in range(nneighbors):
                             if nind[0][m] < 0: continue
                             nntot += 1
                             ntot += pcounts[nind[0][m]]
-                        print "SOMETHING WRONG", self.curn, nneighbors, ntot, nntot
+                        print "SOMETHING WRONG", dq.curn, nneighbors, ntot, nntot
                     self.process(offset, i, j, k, dim, opos, fields,
-                                 index_fields)
+                                 index_fields, dq)
                     cpos[2] += dds[2]
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
@@ -599,7 +559,8 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
-                               np.int64_t domain_id, int *nsize):
+                               np.int64_t domain_id, int *nsize,
+                               DistanceQueue dq):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -614,8 +575,8 @@
         nneighbors = self.neighbor_search(opos, octree,
                         nind, nsize, nneighbors, domain_id, &oct, 0)
         self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos,
-                           opos, None, None)
-        self.process(offset, i, j, k, dim, opos, fields, index_fields)
+                           opos, None, None, dq)
+        self.process(offset, i, j, k, dim, opos, fields, index_fields, dq)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
     # This smoothing function evaluates the field, *without* normalization, at
@@ -654,7 +615,7 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef int n, fi
@@ -664,13 +625,13 @@
         cdef np.int64_t pn
         # We get back our mass
         # rho_i = sum(j = 1 .. n) m_j * W_ij
-        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        max_r = sqrt(dq.neighbors[dq.curn-1].r2)
         max_hsml = index_fields[0][gind(i,j,k,dim) + offset]
-        for n in range(self.curn):
+        for n in range(dq.curn):
             # No normalization for the moment.
             # fields[0] is the smoothing length.
-            r2 = self.neighbors[n].r2
-            pn = self.neighbors[n].pn
+            r2 = dq.neighbors[n].r2
+            pn = dq.neighbors[n].pn
             # Smoothing kernel weight function
             mass = fields[0][pn]
             hsml = fields[1][pn]
@@ -713,15 +674,15 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef np.int64_t pn
         # We get back our mass
         # rho_i = sum(j = 1 .. n) m_j * W_ij
-        pn = self.neighbors[0].pn
+        pn = dq.neighbors[0].pn
         self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
-        #self.fp[gind(i,j,k,dim) + offset] = self.neighbors[0].r2
+        #self.fp[gind(i,j,k,dim) + offset] = dq.neighbors[0].r2
         return
 
 nearest_smooth = NearestNeighborSmooth
@@ -747,18 +708,18 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef np.int64_t pn, ni, di
         cdef np.float64_t total_weight = 0.0, total_value = 0.0, r2, val, w
         # We're going to do a very simple IDW average
-        if self.neighbors[0].r2 == 0.0:
-            pn = self.neighbors[0].pn
+        if dq.neighbors[0].r2 == 0.0:
+            pn = dq.neighbors[0].pn
             self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
-        for ni in range(self.curn):
-            r2 = self.neighbors[ni].r2
-            val = fields[0][self.neighbors[ni].pn]
+        for ni in range(dq.curn):
+            r2 = dq.neighbors[ni].r2
+            val = fields[0][dq.neighbors[ni].pn]
             w = r2
             for di in range(self.p2 - 1):
                 w *= r2
@@ -783,10 +744,10 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         cdef np.float64_t max_r
         # We assume "offset" here is the particle index.
-        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        max_r = sqrt(dq.neighbors[dq.curn-1].r2)
         fields[0][offset] = max_r
 
 nth_neighbor_smooth = NthNeighborDistanceSmooth
@@ -804,16 +765,16 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         cdef np.float64_t r2, hsml, dens, mass, weight, lw
         cdef int pn
         # We assume "offset" here is the particle index.
-        hsml = sqrt(self.neighbors[self.curn-1].r2)
+        hsml = sqrt(dq.neighbors[dq.curn-1].r2)
         dens = 0.0
         weight = 0.0
-        for pn in range(self.curn):
-            mass = fields[0][self.neighbors[pn].pn]
-            r2 = self.neighbors[pn].r2
+        for pn in range(dq.curn):
+            mass = fields[0][dq.neighbors[pn].pn]
+            r2 = dq.neighbors[pn].r2
             lw = self.sph_kernel(sqrt(r2) / hsml)
             dens += mass * lw
         weight = (4.0/3.0) * 3.1415926 * hsml**3
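
For orientation while reading this changeset: the relocated r2dist
computes a squared separation under the minimum-image convention on
periodic axes.  A rough NumPy rendering of the same logic (the
max_dist2 early-exit truncation is omitted here):

    import numpy as np

    def r2dist_sketch(ppos, cpos, DW, periodicity):
        # Component-wise separation, wrapped into [-DW/2, DW/2] on
        # each periodic axis, then summed in quadrature.
        DR = np.asarray(ppos, dtype="float64") - np.asarray(cpos, dtype="float64")
        for i in range(3):
            if not periodicity[i]:
                continue
            if DR[i] > DW[i] / 2.0:
                DR[i] -= DW[i]
            elif DR[i] < -DW[i] / 2.0:
                DR[i] += DW[i]
        return np.dot(DR, DR)

    # e.g. in a unit periodic box, points at 0.05 and 0.95 are 0.1
    # apart per axis, so the squared distance is 3 * 0.01 = 0.03
    print(r2dist_sketch([0.05] * 3, [0.95] * 3, [1.0] * 3, [True] * 3))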


https://bitbucket.org/yt_analysis/yt/commits/22c735f34eaf/
Changeset:   22c735f34eaf
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 18:05:42+00:00
Summary:     Adding some light docstrings
Affected #:  1 file

diff -r 090dbc4bcf534386e70613dc4fba5f14c1ca9ab0 -r 22c735f34eafead17393bf3524e39611d72b3a8c yt/utilities/lib/distance_queue.pyx
--- a/yt/utilities/lib/distance_queue.pyx
+++ b/yt/utilities/lib/distance_queue.pyx
@@ -57,6 +57,9 @@
     return r2
 
 cdef class DistanceQueue:
+    """This is a distance queue object, designed to incrementally evaluate N
+    positions against a reference point.  It is initialized with the maximum
+    number that are to be retained (i.e., maxn-nearest neighbors)."""
     def __cinit__(self, int maxn):
         cdef int i
         self.maxn = maxn
@@ -128,6 +131,9 @@
         self.curn = 0
 
     def find_nearest(self, np.float64_t[:] center, np.float64_t[:,:] points):
+        """This function accepts a center and a set of [N,3] points, and it
+        will return the indices into the points array of the maxn closest
+        neighbors."""
         cdef int i, j
         cdef np.float64_t ppos[3], cpos[3]
         self.neighbor_reset()
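
Given these docstrings, a sketch of how the Python-facing entry point
might be exercised; a non-periodic default is assumed here, since
_setup() is a C-level method:

    import numpy as np
    from yt.utilities.lib.distance_queue import DistanceQueue

    points = np.random.random((100, 3))
    center = np.array([0.5, 0.5, 0.5])

    dq = DistanceQueue(8)  # retain the 8 nearest neighbors
    nearest = dq.find_nearest(center, points)
    print(nearest)  # indices into `points` of the 8 closest rows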


https://bitbucket.org/yt_analysis/yt/commits/6c9ed4a3752d/
Changeset:   6c9ed4a3752d
Branch:      yt
User:        jzuhone
Date:        2016-08-24 18:15:23+00:00
Summary:     Merged in MatthewTurk/yt (pull request #2329)

Split out distance queue into its own Cython module
Affected #:  5 files

diff -r 1ba5918b38d61ac6f9134e4ca918f0bde599b349 -r 6c9ed4a3752d649d872cf47adffd419a3c8bf903 setup.py
--- a/setup.py
+++ b/setup.py
@@ -196,7 +196,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses",
+    "amr_kdtools", "lenses", "distance_queue"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(

diff -r 1ba5918b38d61ac6f9134e4ca918f0bde599b349 -r 6c9ed4a3752d649d872cf47adffd419a3c8bf903 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -23,15 +23,12 @@
 from yt.utilities.lib.fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport kernel_func, get_kernel_func, gind
+from yt.utilities.lib.distance_queue cimport NeighborList, Neighbor_compare, \
+    r2dist, DistanceQueue
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
 
-cdef struct NeighborList
-cdef struct NeighborList:
-    np.int64_t pn       # Particle number
-    np.float64_t r2     # radius**2
-
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef kernel_func sph_kernel
@@ -39,10 +36,8 @@
     cdef np.float64_t DW[3]
     cdef int nfields
     cdef int maxn
-    cdef int curn
     cdef bint periodicity[3]
     # Note that we are preallocating here, so this is *not* threadsafe.
-    cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t[:,:] ppos,
@@ -52,7 +47,7 @@
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds)
+                               np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
@@ -65,10 +60,7 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize)
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3])
-    cdef void neighbor_reset(self)
+                               int *nsize, DistanceQueue dq)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
@@ -78,7 +70,7 @@
                             np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
-                            np.float64_t[:,:] oct_dds)
+                            np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields)
+                      np.float64_t **index_fields, DistanceQueue dq)

diff -r 1ba5918b38d61ac6f9134e4ca918f0bde599b349 -r 6c9ed4a3752d649d872cf47adffd419a3c8bf903 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -24,44 +24,6 @@
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, OctInfo
 
-cdef int Neighbor_compare(void *on1, void *on2) nogil:
-    cdef NeighborList *n1
-    cdef NeighborList *n2
-    n1 = <NeighborList *> on1
-    n2 = <NeighborList *> on2
-    # Note that we set this up so that "greatest" evaluates to the *end* of the
-    # list, so we can do standard radius comparisons.
-    if n1.r2 < n2.r2:
-        return -1
-    elif n1.r2 == n2.r2:
-        return 0
-    else:
-        return 1
-
-@cython.cdivision(True)
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.initializedcheck(False)
-cdef np.float64_t r2dist(np.float64_t ppos[3],
-                         np.float64_t cpos[3],
-                         np.float64_t DW[3],
-                         bint periodicity[3],
-                         np.float64_t max_dist2):
-    cdef int i
-    cdef np.float64_t r2, DR
-    r2 = 0.0
-    for i in range(3):
-        DR = (ppos[i] - cpos[i])
-        if not periodicity[i]:
-            pass
-        elif (DR > DW[i]/2.0):
-            DR -= DW[i]
-        elif (DR < -DW[i]/2.0):
-            DR += DW[i]
-        r2 += DR * DR
-        if max_dist2 >= 0.0 and r2 > max_dist2:
-            return -1.0
-    return r2
 
 cdef void spherical_coord_setup(np.float64_t ipos[3], np.float64_t opos[3]):
     opos[0] = ipos[0] * sin(ipos[1]) * cos(ipos[2])
@@ -80,10 +42,6 @@
         self.nvals = nvals
         self.nfields = nfields
         self.maxn = max_neighbors
-
-        self.neighbors = <NeighborList *> malloc(
-            sizeof(NeighborList) * self.maxn)
-        self.neighbor_reset()
         self.sph_kernel = get_kernel_func(kernel_name)
 
     def initialize(self, *args):
@@ -247,6 +205,9 @@
         cdef np.ndarray[np.uint8_t, ndim=1] visited
         visited = np.zeros(mdom_ind.shape[0], dtype="uint8")
         cdef int nproc = 0
+        # This should be thread-private if we ever go to OpenMP
+        cdef DistanceQueue dist_queue = DistanceQueue(self.maxn)
+        dist_queue._setup(self.DW, self.periodicity)
         for i in range(oct_positions.shape[0]):
             for j in range(3):
                 pos[j] = oct_positions[i, j]
@@ -260,7 +221,7 @@
                 dims, moi.left_edge, moi.dds, cart_positions, field_pointers, doff,
                 &nind, pind, pcount, offset, index_field_pointers,
                 particle_octree, domain_id, &nsize, oct_left_edges,
-                oct_dds)
+                oct_dds, dist_queue)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -369,6 +330,9 @@
         # refers to that oct's particles.
         cdef int maxnei = 0
         cdef int nproc = 0
+        # This should be thread-private if we ever go to OpenMP
+        cdef DistanceQueue dist_queue = DistanceQueue(self.maxn)
+        dist_queue._setup(self.DW, self.periodicity)
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -380,7 +344,8 @@
                     pos[k] = positions[pind0, k]
                 self.neighbor_process_particle(pos, cart_positions, field_pointers,
                             doff, &nind, pind, pcount, pind0,
-                            NULL, particle_octree, domain_id, &nsize)
+                            NULL, particle_octree, domain_id, &nsize,
+                            dist_queue)
         #print "VISITED", visited.sum(), visited.size,
         #print 100.0*float(visited.sum())/visited.size
         if nind != NULL:
@@ -463,55 +428,9 @@
 
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **ifields):
+                      np.float64_t **ifields, DistanceQueue dq):
         raise NotImplementedError
 
-    cdef void neighbor_reset(self):
-        self.curn = 0
-        for i in range(self.maxn):
-            self.neighbors[i].pn = -1
-            self.neighbors[i].r2 = 1e300
-
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3]):
-        # Here's a python+numpy simulator of this:
-        # http://paste.yt-project.org/show/5445/
-        cdef int i, di
-        cdef np.float64_t r2, r2_trunc
-        if self.curn == self.maxn:
-            # Truncate calculation if it's bigger than this in any dimension
-            r2_trunc = self.neighbors[self.curn - 1].r2
-        else:
-            # Don't truncate our calculation
-            r2_trunc = -1
-        r2 = r2dist(ppos, cpos, self.DW, self.periodicity, r2_trunc)
-        if r2 == -1:
-            return
-        if self.curn == 0:
-            self.neighbors[0].r2 = r2
-            self.neighbors[0].pn = pn
-            self.curn += 1
-            return
-        # Now insert in a sorted way
-        di = -1
-        for i in range(self.curn - 1, -1, -1):
-            # We are checking if i is less than us, to see if we should insert
-            # to the right (i.e., i+1).
-            if self.neighbors[i].r2 < r2:
-                di = i
-                break
-        # The outermost one is already too small.
-        if di == self.maxn - 1:
-            return
-        if (self.maxn - (di + 2)) > 0:
-            memmove(<void *> (self.neighbors + di + 2),
-                    <void *> (self.neighbors + di + 1),
-                    sizeof(NeighborList) * (self.maxn - (di + 2)))
-        self.neighbors[di + 1].r2 = r2
-        self.neighbors[di + 1].pn = pn
-        if self.curn < self.maxn:
-            self.curn += 1
-
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -526,6 +445,7 @@
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
                             np.float64_t[:,:] oct_dds,
+                            DistanceQueue dq
                             ):
         # We are now given the number of neighbors, the indices into the
         # domains for them, and the number of particles for each.
@@ -535,13 +455,13 @@
         cdef np.float64_t ex[2] 
         cdef np.float64_t DR[2]
         cdef np.float64_t cp, r2_trunc, r2, dist
-        self.neighbor_reset()
+        dq.neighbor_reset()
         for ni in range(nneighbors):
             if nind[ni] == -1: continue
             # terminate early if all 8 corners of oct are farther away than
             # most distant currently known neighbor
-            if oct_left_edges != None and self.curn == self.maxn:
-                r2_trunc = self.neighbors[self.curn - 1].r2
+            if oct_left_edges != None and dq.curn == dq.maxn:
+                r2_trunc = dq.neighbors[dq.curn - 1].r2
                 # iterate over each dimension in the outer loop so we can
                 # consolidate temporary storage
                 # What this next bit does is figure out which component is the
@@ -577,7 +497,7 @@
                 pn = pinds[offset + i]
                 for j in range(3):
                     pos[j] = ppos[pn, j]
-                self.neighbor_eval(pn, pos, cpos)
+                dq.neighbor_eval(pn, pos, cpos)
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
@@ -592,7 +512,8 @@
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds):
+                               np.float64_t[:,:] oct_dds,
+                               DistanceQueue dq):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -610,17 +531,18 @@
                     nneighbors = self.neighbor_search(opos, octree,
                                     nind, nsize, nneighbors, domain_id, &oct, 0)
                     self.neighbor_find(nneighbors, nind[0], doffs, pcounts,
-                                       pinds, ppos, opos, oct_left_edges, oct_dds)
+                                       pinds, ppos, opos, oct_left_edges,
+                                       oct_dds, dq)
                     # Now we have all our neighbors in our neighbor list.
-                    if self.curn <-1*self.maxn:
+                    if dq.curn <-1*dq.maxn:
                         ntot = nntot = 0
                         for m in range(nneighbors):
                             if nind[0][m] < 0: continue
                             nntot += 1
                             ntot += pcounts[nind[0][m]]
-                        print "SOMETHING WRONG", self.curn, nneighbors, ntot, nntot
+                        print "SOMETHING WRONG", dq.curn, nneighbors, ntot, nntot
                     self.process(offset, i, j, k, dim, opos, fields,
-                                 index_fields)
+                                 index_fields, dq)
                     cpos[2] += dds[2]
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
@@ -637,7 +559,8 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree,
-                               np.int64_t domain_id, int *nsize):
+                               np.int64_t domain_id, int *nsize,
+                               DistanceQueue dq):
         # Note that we assume that fields[0] == smoothing length in the native
         # units supplied.  We can now iterate over every cell in the block and
         # every particle to find the nearest.  We will use a priority heap.
@@ -652,8 +575,8 @@
         nneighbors = self.neighbor_search(opos, octree,
                         nind, nsize, nneighbors, domain_id, &oct, 0)
         self.neighbor_find(nneighbors, nind[0], doffs, pcounts, pinds, ppos,
-                           opos, None, None)
-        self.process(offset, i, j, k, dim, opos, fields, index_fields)
+                           opos, None, None, dq)
+        self.process(offset, i, j, k, dim, opos, fields, index_fields, dq)
 
 cdef class VolumeWeightedSmooth(ParticleSmoothOperation):
     # This smoothing function evaluates the field, *without* normalization, at
@@ -692,7 +615,7 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef int n, fi
@@ -702,13 +625,13 @@
         cdef np.int64_t pn
         # We get back our mass
         # rho_i = sum(j = 1 .. n) m_j * W_ij
-        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        max_r = sqrt(dq.neighbors[dq.curn-1].r2)
         max_hsml = index_fields[0][gind(i,j,k,dim) + offset]
-        for n in range(self.curn):
+        for n in range(dq.curn):
             # No normalization for the moment.
             # fields[0] is the smoothing length.
-            r2 = self.neighbors[n].r2
-            pn = self.neighbors[n].pn
+            r2 = dq.neighbors[n].r2
+            pn = dq.neighbors[n].pn
             # Smoothing kernel weight function
             mass = fields[0][pn]
             hsml = fields[1][pn]
@@ -751,15 +674,15 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef np.int64_t pn
         # We get back our mass
         # rho_i = sum(j = 1 .. n) m_j * W_ij
-        pn = self.neighbors[0].pn
+        pn = dq.neighbors[0].pn
         self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
-        #self.fp[gind(i,j,k,dim) + offset] = self.neighbors[0].r2
+        #self.fp[gind(i,j,k,dim) + offset] = dq.neighbors[0].r2
         return
 
 nearest_smooth = NearestNeighborSmooth
@@ -785,18 +708,18 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         # We have our i, j, k for our cell, as well as the cell position.
         # We also have a list of neighboring particles with particle numbers.
         cdef np.int64_t pn, ni, di
         cdef np.float64_t total_weight = 0.0, total_value = 0.0, r2, val, w
         # We're going to do a very simple IDW average
-        if self.neighbors[0].r2 == 0.0:
-            pn = self.neighbors[0].pn
+        if dq.neighbors[0].r2 == 0.0:
+            pn = dq.neighbors[0].pn
             self.fp[gind(i,j,k,dim) + offset] = fields[0][pn]
-        for ni in range(self.curn):
-            r2 = self.neighbors[ni].r2
-            val = fields[0][self.neighbors[ni].pn]
+        for ni in range(dq.curn):
+            r2 = dq.neighbors[ni].r2
+            val = fields[0][dq.neighbors[ni].pn]
             w = r2
             for di in range(self.p2 - 1):
                 w *= r2
@@ -821,10 +744,10 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         cdef np.float64_t max_r
         # We assume "offset" here is the particle index.
-        max_r = sqrt(self.neighbors[self.curn-1].r2)
+        max_r = sqrt(dq.neighbors[dq.curn-1].r2)
         fields[0][offset] = max_r
 
 nth_neighbor_smooth = NthNeighborDistanceSmooth
@@ -842,16 +765,16 @@
     @cython.initializedcheck(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields):
+                      np.float64_t **index_fields, DistanceQueue dq):
         cdef np.float64_t r2, hsml, dens, mass, weight, lw
         cdef int pn
         # We assume "offset" here is the particle index.
-        hsml = sqrt(self.neighbors[self.curn-1].r2)
+        hsml = sqrt(dq.neighbors[dq.curn-1].r2)
         dens = 0.0
         weight = 0.0
-        for pn in range(self.curn):
-            mass = fields[0][self.neighbors[pn].pn]
-            r2 = self.neighbors[pn].r2
+        for pn in range(dq.curn):
+            mass = fields[0][dq.neighbors[pn].pn]
+            r2 = dq.neighbors[pn].r2
             lw = self.sph_kernel(sqrt(r2) / hsml)
             dens += mass * lw
         weight = (4.0/3.0) * 3.1415926 * hsml**3

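The last hunk above is essentially the whole of that density estimate: the smoothing length is taken as the distance to the farthest retained neighbor, each neighbor's mass is weighted by the SPH kernel evaluated at r/h, and the normalization constant is the kernel-sphere volume. A minimal pure-Python sketch of the same arithmetic (the function name and arguments are illustrative, not yt API, and the hunk truncates before the final use of weight):

import numpy as np

def density_estimate(masses, r2_sorted, sph_kernel):
    # masses[n] and r2_sorted[n] mirror fields[0][dq.neighbors[n].pn]
    # and dq.neighbors[n].r2, already sorted by increasing squared distance.
    hsml = np.sqrt(r2_sorted[-1])              # distance to the Nth neighbor
    dens = 0.0
    for mass, r2 in zip(masses, r2_sorted):
        dens += mass * sph_kernel(np.sqrt(r2) / hsml)
    weight = (4.0 / 3.0) * np.pi * hsml ** 3   # volume of the kernel sphere
    return dens, weight                        # final normalization lies outside the hunk
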
diff -r 1ba5918b38d61ac6f9134e4ca918f0bde599b349 -r 6c9ed4a3752d649d872cf47adffd419a3c8bf903 yt/utilities/lib/distance_queue.pxd
--- /dev/null
+++ b/yt/utilities/lib/distance_queue.pxd
@@ -0,0 +1,43 @@
+"""
+A queue for evaluating distances to discrete points
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport cython
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+from libc.string cimport memmove
+
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2)
+
+cdef class DistanceQueue:
+    cdef int maxn
+    cdef int curn
+    cdef np.float64_t DW[3]
+    cdef bint periodicity[3]
+    cdef NeighborList* neighbors # flat array
+    cdef void _setup(self, np.float64_t DW[3], bint periodicity[3])
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)

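The r2dist helper declared above (and defined in distance_queue.pyx below) computes the squared Euclidean distance between two points, applying minimum-image wrapping on any axis flagged periodic and bailing out early once a caller-supplied threshold is exceeded. A NumPy sketch of the same computation, assuming separations never exceed 1.5 domain widths (r2dist_np is an illustrative name, not part of the module):

import numpy as np

def r2dist_np(ppos, cpos, DW, periodicity, max_dist2=-1.0):
    DR = np.asarray(ppos, dtype=np.float64) - np.asarray(cpos, dtype=np.float64)
    DW = np.asarray(DW, dtype=np.float64)
    per = np.asarray(periodicity, dtype=bool)
    # Minimum-image wrap: fold each periodic component into [-DW/2, DW/2].
    hi = per & (DR > DW / 2.0)
    lo = per & (DR < -DW / 2.0)
    DR[hi] -= DW[hi]
    DR[lo] += DW[lo]
    r2 = float(np.dot(DR, DR))
    # The C version checks the threshold per axis; a single check at the
    # end yields the same result.
    if 0.0 <= max_dist2 < r2:
        return -1.0   # sentinel: farther than max_dist2
    return r2
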
diff -r 1ba5918b38d61ac6f9134e4ca918f0bde599b349 -r 6c9ed4a3752d649d872cf47adffd419a3c8bf903 yt/utilities/lib/distance_queue.pyx
--- /dev/null
+++ b/yt/utilities/lib/distance_queue.pyx
@@ -0,0 +1,149 @@
+"""
+Distance queue implementation
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+cimport numpy as np
+import numpy as np
+cimport cython
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1
+    cdef NeighborList *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.initializedcheck(False)
+cdef np.float64_t r2dist(np.float64_t ppos[3],
+                         np.float64_t cpos[3],
+                         np.float64_t DW[3],
+                         bint periodicity[3],
+                         np.float64_t max_dist2):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if max_dist2 >= 0.0 and r2 > max_dist2:
+            return -1.0
+    return r2
+
+cdef class DistanceQueue:
+    """This is a distance queue object, designed to incrementally evaluate N
+    positions against a reference point.  It is initialized with the maximum
+    number that are to be retained (i.e., maxn-nearest neighbors)."""
+    def __cinit__(self, int maxn):
+        cdef int i
+        self.maxn = maxn
+        self.curn = 0
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
+        for i in range(3):
+            self.DW[i] = 0
+            self.periodicity[i] = False
+
+    cdef void _setup(self, np.float64_t DW[3], bint periodicity[3]):
+        for i in range(3):
+            self.DW[i] = DW[i]
+            self.periodicity[i] = periodicity[i]
+
+    def setup(self, np.float64_t[:] DW, periodicity):
+        for i in range(3):
+            self.DW[i] = DW[i]
+            self.periodicity[i] = periodicity[i]
+
+    def __dealloc__(self):
+        free(self.neighbors)
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        # Here's a python+numpy simulator of this:
+        # http://paste.yt-project.org/show/5445/
+        cdef int i, di
+        cdef np.float64_t r2, r2_trunc
+        if self.curn == self.maxn:
+            # Truncate calculation if it's bigger than this in any dimension
+            r2_trunc = self.neighbors[self.curn - 1].r2
+        else:
+            # Don't truncate our calculation
+            r2_trunc = -1
+        r2 = r2dist(ppos, cpos, self.DW, self.periodicity, r2_trunc)
+        if r2 == -1:
+            return
+        if self.curn == 0:
+            self.neighbors[0].r2 = r2
+            self.neighbors[0].pn = pn
+            self.curn += 1
+            return
+        # Now insert in a sorted way
+        di = -1
+        for i in range(self.curn - 1, -1, -1):
+            # We are checking if i is less than us, to see if we should insert
+            # to the right (i.e., i+1).
+            if self.neighbors[i].r2 < r2:
+                di = i
+                break
+        # The outermost one is already too small.
+        if di == self.maxn - 1:
+            return
+        if (self.maxn - (di + 2)) > 0:
+            memmove(<void *> (self.neighbors + di + 2),
+                    <void *> (self.neighbors + di + 1),
+                    sizeof(NeighborList) * (self.maxn - (di + 2)))
+        self.neighbors[di + 1].r2 = r2
+        self.neighbors[di + 1].pn = pn
+        if self.curn < self.maxn:
+            self.curn += 1
+
+    cdef void neighbor_reset(self):
+        for i in range(self.maxn):
+            self.neighbors[i].r2 = 1e300
+            self.neighbors[i].pn = -1
+        self.curn = 0
+
+    def find_nearest(self, np.float64_t[:] center, np.float64_t[:,:] points):
+        """This function accepts a center and a set of [N,3] points, and it
+        will return the indices into the points array of the maxn closest
+        neighbors."""
+        cdef int i, j
+        cdef np.float64_t ppos[3], cpos[3]
+        self.neighbor_reset()
+        for i in range(3):
+            cpos[i] = center[i]
+        for j in range(points.shape[0]):
+            for i in range(3):
+                ppos[i] = points[j,i]
+            self.neighbor_eval(j, ppos, cpos)
+        rv = np.empty(self.curn, dtype="int64")
+        for i in range(self.curn):
+            rv[i] = self.neighbors[i].pn
+        return rv

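Since setup and find_nearest are def methods, the new queue can be exercised from pure Python once the extension is built. A quick sanity check against brute force (the data here are arbitrary, chosen so the non-periodic comparison is trivially correct):

import numpy as np
from yt.utilities.lib.distance_queue import DistanceQueue

pts = np.random.random((1000, 3))             # particles in the unit box
center = np.array([0.5, 0.5, 0.5])

dq = DistanceQueue(8)                         # retain the 8 nearest neighbors
dq.setup(np.ones(3), [False, False, False])   # domain width 1, non-periodic
nearest = dq.find_nearest(center, pts)        # indices, closest first

# neighbor_eval keeps the flat array sorted, so the result should match a
# full argsort of squared distances truncated to maxn entries.
brute = np.argsort(((pts - center) ** 2).sum(axis=1))[:8]
assert (nearest == brute).all()

Note that despite the "priority heap" comment in particle_smooth, the queue is a sorted flat array: neighbor_eval memmoves the tail right by one slot on each insertion, which is simple and cache-friendly for small maxn.
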
Repository URL: https://bitbucket.org/yt_analysis/yt/
