[yt-svn] commit/yt: 20 new changesets

commits-noreply at bitbucket.org
Fri Sep 30 10:46:51 PDT 2016


20 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6195b3ba99bb/
Changeset:   6195b3ba99bb
Branch:      yt
User:        MatthewTurk
Date:        2016-06-30 18:18:16+00:00
Summary:     First set of changes for boolean objects
Affected #:  4 files

diff -r 667d2aa2b7683bb2f695059d2f23dec485645d85 -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1314,7 +1314,6 @@
             self.index._identify_base_chunk(self)
         return self._current_chunk.fcoords_vertex
 
-
 class YTSelectionContainer0D(YTSelectionContainer):
     _spatial = False
     _dimensionality = 0
@@ -1760,6 +1759,37 @@
         """
         return self.quantities.total_quantity(("index", "cell_volume"))
 
+    def __or__(self, other):
+        assert(isinstance(other, YTSelectionContainer3D))
+        assert(self.ds is other.ds)
+        # Should maybe do something with field parameters here
+        return YTBooleanOperator("OR", self, other, ds = self.ds)
+
+    def __invert__(self):
+        # ~obj
+        asel = yt.geometry.selection_routines.AlwaysSelector(self.ds)
+        return YTBooleanOperator("NOT", self, asel, ds = self.ds)
+
+    def __xor__(self, other):
+        assert(isinstance(other, YTSelectionContainer3D))
+        assert(self.ds is other.ds)
+        return YTBooleanOperator("XOR", self, other, ds = self.ds)
+
+    def __and__(self, other):
+        assert(isinstance(other, YTSelectionContainer3D))
+        assert(self.ds is other.ds)
+        return YTBooleanOperator("AND", self, other, ds = self.ds)
+
+class YTBooleanOperator(YTSelectionContainer3D):
+    _type_name = "bool"
+    def __init__(self, op, dobj1, dobj2, ds = None, field_parameters = None,
+                 data_source = None):
+        YTSelectionContainer3D.__init__(self, None, ds, field_parameters,
+                data_source)
+        name = "Boolean%sSelector" % (op.upper(),)
+        sel_cls = getattr(yt.geometry.selection_routines, name)
+        self._selector = sel_cls(dobj1, dobj2)
+
 # Many of these items are set up specifically to ensure that
 # we are not breaking old pickle files.  This means we must only call the
 # _reconstruct_object and that we cannot mandate any additional arguments to

diff -r 667d2aa2b7683bb2f695059d2f23dec485645d85 -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -26,6 +26,7 @@
     fix_length
 from yt.geometry.selection_routines import \
     points_in_cells
+import yt.geometry.selection_routines as selection_routines
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \

diff -r 667d2aa2b7683bb2f695059d2f23dec485645d85 -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -75,6 +75,11 @@
     cdef SelectorObject base_selector
     cdef public np.int64_t domain_id
 
+cdef class BooleanSelector(SelectorObject):
+    cdef SelectorObject sel1
+    cdef SelectorObject sel2
+    cdef int operation(self, int rv1, int rv2) nogil
+
 cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,
                                         np.float64_t dw, bint periodic) nogil:
     cdef np.float64_t rel = x1 - x2

diff -r 667d2aa2b7683bb2f695059d2f23dec485645d85 -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2088,3 +2088,92 @@
             if mask[p]: break
 
     return mask
+
+cdef class BooleanSelector(SelectorObject):
+
+    def __init__(self, dobj1, dobj2):
+        # Note that this has a different API than the other selector objects,
+        # so will not work as a traditional data selector.
+        self.min_level = -1
+        self.max_level = 100
+        if not hasattr(dobj1, "selector"):
+            self.sel1 = dobj1
+        else:
+            self.sel1 = dobj1.selector
+        if not hasattr(dobj2, "selector"):
+            self.sel2 = dobj2
+        else:
+            self.sel2 = dobj2.selector
+
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        cdef int rv1 = self.sel1.select_cell(pos, dds)
+        cdef int rv2 = self.sel2.select_cell(pos, dds)
+        return self.operation(rv1, rv2)
+
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef int rv1 = self.sel1.select_point(pos)
+        cdef int rv2 = self.sel2.select_point(pos)
+        return self.operation(rv1, rv2)
+
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        cdef int rv1 = self.sel1.select_sphere(pos, radius)
+        cdef int rv2 = self.sel2.select_sphere(pos, radius)
+        return self.operation(rv1, rv2)
+
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        cdef int rv1 = self.sel1.select_bbox(left_edge, right_edge)
+        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge)
+        return self.operation(rv1, rv2)
+
+    cdef int operation(self, int rv1, int rv2) nogil:
+        return 0
+
+cdef class BooleanANDSelector(BooleanSelector):
+    cdef int operation(self, int rv1, int rv2) nogil:
+        if rv1 == 1 and rv2 == 1: return 1
+        return 0
+
+    def _hash_vals(self):
+        return (self.sel1._hash_vals() +
+                ("and",) +
+                self.sel2._hash_vals())
+
+cdef class BooleanORSelector(BooleanSelector):
+    cdef int operation(self, int rv1, int rv2) nogil:
+        if rv1 == 1 or rv2 == 1: return 1
+        return 0
+
+    def _hash_vals(self):
+        return (self.sel1._hash_vals() +
+                ("or",) +
+                self.sel2._hash_vals())
+
+cdef class BooleanNOTSelector(BooleanSelector):
+    # This selector mandates that sel2 is an AlwaysSelector, or something like
+    # that, as it's ignored.
+    cdef int operation(self, int rv1, int rv2) nogil:
+        # Ignore t
+        if rv2 == 0:
+            # This shouldn't happen!
+            return -1
+        elif rv1 == 0:
+            return 1
+        elif rv1 == 1:
+            return 0
+
+    def _hash_vals(self):
+        return (self.sel1._hash_vals() +
+                ("not",) +
+                self.sel2._hash_vals())
+
+cdef class BooleanXORSelector(BooleanSelector):
+    cdef int operation(self, int rv1, int rv2) nogil:
+        if rv1 == rv2:
+            return 0
+        return 1
+
+    def _hash_vals(self):
+        return (self.sel1._hash_vals() +
+                ("xor",) +
+                self.sel2._hash_vals())

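For illustration, a minimal sketch of how these new overloads are meant to
compose (assuming a loaded dataset; the object names here are illustrative,
with ds.sphere and ds.region used as in the docstrings and tests below):

    import yt
    ds = yt.load("RedshiftOutput0005")
    sp = ds.sphere("c", 0.1)
    reg = ds.region([0.5]*3, [0.4]*3, [0.6]*3)
    both = sp & reg       # YTBooleanOperator("AND", sp, reg)
    either = sp | reg     # YTBooleanOperator("OR", sp, reg)
    exclusive = sp ^ reg  # YTBooleanOperator("XOR", sp, reg)
    outside = ~sp         # YTBooleanOperator("NOT", sp, AlwaysSelector(ds))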

https://bitbucket.org/yt_analysis/yt/commits/9d40a5c2cc3f/
Changeset:   9d40a5c2cc3f
Branch:      yt
User:        MatthewTurk
Date:        2016-06-30 20:18:47+00:00
Summary:     A few minor changes; almost to the end result I want.
Affected #:  3 files

diff -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 -r 9d40a5c2cc3ffd5152d8dc559ee4a1a99e076927 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1782,13 +1782,17 @@
 
 class YTBooleanOperator(YTSelectionContainer3D):
     _type_name = "bool"
+    _con_args = ("op", "dobj1", "dobj2")
     def __init__(self, op, dobj1, dobj2, ds = None, field_parameters = None,
                  data_source = None):
         YTSelectionContainer3D.__init__(self, None, ds, field_parameters,
                 data_source)
-        name = "Boolean%sSelector" % (op.upper(),)
+        self.op = op.upper()
+        self.dobj1 = dobj1
+        self.dobj2 = dobj2
+        name = "Boolean%sSelector" % (self.op,)
         sel_cls = getattr(yt.geometry.selection_routines, name)
-        self._selector = sel_cls(dobj1, dobj2)
+        self._selector = sel_cls(self)
 
 # Many of these items are set up specifically to ensure that
 # we are not breaking old pickle files.  This means we must only call the

diff -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 -r 9d40a5c2cc3ffd5152d8dc559ee4a1a99e076927 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -76,8 +76,8 @@
     cdef public np.int64_t domain_id
 
 cdef class BooleanSelector(SelectorObject):
-    cdef SelectorObject sel1
-    cdef SelectorObject sel2
+    cdef public SelectorObject sel1
+    cdef public SelectorObject sel2
     cdef int operation(self, int rv1, int rv2) nogil
 
 cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,

diff -r 6195b3ba99bb2c3a335e4966e7ff7e8e06d19376 -r 9d40a5c2cc3ffd5152d8dc559ee4a1a99e076927 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2091,19 +2091,28 @@
 
 cdef class BooleanSelector(SelectorObject):
 
-    def __init__(self, dobj1, dobj2):
+    def __init__(self, dobj):
         # Note that this has a different API than the other selector objects,
         # so will not work as a traditional data selector.
-        self.min_level = -1
-        self.max_level = 100
-        if not hasattr(dobj1, "selector"):
-            self.sel1 = dobj1
+        if not hasattr(dobj.dobj1, "selector"):
+            self.sel1 = dobj.dobj1
         else:
-            self.sel1 = dobj1.selector
-        if not hasattr(dobj2, "selector"):
-            self.sel2 = dobj2
+            self.sel1 = dobj.dobj1.selector
+        if not hasattr(dobj.dobj2, "selector"):
+            self.sel2 = dobj.dobj2
         else:
-            self.sel2 = dobj2.selector
+            self.sel2 = dobj.dobj2.selector
+
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        cdef int rv1 = self.sel1.select_bbox(left_edge, right_edge)
+        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge)
+        return self.operation(rv1, rv2)
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        return -1
 
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
         cdef int rv1 = self.sel1.select_cell(pos, dds)
@@ -2120,18 +2129,12 @@
         cdef int rv2 = self.sel2.select_sphere(pos, radius)
         return self.operation(rv1, rv2)
 
-    cdef int select_bbox(self, np.float64_t left_edge[3],
-                               np.float64_t right_edge[3]) nogil:
-        cdef int rv1 = self.sel1.select_bbox(left_edge, right_edge)
-        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge)
-        return self.operation(rv1, rv2)
-
     cdef int operation(self, int rv1, int rv2) nogil:
-        return 0
+        return -1
 
 cdef class BooleanANDSelector(BooleanSelector):
     cdef int operation(self, int rv1, int rv2) nogil:
-        if rv1 == 1 and rv2 == 1: return 1
+        if rv1 == rv2 == 1: return 1
         return 0
 
     def _hash_vals(self):
@@ -2144,6 +2147,22 @@
         if rv1 == 1 or rv2 == 1: return 1
         return 0
 
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] rv1
+        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] rv2
+        cdef np.ndarray[np.uint8_t, ndim=1] rv
+        rv = np.ones(left_edges.shape[0], dtype="uint8")
+        rv1 = self.sel1.select_grids(left_edges, right_edges, levels)
+        rv2 = self.sel2.select_grids(left_edges, right_edges, levels)
+        cdef int i
+        for i in range(rv1.size):
+            if rv1[i] == rv2[i] == 0: rv[i] = 0
+        return rv.astype("bool")
+
+
     def _hash_vals(self):
         return (self.sel1._hash_vals() +
                 ("or",) +
@@ -2169,9 +2188,19 @@
 
 cdef class BooleanXORSelector(BooleanSelector):
     cdef int operation(self, int rv1, int rv2) nogil:
-        if rv1 == rv2:
-            return 0
-        return 1
+        if rv1 == 1 and rv2 == 0:
+            return 1
+        elif rv1 == 0 and rv2 == 1:
+            return 1
+        return 0
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
+        cdef int rv2 = self.sel2.select_grid(left_edge, right_edge, level, o)
+        if rv1 == 1 or rv2 == 1: return 1
+        return 0
 
     def _hash_vals(self):
         return (self.sel1._hash_vals() +

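The new _con_args tuple names the constructor arguments yt uses when it
reconstructs a data object (e.g. from a pickle, via the _reconstruct_object
machinery mentioned above). A rough sketch of that pattern, assuming the
stored attributes match the constructor signature (the exact internal helper
differs):

    def reconstruct(obj):
        # _con_args == ("op", "dobj1", "dobj2") for YTBooleanOperator
        args = tuple(getattr(obj, a) for a in obj._con_args)
        return type(obj)(*args, ds=obj.ds)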

https://bitbucket.org/yt_analysis/yt/commits/6218ca4cb896/
Changeset:   6218ca4cb896
Branch:      yt
User:        MatthewTurk
Date:        2016-06-30 21:44:01+00:00
Summary:     Starting to flesh out the correct methods for boolean ops
Affected #:  1 file

diff -r 9d40a5c2cc3ffd5152d8dc559ee4a1a99e076927 -r 6218ca4cb896c6dd3e4e5a49ac158f657977ac04 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2103,39 +2103,44 @@
         else:
             self.sel2 = dobj.dobj2.selector
 
+cdef class BooleanANDSelector(BooleanSelector):
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
         cdef int rv1 = self.sel1.select_bbox(left_edge, right_edge)
+        if rv1 == 0: return 0
         cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge)
-        return self.operation(rv1, rv2)
+        if rv2 == 0: return 0
+        return 1
 
     cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level,
                          Oct *o = NULL) nogil:
-        return -1
+        cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge, level, o)
+        if rv2 == 0: return 0
+        return 1
 
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
         cdef int rv1 = self.sel1.select_cell(pos, dds)
+        if rv1 == 0: return 0
         cdef int rv2 = self.sel2.select_cell(pos, dds)
-        return self.operation(rv1, rv2)
+        if rv2 == 0: return 0
+        return 1
 
     cdef int select_point(self, np.float64_t pos[3]) nogil:
         cdef int rv1 = self.sel1.select_point(pos)
+        if rv1 == 0: return 0
         cdef int rv2 = self.sel2.select_point(pos)
-        return self.operation(rv1, rv2)
+        if rv2 == 0: return 0
+        return 1
 
     cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
         cdef int rv1 = self.sel1.select_sphere(pos, radius)
+        if rv1 == 0: return 0
         cdef int rv2 = self.sel2.select_sphere(pos, radius)
-        return self.operation(rv1, rv2)
-
-    cdef int operation(self, int rv1, int rv2) nogil:
-        return -1
-
-cdef class BooleanANDSelector(BooleanSelector):
-    cdef int operation(self, int rv1, int rv2) nogil:
-        if rv1 == rv2 == 1: return 1
-        return 0
+        if rv2 == 0: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.sel1._hash_vals() +
@@ -2143,25 +2148,43 @@
                 self.sel2._hash_vals())
 
 cdef class BooleanORSelector(BooleanSelector):
-    cdef int operation(self, int rv1, int rv2) nogil:
-        if rv1 == 1 or rv2 == 1: return 1
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        cdef int rv1 = self.sel1.select_bbox(left_edge, right_edge)
+        if rv1 == 1: return 1
+        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge)
+        if rv2 == 1: return 1
         return 0
 
-    def select_grids(self,
-                     np.ndarray[np.float64_t, ndim=2] left_edges,
-                     np.ndarray[np.float64_t, ndim=2] right_edges,
-                     np.ndarray[np.int32_t, ndim=2] levels):
-        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] rv1
-        cdef np.ndarray[np.uint8_t, ndim=1, cast=True] rv2
-        cdef np.ndarray[np.uint8_t, ndim=1] rv
-        rv = np.ones(left_edges.shape[0], dtype="uint8")
-        rv1 = self.sel1.select_grids(left_edges, right_edges, levels)
-        rv2 = self.sel2.select_grids(left_edges, right_edges, levels)
-        cdef int i
-        for i in range(rv1.size):
-            if rv1[i] == rv2[i] == 0: rv[i] = 0
-        return rv.astype("bool")
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
+        if rv1 == 1: return 1
+        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge, level, o)
+        if rv2 == 1: return 1
+        return 0
 
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        cdef int rv1 = self.sel1.select_cell(pos, dds)
+        if rv1 == 1: return 1
+        cdef int rv2 = self.sel2.select_cell(pos, dds)
+        if rv2 == 1: return 1
+        return 0
+
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef int rv1 = self.sel1.select_point(pos)
+        if rv1 == 1: return 1
+        cdef int rv2 = self.sel2.select_point(pos)
+        if rv2 == 1: return 1
+        return 0
+
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        cdef int rv1 = self.sel1.select_sphere(pos, radius)
+        if rv1 == 1: return 1
+        cdef int rv2 = self.sel2.select_sphere(pos, radius)
+        if rv2 == 1: return 1
+        return 0
 
     def _hash_vals(self):
         return (self.sel1._hash_vals() +
@@ -2169,6 +2192,42 @@
                 self.sel2._hash_vals())
 
 cdef class BooleanNOTSelector(BooleanSelector):
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        # We always return True here, because we don't have a "fully included"
+        # check anywhere else.
+        return 1
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge, level, o)
+        if rv2 == 0: return 0
+        return 1
+
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        cdef int rv1 = self.sel1.select_cell(pos, dds)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_cell(pos, dds)
+        if rv2 == 0: return 0
+        return 1
+
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef int rv1 = self.sel1.select_point(pos)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_point(pos)
+        if rv2 == 0: return 0
+        return 1
+
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        cdef int rv1 = self.sel1.select_sphere(pos, radius)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_sphere(pos, radius)
+        if rv2 == 0: return 0
+        return 1
+
     # This selector mandates that sel2 is an AlwaysSelector, or something like
     # that, as it's ignored.
     cdef int operation(self, int rv1, int rv2) nogil:

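Replacing the generic operation() dispatch with per-class methods lets each
operator short-circuit: AND returns 0 as soon as the first selector rejects,
and OR returns 1 as soon as the first accepts, so the second selector is
often never consulted. In plain Python the pattern is:

    def and_select_cell(sel1, sel2, pos, dds):
        if sel1.select_cell(pos, dds) == 0:
            return 0                    # sel2 never evaluated
        return sel2.select_cell(pos, dds)

    def or_select_cell(sel1, sel2, pos, dds):
        if sel1.select_cell(pos, dds) == 1:
            return 1                    # sel2 never evaluated
        return sel2.select_cell(pos, dds)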

https://bitbucket.org/yt_analysis/yt/commits/a423ab9b41a2/
Changeset:   a423ab9b41a2
Branch:      yt
User:        MatthewTurk
Date:        2016-06-30 21:54:11+00:00
Summary:     This is somewhat slower, but works.
Affected #:  2 files

diff -r 6218ca4cb896c6dd3e4e5a49ac158f657977ac04 -r a423ab9b41a28b0039f6750962f39763f56a0d4b yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -78,7 +78,6 @@
 cdef class BooleanSelector(SelectorObject):
     cdef public SelectorObject sel1
     cdef public SelectorObject sel2
-    cdef int operation(self, int rv1, int rv2) nogil
 
 cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,
                                         np.float64_t dw, bint periodic) nogil:

diff -r 6218ca4cb896c6dd3e4e5a49ac158f657977ac04 -r a423ab9b41a28b0039f6750962f39763f56a0d4b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2117,7 +2117,7 @@
                          Oct *o = NULL) nogil:
         cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
         if rv1 == 0: return 0
-        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge, level, o)
+        cdef int rv2 = self.sel2.select_grid(left_edge, right_edge, level, o)
         if rv2 == 0: return 0
         return 1
 
@@ -2161,7 +2161,7 @@
                          Oct *o = NULL) nogil:
         cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
         if rv1 == 1: return 1
-        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge, level, o)
+        cdef int rv2 = self.sel2.select_grid(left_edge, right_edge, level, o)
         if rv2 == 1: return 1
         return 0
 
@@ -2201,66 +2201,58 @@
     cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level,
                          Oct *o = NULL) nogil:
-        cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
-        if rv1 == 0: return 0
-        cdef int rv2 = self.sel2.select_bbox(left_edge, right_edge, level, o)
-        if rv2 == 0: return 0
         return 1
 
     cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
         cdef int rv1 = self.sel1.select_cell(pos, dds)
-        if rv1 == 0: return 0
+        if rv1 == 0: return 1
+        return 0
+
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef int rv1 = self.sel1.select_point(pos)
+        if rv1 == 0: return 1
+        return 0
+
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        cdef int rv1 = self.sel1.select_sphere(pos, radius)
+        if rv1 == 0: return 1
+        return 0
+
+    def _hash_vals(self):
+        return (self.sel1._hash_vals() +
+                ("not",))
+
+cdef class BooleanXORSelector(BooleanSelector):
+
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        # We always return True here, because we don't have a "fully included"
+        # check anywhere else.
+        return 1
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        return 1
+
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        cdef int rv1 = self.sel1.select_cell(pos, dds)
         cdef int rv2 = self.sel2.select_cell(pos, dds)
-        if rv2 == 0: return 0
+        if rv1 == rv2: return 0
         return 1
 
     cdef int select_point(self, np.float64_t pos[3]) nogil:
         cdef int rv1 = self.sel1.select_point(pos)
-        if rv1 == 0: return 0
         cdef int rv2 = self.sel2.select_point(pos)
-        if rv2 == 0: return 0
+        if rv1 == rv2: return 0
         return 1
 
     cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
         cdef int rv1 = self.sel1.select_sphere(pos, radius)
-        if rv1 == 0: return 0
         cdef int rv2 = self.sel2.select_sphere(pos, radius)
-        if rv2 == 0: return 0
+        if rv1 == rv2: return 0
         return 1
 
-    # This selector mandates that sel2 is an AlwaysSelector, or something like
-    # that, as it's ignored.
-    cdef int operation(self, int rv1, int rv2) nogil:
-        # Ignore t
-        if rv2 == 0:
-            # This shouldn't happen!
-            return -1
-        elif rv1 == 0:
-            return 1
-        elif rv1 == 1:
-            return 0
-
-    def _hash_vals(self):
-        return (self.sel1._hash_vals() +
-                ("not",) +
-                self.sel2._hash_vals())
-
-cdef class BooleanXORSelector(BooleanSelector):
-    cdef int operation(self, int rv1, int rv2) nogil:
-        if rv1 == 1 and rv2 == 0:
-            return 1
-        elif rv1 == 0 and rv2 == 1:
-            return 1
-        return 0
-
-    cdef int select_grid(self, np.float64_t left_edge[3],
-                         np.float64_t right_edge[3], np.int32_t level,
-                         Oct *o = NULL) nogil:
-        cdef int rv1 = self.sel1.select_grid(left_edge, right_edge, level, o)
-        cdef int rv2 = self.sel2.select_grid(left_edge, right_edge, level, o)
-        if rv1 == 1 or rv2 == 1: return 1
-        return 0
-
     def _hash_vals(self):
         return (self.sel1._hash_vals() +
                 ("xor",) +

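For XOR the coarse checks (select_bbox, select_grid) must return 1
unconditionally: even a region accepted by both selectors can contain cells
selected by exactly one of them, so nothing can be pruned at that level. The
cell-level logic reduces to a two-input truth table:

    #  rv1  rv2 -> xor
    #   0    0  ->  0
    #   0    1  ->  1
    #   1    0  ->  1
    #   1    1  ->  0
    def xor_op(rv1, rv2):
        return 0 if rv1 == rv2 else 1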

https://bitbucket.org/yt_analysis/yt/commits/903f1ab03825/
Changeset:   903f1ab03825
Branch:      yt
User:        MatthewTurk
Date:        2016-06-30 21:58:14+00:00
Summary:     Adding docstring to the boolean class
Affected #:  1 file

diff -r a423ab9b41a28b0039f6750962f39763f56a0d4b -r 903f1ab0382536398e397b08854cf4c556d488e0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1781,6 +1781,35 @@
         return YTBooleanOperator("AND", self, other, ds = self.ds)
 
 class YTBooleanOperator(YTSelectionContainer3D):
+    """
+    This is a boolean operation, accepting AND, OR, XOR, and NOT for combining
+    multiple data objects.
+
+    This object is not designed to be created directly; it is designed to be
+    created implicitly by using one of the bitwise operations (&, |, ^, ~) on
+    one or two other data objects.  These correspond to the appropriate boolean
+    operations, and the resultant object can be nested.
+
+    Parameters
+    ----------
+    op : string
+        Can be AND, OR, XOR, or NOT.
+    dobj1 : YTSelectionContainer3D
+        The first selection object
+    dobj2 : YTSelectionContainer3D
+        The second object
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
+    >>> sp = ds.sphere("c", 0.1)
+    >>> dd = ds.r[:,:,:]
+    >>> new_obj = sp ^ dd
+    >>> print(new_obj.sum("cell_volume"), dd.sum("cell_volume") -
+    ...    sp.sum("cell_volume"))
+    """
     _type_name = "bool"
     _con_args = ("op", "dobj1", "dobj2")
     def __init__(self, op, dobj1, dobj2, ds = None, field_parameters = None,

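The docstring example leans on a set identity: when sp lies entirely inside
dd, sp ^ dd selects exactly the cells of dd that are not in sp, so the two
printed volumes should agree. A quick check of the identity with NumPy index
sets (the tests below compare selections the same way):

    import numpy as np
    A = np.array([1, 2, 3])            # stand-in for sp's cell indices
    U = np.array([1, 2, 3, 4, 5, 6])   # stand-in for dd's cell indices
    assert np.array_equal(np.setxor1d(A, U), np.setdiff1d(U, A))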

https://bitbucket.org/yt_analysis/yt/commits/d58022cb1b69/
Changeset:   d58022cb1b69
Branch:      yt
User:        MatthewTurk
Date:        2016-07-01 17:45:54+00:00
Summary:     First attempt at negation
Affected #:  2 files

diff -r 903f1ab0382536398e397b08854cf4c556d488e0 -r d58022cb1b69ce16cfd82f81b29cf4302f5a0593 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1780,6 +1780,14 @@
         assert(self.ds is other.ds)
         return YTBooleanOperator("AND", self, other, ds = self.ds)
 
+    def __add__(self, other):
+        return self.__or__(other)
+
+    def __sub__(self, other):
+        assert(isinstance(other, YTSelectionContainer3D))
+        assert(self.ds is other.ds)
+        return YTBooleanOperator("NEG", self, other, ds = self.ds)
+
 class YTBooleanOperator(YTSelectionContainer3D):
     """
     This is a boolean operation, accepting AND, OR, XOR, and NOT for combining

diff -r 903f1ab0382536398e397b08854cf4c556d488e0 -r d58022cb1b69ce16cfd82f81b29cf4302f5a0593 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2257,3 +2257,42 @@
         return (self.sel1._hash_vals() +
                 ("xor",) +
                 self.sel2._hash_vals())
+
+cdef class BooleanNEGSelector(BooleanSelector):
+
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        # We always return True here, because we don't have a "fully included"
+        # check anywhere else.
+        return self.sel1.select_bbox(left_edge, right_edge)
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        return self.sel1.select_grid(left_edge, right_edge, level, o)
+
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        cdef int rv1 = self.sel1.select_cell(pos, dds)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_cell(pos, dds)
+        if rv2 == 1: return 0
+        return 1
+
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        cdef int rv1 = self.sel1.select_point(pos)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_point(pos)
+        if rv2 == 1: return 0
+        return 1
+
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        cdef int rv1 = self.sel1.select_sphere(pos, radius)
+        if rv1 == 0: return 0
+        cdef int rv2 = self.sel2.select_sphere(pos, radius)
+        if rv2 == 1: return 0
+        return 1
+
+    def _hash_vals(self):
+        return (self.sel1._hash_vals() +
+                ("neg",) +
+                self.sel2._hash_vals())

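Here "-" maps to the new NEG selector (cells in the first object but not the
second), while "+" simply delegates to "|". The test suite verifies "-"
against NumPy's set difference; the analogy, with illustrative index arrays:

    import numpy as np
    i1 = np.array([1, 2, 3, 4])   # stand-in for sp1's cell indices
    i2 = np.array([3, 4, 5])      # stand-in for sp2's cell indices
    # sp1 - sp2 should select exactly np.setdiff1d(i1, i2)
    assert np.array_equal(np.setdiff1d(i1, i2), np.array([1, 2]))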

https://bitbucket.org/yt_analysis/yt/commits/b5cf48adca57/
Changeset:   b5cf48adca57
Branch:      yt
User:        MatthewTurk
Date:        2016-07-01 21:54:28+00:00
Summary:     Adding tests ported from yt-2.x.
Affected #:  1 file

diff -r d58022cb1b69ce16cfd82f81b29cf4302f5a0593 -r b5cf48adca5704110cee7d233060ec73b07efe51 yt/data_objects/tests/test_boolean_regions.py
--- /dev/null
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -0,0 +1,332 @@
+from yt.testing import \
+        fake_amr_ds, \
+        assert_array_equal
+import numpy as np
+
+def get_ds():
+    from yt.utilities.lib.geometry_utils import compute_morton
+    def _morton_index(field, data):
+        eps = np.finfo("f8").eps
+        uq = data.ds.domain_left_edge.uq
+        LE = data.ds.domain_left_edge - eps * uq
+        RE = data.ds.domain_right_edge + eps * uq
+        # .ravel() only copies if it needs to
+        morton = compute_morton(data["index", "x"].ravel(),
+                                data["index", "y"].ravel(),
+                                data["index", "z"].ravel(), LE, RE)
+        morton.shape = data["index", "x"].shape
+        return morton.view("f8")
+    ds = fake_amr_ds()
+    ds.add_field(("index", "morton_index"), function=_morton_index,
+                       units = "")
+    return ds
+
+def test_boolean_spheres_no_overlap():
+    r"""Test to make sure that boolean objects (spheres, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping spheres. This also checks that the original spheres
+    don't change as part of constructing the booleans.
+    """
+    ds = get_ds()
+    sp1 = ds.sphere([0.25, 0.25, 0.25], 0.15)
+    sp2 = ds.sphere([0.75, 0.75, 0.75], 0.15)
+    # Store the original indices
+    i1 = sp1["index","morton_index"]
+    i1.sort()
+    i2 = sp2["index","morton_index"]
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = sp1 & sp2
+    bo2 = sp1 - sp2
+    bo3 = sp1 | sp2 # also works with +
+    # This makes sure the original containers didn't change.
+    new_i1 = sp1["index","morton_index"]
+    new_i1.sort()
+    new_i2 = sp2["index","morton_index"]
+    new_i2.sort()
+    assert_array_equal(new_i1, i1)
+    assert_array_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1["index","morton_index"], empty)
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b3, ii)
+ 
+def test_boolean_spheres_overlap():
+    r"""Test to make sure that boolean objects (spheres, overlap)
+    behave the way we expect.
+
+    Test overlapping spheres.
+    """
+    ds = get_ds()
+    sp1 = ds.sphere([0.45, 0.45, 0.45], 0.15)
+    sp2 = ds.sphere([0.55, 0.55, 0.55], 0.15)
+    # Get indices of both.
+    i1 = sp1["index","morton_index"]
+    i2 = sp2["index","morton_index"]
+    # Make some booleans
+    bo1 = sp1 & sp2
+    bo2 = sp1 - sp2
+    bo3 = sp1 | sp2
+    # Now make sure the indices also behave as we expect.
+    lens = np.intersect1d(i1, i2)
+    apple = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1["index","morton_index"]
+    b1.sort()
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b1, lens)
+    assert_array_equal(b2, apple)
+    assert_array_equal(b3, both)
+
+def test_boolean_regions_no_overlap():
+    r"""Test to make sure that boolean objects (regions, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping regions. This also checks that the original regions
+    don't change as part of constructing the booleans.
+    """
+    ds = get_ds()
+    re1 = ds.region([0.25]*3, [0.2]*3, [0.3]*3)
+    re2 = ds.region([0.65]*3, [0.6]*3, [0.7]*3)
+    # Store the original indices
+    i1 = re1["index","morton_index"]
+    i1.sort()
+    i2 = re2["index","morton_index"]
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = re1 & re2
+    bo2 = re1 - re2
+    bo3 = re1 | re2
+    # This makes sure the original containers didn't change.
+    new_i1 = re1["index","morton_index"]
+    new_i1.sort()
+    new_i2 = re2["index","morton_index"]
+    new_i2.sort()
+    assert_array_equal(new_i1, i1)
+    assert_array_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1["index","morton_index"], empty)
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    assert_array_equal(b2, i1 )
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b3, ii)
+
+def test_boolean_regions_overlap():
+    r"""Test to make sure that boolean objects (regions, overlap)
+    behave the way we expect.
+
+    Test overlapping regions.
+    """
+    ds = get_ds()
+    re1 = ds.region([0.55]*3, [0.5]*3, [0.6]*3)
+    re2 = ds.region([0.6]*3, [0.55]*3, [0.65]*3)
+    # Get indices of both.
+    i1 = re1["index","morton_index"]
+    i2 = re2["index","morton_index"]
+    # Make some booleans
+    bo1 = re1 & re2
+    bo2 = re1 - re2
+    bo3 = re1 | re2
+    # Now make sure the indices also behave as we expect.
+    cube = np.intersect1d(i1, i2)
+    bite_cube = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1["index","morton_index"]
+    b1.sort()
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b1, cube)
+    assert_array_equal(b2, bite_cube)
+    assert_array_equal(b3, both)
+
+def test_boolean_cylinders_no_overlap():
+    r"""Test to make sure that boolean objects (cylinders, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping cylinders. This also checks that the original cylinders
+    don't change as part of constructing the booleans.
+    """
+    ds = get_ds()
+    cyl1 = ds.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
+    cyl2 = ds.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
+    # Store the original indices
+    i1 = cyl1["index","morton_index"]
+    i1.sort()
+    i2 = cyl2["index","morton_index"]
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = cyl1 & cyl2
+    bo2 = cyl1 - cyl2
+    bo3 = cyl1 | cyl2
+    # This makes sure the original containers didn't change.
+    new_i1 = cyl1["index","morton_index"]
+    new_i1.sort()
+    new_i2 = cyl2["index","morton_index"]
+    new_i2.sort()
+    assert_array_equal(new_i1, i1)
+    assert_array_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1["index","morton_index"], empty)
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b3, ii)
+
+def test_boolean_cylinders_overlap():
+    r"""Test to make sure that boolean objects (cylinders, overlap)
+    behave the way we expect.
+
+    Test overlapping cylinders.
+    """
+    ds = get_ds()
+    cyl1 = ds.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
+    cyl2 = ds.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
+    # Get indices of both.
+    i1 = cyl1["index","morton_index"]
+    i2 = cyl2["index","morton_index"]
+    # Make some booleans
+    bo1 = cyl1 & cyl2
+    bo2 = cyl1 - cyl2
+    bo3 = cyl1 | cyl2
+    # Now make sure the indices also behave as we expect.
+    vlens = np.intersect1d(i1, i2)
+    bite_disk = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1["index","morton_index"]
+    b1.sort()
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b1, vlens)
+    assert_array_equal(b2, bite_disk)
+    assert_array_equal(b3, both)
+
+def test_boolean_ellipsoids_no_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, no overlap)
+    behave the way we expect.
+
+    Test non-overlapping ellipsoids. This also checks that the original
+    ellipsoids don't change as part of constructing the booleans.
+    """
+    ds = get_ds()
+    ell1 = ds.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
+    ell2 = ds.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
+    # Store the original indices
+    i1 = ell1["index","morton_index"]
+    i1.sort()
+    i2 = ell2["index","morton_index"]
+    i2.sort()
+    ii = np.concatenate((i1, i2))
+    ii.sort()
+    # Make some booleans
+    bo1 = ell1 & ell2
+    bo2 = ell1 - ell2
+    bo3 = ell1 | ell2
+    # This makes sure the original containers didn't change.
+    new_i1 = ell1["index","morton_index"]
+    new_i1.sort()
+    new_i2 = ell2["index","morton_index"]
+    new_i2.sort()
+    assert_array_equal(new_i1, i1 )
+    assert_array_equal(new_i2, i2)
+    # Now make sure the indices also behave as we expect.
+    empty = np.array([])
+    assert_array_equal(bo1["index","morton_index"], empty)
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    assert_array_equal(b2, i1)
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b3, ii)
+
+def test_boolean_ellipsoids_overlap():
+    r"""Test to make sure that boolean objects (ellipsoids, overlap)
+    behave the way we expect.
+
+    Test overlapping ellipsoids.
+    """
+    ds = get_ds()
+    ell1 = ds.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
+    ell2 = ds.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
+    # Get indices of both.
+    i1 = ell1["index","morton_index"]
+    i2 = ell2["index","morton_index"]
+    # Make some booleans
+    bo1 = ell1 & ell2
+    bo2 = ell1 - ell2
+    bo3 = ell1 | ell2
+    # Now make sure the indices also behave as we expect.
+    overlap = np.intersect1d(i1, i2)
+    diff = np.setdiff1d(i1, i2)
+    both = np.union1d(i1, i2)
+    b1 = bo1["index","morton_index"]
+    b1.sort()
+    b2 = bo2["index","morton_index"]
+    b2.sort()
+    b3 = bo3["index","morton_index"]
+    b3.sort()
+    assert_array_equal(b1, overlap)
+    assert_array_equal(b2, diff)
+    assert_array_equal(b3, both)
+
+def test_boolean_mix_periodicity():
+    r"""Test that a hybrid boolean region behaves as we expect.
+
+    This also tests nested logic and that periodicity works.
+    """
+    ds = get_ds()
+    re = ds.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
+    sp = ds.sphere([0.95]*3, 0.3) # wraps around
+    cyl = ds.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around
+    # Get original indices
+    rei = re["index","morton_index"]
+    spi = sp["index","morton_index"]
+    cyli = cyl["index","morton_index"]
+    # Make some booleans
+    # whole box minus spherical bites at corners
+    bo1 = re - sp
+    # sphere plus cylinder
+    bo2 = sp | cyl
+    # a jumble, the region minus the sp+cyl
+    bo3 = re - (sp | cyl)
+    # Now make sure the indices also behave as we expect.
+    expect = np.setdiff1d(rei, spi)
+    ii = bo1["index","morton_index"]
+    ii.sort()
+    assert_array_equal(expect, ii)
+    #
+    expect = np.union1d(spi, cyli)
+    ii = bo2["index","morton_index"]
+    ii.sort()
+    assert_array_equal(expect, ii)
+    #
+    expect = np.union1d(spi, cyli)
+    expect = np.setdiff1d(rei, expect)
+    ii = bo3["index","morton_index"]
+    ii.sort()
+    assert_array_equal(expect, ii)
+

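The tests identify each cell by its Morton index, which interleaves the bits
of the x, y, and z coordinates into a single sortable key, so NumPy's 1-d set
routines (intersect1d, setdiff1d, union1d) can stand in for set algebra on
cells. A toy two-bit-per-axis version of the interleaving, for intuition only
(the real compute_morton works on floating-point positions and 64-bit keys):

    def morton_2bit(ix, iy, iz):
        key = 0
        for b in range(2):                       # two bits per axis
            key |= ((ix >> b) & 1) << (3 * b)
            key |= ((iy >> b) & 1) << (3 * b + 1)
            key |= ((iz >> b) & 1) << (3 * b + 2)
        return key

    assert morton_2bit(1, 0, 0) == 0b001
    assert morton_2bit(0, 1, 0) == 0b010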

https://bitbucket.org/yt_analysis/yt/commits/909a044596ec/
Changeset:   909a044596ec
Branch:      yt
User:        MatthewTurk
Date:        2016-07-03 19:31:12+00:00
Summary:     Adding chained selectors
Affected #:  1 file

diff -r b5cf48adca5704110cee7d233060ec73b07efe51 -r 909a044596ec11dd1decfb94d14d3f42f4f1f109 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2296,3 +2296,191 @@
         return (self.sel1._hash_vals() +
                 ("neg",) +
                 self.sel2._hash_vals())
+
+cdef class ChainedBooleanSelector(SelectorObject):
+    cdef int n_obj
+    cdef np.ndarray selectors
+    def __init__(self, dobj):
+        # These are data objects, not selectors
+        self.n_obj = len(dobj.data_objects)
+        self.selectors = np.empty(self.n_obj, dtype="object")
+        for i in range(self.n_obj):
+            self.selectors[i] = dobj.data_objects[i].selector
+
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]) nogil:
+        with gil:
+            return self._select_bbox(left_edge, right_edge)
+
+    cdef int _select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]):
+        return 0
+
+    cdef int select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL) nogil:
+        with gil:
+            return self._select_grid(left_edge, right_edge, level, o)
+
+    cdef int _select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL):
+        return 0
+
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        with gil:
+            return self._select_cell(pos, dds)
+
+    cdef int _select_cell(self, np.float64_t pos[3], np.float64_t dds[3]):
+        return 0
+
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        with gil:
+            return self._select_point(pos)
+
+    cdef int _select_point(self, np.float64_t pos[3]):
+        return 0
+
+    cdef int select_sphere(self, np.float64_t pos[3],
+                                 np.float64_t radius) nogil:
+        with gil:
+            return self._select_sphere(pos, radius)
+
+    cdef int _select_sphere(self, np.float64_t pos[3],
+                                 np.float64_t radius):
+        return 0
+
+cdef class ChainedBooleanANDSelector(ChainedBooleanSelector):
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_bbox(self, np.float64_t left_edge[3],
+                                np.float64_t right_edge[3]):
+        cdef np.ndarray[object, ndim=1] sels = self.selectors
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>sels[i]).select_bbox(left_edge, right_edge) == 0:
+                return 0
+        return 1
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_grid(left_edge, right_edge, level, o) == 0:
+                return 0
+        return 1
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_cell(self, np.float64_t pos[3], np.float64_t dds[3]):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_cell(pos, dds) == 0:
+                return 0
+        return 1
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_point(self, np.float64_t pos[3]):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_point(pos) == 0:
+                return 0
+        return 1
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_sphere(self, np.float64_t pos[3], np.float64_t radius):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_sphere(pos, radius) == 0:
+                return 0
+        return 1
+
+    def _hash_vals(self):
+        v = ("chained_and",)
+        for s in self.selectors:
+            v += s._hash_vals()
+        return v
+
+intersection_selector = ChainedBooleanANDSelector
+
+cdef class ChainedBooleanORSelector(ChainedBooleanSelector):
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_bbox(self, np.float64_t left_edge[3],
+                               np.float64_t right_edge[3]):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_bbox(left_edge, right_edge) == 1:
+                return 1
+        return 0
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_grid(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3], np.int32_t level,
+                         Oct *o = NULL):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_grid(left_edge, right_edge, level, o) == 1:
+                return 1
+        return 0
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_cell(self, np.float64_t pos[3], np.float64_t dds[3]):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_cell(pos, dds) == 1:
+                return 1
+        return 0
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_point(self, np.float64_t pos[3]):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_point(pos) == 1:
+                return 1
+        return 0
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int _select_sphere(self, np.float64_t pos[3], np.float64_t radius):
+        cdef np.ndarray[object, ndim=1] sels
+        cdef int i
+        for i in range(self.n_obj):
+            if (<SelectorObject>self.selectors[i]).select_sphere(pos, radius) == 1:
+                return 1
+        return 0
+
+    def _hash_vals(self):
+        v = ("chained_or",)
+        for s in self.selectors:
+            v += s._hash_vals()
+        return v
+
+union_selector = ChainedBooleanORSelector
+

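Because the child selectors live in a NumPy object array, the nogil entry
points briefly re-acquire the GIL ("with gil:") before looping over them. The
chained forms also avoid building a deep tree of pairwise selectors: a single
loop replaces the nesting that a & b & c would produce. The dispatch pattern,
in plain Python:

    def chained_and_point(selectors, pos):
        for sel in selectors:
            if sel.select_point(pos) == 0:
                return 0      # one rejection rejects the point
        return 1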

https://bitbucket.org/yt_analysis/yt/commits/0e16f3cbc6a1/
Changeset:   0e16f3cbc6a1
Branch:      yt
User:        MatthewTurk
Date:        2016-07-03 19:45:16+00:00
Summary:     Adding union and intersection objects
Affected #:  1 file

diff -r 909a044596ec11dd1decfb94d14d3f42f4f1f109 -r 0e16f3cbc6a1ba3b73d65402b8b16ed935f26901 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -857,3 +857,75 @@
     @property
     def fwidth(self):
         return self.base_object.fwidth[self._cond_ind,:]
+
+class YTDataObjectIntersection(YTSelectionContainer3D):
+    """
+    This is a more efficient method of selecting the intersection of multiple
+    data selection objects.
+
+    Creating one of these objects returns the intersection of all of the
+    sub-objects; it is designed to be a faster method than chaining & ("and")
+    operations to create a single, large intersection.
+
+    Parameters
+    ----------
+    data_objects : Iterable of YTSelectionContainer3D
+        The data objects to intersect
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
+    >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.15)
+    >>> sp2 = ds.sphere((0.38, 0.51, 0.55), 0.1)
+    >>> sp3 = ds.sphere((0.35, 0.5, 0.6), 0.15)
+    >>> new_obj = ds.intersection((sp1, sp2, sp3))
+    >>> print(new_obj.sum("cell_volume"))
+    """
+    _type_name = "intersection"
+    _con_args = ("data_objects",)
+    def __init__(self, data_objects, ds = None, field_parameters = None,
+                 data_source = None):
+        YTSelectionContainer3D.__init__(self, None, ds, field_parameters,
+                data_source)
+        # ensure_list doesn't check for tuples
+        if isinstance(data_objects, tuple):
+            data_objects = list(data_objects)
+        self.data_objects = ensure_list(data_objects)
+
+class YTDataObjectUnion(YTSelectionContainer3D):
+    """
+    This is a more efficient method of selecting the union of multiple
+    data selection objects.
+
+    Creating one of these objects returns the union of all of the sub-objects;
+    it is designed to be a faster method than chaining | (or) operations to
+    create a single, large union.
+
+    Parameters
+    ----------
+    data_objects : Iterable of YTSelectionContainer3D
+        The data objects to union
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
+    >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1)
+    >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1)
+    >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1)
+    >>> new_obj = ds.union((sp1, sp2, sp3))
+    >>> print(new_obj.sum("cell_volume"))
+    """
+    _type_name = "union"
+    _con_args = ("data_objects",)
+    def __init__(self, data_objects, ds = None, field_parameters = None,
+                 data_source = None):
+        YTSelectionContainer3D.__init__(self, None, ds, field_parameters,
+                data_source)
+        # ensure_list doesn't check for tuples
+        if isinstance(data_objects, tuple):
+            data_objects = list(data_objects)
+        self.data_objects = ensure_list(data_objects)

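With _type_name registered, these containers become available as
ds.intersection(...) and ds.union(...). For two objects the results should
match the pairwise operators, which the tests in the next changeset assert
directly (sp1 and sp2 as in those tests):

    bo_and = ds.intersection([sp1, sp2])   # same cells as sp1 & sp2
    bo_or  = ds.union([sp1, sp2])          # same cells as sp1 | sp2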

https://bitbucket.org/yt_analysis/yt/commits/49ffeecf539d/
Changeset:   49ffeecf539d
Branch:      yt
User:        MatthewTurk
Date:        2016-07-04 01:59:43+00:00
Summary:     Adding tests for union and intersection
Affected #:  1 file

diff -r 0e16f3cbc6a1ba3b73d65402b8b16ed935f26901 -r 49ffeecf539d7dfb4685c4abf39dc918c09d0e9c yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -42,6 +42,8 @@
     bo1 = sp1 & sp2
     bo2 = sp1 - sp2
     bo3 = sp1 | sp2 # also works with +
+    bo4 = ds.union([sp1, sp2])
+    bo5 = ds.intersection([sp1, sp2])
     # This makes sure the original containers didn't change.
     new_i1 = sp1["index","morton_index"]
     new_i1.sort()
@@ -52,12 +54,16 @@
     # Now make sure the indices also behave as we expect.
     empty = np.array([])
     assert_array_equal(bo1["index","morton_index"], empty)
+    assert_array_equal(bo5["index","morton_index"], empty)
     b2 = bo2["index","morton_index"]
     b2.sort()
     assert_array_equal(b2, i1)
     b3 = bo3["index","morton_index"]
     b3.sort()
     assert_array_equal(b3, ii)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    assert_array_equal(b4, ii)
  
 def test_boolean_spheres_overlap():
     r"""Test to make sure that boolean objects (spheres, overlap)
@@ -75,6 +81,8 @@
     bo1 = sp1 & sp2
     bo2 = sp1 - sp2
     bo3 = sp1 | sp2
+    bo4 = ds.union([sp1, sp2])
+    bo5 = ds.intersection([sp1, sp2])
     # Now make sure the indices also behave as we expect.
     lens = np.intersect1d(i1, i2)
     apple = np.setdiff1d(i1, i2)
@@ -88,6 +96,12 @@
     assert_array_equal(b1, lens)
     assert_array_equal(b2, apple)
     assert_array_equal(b3, both)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
+    assert_array_equal(b1, b5)
 
 def test_boolean_regions_no_overlap():
     r"""Test to make sure that boolean objects (regions, no overlap)
@@ -110,6 +124,8 @@
     bo1 = re1 & re2
     bo2 = re1 - re2
     bo3 = re1 | re2
+    bo4 = ds.union([re1, re2])
+    bo5 = ds.intersection([re1, re2])
     # This makes sure the original containers didn't change.
     new_i1 = re1["index","morton_index"]
     new_i1.sort()
@@ -120,12 +136,18 @@
     # Now make sure the indices also behave as we expect.
     empty = np.array([])
     assert_array_equal(bo1["index","morton_index"], empty)
+    assert_array_equal(bo5["index","morton_index"], empty)
     b2 = bo2["index","morton_index"]
     b2.sort()
     assert_array_equal(b2, i1 )
     b3 = bo3["index","morton_index"]
     b3.sort()
     assert_array_equal(b3, ii)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
 
 def test_boolean_regions_overlap():
     r"""Test to make sure that boolean objects (regions, overlap)
@@ -143,6 +165,8 @@
     bo1 = re1 & re2
     bo2 = re1 - re2
     bo3 = re1 | re2
+    bo4 = ds.union([re1, re2])
+    bo5 = ds.intersection([re1, re2])
     # Now make sure the indices also behave as we expect.
     cube = np.intersect1d(i1, i2)
     bite_cube = np.setdiff1d(i1, i2)
@@ -156,6 +180,13 @@
     assert_array_equal(b1, cube)
     assert_array_equal(b2, bite_cube)
     assert_array_equal(b3, both)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
+    assert_array_equal(b1, b5)
+
 
 def test_boolean_cylinders_no_overlap():
     r"""Test to make sure that boolean objects (cylinders, no overlap)
@@ -178,6 +209,8 @@
     bo1 = cyl1 & cyl2
     bo2 = cyl1 - cyl2
     bo3 = cyl1 | cyl2
+    bo4 = ds.union([cyl1, cyl2])
+    bo5 = ds.intersection([cyl1, cyl2])
     # This makes sure the original containers didn't change.
     new_i1 = cyl1["index","morton_index"]
     new_i1.sort()
@@ -188,12 +221,18 @@
     # Now make sure the indices also behave as we expect.
     empty = np.array([])
     assert_array_equal(bo1["index","morton_index"], empty)
+    assert_array_equal(bo5["index","morton_index"], empty)
     b2 = bo2["index","morton_index"]
     b2.sort()
     assert_array_equal(b2, i1)
     b3 = bo3["index","morton_index"]
     b3.sort()
     assert_array_equal(b3, ii)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
 
 def test_boolean_cylinders_overlap():
     r"""Test to make sure that boolean objects (cylinders, overlap)
@@ -211,6 +250,8 @@
     bo1 = cyl1 & cyl2
     bo2 = cyl1 - cyl2
     bo3 = cyl1 | cyl2
+    bo4 = ds.union([cyl1, cyl2])
+    bo5 = ds.intersection([cyl1, cyl2])
     # Now make sure the indices also behave as we expect.
     vlens = np.intersect1d(i1, i2)
     bite_disk = np.setdiff1d(i1, i2)
@@ -224,6 +265,13 @@
     assert_array_equal(b1, vlens)
     assert_array_equal(b2, bite_disk)
     assert_array_equal(b3, both)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
+    assert_array_equal(b1, b5)
+
 
 def test_boolean_ellipsoids_no_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, no overlap)
@@ -246,6 +294,8 @@
     bo1 = ell1 & ell2
     bo2 = ell1 - ell2
     bo3 = ell1 | ell2
+    bo4 = ds.union([ell1, ell2])
+    bo5 = ds.intersection([ell1, ell2])
     # This makes sure the original containers didn't change.
     new_i1 = ell1["index","morton_index"]
     new_i1.sort()
@@ -256,12 +306,19 @@
     # Now make sure the indices also behave as we expect.
     empty = np.array([])
     assert_array_equal(bo1["index","morton_index"], empty)
+    assert_array_equal(bo5["index","morton_index"], empty)
     b2 = bo2["index","morton_index"]
     b2.sort()
     assert_array_equal(b2, i1)
     b3 = bo3["index","morton_index"]
     b3.sort()
     assert_array_equal(b3, ii)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
+
 
 def test_boolean_ellipsoids_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, overlap)
@@ -279,6 +336,8 @@
     bo1 = ell1 & ell2
     bo2 = ell1 - ell2
     bo3 = ell1 | ell2
+    bo4 = ds.union([ell1, ell2])
+    bo5 = ds.intersection([ell1, ell2])
     # Now make sure the indices also behave as we expect.
     overlap = np.intersect1d(i1, i2)
     diff = np.setdiff1d(i1, i2)
@@ -292,6 +351,12 @@
     assert_array_equal(b1, overlap)
     assert_array_equal(b2, diff)
     assert_array_equal(b3, both)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    assert_array_equal(b3, b4)
+    assert_array_equal(b1, b5)
 
 def test_boolean_mix_periodicity():
     r"""Test that a hybrid boolean region behaves as we expect.
@@ -314,6 +379,8 @@
     # a jumble, the region minus the sp+cyl
     bo3 = re - (sp | cyl)
     # Now make sure the indices also behave as we expect.
+    bo4 = ds.union([re, sp, cyl])
+    bo5 = ds.intersection([re, sp, cyl])
     expect = np.setdiff1d(rei, spi)
     ii = bo1["index","morton_index"]
     ii.sort()
@@ -329,4 +396,14 @@
     ii = bo3["index","morton_index"]
     ii.sort()
     assert_array_equal(expect, ii)
+    b4 = bo4["index","morton_index"]
+    b4.sort()
+    b5 = bo5["index","morton_index"]
+    b5.sort()
+    ii = np.union1d(np.union1d(rei, cyli), spi)
+    ii.sort()
+    assert_array_equal(ii, b4)
+    ii = np.intersect1d(np.intersect1d(rei, cyli), spi)
+    ii.sort()
+    assert_array_equal(ii, b5)
 


https://bitbucket.org/yt_analysis/yt/commits/35989901ce9e/
Changeset:   35989901ce9e
Branch:      yt
User:        MatthewTurk
Date:        2016-07-04 02:11:17+00:00
Summary:     Adding documentation for boolean objects.
Affected #:  1 file

diff -r 49ffeecf539d7dfb4685c4abf39dc918c09d0e9c -r 35989901ce9ed88caa2e5bb2eb29577cd1c6fe0c doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -302,9 +302,12 @@
     | easily lead to empty data for non-intersecting regions.
     | Usage: ``slice(axis, coord, ds, data_source=sph)``
 
-**Boolean Regions**
-    | **Note: not yet implemented in yt 3.0**
-    | Usage: ``boolean()``
+**Union Regions**
+    | Usage: ``union()``
+    | See :ref:`boolean_data_objects`.
+
+**Intersection Regions**
+    | Usage: ``intersection()``
     | See :ref:`boolean_data_objects`.
 
 **Filter**
@@ -604,37 +607,48 @@
 Combining Objects: Boolean Data Objects
 ---------------------------------------
 
-.. note:: Boolean Data Objects have not yet been ported to yt 3.0 from
-    yt 2.x.  If you are interested in aiding in this port, please contact
-    the yt-dev mailing list.  Until it is ported, this functionality below
-    will not work.
+A special type of data object is the *boolean* data object, which operates
+on three-dimensional data selections.  It is built by combining existing
+data objects with Python's bitwise operators: ``&`` for an intersection
+("AND"), ``|`` for a union ("OR"), ``^`` for an exclusive or ("XOR"), ``+``
+for addition (equivalent to "OR"), and ``-`` for subtraction ("NEG").
+Here are some examples:
 
-A special type of data object is the *boolean* data object.
-It works only on three-dimensional objects.
-It is built by relating already existing data objects with boolean operators.
-The boolean logic may be nested using parentheses, and
-it supports the standard "AND", "OR", and "NOT" operators:
+.. code-block:: python
 
-* **"AND"** When two data objects are related with an "AND", the combined
-  data object is the volume of the simulation covered by both objects, and
-  not by just a single object.
-* **"OR"** When two data objects are related with an "OR", the combined
-  data object is the volume(s) of the simulation covered by either of the
-  objects.
-  For example, this may be used to combine disjoint objects into one.
-* **"NOT"** When two data objects are related with a "NOT", the combined
-  data object is the volume of the first object that the second does not
-  cover.
-  For example, this may be used to cut out part(s) of the first data object
-  utilizing the second data object.
-* **"(" or ")"** Nested logic is surrounded by parentheses. The order of
-  operations is such that the boolean logic is evaluated inside the
-  inner-most parentheses, first, then goes upwards.
-  The logic is read left-to-right at all levels (crucial for the "NOT"
-  operator).
+   import yt
+   ds = yt.load("snapshot_010.hdf5")
 
-Please see the :ref:`cookbook` for some examples of how to use the boolean
-data object.
+   sp1 = ds.sphere("c", (0.1, "unitary"))
+   sp2 = ds.sphere(sp1.center + 2.0 * sp1.radius, (0.2, "unitary"))
+   sp3 = ds.sphere("c", (0.05, "unitary"))
+
+   new_obj = sp1 + sp2
+   cutout = sp1 - sp3
+   sp4 = sp1 ^ sp2
+   sp5 = sp1 & sp2
+
+
+Note that the ``+`` operation and the ``|`` operation are identical.  When
+multiple objects are to be combined in an intersection or a union, the data
+objects ``intersection`` and ``union`` can be used instead; they yield
+slightly higher performance than a sequence of chained ``&`` or ``|``
+operations.  For instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("Enzo_64/DD0043/data0043")
+   sp1 = ds.sphere( (0.1, 0.2, 0.3), (0.05, "unitary"))
+   sp2 = ds.sphere( (0.2, 0.2, 0.3), (0.10, "unitary"))
+   sp3 = ds.sphere( (0.3, 0.2, 0.3), (0.15, "unitary"))
+
+   isp = ds.intersection( [sp1, sp2, sp3] )
+   usp = ds.union( [sp1, sp2, sp3] )
+
+The ``isp`` and ``usp`` objects will act the same as a set of chained ``&`` and
+``|`` operations (respectively) but will be somewhat faster.
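+
+As a quick consistency check, mirroring the comparisons made in yt's boolean
+region tests, one can verify (a sketch) that the two constructions select
+the same cells:
+
+.. code-block:: python
+
+   import numpy as np
+   from numpy.testing import assert_array_equal
+
+   chained = sp1 | sp2 | sp3
+   assert_array_equal(np.sort(chained["index", "morton_index"]),
+                      np.sort(usp["index", "morton_index"]))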
 
 .. _extracting-connected-sets:
 


https://bitbucket.org/yt_analysis/yt/commits/48ee00753385/
Changeset:   48ee00753385
Branch:      yt
User:        MatthewTurk
Date:        2016-07-05 13:43:47+00:00
Summary:     Adding xor tests, fixing some language.
Affected #:  2 files

diff -r 35989901ce9ed88caa2e5bb2eb29577cd1c6fe0c -r 48ee00753385c3b09579ea47e15e3525ea1cd4d2 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -648,7 +648,7 @@
    usp = ds.union( [sp1, sp2, sp3] )
 
 The ``isp`` and ``usp`` objects will act the same as a set of chained ``&`` and
-``|`` operations (respectively) but will be somewhat faster.
+``|`` operations (respectively) but are somewhat easier to construct.
 
 .. _extracting-connected-sets:
 

diff -r 35989901ce9ed88caa2e5bb2eb29577cd1c6fe0c -r 48ee00753385c3b09579ea47e15e3525ea1cd4d2 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -64,6 +64,10 @@
     b4 = bo4["index","morton_index"]
     b4.sort()
     assert_array_equal(b4, ii)
+    bo6 = sp1 ^ sp2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
  
 def test_boolean_spheres_overlap():
     r"""Test to make sure that boolean objects (spheres, overlap)
@@ -102,6 +106,10 @@
     b5.sort()
     assert_array_equal(b3, b4)
     assert_array_equal(b1, b5)
+    bo6 = sp1 ^ sp2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_regions_no_overlap():
     r"""Test to make sure that boolean objects (regions, no overlap)
@@ -148,6 +156,10 @@
     b5 = bo5["index","morton_index"]
     b5.sort()
     assert_array_equal(b3, b4)
+    bo6 = re1 ^ re2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_regions_overlap():
     r"""Test to make sure that boolean objects (regions, overlap)
@@ -186,7 +198,10 @@
     b5.sort()
     assert_array_equal(b3, b4)
     assert_array_equal(b1, b5)
-
+    bo6 = re1 ^ re2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_cylinders_no_overlap():
     r"""Test to make sure that boolean objects (cylinders, no overlap)
@@ -233,6 +248,10 @@
     b5 = bo5["index","morton_index"]
     b5.sort()
     assert_array_equal(b3, b4)
+    bo6 = cyl1 ^ cyl2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_cylinders_overlap():
     r"""Test to make sure that boolean objects (cylinders, overlap)
@@ -271,7 +290,10 @@
     b5.sort()
     assert_array_equal(b3, b4)
     assert_array_equal(b1, b5)
-
+    bo6 = cyl1 ^ cyl2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_ellipsoids_no_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, no overlap)
@@ -318,7 +340,10 @@
     b5 = bo5["index","morton_index"]
     b5.sort()
     assert_array_equal(b3, b4)
-
+    bo6 = ell1 ^ ell2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_ellipsoids_overlap():
     r"""Test to make sure that boolean objects (ellipsoids, overlap)
@@ -357,6 +382,10 @@
     b5.sort()
     assert_array_equal(b3, b4)
     assert_array_equal(b1, b5)
+    bo6 = ell1 ^ ell2
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(i1, i2))
 
 def test_boolean_mix_periodicity():
     r"""Test that a hybrid boolean region behaves as we expect.
@@ -407,3 +436,8 @@
     ii.sort()
     assert_array_equal(ii, b5)
 
+    bo6 = (re ^ sp) ^ cyl
+    b6 = bo6["index", "morton_index"]
+    b6.sort()
+    assert_array_equal(b6, np.setxor1d(np.setxor1d(rei, spi), cyli))
+


https://bitbucket.org/yt_analysis/yt/commits/a5e1caae47a5/
Changeset:   a5e1caae47a5
Branch:      yt
User:        MatthewTurk
Date:        2016-07-05 20:49:20+00:00
Summary:     Fixing unused import
Affected #:  1 file

diff -r 48ee00753385c3b09579ea47e15e3525ea1cd4d2 -r a5e1caae47a537ab2040e075946869ae8d46efa6 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -26,7 +26,6 @@
     fix_length
 from yt.geometry.selection_routines import \
     points_in_cells
-import yt.geometry.selection_routines as selection_routines
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \


https://bitbucket.org/yt_analysis/yt/commits/ec2c64261461/
Changeset:   ec2c64261461
Branch:      yt
User:        ngoldbaum
Date:        2016-07-06 16:45:08+00:00
Summary:     refactoring the chained selectors to remove some indirection
Affected #:  1 file
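
The change inlines the per-selector loops directly into the nogil selection
methods rather than routing each call through a GIL-holding _select_* helper.
In plain Python terms, the chained AND and OR semantics reduce to the
following sketch (not the Cython implementation):

    def chained_and(selectors, left_edge, right_edge):
        # AND: reject as soon as any selector rejects the bbox
        for sel in selectors:
            if sel.select_bbox(left_edge, right_edge) == 0:
                return 0
        return 1

    def chained_or(selectors, left_edge, right_edge):
        # OR: accept as soon as any selector accepts the bbox
        for sel in selectors:
            if sel.select_bbox(left_edge, right_edge) == 1:
                return 1
        return 0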

diff -r a5e1caae47a537ab2040e075946869ae8d46efa6 -r ec2c64261461f123b6964f26b470622a92f24275 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2307,106 +2307,62 @@
         for i in range(self.n_obj):
             self.selectors[i] = dobj.data_objects[i].selector
 
-    cdef int select_bbox(self, np.float64_t left_edge[3],
-                               np.float64_t right_edge[3]) nogil:
-        with gil:
-            return self._select_bbox(left_edge, right_edge)
-
-    cdef int _select_bbox(self, np.float64_t left_edge[3],
-                               np.float64_t right_edge[3]):
-        return 0
-
-    cdef int select_grid(self, np.float64_t left_edge[3],
-                         np.float64_t right_edge[3], np.int32_t level,
-                         Oct *o = NULL) nogil:
-        with gil:
-            return self._select_grid(left_edge, right_edge, level, o)
-
-    cdef int _select_grid(self, np.float64_t left_edge[3],
-                         np.float64_t right_edge[3], np.int32_t level,
-                         Oct *o = NULL):
-        return 0
-
-    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
-        with gil:
-            return self._select_cell(pos, dds)
-
-    cdef int _select_cell(self, np.float64_t pos[3], np.float64_t dds[3]):
-        return 0
-
-    cdef int select_point(self, np.float64_t pos[3]) nogil:
-        with gil:
-            return self._select_point(pos)
-
-    cdef int _select_point(self, np.float64_t pos[3]):
-        return 0
-
-    cdef int select_sphere(self, np.float64_t pos[3],
-                                 np.float64_t radius) nogil:
-        with gil:
-            return self._select_sphere(pos, radius)
-
-    cdef int _select_sphere(self, np.float64_t pos[3],
-                                 np.float64_t radius):
-        return 0
-
 cdef class ChainedBooleanANDSelector(ChainedBooleanSelector):
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_bbox(self, np.float64_t left_edge[3],
-                                np.float64_t right_edge[3]):
-        cdef np.ndarray[object, ndim=1] sels = self.selectors
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>sels[i]).select_bbox(left_edge, right_edge) == 0:
-                return 0
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3]) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_bbox(
+                        left_edge, right_edge) == 0:
+                    return 0
         return 1
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_grid(self, np.float64_t left_edge[3],
+    cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level,
-                         Oct *o = NULL):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_grid(left_edge, right_edge, level, o) == 0:
-                return 0
-            return 1
-
-    @cython.cdivision(True)
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int _select_cell(self, np.float64_t pos[3], np.float64_t dds[3]):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_cell(pos, dds) == 0:
-                return 0
+                         Oct *o = NULL) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_grid(
+                        left_edge, right_edge, level, o) == 0:
+                    return 0
         return 1
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_point(self, np.float64_t pos[3]):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_point(pos) == 0:
-                return 0
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_cell(
+                        pos, dds) == 0:
+                    return 0
         return 1
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_sphere(self, np.float64_t pos[3], np.float64_t radius):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_sphere(pos, radius) == 0:
-                return 0
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_point(pos) == 0:
+                    return 0
+        return 1
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_sphere(
+                        pos, radius) == 0:
+                    return 0
         return 1
 
     def _hash_vals(self):
@@ -2421,59 +2377,58 @@
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_bbox(self, np.float64_t left_edge[3],
-                               np.float64_t right_edge[3]):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_bbox(left_edge, right_edge) == 1:
-                return 1
+    cdef int select_bbox(self, np.float64_t left_edge[3],
+                         np.float64_t right_edge[3]) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_bbox(
+                        left_edge, right_edge) == 1:
+                    return 1
         return 0
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_grid(self, np.float64_t left_edge[3],
+    cdef int select_grid(self, np.float64_t left_edge[3],
                          np.float64_t right_edge[3], np.int32_t level,
-                         Oct *o = NULL):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_grid(left_edge, right_edge, level, o) == 1:
-                return 1
+                         Oct *o = NULL) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_grid(
+                        left_edge, right_edge, level, o) == 1:
+                    return 1
         return 0
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_cell(self, np.float64_t pos[3], np.float64_t dds[3]):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_cell(pos, dds) == 1:
-                return 1
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_cell(
+                        pos, dds) == 1:
+                    return 1
         return 0
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_point(self, np.float64_t pos[3]):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_point(pos) == 1:
-                return 1
+    cdef int select_point(self, np.float64_t pos[3]) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_point(pos) == 1:
+                    return 1
         return 0
 
     @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef int _select_sphere(self, np.float64_t pos[3], np.float64_t radius):
-        cdef np.ndarray[object, ndim=1] sels
-        cdef int i
-        for i in range(self.n_obj):
-            if (<SelectorObject>self.selectors[i]).select_sphere(pos, radius) == 1:
-                return 1
+    cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil:
+        with gil:
+            for i in range(self.n_obj):
+                if (<SelectorObject>self.selectors[i]).select_sphere(
+                        pos, radius) == 1:
+                    return 1
         return 0
 
     def _hash_vals(self):


https://bitbucket.org/yt_analysis/yt/commits/b345fa3445df/
Changeset:   b345fa3445df
Branch:      yt
User:        MatthewTurk
Date:        2016-07-06 19:44:51+00:00
Summary:     Updating indentation
Affected #:  1 file

diff -r ec2c64261461f123b6964f26b470622a92f24275 -r b345fa3445df59a1bd569c913cce116fb65d4ffa yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -1,6 +1,6 @@
 from yt.testing import \
-        fake_amr_ds, \
-        assert_array_equal
+    fake_amr_ds, \
+    assert_array_equal
 import numpy as np
 
 def get_ds():


https://bitbucket.org/yt_analysis/yt/commits/dcf81a60ce52/
Changeset:   dcf81a60ce52
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 17:50:33+00:00
Summary:     Merging with upstream
Affected #:  204 files

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,9 +37,11 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
 yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_construction.cpp
 yt/utilities/lib/mesh_intersection.cpp
@@ -49,6 +51,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
 yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
@@ -62,6 +65,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
 syntax: glob
 *.pyc
 *.pyd

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,4 +5160,38 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
+882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
+a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
+41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
+3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
+bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
+34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
+2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4
+21c0314cee16242b6685e42a74d16f7a993c9a88 yt-2.5.5
+053487f48672b8fd5c43af992e92bc2f2499f31f yt-2.6
+d43ff9d8e20f2d2b8f31f4189141d2521deb341b yt-2.6.1
+f1e22ef9f3a225f818c43262e6ce9644e05ffa21 yt-2.6.2
+816186f16396a16853810ac9ebcde5057d8d5b1a yt-2.6.3
 f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+73a9f749157260c8949f05c07715305aafa06408 yt-3.0.0
+0cf350f11a551f5a5b4039a70e9ff6d98342d1da yt-3.0.1
+511887af4c995a78fe606e58ce8162c88380ecdc yt-3.0.2
+fd7cdc4836188a3badf81adb477bcc1b9632e485 yt-3.1.0
+28733726b2a751e774c8b7ae46121aa57fd1060f yt-3.2
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+f7ca21c7b3fdf25d2ccab139849ae457597cfd5c yt-3.2.1
+a7896583c06585be66de8404d76ad5bc3d2caa9a yt-3.2.2
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
+7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
+9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command to make sure you are on
+the "yt" branch, and not another one such as "stable" (this will be important
+later when you want to submit your pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
                 Adam Ginsburg (keflavich at gmail.com)
+                Austin Gilbert (augilbert4 at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 William Gray (graywilliamj at gmail.com)

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
+include README* CREDITS COPYING.txt CITATION  setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/extensions/config_help.py
--- /dev/null
+++ b/doc/extensions/config_help.py
@@ -0,0 +1,34 @@
+import re
+import subprocess
+from docutils import statemachine
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('config_help', GetConfigHelp)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class GetConfigHelp(Directive):
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = True
+
+    def run(self):
+        # the .rst file that invoked this directive
+        rst_file = self.state_machine.document.attributes['source']
+        # run the given command with "-h" and capture its help text
+        data = subprocess.check_output(
+            self.arguments[0].split(" ") + ['-h']).decode('utf8').split('\n')
+        # locate the usage line listing the subcommands, e.g. "{a,b,c}"
+        ind = next((i for i, val in enumerate(data)
+                    if re.match('\s{0,3}\{.*\}\s*$', val)))
+        # insert everything after it as a literal code block
+        lines = ['.. code-block:: none', ''] + data[ind + 1:]
+        self.state_machine.insert_input(
+            statemachine.string2lines("\n".join(lines)), rst_file)
+        return []

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/helper_scripts/generate_doap.py
--- a/doc/helper_scripts/generate_doap.py
+++ b/doc/helper_scripts/generate_doap.py
@@ -132,7 +132,6 @@
             f.write(templates["foaf"] % {'realname': dev_name})
             f.write("</developer>\n")
         for release in known_releases + get_release_tags():
-            print release
             f.write(templates["release"] % {
                 'name': "yt " + release[0], 'revision': release[0], 'date': release[1]}
             )

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -115,10 +116,31 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
+    if [ -n "${PYTHONPATH}" ]
+    then
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        echo
+        echo "The PYTHONPATH environment variable is set to:"
+        echo
+        echo "    $PYTHONPATH"
+        echo
+        echo "If dependencies of yt (numpy, scipy, matplotlib) are installed"
+        echo "to this path, this may cause issues. Exit the install script"
+        echo "with Ctrl-C and unset PYTHONPATH if you are unsure."
+        echo "Hit enter to continue."
+        echo
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        read -p "[hit enter]"
+    fi
 else
     if [ $INST_YT_SOURCE -eq 0 ]
     then
@@ -466,21 +488,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -492,8 +512,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -510,6 +530,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -524,11 +555,11 @@
 echo
 
 printf "%-18s = %s so I " "INST_CONDA" "${INST_CONDA}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_CONDA}
 echo "be installing a conda-based python environment"
 
 printf "%-18s = %s so I " "INST_YT_SOURCE" "${INST_YT_SOURCE}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_YT_SOURCE}
 echo "be compiling yt from source"
 
 printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}"
@@ -539,9 +570,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -744,6 +775,12 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function test_install
+{
+    echo "Testing that yt can be imported"
+    ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import yt" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -1238,6 +1275,8 @@
     ( cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh 2>&1 ) 1>> ${LOG_FILE}
     sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
 
+    test_install
+
     function print_afterword
     {
         echo
@@ -1385,7 +1424,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1399,14 +1438,21 @@
         log_cmd conda install --yes ${YT_DEP}
     done
 
+    if [ $INST_PY3 -eq 1 ]
+    then
+        echo "Installing mercurial"
+        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
+    fi
+
     log_cmd pip install python-hglib
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1453,22 +1499,15 @@
         fi
     fi
 
-    if [ $INST_PY3 -eq 1 ]
-    then
-        echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
-        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
-    fi
-
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install --yes yt
+        log_cmd conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi
@@ -1478,10 +1517,12 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE}
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 
+    test_install
+
     echo
     echo
     echo "========================================================================"

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/_static/apiKey01.jpg
Binary file doc/source/_static/apiKey01.jpg has changed

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/_static/apiKey02.jpg
Binary file doc/source/_static/apiKey02.jpg has changed

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/_static/apiKey03.jpg
Binary file doc/source/_static/apiKey03.jpg has changed

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/_static/apiKey04.jpg
Binary file doc/source/_static/apiKey04.jpg has changed

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,10 +4,9 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso at physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the yt source code,
-   so the program may be bug ridden.  Please send yt-dev an email and
-   address to Geoffrey So if you discover something wrong with this
-   portion of the code.
+.. warning:: This functionality is currently broken and needs to
+   be updated to make use of the :ref:`halo_catalog` framework.
+   Anyone interested in doing so should contact the yt-dev list.
 
 Purpose
 -------

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -3,14 +3,16 @@
 Halo Analysis
 =============
 
-Using halo catalogs, understanding the different halo finding methods,
-and using the halo mass function.
+This section covers halo finding, performing extra analysis on halos,
+and the halo mass function calculator.  If you already have halo
+catalogs and simply want to load them into yt, see
+:ref:`halo-catalog-data`.
 
 .. toctree::
    :maxdepth: 2
 
+   halo_catalogs
+   halo_mass_function
    halo_transition
-   halo_catalogs
-   halo_finders
-   halo_mass_function
    halo_merger_tree
+   ellipsoid_analysis

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,42 @@
 .. _halo_catalog:
 
-Halo Catalogs
-=============
+Halo Finding and Analysis
+=========================
 
-Creating Halo Catalogs
-----------------------
+In yt-3.x, halo finding and analysis are combined into a single
+framework called the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+This framework is substantially different from the halo analysis
+machinery available in yt-2.x and is entirely backward incompatible.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.x, see :ref:`halo-transition`.
 
-In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought
-together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is
-entirely backward incompatible.
-For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo-transition`.
+.. _halo_catalog_finding:
 
-A catalog of halos can be created from any initial dataset given to halo
-catalog through data_ds. These halos can be found using friends-of-friends,
-HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
-For more details on the relative differences between these halo finders see
-:ref:`halo_finding`.
+Halo Finding
+------------
 
-The class which holds all of the halo information is the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+If you already have a halo catalog, either produced by one of the methods
+below or in a format described in :ref:`halo-catalog-data`, and want to
+perform further analysis, skip to :ref:`halo_catalog_analysis`.
+
+Three halo finding methods exist within yt.  These are:
+
+* :ref:`fof_finding`: a basic friend-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+  <http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
+  <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
+  scales well and does substructure finding (`Behroozi et al.
+  2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
+
+Halo finding is performed through the creation of a
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+object.  The dataset on which halo finding is to be performed should
+be loaded and given to the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+along with the ``finder_method`` keyword to specify the method to be
+used.
 
 .. code-block:: python
 
@@ -31,28 +45,195 @@
 
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
 
-A halo catalog may also be created from already run rockstar outputs.
-This method is not implemented for previously run friends-of-friends or
-HOP finders. Even though rockstar creates one file per processor,
-specifying any one file allows the full catalog to be loaded. Here we
-only specify the file output by the processor with ID 0. Note that the
-argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
+The ``finder_method`` options should be given as "fof", "hop", or
+"rockstar".  Each of these methods has its own set of keyword
+arguments to control functionality.  These can be specified in the form
+of a dictionary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_ds=halos_ds)
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-Although supplying only the binary output of the rockstar halo finder
-is sufficient for creating a halo catalog, it is not possible to find
-any new information about the identified halos. To associate the halos
-with the dataset from which they were found, supply arguments to both
-halos_ds and data_ds.
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={"ptype": "stars",
+                                   "padding": 0.02})
+   hc.create()
+
+For a full list of keywords for each halo finder, see
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
+and
+:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+
+.. _fof_finding:
+
+FOF
+^^^
+
+This is a basic friends-of-friends algorithm.  See
+`Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_ for more
+details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+
+.. _hop_finding:
+
+HOP
+^^^
+
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
+groups in this fashion:
+
+#. Estimates the local density at each particle using a
+   smoothing kernel.
+
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
+   its own densest neighbor is the end of the chain.
+
+#. All chains that share the same densest particle are
+   grouped together.
+
+#. Groups are included, linked together, or discarded
+   depending on the user-supplied overdensity
+   threshold parameter. The default is 160.0.
+
+See the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
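+As a toy illustration of steps 2 and 3 (a sketch only; ``densest_neighbor``
+is a hypothetical precomputed array, not part of the yt API):
+
+.. code-block:: python
+
+   import numpy as np
+
+   def build_chains(densest_neighbor):
+       # densest_neighbor[i] is the index of particle i's densest
+       # neighbor; a particle that is its own densest neighbor ends a
+       # chain.  Assumes chains terminate, as in HOP's construction.
+       n = len(densest_neighbor)
+       group = np.empty(n, dtype=np.int64)
+       for i in range(n):
+           j = i
+           while densest_neighbor[j] != j:
+               j = densest_neighbor[j]
+           # all particles that reach the same chain end share a group
+           group[i] = j
+       return group
+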
+.. _rockstar_finding:
+
+Rockstar
+^^^^^^^^
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and noise-
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
+
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
+simulations, but does make it less useful for finding halos from the stellar
+mass. In simulations where the highest-resolution particles all have the
+same mass (i.e., zoom-in grid-based simulations), one can set up a particle
+filter to select the lowest-mass particles and perform the halo finding
+only on those.  See this cookbook recipe for an example:
+:ref:`cookbook-rockstar-nested-grid`.
+
+To run the Rockstar halo finder, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+The script above configures the halo finder and launches a server process,
+which disseminates run information and coordinates the writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the
+halo catalog through the ``finder_kwargs`` argument:
+
+* ``dm_type``, the index of the dark matter particle. Default is 1.
+* ``outbase``, this is where the out*list files that Rockstar makes should be
+  placed. Default is 'rockstar_halos'.
+* ``num_readers``, the number of reader tasks (which are idle most of the
+  time). Default is 1.
+* ``num_writers``, the number of writer tasks (which are fed particles and
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
+  configured from the number of available MPI tasks.
+* ``force_res``, the resolution that Rockstar uses for various calculations
+  and smoothing lengths. This is in units of Mpc/h.
+  If no value is provided, this parameter is automatically set to
+  the width of the smallest grid element in the simulation from the
+  last data snapshot (i.e. the one where time has evolved the
+  longest) in the time series:
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
+* ``total_particles``, if supplied, this is a pre-calculated
+  total number of dark matter
+  particles present in the simulation. For example, this is useful
+  when analyzing a series of snapshots where the number of dark
+  matter particles should not change and this will save some disk
+  access time. If left unspecified, it will
+  be calculated automatically. Default: ``None``.
+* ``dm_only``, if set to ``True``, it will be assumed that there are
+  only dark matter particles present in the simulation.
+  This option does not modify the halos found by Rockstar, however
+  this option can save disk access time if there are no star particles
+  (or other non-dark matter particles) in the simulation. Default: ``False``.
+
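+For example, a Rockstar run with explicit reader and writer counts might look
+like this (a sketch assembled from the options above):
+
+.. code-block:: python
+
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='rockstar',
+                    finder_kwargs={"num_readers": 1, "num_writers": 2})
+   hc.create()
+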
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+.. _rockstar-installation:
+
+Installing Rockstar
+"""""""""""""""""""
+
+Because of changes in the Rockstar API over time, yt currently works only with
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  # or your favorite editor, to change INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.
+
+.. _halo_catalog_analysis:
+
+Extra Halo Analysis
+-------------------
+
+As a reminder, all halo catalogs created by the methods outlined in
+:ref:`halo_catalog_finding` as well as those in the formats discussed in
+:ref:`halo-catalog-data` can be loaded into yt as first-class datasets.
+Once a halo catalog has been created, further analysis can be performed
+by providing both the halo catalog and the original simulation dataset to
+the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
@@ -60,24 +241,28 @@
 associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
-Analysis Using Halo Catalogs
-----------------------------
-
-Analysis is done by adding actions to the
+The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+allows the user to create a pipeline of analysis actions that will be
+performed on all halos in the existing catalog.  The analysis can be
+performed in parallel with separate processors or groups of processors
+being allocated to perform the entire pipeline on individual halos.
+The pipeline is set up by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
 
-* Filters
-* Quantities
-* Callbacks
-* Recipes
+* :ref:`halo_catalog_filters`
+* :ref:`halo_catalog_quantities`
+* :ref:`halo_catalog_callbacks`
+* :ref:`halo_catalog_recipes`
 
 A list of all available filters, quantities, and callbacks can be found in
 :ref:`halo_analysis_ref`.
 All interaction with this analysis can be performed by importing from
 halo_analysis.
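+
+For example, a short pipeline might chain a filter and a callback before the
+final call to ``create`` (a sketch; the specific filter and callback names
+are illustrative):
+
+.. code-block:: python
+
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.add_filter('quantity_value', 'particle_mass', '>', 1e13, 'Msun')
+   hc.add_callback('sphere', factor=2.0)
+   hc.create()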
 
+.. _halo_catalog_filters:
+
 Filters
 ^^^^^^^
 
@@ -118,6 +303,8 @@
    # ... Later on in your script
    hc.add_filter("my_filter")
 
+.. _halo_catalog_quantities:
+
 Quantities
 ^^^^^^^^^^
 
@@ -176,6 +363,8 @@
    # ... Anywhere after "my_quantity" has been called
    hc.add_callback("print_quantity")
 
+.. _halo_catalog_callbacks:
+
 Callbacks
 ^^^^^^^^^
 
@@ -214,6 +403,8 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+.. _halo_catalog_recipes:
+
 Recipes
 ^^^^^^^
 
@@ -258,8 +449,8 @@
 object as the first argument, recipe functions should take a ``HaloCatalog``
 object as the first argument.
 
-Running Analysis
-----------------
+Running the Pipeline
+--------------------
 
 After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
@@ -290,7 +481,7 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to load. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
@@ -303,8 +494,8 @@
                    filename="virial_profiles")
    hc.load()
 
-Worked Example of Halo Catalog in Action
-----------------------------------------
+Halo Catalog in Action
+----------------------
 
 For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. _halo_finding:
-
-Halo Finding
-============
-
-There are three methods of finding particle haloes in yt. The
-default method is called HOP, a method described
-in `Eisenstein and Hut (1998)
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
-friends-of-friends (e.g. `Efstathiou et al. (1985)
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
-finder is also implemented. Finally Rockstar (`Behroozi et a.
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
-a 6D-phase space halo finder developed by Peter Behroozi that
-excels in finding subhalos and substrcture, but does not allow
-multiple particle masses.
-
-.. _hop:
-
-HOP
----
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
-   smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
-   particle to its densest neighbor. A particle which is
-   its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
-   grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied over density
-   threshold parameter. The default is 160.0.
-
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
-documentation.
-
-.. _fof:
-
-FOF
----
-
-A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
-documentation.
-
-.. _rockstar:
-
-Rockstar Halo Finding
----------------------
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the
-same mass (i.e., zoom-in grid-based simulations), one can set up a particle
-filter to select the lowest-mass particles and perform the halo finding
-only on those.  See this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
-
-To run the Rockstar Halo finding, you must launch python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
-   connected by an Infiniband network can be problematic. Therefore, for now
-   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
-   using this flag: ``--mca btl ^openib``.
-   For example, here is how Rockstar might be called using 24 cores:
-   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-Such a script configures the halo finder and launches a server process, which
-disseminates run information and coordinates reader-writer processes.
-Afterwards, it launches reader and writer tasks, filling the available MPI
-slots, which alternately read particle information and analyze for halo
-content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument (a sketch follows the list):
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, this is where the out*list files that Rockstar makes should be
-  placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time). Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1.
-  If left undefined, the above options are automatically
-  configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
-  and smoothing lengths. This is in units of Mpc/h.
-  If no value is provided, this parameter is automatically set to
-  the width of the smallest grid element in the simulation from the
-  last data snapshot (i.e. the one where time has evolved the
-  longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
-  total number of dark matter
-  particles present in the simulation. For example, this is useful
-  when analyzing a series of snapshots where the number of dark
-  matter particles should not change and this will save some disk
-  access time. If left unspecified, it will
-  be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
-  only dark matter particles present in the simulation.
-  This option does not modify the halos found by Rockstar, however
-  this option can save disk access time if there are no star particles
-  (or other non-dark matter particles) in the simulation. Default: ``False``.
-
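As a sketch of how these options are passed (the dataset name is
illustrative; launch with ``mpirun`` and ``--parallel`` as described above):

.. code-block:: python

   import yt
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   yt.enable_parallelism()
   ds = yt.load("Enzo_64/DD0043/data0043")

   hc = HaloCatalog(data_ds=ds, finder_method='rockstar',
                    finder_kwargs={'num_readers': 1, 'num_writers': 4,
                                   'outbase': 'rockstar_halos'})
   hc.create()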
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-For more information, see the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
-
-.. _parallel-hop-and-fof:
-
-Parallel HOP and FOF
---------------------
-
-Both the HOP and FoF halo finders can run in parallel using simple
-spatial decomposition. In order to run them in parallel it is helpful
-to understand how it works. Below, in the first plot (i), is a simplified
-depiction of three haloes labeled 1, 2 and 3:
-
-.. image:: _images/ParallelHaloFinder.png
-   :width: 500
-
-Halo 3 is twice reflected around the periodic boundary conditions.
-
-In (ii), the volume has been sub-divided into four equal subregions,
-A, B, C and D, shown with dotted lines. Notice that halo 2 is now in
-two different subregions, C and D, and that halo 3 is now in three,
-A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 will be identified as a single halo, but haloes 2 and 3 are split
-up into multiple haloes, which is incorrect. The solution is to give
-each subregion padding to oversample into neighboring regions.
-
-In (iii), subregion C has oversampled into the other three regions,
-with the periodic boundary conditions taken into account, shown by
-dot-dashed lines. The other subregions oversample in a similar way.
-
-The halo finder is then run on each padded subregion independently
-and simultaneously. By oversampling like this, haloes 2 and 3 will
-both be enclosed fully in at least one subregion and identified
-completely.
-
-Haloes identified with centers of mass inside the padded part of a
-subregion are thrown out, eliminating the problem of halo duplication.
-The centers for the three haloes are shown with stars. Halo 1 will
-belong to subregion A, 2 to C and 3 to B.
-
-To run with parallel halo finding, you must supply a value for
-padding in the ``finder_kwargs`` argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. It controls how
-much padding is added to each of the six sides of a subregion.
-This value should be 2x-3x larger than the largest expected halo
-in the simulation. It is unlikely, of course, that the largest
-object in the simulation will be on a subregion boundary, but there
-is no way of knowing before the halo finder is run.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import *
-  ds = yt.load("data0001")
-
-  hc = HaloCatalog(data_ds = ds, finder_method = 'hop', finder_kwargs={'padding':0.02})
-  # --or--
-  hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
-
-In general, a little bit of padding goes a long way, and too much
-just slows down the analysis and doesn't improve the answer (but
-doesn't change it).  It may be worth your time to run the parallel
-halo finder at a few paddings to find the right amount, especially
-if you're analyzing many similar datasets.
-
-.. _rockstar-installation:
-
-Rockstar Installation
----------------------
-
-Because of changes in the Rockstar API over time, yt currently works only with
-a slightly older version of Rockstar.  This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy.  The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-    cd $YT_DEST
-    cd ..
-    vi install_script.sh  # or use your favorite editor to set INST_ROCKSTAR=1
-    bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.  You should now
-be able to use Rockstar and yt together.

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,11 +1,12 @@
 .. _halo-transition:
 
-Getting up to Speed with Halo Analysis in yt-3.0
-================================================
+Transitioning From yt-2 to yt-3
+===============================
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure.
+the new halo catalog infrastructure.  If you're starting
+from scratch, see :ref:`halo_catalog`.
 
 Finding Halos
 -------------

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -19,4 +19,3 @@
    two_point_functions
    clump_finding
    particle_trajectories
-   ellipsoid_analysis

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as a derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived field is added using
-``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : integer - The resolution of the column density calculation
-    as performed by HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer - This number controls the gridding of
-    the HEALPix projection onto the spherical surfaces. Higher numbers mean
-    higher quality. Default = 800j.
-

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -131,6 +131,16 @@
 
    ds.r[:,-180:0,:]
 
+If you specify a single slice, it will be repeated along all three dimensions.
+For instance, this will give all data::
+
+   ds.r[:]
+
+And this will select a box running from 0.4 to 0.6 along all three
+dimensions::
+
+   ds.r[0.4:0.6]
+
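A minimal, self-contained sketch of the above, using the in-memory
``fake_amr_ds`` helper from ``yt.testing`` purely for illustration:

.. code-block:: python

   from yt.testing import fake_amr_ds

   ds = fake_amr_ds()
   ad = ds.r[:]           # same selection as ds.all_data()
   box = ds.r[0.4:0.6]    # 0.4 to 0.6 along each of the three dimensions
   print(ad["index", "cell_volume"].sum())
   print(box["index", "cell_volume"].sum())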
 Selecting Fixed Resolution Regions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), with Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,8 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
+              'config_help']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')
@@ -67,9 +68,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3-dev'
+version = '3.4-dev'
 # The full version, including alpha/beta/rc tags.
-release = '3.3-dev'
+release = '3.4-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -38,7 +38,7 @@
 # again.
 
 render_source.set_volume(kd_low_res)
-render_source.set_fields('density')
+render_source.set_field('density')
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,6 +56,16 @@
 
 .. yt_cookbook:: simulation_analysis.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 
 .. _cookbook-time-series-analysis:
 
@@ -93,16 +103,6 @@
 
 .. yt_cookbook:: hse_field.py
 
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/colormaps.py
--- a/doc/source/cookbook/colormaps.py
+++ b/doc/source/cookbook/colormaps.py
@@ -7,11 +7,11 @@
 p = yt.ProjectionPlot(ds, "z", "density", width=(100, 'kpc'))
 p.save()
 
-# Change the colormap to 'jet' and save again.  We must specify
+# Change the colormap to 'dusk' and save again.  We must specify
 # a different filename here or it will save it over the top of
 # our first projection.
-p.set_cmap(field="density", cmap='jet')
-p.save('proj_with_jet_cmap.png')
+p.set_cmap(field="density", cmap='dusk')
+p.save('proj_with_dusk_cmap.png')
 
 # Change the colormap to 'hot' and save again.
 p.set_cmap(field="density", cmap='hot')

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -8,9 +8,12 @@
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
-# Trajectory should be given as (r, theta, phi)
-lr.make_light_ray(start_position=[0., 0., 0.],
-                  end_position=[1., 1., 1.],
+# These positions can be defined as xyz coordinates,
+# but here we just use the two opposite corners of the
+# simulation box.  Alternatively, a trajectory may be
+# given as (r, theta, phi).
+lr.make_light_ray(start_position=ds.domain_left_edge,
+                  end_position=ds.domain_right_edge,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=['temperature', 'density'])

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
 
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
 
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
 
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
 
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
 
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
 
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -20,7 +20,7 @@
    "source": [
     "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
     "\n",
-    "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+    "Next you should set the default ``test_data_dir`` in the ``~/.config/yt/ytrc`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
     "\n",
     "> [yt]\n",
     "\n",

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/developing/extensions.rst
--- /dev/null
+++ b/doc/source/developing/extensions.rst
@@ -0,0 +1,54 @@
+.. _extensions:
+
+Extension Packages
+==================
+
+.. note:: For some additional discussion, see `YTEP-0029
+          <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
+          this plan was designed.
+
+As of version 3.3 of yt, we have put into place new methods for easing the
+process of developing "extensions" to yt.  Extensions might be analysis
+packages, visualization tools, or other software projects that use yt as a base
+engine but that are versioned, developed and distributed separately.  This
+brings with it the advantage of retaining control over the versioning,
+contribution guidelines, scope, etc., while also providing a mechanism for
+disseminating information about it, and potentially a method of interacting
+with other extensions.
+
+We have created a few pieces of infrastructure for developing extensions,
+making them discoverable, and distributing them to collaborators.
+
+If you have a module you would like to retain some external control over, or
+that you don't feel would fit into yt, we encourage you to build it as an
+extension module and distribute and version it independently.
+
+Hooks for Extensions
+--------------------
+
+Starting with version 3.3 of yt, any package named with the prefix ``yt_`` is
+importable from the namespace ``yt.extensions``.  For instance, the
+``yt_interaction`` package ( https://bitbucket.org/data-exp-lab/yt_interaction
+) is importable as ``yt.extensions.interaction``.
+
+In subsequent versions, we plan to include in yt a catalog of known extensions
+and where to find them; this will put discoverability directly into the code
+base.
+
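As a sketch, assuming a hypothetical extension package installed under the
name ``yt_foo`` (the name is illustrative only):

.. code-block:: python

   import yt_foo                  # direct import of the installed package
   from yt.extensions import foo  # the same module, via the yt.extensions hook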
+Extension Template
+------------------
+
+A template for starting an extension module (or converting an existing set of
+code to an extension module) can be found at
+https://bitbucket.org/yt_analysis/yt_extension_template .
+
+To get started, download a zipfile of the template (
+https://bitbucket.org/yt_analysis/yt_extension_template/get/tip.zip ) and
+follow the directions in ``README.md`` to modify the metadata.
+
+Distributing Extensions
+-----------------------
+
+We encourage you to version your extension on your choice of hosting platform
+(Bitbucket, GitHub, etc.), and to distribute it widely.  We are presently
+working on deploying a method for listing extension modules on the yt webpage.

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -19,6 +19,7 @@
    developing
    building_the_docs
    testing
+   extensions
    debugdrive
    releasing
    creating_datatypes

diff -r b345fa3445df59a1bd569c913cce116fb65d4ffa -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -103,7 +103,7 @@
    accept no arguments. The test function should do some work that tests some
    functionality and should also verify that the results are correct using
    assert statements or functions.  
-# Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
    ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
    captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
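A minimal sketch of the yielded-tuple pattern and ``fake_random_ds`` usage
described above (the test name is hypothetical):

.. code-block:: python

   from yt.testing import fake_random_ds, assert_equal

   def test_cell_count():
       # fake_random_ds(16) builds a small in-memory uniform-grid dataset
       ds = fake_random_ds(16)
       ad = ds.all_data()
       # nose captures the yielded (function, args...) tuple as one test
       yield assert_equal, ad["index", "ones"].size, 16**3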
@@ -285,15 +285,12 @@
 
 These datasets are available at http://yt-project.org/data/.
 
-Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
-with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to test with.  Here is an example
-config file:
+Next, add the config parameter ``test_data_dir`` pointing to the
+directory with the test data you want to test with, e.g.:
 
 .. code-block:: none
 
-   [yt]
-   test_data_dir = /Users/tomservo/src/yt-data
+   $ yt config set yt test_data_dir /Users/tomservo/src/yt-data
 
 More data will be added over time.  To run the answer tests, you must first
 generate a set of test answers locally on a "known good" revision, then update
@@ -313,7 +310,7 @@
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+``~/.config/yt/ytrc`` file) in a file named ``local-tipsy``. To run the tipsy
 frontend's answer tests using a different yt changeset, update to that
 changeset, recompile if necessary, and run the tests using the following
 command:
@@ -487,7 +484,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require definition of which set of tests constitute a given
 answer. Configuration for the integration server is stored in

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/2b577ca93d6e/
Changeset:   2b577ca93d6e
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 17:59:50+00:00
Summary:     Responding to comments
Affected #:  4 files

diff -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -49,7 +49,9 @@
     YTFieldNotFound, \
     YTFieldTypeNotFound, \
     YTDataSelectorNotImplemented, \
-    YTDimensionalityError
+    YTDimensionalityError, \
+    YTBooleanObjectError, \
+    YTBooleanObjectsWrongDataset
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -1760,35 +1762,43 @@
         return self.quantities.total_quantity(("index", "cell_volume"))
 
     def __or__(self, other):
-        assert(isinstance(other, YTSelectionContainer3D))
-        assert(self.ds is other.ds)
+        if not isinstance(other, YTSelectionContainer3D):
+            raise YTBooleanObjectError(other)
+        if not self.ds is other.ds:
+            raise YTBooleanObjectsWrongDataset()
         # Should maybe do something with field parameters here
-        return YTBooleanOperator("OR", self, other, ds = self.ds)
+        return YTBooleanContainer("OR", self, other, ds = self.ds)
 
     def __invert__(self):
         # ~obj
         asel = yt.geometry.selection_routines.AlwaysSelector(self.ds)
-        return YTBooleanOperator("NOT", self, asel, ds = self.ds)
+        return YTBooleanContainer("NOT", self, asel, ds = self.ds)
 
     def __xor__(self, other):
-        assert(isinstance(other, YTSelectionContainer3D))
-        assert(self.ds is other.ds)
-        return YTBooleanOperator("XOR", self, other, ds = self.ds)
+        if not isinstance(other, YTSelectionContainer3D):
+            raise YTBooleanObjectError(other)
+        if not self.ds is other.ds:
+            raise YTBooleanObjectsWrongDataset()
+        return YTBooleanContainer("XOR", self, other, ds = self.ds)
 
     def __and__(self, other):
-        assert(isinstance(other, YTSelectionContainer3D))
-        assert(self.ds is other.ds)
-        return YTBooleanOperator("AND", self, other, ds = self.ds)
+        if not isinstance(other, YTSelectionContainer3D):
+            raise YTBooleanObjectError(other)
+        if not self.ds is other.ds:
+            raise YTBooleanObjectsWrongDataset()
+        return YTBooleanContainer("AND", self, other, ds = self.ds)
 
     def __add__(self, other):
         return self.__or__(other)
 
     def __sub__(self, other):
-        assert(isinstance(other, YTSelectionContainer3D))
-        assert(self.ds is other.ds)
-        return YTBooleanOperator("NEG", self, other, ds = self.ds)
+        if not isinstance(other, YTSelectionContainer3D):
+            raise YTBooleanObjectError(other)
+        if not self.ds is other.ds:
+            raise YTBooleanObjectsWrongDataset()
+        return YTBooleanContainer("NEG", self, other, ds = self.ds)
 
-class YTBooleanOperator(YTSelectionContainer3D):
+class YTBooleanContainer(YTSelectionContainer3D):
     """
     This is a boolean operation, accepting AND, OR, XOR, and NOT for combining
     multiple data objects.
@@ -1801,7 +1811,7 @@
     Parameters
     ----------
     op : string
-        Can be AND, OR, XOR or NOT.
+        Can be AND, OR, XOR, NOT or NEG.
     dobj1 : YTSelectionContainer3D
         The first selection object
     dobj2 : YTSelectionContainer3D

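As a sketch of how the operators defined above combine data objects (using
the in-memory ``fake_amr_ds`` helper, mirroring the spheres used in the
updated tests):

.. code-block:: python

   from yt.testing import fake_amr_ds

   ds = fake_amr_ds()
   sp1 = ds.sphere([0.45] * 3, 0.15)
   sp2 = ds.sphere([0.55] * 3, 0.15)

   union = sp1 | sp2      # OR; sp1 + sp2 is equivalent
   overlap = sp1 & sp2    # AND
   either = sp1 ^ sp2     # XOR: in one sphere but not both
   cutout = sp1 - sp2     # NEG: sp1 with sp2 removed
   outside = ~sp1         # NOT: everything outside sp1
   print(union["index", "cell_volume"].sum())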
diff -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -857,7 +857,7 @@
     def fwidth(self):
         return self.base_object.fwidth[self._cond_ind,:]
 
-class YTDataObjectIntersection(YTSelectionContainer3D):
+class YTIntersectionContainer3D(YTSelectionContainer3D):
     """
     This is a more efficient method of selecting the intersection of multiple
     data selection objects.

diff -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 yt/data_objects/tests/test_boolean_regions.py
--- a/yt/data_objects/tests/test_boolean_regions.py
+++ b/yt/data_objects/tests/test_boolean_regions.py
@@ -3,23 +3,9 @@
     assert_array_equal
 import numpy as np
 
-def get_ds():
-    from yt.utilities.lib.geometry_utils import compute_morton
-    def _morton_index(field, data):
-        eps = np.finfo("f8").eps
-        uq = data.ds.domain_left_edge.uq
-        LE = data.ds.domain_left_edge - eps * uq
-        RE = data.ds.domain_right_edge + eps * uq
-        # .ravel() only copies if it needs to
-        morton = compute_morton(data["index", "x"].ravel(),
-                                data["index", "y"].ravel(),
-                                data["index", "z"].ravel(), LE, RE)
-        morton.shape = data["index", "x"].shape
-        return morton.view("f8")
-    ds = fake_amr_ds()
-    ds.add_field(("index", "morton_index"), function=_morton_index,
-                       units = "")
-    return ds
+# We use morton indices in this test because they are single floating point
+# values that uniquely identify each cell.  That's a convenient way to compare
+# inclusion in set operations, since there are no duplicates.
 
 def test_boolean_spheres_no_overlap():
     r"""Test to make sure that boolean objects (spheres, no overlap)
@@ -28,7 +14,7 @@
     Test non-overlapping spheres. This also checks that the original spheres
     don't change as part of constructing the booleans.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     sp1 = ds.sphere([0.25, 0.25, 0.25], 0.15)
     sp2 = ds.sphere([0.75, 0.75, 0.75], 0.15)
     # Store the original indices
@@ -75,7 +61,7 @@
 
     Test overlapping spheres.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     sp1 = ds.sphere([0.45, 0.45, 0.45], 0.15)
     sp2 = ds.sphere([0.55, 0.55, 0.55], 0.15)
     # Get indices of both.
@@ -118,7 +104,7 @@
     Test non-overlapping regions. This also checks that the original regions
     don't change as part of constructing the booleans.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     re1 = ds.region([0.25]*3, [0.2]*3, [0.3]*3)
     re2 = ds.region([0.65]*3, [0.6]*3, [0.7]*3)
     # Store the original indices
@@ -167,7 +153,7 @@
 
     Test overlapping regions.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     re1 = ds.region([0.55]*3, [0.5]*3, [0.6]*3)
     re2 = ds.region([0.6]*3, [0.55]*3, [0.65]*3)
     # Get indices of both.
@@ -210,7 +196,7 @@
     Test non-overlapping cylinders. This also checks that the original cylinders
     don't change as part of constructing the booleans.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     cyl1 = ds.disk([0.25]*3, [1, 0, 0], 0.1, 0.1)
     cyl2 = ds.disk([0.75]*3, [1, 0, 0], 0.1, 0.1)
     # Store the original indices
@@ -259,7 +245,7 @@
 
     Test overlapping cylinders.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     cyl1 = ds.disk([0.45]*3, [1, 0, 0], 0.2, 0.2)
     cyl2 = ds.disk([0.55]*3, [1, 0, 0], 0.2, 0.2)
     # Get indices of both.
@@ -302,7 +288,7 @@
     Test non-overlapping ellipsoids. This also checks that the original
     ellipsoids don't change as part of constructing the booleans.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     ell1 = ds.ellipsoid([0.25]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
     ell2 = ds.ellipsoid([0.75]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
     # Store the original indices
@@ -351,7 +337,7 @@
 
     Test overlapping ellipsoids.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     ell1 = ds.ellipsoid([0.45]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
     ell2 = ds.ellipsoid([0.55]*3, 0.05, 0.05, 0.05, np.array([0.1]*3), 0.1)
     # Get indices of both.
@@ -392,7 +378,7 @@
 
     This also tests nested logic and that periodicity works.
     """
-    ds = get_ds()
+    ds = fake_amr_ds()
     re = ds.region([0.5]*3, [0.0]*3, [1]*3) # whole thing
     sp = ds.sphere([0.95]*3, 0.3) # wraps around
     cyl = ds.disk([0.05]*3, [1,1,1], 0.1, 0.4) # wraps around

diff -r dcf81a60ce52d364caee5f71d784f87f89eb5b35 -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -593,3 +593,19 @@
         v += self.message
         v += " Specified bounds are %s" % self.bounds
         return v
+
+class YTBooleanObjectError(YTException):
+    def __init__(self, bad_object):
+        self.bad_object = bad_object
+
+    def __str__(self):
+        v  = "Supplied:\n%s\nto a boolean operation" % (self.bad_object)
+        v += " but it is not a YTSelectionContainer3D object."
+        return v
+
+class YTBooleanObjectsWrongDataset(YTException):
+    def __init__(self):
+        pass
+
+    def __str__(self):
+        return "Boolean data objects must share a common dataset object."


https://bitbucket.org/yt_analysis/yt/commits/d18775c81220/
Changeset:   d18775c81220
Branch:      yt
User:        MatthewTurk
Date:        2016-09-29 22:17:31+00:00
Summary:     Merging from upstream
Affected #:  175 files

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -23,6 +23,7 @@
 yt/geometry/particle_smooth.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
+yt/utilities/lib/autogenerated_element_samplers.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
@@ -33,6 +34,7 @@
 yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
+yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
@@ -61,6 +63,7 @@
 yt/utilities/lib/quad_tree.c
 yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
+yt/utilities/lib/cosmology_time.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+include yt/utilities/mesh_types.yaml
 exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1429,25 +1429,24 @@
         YT_DEPS+=('netcdf4')   
     fi
     
-    # Here is our dependency list for yt
-    log_cmd conda update --yes conda
+    log_cmd ${DEST_DIR}/bin/conda update --yes conda
     
     log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
     for YT_DEP in "${YT_DEPS[@]}"; do
         echo "Installing $YT_DEP"
-        log_cmd conda install --yes ${YT_DEP}
+        log_cmd ${DEST_DIR}/bin/conda install --yes ${YT_DEP}
     done
 
     if [ $INST_PY3 -eq 1 ]
     then
         echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial
         log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
     fi
 
-    log_cmd pip install python-hglib
+    log_cmd ${DEST_DIR}/bin/pip install python-hglib
 
-    log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
+    log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
     if [ $INST_EMBREE -eq 1 ]
     then
@@ -1474,17 +1473,17 @@
         ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
         log_cmd unzip ${DEST_DIR}/src/master.zip
         pushd ${DEST_DIR}/src/pyembree-master &> /dev/null
-        log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
+        log_cmd ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
         popd &> /dev/null
     fi
 
     if [ $INST_ROCKSTAR -eq 1 ]
     then
         echo "Building Rockstar"
-        ( hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
-        ROCKSTAR_PACKAGE=$(conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
-        log_cmd conda build ${DEST_DIR}/src/yt_conda/rockstar
-        log_cmd conda install $ROCKSTAR_PACKAGE
+        ( ${DEST_DIR}/bin/hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
+        ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
+        log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
+        log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
         ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
     fi
 
@@ -1493,20 +1492,20 @@
     then
         if [ $INST_PY3 -eq 1 ]
         then
-            log_cmd pip install pyx
+            log_cmd ${DEST_DIR}/bin/pip install pyx
         else
-            log_cmd pip install pyx==0.12.1
+            log_cmd ${DEST_DIR}/bin/pip install pyx==0.12.1
         fi
     fi
 
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install -c conda-forge --yes yt
+        log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
@@ -1517,7 +1516,7 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -116,7 +116,12 @@
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
-Below, we will add H Lyman continuum.
+The wavelength refers to the location at which the continuum begins to be
+applied to the dataset; moving toward lower wavelengths, the optical depth
+decreases according to the defined power law.  The normalization value is
+the column density of the linked field that results in an optical depth of 1
+at the defined wavelength.  Below, we add the hydrogen Lyman continuum.
 
 .. code-block:: python
 
@@ -131,7 +136,7 @@
 Making the Spectrum
 ^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out
+Once all the lines and continua are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python

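As a numeric illustration of the continuum scaling described in the
absorption spectrum changes above (a sketch built from the stated
definitions, not the module's code: if the normalization N0 gives an optical
depth of 1 at the feature's wavelength lambda0, then tau falls off as a
power law toward shorter wavelengths):

.. code-block:: python

   import numpy as np

   N, N0 = 1.0e18, 1.6e17    # cm^-2; N0 plays the role of 'normalization'
   lambda0, p = 912.3, 3.0   # Lyman limit (Angstroms) and power-law index
   wavelength = np.linspace(600.0, lambda0, 4)
   tau = (N / N0) * (wavelength / lambda0) ** p  # tau(lambda0) = N / N0
   print(list(zip(wavelength.round(1), tau.round(2))))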
diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -13,8 +13,14 @@
 the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
+Setting up the Clump Finder
+---------------------------
+
 The clump finder requires a data object (see :ref:`data-objects`) and a field
-over which the contouring is to be performed.
+over which the contouring is to be performed.  The data object is then used
+to create the initial
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object that
+acts as the base for clump finding.
 
 .. code:: python
 
@@ -28,11 +34,15 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
+Clump Validators
+----------------
+
 At this point, every isolated contour will be considered a clump,
 whether this is physical or not.  Validator functions can be added to
 determine if an individual contour should be considered a real clump.
-These functions are specified with the ``Clump.add_validator`` function.
-Current, two validators exist: a minimum number of cells and gravitational
+These functions are specified with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator`
+function.  Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -52,7 +62,8 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can
+The :func:`~yt.analysis_modules.level_sets.clump_validators.add_validator`
+function adds the validator to a registry that can
 be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
@@ -60,9 +71,15 @@
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum
-and maximum of the contouring field, and the step size.  The lower value of the
-contour finder will be continually multiplied by the step size.
+Running the Clump Finder
+------------------------
+
+Clump finding then proceeds by calling the
+:func:`~yt.analysis_modules.level_sets.clump_handling.find_clumps` function.
+This function accepts the
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object, the initial
+minimum and maximum of the contouring field, and the step size.  The lower value
+of the contour finder will be continually multiplied by the step size.
 
 .. code:: python
 
@@ -71,41 +88,27 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
-with its own ``children`` attribute, and so on.
+Calculating Clump Quantities
+----------------------------
 
-A number of helper routines exist for examining the clump hierarchy.
-
-.. code:: python
-
-   # Write a text file of the full hierarchy.
-   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-   # Write a text file of only the leaf nodes.
-   write_clumps(master_clump,0, "%s_clumps.txt" % ds)
-
-   # Get a list of just the leaf nodes.
-   leaf_clumps = get_lowest_clumps(master_clump)
-
-``Clump`` objects can be used like all other data containers.
-
-.. code:: python
-
-   print(leaf_clumps[0]["gas", "density"])
-   print(leaf_clumps[0].quantities.total_mass())
-
-The writing functions will write out a series or properties about each
-clump by default.  Additional properties can be appended with the
-``Clump.add_info_item`` function.
+By default, a number of quantities will be calculated for each clump when the
+clump finding process has finished.  The default quantities are: ``total_cells``,
+``cell_mass``, ``mass_weighted_jeans_mass``, ``volume_weighted_jeans_mass``,
+``max_grid_level``, ``min_number_density``, and ``max_number_density``.
+Additional items can be added with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item`
+function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
 Just like the validators, custom info items can be added by defining functions
-that minimally accept a ``Clump`` object and return a string to be printed.
+that minimally accept a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object and return
+a format string and the value used to fill it.  These are then added to the list
+of available info items by calling
+:func:`~yt.analysis_modules.level_sets.clump_info_items.add_clump_info`:
 
 .. code:: python
 
@@ -121,10 +124,47 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**,
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
-**max_grid_level**, **min_number_density**, **max_number_density**, and
-**distance_to_main_clump**.
+Besides the quantities calculated by default, the following are available:
+``center_of_mass`` and ``distance_to_main_clump``.
+
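+As an illustration, a hypothetical custom info item (not one shipped with yt;
+``add_clump_info`` is imported from
+``yt.analysis_modules.level_sets.clump_info_items``) might look like the
+following sketch:
+
+.. code:: python
+
+   def _total_volume(clump):
+       # Report the clump's total cell volume.
+       volume = clump.data.quantities.total_quantity(("index", "cell_volume"))
+       return "Total volume: %e cm**3.", volume.in_units("cm**3")
+   add_clump_info("total_volume", _total_volume)
+
+   master_clump.add_info_item("total_volume")
+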
+Working with Clumps
+-------------------
+
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+contains a list of all sub-clumps.  Each sub-clump is also a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+with its own ``children`` attribute, and so on.
+
+.. code:: python
+
+   print(master_clump["gas", "density"])
+   print(master_clump.children)
+   print(master_clump.children[0]["gas", "density"])
+
+The entire clump tree can be traversed with a simple loop:
+
+.. code:: python
+
+   for clump in master_clump:
+       print(clump.clump_id)
+
+The :func:`~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps`
+function will return a list of the individual clumps that have no children
+of their own (the leaf clumps).
+
+.. code:: python
+
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
+
+   print(leaf_clumps[0]["gas", "density"])
+   print(leaf_clumps[0]["all", "particle_mass"])
+   print(leaf_clumps[0].quantities.total_mass())
+
+Visualizing Clumps
+------------------
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
@@ -134,3 +174,44 @@
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')
+
+Saving and Reloading Clump Data
+-------------------------------
+
+The clump tree can be saved as a reloadable dataset with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset`
+function.  This will save all info items that have been calculated as well as
+any field values specified with the *fields* keyword.  This function
+can be called for any clump in the tree, saving that clump and all those
+below it.
+
+.. code:: python
+
+   fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+
+The clump tree can then be reloaded as a regular dataset.  The ``tree`` attribute
+associated with the dataset provides access to the clump tree.  The tree can be
+iterated over in the same fashion as the original tree.
+
+.. code:: python
+
+   ds_clumps = yt.load(fn)
+   for clump in ds_clumps.tree:
+       print(clump.clump_id)
+
+The ``leaves`` attribute returns a list of all leaf clumps.
+
+.. code:: python
+
+   print(ds_clumps.leaves)
+
+Info items for each clump can be accessed with the ``clump`` field type.  Gas
+or grid fields should be accessed using the ``grid`` field type and particle
+fields should be accessed using the specific particle type.
+
+.. code:: python
+
+   my_clump = ds_clumps.leaves[0]
+   print(my_clump["clump", "cell_mass"])
+   print(my_clump["grid", "density"])
+   print(my_clump["all", "particle_mass"])

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -31,13 +31,13 @@
    print("hubble distance", co.hubble_distance())
 
    # distance from z = 0 to 0.5
-   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpccm/h"))
 
    # transverse distance
-   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpccm/h"))
 
    # comoving volume
-   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpccm**3"))
 
    # angular diameter distance
    print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
@@ -67,7 +67,16 @@
    # convert redshift to time after Big Bang (same as Hubble time)
    print("t from z", co.t_from_z(0.5).in_units("Gyr"))
 
-Note, that all distances returned are comoving distances.  All of the above
+.. warning::
+
+   Cosmological distance calculations return values that are either
+   in the comoving or proper frame, depending on the specific quantity.  For
+   simplicity, the proper and comoving frames are set equal to each other
+   within the cosmology calculator.  This means that for a given distance
+   value ``x``, ``x.to("Mpc")`` and ``x.to("Mpccm")`` will be the same.  The
+   user should take care to understand which reference frame is correct for
+   the given calculation.
+
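+For instance, continuing with the calculator ``co`` from above (a sketch of
+the caveat, not additional API):
+
+.. code-block:: python
+
+   d = co.comoving_radial_distance(0, 0.5)
+   # By construction, these print the same numerical value.
+   print(d.to("Mpc"))
+   print(d.to("Mpccm"))
+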
+All of the above
 functions accept scalar values and arrays.  The helper functions, `co.quan`
 and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
 unit registry of the cosmology calculator.  For more information on the usage

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -49,13 +49,18 @@
 * ``deltaz_min`` (*float*):  Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with
-  ``use_minimum_datasets`` set to False, this parameter specifies the
-  fraction of the total box size to be traversed before rerandomizing the
-  projection axis and center.  This was invented to allow light rays with
-  thin slices to sample coherent large scale structure, but in practice
-  does not work so well.  Try setting this parameter to 1 and see what
-  happens.  Default: 0.0.
+* ``max_box_fraction`` (*float*):  The maximum length, in terms of the size
+  of the domain, that a light ray segment may be in order to span the
+  redshift interval from one dataset to another.  If using a zoom-in
+  simulation, this parameter can
+  be set to the length of the high resolution region so as to limit ray segments
+  to that size.  If the high resolution region is not cubical, the smallest side
+  should be used.  Default: 1.0 (the size of the box)
+
+* ``minimum_coherent_box_fraction`` (*float*): Used to specify the minimum
+  length of a ray, in terms of the size of the domain, before the trajectory
+  is re-randomized.  Set to 0 to have the ray trajectory re-randomized for
+  every dataset.  Set to ``np.inf`` (infinity) to use a single trajectory
+  for the entire ray.  Default: 0.0.
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
@@ -67,7 +72,7 @@
 ---------------------
 
 Once the LightRay object has been instantiated, the
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay,make_light_ray`
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`
 function will trace out the rays in each dataset and collect information for all the
 fields requested.  The output file will be an HDF5 file containing all the
 cell field values for all the cells that were intersected by the ray.  A
@@ -85,6 +90,21 @@
 
 * ``seed`` (*int*): Seed for the random number generator.  Default: None.
 
+* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic
+  boundaries.  If False, ray trajectories will not be periodic.  Default : True.
+
+* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the
+  region in which rays are to be generated.  If None, the left edge will be
+  that of the domain.  Default: None.
+
+* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of
+  the region in which rays are to be generated.  If None, the right edge
+  will be that of the domain.  Default: None.
+
+* ``min_level`` (*int*): The minimum refinement level of the spatial region in
+  which the ray passes.  This can be used with zoom-in simulations where the
+  high resolution region does not keep a constant geometry.  Default: None.
+
 * ``start_position`` (*list* of floats): Used only if creating a light ray
   from a single dataset.  The coordinates of the starting position of the
   ray.  Default: None.
@@ -122,7 +142,82 @@
   slice and 1 to have all processors work together on each projection.
   Default: 1
 
-.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
+Useful Tips for Making LightRays
+--------------------------------
+
+Below are some tips that may come in handy for creating proper LightRays.
+
+How many snapshots do I need?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The number of snapshots required to traverse some redshift interval depends
+on the simulation box size and cosmological parameters.  Before running an
+expensive simulation only to find out that you don't have enough outputs
+to span the redshift interval you want, have a look at
+:ref:`planning-cosmology-simulations`.  The functionality described there
+will allow you to calculate the precise number of snapshots and specific
+redshifts at which they should be written.
+
+My snapshots are too far apart!
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``max_box_fraction`` keyword, provided when creating the `LightRay`,
+allows the user to control how long a ray segment can be for an
+individual dataset.  By default, the `LightRay` generator will try to
+make segments no longer than the size of the box to avoid sampling the
+same structures more than once.  However, this can be increased in the
+case that the redshift interval between datasets is longer than the
+box size.  Increasing this value should be done with caution as longer
+ray segments run a greater risk of coming back to somewhere near their
+original position.
+
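+For example, to permit segments up to 1.5 box lengths (a sketch; the
+parameter file and redshift bounds below are placeholders):
+
+.. code-block:: python
+
+   from yt.analysis_modules.cosmological_observation.api import LightRay
+
+   lr = LightRay("sim_parameter_file", simulation_type="Enzo",
+                 near_redshift=0.0, far_redshift=0.1,
+                 max_box_fraction=1.5)
+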
+What if I have a zoom-in simulation?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A zoom-in simulation has a high resolution region embedded within a
+larger, low resolution volume.  In this type of simulation, it is likely
+that you will want the ray segments to stay within the high resolution
+region.  To do this, you must first specify the size of the high
+resolution region when creating the `LightRay` using the
+``max_box_fraction`` keyword.  This will make sure that
+the calculation of the spacing of the segment datasets only takes into
+account the high resolution region and not the full box size.  If your
+high resolution region is not a perfect cube, specify the smallest side.
+Then, in the call to
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
+use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
+precise location of the high resolution region.
+
+Technically speaking, the ray segments should no longer be periodic
+since the high resolution region is only a sub-volume within the
+larger domain.  To make the ray segments non-periodic, set the
+``periodic`` keyword to False.  The LightRay generator will continue
+to generate randomly oriented segments until it finds one that fits
+entirely within the high resolution region.  If you have a high
+resolution region that can move and change shape slightly as structure
+forms, use the ``min_level`` keyword to mandate that the ray segment only
+pass through cells that are refined to at least some minimum level.
+
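+Putting these pieces together, a zoom-in setup might look like the following
+sketch (the parameter file, redshifts, edges, and level are placeholders):
+
+.. code-block:: python
+
+   from yt.analysis_modules.cosmological_observation.api import LightRay
+
+   # The high resolution region spans 0.2 of the domain on its smallest side.
+   lr = LightRay("sim_parameter_file", simulation_type="Enzo",
+                 near_redshift=0.0, far_redshift=0.1,
+                 max_box_fraction=0.2)
+   lr.make_light_ray(seed=123456789, periodic=False,
+                     left_edge=[0.25, 0.25, 0.25],
+                     right_edge=[0.45, 0.45, 0.45],
+                     min_level=5,
+                     fields=['temperature', 'density'],
+                     data_filename="my_ray.h5")
+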
+If the size of the high resolution region is not large enough to
+span the required redshift interval, the `LightRay` generator can
+be configured to treat the high resolution region as if it were
+periodic simply by setting the ``periodic`` keyword to True.  This
+option should be used with caution as it will lead to the creation
+of disconnected ray segments within a single dataset.
+
+I want a continuous trajectory over the entire ray.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Set the ``minimum_coherent_box_fraction`` keyword argument to a very
+large number, like infinity (``numpy.inf``).
+
+.. note::
+
+   As of :code:`yt-3.0`, the functionality for recording properties of
+   the nearest halo to each element of the ray no longer exists.  This
+   is still available in :code:`yt-2.x`.  If you would like to use this
+   feature in :code:`yt-3.x`, help is needed to port it over.  Contact
+   the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -4,7 +4,7 @@
 ===================================================
 
 If you want to run a cosmological simulation that will have just enough data
-outputs to create a cosmology splice, the
+outputs to create a light cone or light ray, the
 :meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice`
 function will calculate a list of redshift outputs that will minimally
 connect a redshift interval.
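+
+For instance (a sketch; the simulation parameter file and redshift bounds
+are placeholders):
+
+.. code-block:: python
+
+   from yt.analysis_modules.cosmological_observation.api import CosmologySplice
+
+   my_splice = CosmologySplice("sim_parameter_file", "Enzo")
+   my_splice.plan_cosmology_splice(0.0, 0.1, filename="redshift_outputs.out")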

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -107,11 +107,10 @@
    import yt
    ds = yt.load("galaxy0030/galaxy0030")
    source = ds.sphere( "c", (10, "kpc"))
-   profile = yt.create_profile(source,
-                               [("gas", "density")],          # the bin field
-                               [("gas", "temperature"),       # profile field
-                                ("gas", "radial_velocity")],  # profile field
-                               weight_field=("gas", "cell_mass"))
+   profile = source.profile([("gas", "density")],          # the bin field
+                            [("gas", "temperature"),       # profile field
+                             ("gas", "radial_velocity")],  # profile field
+                            weight_field=("gas", "cell_mass"))
 
 The binning, weight, and profile data can now be accessed as:
 
@@ -142,11 +141,10 @@
 
 .. code-block:: python
 
-   profile2d = yt.create_profile(source,
-                                 [("gas", "density"),      # the x bin field
-                                  ("gas", "temperature")], # the y bin field
-                                 [("gas", "cell_mass")],   # the profile field
-                                 weight_field=None)
+   profile2d = source.profile([("gas", "density"),      # the x bin field
+                               ("gas", "temperature")], # the y bin field
+                              [("gas", "cell_mass")],   # the profile field
+                              weight_field=None)
 
 Accessing the x, y, and profile fields work just as with one-dimensional profiles:
 
@@ -161,7 +159,10 @@
 phase plot that shows the distribution of mass in the density-temperature
 plane, with the average temperature overplotted.  The
 :func:`~matplotlib.pyplot.pcolormesh` function can be used to manually plot
-the 2D profile.
+the 2D profile.  If you want to generate a default profile plot, you can simply
+call::
+
+  profile.plot()
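+
+For the manual route, a minimal :func:`~matplotlib.pyplot.pcolormesh` sketch
+(assuming ``profile2d`` from the example above; ``.d`` strips units):
+
+.. code-block:: python
+
+   import matplotlib.pyplot as plt
+   import numpy as np
+
+   # Transpose so the x bin field lies along the horizontal axis.
+   plt.pcolormesh(np.log10(profile2d.x.d), np.log10(profile2d.y.d),
+                  np.log10(profile2d["gas", "cell_mass"].d.T))
+   plt.savefig("phase.png")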
 
 Three-dimensional profiles can be generated and accessed following
 the same procedures.  Additional keyword arguments are available to control

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -21,7 +21,7 @@
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
 im, sc = yt.volume_render(ds, 'density', fname='v0.png')
 sc.camera.set_width(ds.arr(100, 'kpc'))
-render_source = sc.get_source(0)
+render_source = sc.get_source()
 kd=render_source.volume
 
 # Print out specifics of KD Tree

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -10,7 +10,7 @@
 # Modify the transfer function
 
 # First get the render source, in this case the entire domain, with field ('gas','density')
-render_source = sc.get_source(0)
+render_source = sc.get_source()
 
 # Clear the transfer function
 render_source.transfer_function.clear()

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -27,14 +27,14 @@
 # As many validators can be added as you want.
 master_clump.add_validator("min_cells", 20)
 
+# Calculate center of mass for all clumps.
+master_clump.add_info_item("center_of_mass")
+
 # Begin clump finding.
 find_clumps(master_clump, c_min, c_max, step)
 
-# Write out the full clump hierarchy.
-write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-# Write out only the leaf nodes of the hierarchy.
-write_clumps(master_clump,0, "%s_clumps.txt" % ds)
+# Save the clump tree as a reloadable dataset
+fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
 
 # We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
@@ -46,5 +46,17 @@
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
-# Lastly, we write the plot to disk.
+# Save the plot to disk.
 prj.save('clumps')
+
+# Reload the clump dataset.
+cds = yt.load(fn)
+
+# Query fields for clumps in the tree.
+print (cds.tree["clump", "center_of_mass"])
+print (cds.tree.children[0]["grid", "density"])
+print (cds.tree.children[1]["all", "particle_mass"])
+
+# Get all of the leaf clumps.
+print (cds.leaves)
+print (cds.leaves[0]["clump", "cell_mass"])

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -8,7 +8,7 @@
 im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", sigma_clip=6.0)
 
 sc.camera.set_width(ds.arr(0.1,'code_length'))
-tf = sc.get_source(0).transfer_function
+tf = sc.get_source().transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
@@ -19,7 +19,7 @@
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
-tf = sc.get_source(0).transfer_function
+tf = sc.get_source().transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -250,6 +250,7 @@
 
 * ``InteractingJets/jet_000002``
 * ``WaveDarkMatter/psiDM_000020``
+* ``Plummer/plummer_000000``
 
 Halo Catalog
 ~~~~~~~~~~~~

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -834,8 +834,8 @@
    ds = yt.load("snapshot_061.hdf5")
 
 Gadget data in raw binary format can also be loaded with the ``load`` command.
-This is only supported for snapshots created with the ``SnapFormat`` parameter
-set to 1 (the standard for Gadget-2).
+This is supported for snapshots created with the ``SnapFormat`` parameter
+set to 1 (the standard for Gadget-2) or 2.
 
 .. code-block:: python
 
@@ -1069,6 +1069,8 @@
 This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
 e.g., ``("gamer","Dens")``, will be in code units.
 
+Particle data are supported and are always stored in the same file as the grid data.
+
 .. rubric:: Caveats
 
 * GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.
@@ -1262,23 +1264,37 @@
 
 .. code-block:: python
 
-   import yt
-   import numpy
-   from yt.utilities.exodusII_reader import get_data
+    import yt
+    import numpy as np
 
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+    coords = np.array([[0.0, 0.0],
+                       [1.0, 0.0],
+                       [1.0, 1.0],
+                       [0.0, 1.0]], dtype=np.float64)
 
-This uses a publically available `MOOSE <http://mooseframework.org/>`
-dataset along with the get_data function to parse the coords, connectivity,
-and data. Then, these can be loaded as an in-memory dataset as follows:
+    connect = np.array([[0, 1, 3],
+                        [1, 2, 3]], dtype=np.int64)
+
+    data = {}
+    data['connect1', 'test'] = np.array([[0.0, 1.0, 3.0],
+                                         [1.0, 2.0, 3.0]], dtype=np.float64)
+
+Here, we have made up a simple, 2D unstructured mesh dataset consisting of two
+triangles and one node-centered data field. This data can be loaded as an in-memory
+dataset as follows:
 
 .. code-block:: python
 
-    mesh_id = 0
-    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+    ds = yt.load_unstructured_mesh(connect, coords, data)
 
-Note that load_unstructured_mesh can take either a single or a list of meshes.
-Here, we have selected only the first mesh to load.
+Note that load_unstructured_mesh can take either a single mesh or a list of meshes.
+Here, we only have one mesh. The in-memory dataset can then be visualized as usual,
+e.g.:
+
+.. code-block:: python
+
+    sl = yt.SlicePlot(ds, 'z', 'test')
+    sl.annotate_mesh_lines()
 
 .. rubric:: Caveats
 
@@ -1517,6 +1533,57 @@
    # The halo mass
    print(ad["FOF", "particle_mass"])
 
+.. _loading-openpmd-data:
+
+openPMD Data
+------------
+
+`openPMD <http://www.openpmd.org>`_ is an open source meta-standard and naming
+scheme for mesh based data and particle data. It does not actually define a file
+format.
+
+HDF5 containers respecting the minimal set of meta information from
+versions 1.0.0 and 1.0.1 of the standard are compatible.
+Support for the ED-PIC extension is not available.  Mesh data in Cartesian
+coordinates and particle data can be read by this frontend.
+
+To load the first in-file iteration of an openPMD dataset using the standard HDF5
+output format:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load('example-3d/hdf5/data00000100.h5')
+
+If you operate on large files, you may want to modify the virtual chunking behaviour through
+``open_pmd_virtual_gridsize``. The supplied value is an estimate of the size of a single read request
+for each particle attribute/mesh (in bytes).
+
+.. code-block:: python
+
+  import yt
+  ds = yt.load('example-3d/hdf5/data00000100.h5', open_pmd_virtual_gridsize=10e4)
+  sp = yt.SlicePlot(ds, 'x', 'rho')
+  sp.show()
+
+Particle data is fully supported:
+
+.. code-block:: python
+
+  import yt
+  ds = yt.load('example-3d/hdf5/data00000100.h5')
+  ad = ds.all_data()
+  ppp = yt.ParticlePhasePlot(ad, 'particle_position_y', 'particle_momentum_y', 'particle_weighting')
+  ppp.show()
+
+.. rubric:: Caveats
+
+* 1D, 2D and 3D data is compatible, but lower dimensional data might yield
+  strange results since it gets padded and treated as 3D. Extraneous dimensions are
+  set to be of length 1.0m and have a width of one cell.
+* The frontend has hardcoded logic for renaming the openPMD ``position``
+  of particles to ``positionCoarse``.
+
 .. _loading-pyne-data:
 
 PyNE Data

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/quickstart/index.rst
--- a/doc/source/quickstart/index.rst
+++ b/doc/source/quickstart/index.rst
@@ -12,15 +12,27 @@
 on time, you can non-interactively go through the linked pages below and view the
 worked examples.
 
-To execute the quickstart interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
+To execute the quickstart interactively, you have a couple of options: 1) run
+the notebook from your own system or 2) run it from the URL
+https://demo.use.yt. Option 1 requires an existing installation of yt (see
+:ref:`getting-and-installing-yt`), a copy of the yt source (which you may
+already have depending on your installation choice), and a download of the
+tutorial datasets (totaling about 3 GB). If you know you are going to be a yt
+user and have the time to download the datasets, option 1 is a good choice.
+However, if you're only interested in getting a feel for yt and its
+capabilities, or you already have yt but don't want to spend time downloading
+the data, go ahead to https://demo.use.yt.
+
+If you're running the tutorial from your own system and you do not already have
+the yt repository, the easiest way to get the repository is to clone it using
+mercurial:
 
 .. code-block:: bash
 
    hg clone https://bitbucket.org/yt_analysis/yt
 
-Now start the IPython notebook from within the repository:
+Now start the IPython notebook from within the repository (we presume you have
+yt installed):
 
 .. code-block:: bash
 

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -323,6 +323,21 @@
    ~yt.frontends.moab.io.IOHandlerMoabH5MHex8
    ~yt.frontends.moab.io.IOHandlerMoabPyneHex8
 
+OpenPMD
+^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.open_pmd.data_structures.OpenPMDGrid
+   ~yt.frontends.open_pmd.data_structures.OpenPMDHierarchy
+   ~yt.frontends.open_pmd.data_structures.OpenPMDDataset
+   ~yt.frontends.open_pmd.fields.OpenPMDFieldInfo
+   ~yt.frontends.open_pmd.io.IOHandlerOpenPMDHDF5
+   ~yt.frontends.open_pmd.misc.parse_unit_dimension
+   ~yt.frontends.open_pmd.misc.is_const_component
+   ~yt.frontends.open_pmd.misc.get_component
+
 RAMSES
 ^^^^^^
 
@@ -399,6 +414,8 @@
    ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
    ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
    ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpTreeDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpContainer
    ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
    ~yt.frontends.ytdata.fields.YTGridFieldInfo
    ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5
@@ -442,6 +459,26 @@
    ~yt.data_objects.profiles.ParticleProfile
    ~yt.data_objects.profiles.create_profile
 
+.. _clump_finding:
+
+Clump Finding
+^^^^^^^^^^^^^
+
+The ``Clump`` object and associated functions can be used for identification
+of topologically disconnected structures, i.e., clump finding.
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.analysis_modules.level_sets.clump_handling.Clump
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset
+   ~yt.analysis_modules.level_sets.clump_handling.find_clumps
+   ~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps
+   ~yt.analysis_modules.level_sets.clump_info_items.add_clump_info
+   ~yt.analysis_modules.level_sets.clump_validators.add_validator
+
 .. _halo_analysis_ref:
 
 Halo Analysis

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,7 +34,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| GAMER                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
@@ -48,6 +48,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| openPMD               |     Y      |     Y     |      N     |   Y   |    Y     |    Y     |     N      | Partial  |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Orion                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | OWLS/EAGLE            |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -5,7 +5,7 @@
 how much output it displays, loading custom fields, loading custom colormaps,
 accessing test datasets regardless of where you are in the file system, etc.
 This customization is done through :ref:`configuration-file` and
-:ref:`plugin-file` both of which exist in your ``$HOME/.yt`` directory.
+:ref:`plugin-file` both of which exist in your ``$HOME/.config/yt`` directory.
 
 .. _configuration-file:
 
@@ -149,9 +149,10 @@
 Plugin File Format
 ^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/my_plugins.py`` as a plugin
-file, which should contain python code.  If accessing yt functions and classes
-they will not require the ``yt.`` prefix, because of how they are loaded.
+yt will look for and recognize the file ``$HOME/.config/yt/my_plugins.py`` as a
+plugin file, which should contain python code.  If accessing yt functions and
+classes they will not require the ``yt.`` prefix, because of how they are
+loaded.
 
 For example, if I created a plugin file containing:
 
@@ -159,7 +160,8 @@
 
    def _myfunc(field, data):
        return np.random.random(data["density"].shape)
-   add_field("random", function=_myfunc, units='auto')
+   add_field('random', function=_myfunc,
+             dimensions='dimensionless', units='auto')
 
 then all of my data objects would have access to the field ``random``.
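+
+To make use of the plugin file in a script, a minimal sketch (the dataset
+path is a placeholder):
+
+.. code-block:: python
+
+   import yt
+   yt.enable_plugins()  # executes $HOME/.config/yt/my_plugins.py
+
+   ds = yt.load("galaxy0030/galaxy0030")
+   print(ds.all_data()["random"])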
 

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -155,7 +155,7 @@
    "source": [
     "im, sc = yt.volume_render(ds, ['temperature'])\n",
     "\n",
-    "source = sc.get_source(0)\n",
+    "source = sc.get_source()\n",
     "source.set_transfer_function(tfh.tf)\n",
     "im2 = sc.render()\n",
     "\n",

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
--- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
+++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
@@ -57,7 +57,7 @@
    },
    "outputs": [],
    "source": [
-    "print (sc.get_source(0))"
+    "print (sc.get_source())"
    ]
   },
   {
@@ -177,7 +177,7 @@
     "tfh.tf.add_layers(10, colormap='gist_rainbow')\n",
     "\n",
     "# Grab the first render source and set it to use the new transfer function\n",
-    "render_source = sc.get_source(0)\n",
+    "render_source = sc.get_source()\n",
     "render_source.transfer_function = tfh.tf\n",
     "\n",
     "sc.render()\n",

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -105,7 +105,7 @@
     sc = yt.create_scene(ds)
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -133,7 +133,7 @@
     sc = yt.create_scene(ds)
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -165,7 +165,7 @@
     sc = yt.create_scene(ds, ('connect2', 'diffused'))
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -196,7 +196,7 @@
     sc = yt.create_scene(ds, ("connect1", "u"))
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -224,7 +224,7 @@
    sc = yt.create_scene(ds, ('connect2', 'diffused'))
 
    # override the default colormap
-   ms = sc.get_source(0)
+   ms = sc.get_source()
    ms.cmap = 'Eos A'
 
    # adjust the camera position and orientation
@@ -250,7 +250,7 @@
 
     # override the default colormap. This time we also override
     # the default color bounds
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'hot'
     ms.color_bounds = (500.0, 1700.0)
 
@@ -287,7 +287,7 @@
 
     # override the default colormap. This time we also override
     # the default color bounds
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'hot'
     ms.color_bounds = (500.0, 1700.0)
 
@@ -320,7 +320,7 @@
     sc = yt.create_scene(ds, ("connect2", "diffused"))
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # Create a perspective Camera
@@ -392,7 +392,7 @@
     sc = yt.create_scene(ds)
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 setup.py
--- a/setup.py
+++ b/setup.py
@@ -114,6 +114,9 @@
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.autogenerated_element_samplers",
+              ["yt/utilities/lib/autogenerated_element_samplers.pyx"],
+              include_dirs=["yt/utilities/lib/"]),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs),
@@ -152,6 +155,8 @@
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.cosmology_time",
+              ["yt/utilities/lib/cosmology_time.pyx"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],
@@ -193,7 +198,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses",
+    "amr_kdtools", "lenses", "distance_queue"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,7 +20,7 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
-  local_gamer_000:
+  local_gamer_001:
     - yt/frontends/gamer/tests/test_outputs.py
 
   local_gdf_000:
@@ -35,10 +35,10 @@
     - yt/frontends/owls_subfind/tests/test_outputs.py
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
-  
+
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
-  
+
   local_pw_006:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
@@ -46,32 +46,35 @@
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
     - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
-  
+
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
-  
-  local_varia_003:
+
+  local_varia_004:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
+    - yt/visualization/tests/test_mesh_slices.py:test_tri2
 
   local_orion_000:
     - yt/frontends/boxlib/tests/test_orion.py
-  
+
   local_ramses_000:
     - yt/frontends/ramses/tests/test_outputs.py
-  
+
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_001:
+  local_absorption_spectrum_005:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum
 
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
@@ -79,6 +82,7 @@
 other_tests:
   unittests:
      - '-v'
+     - '--exclude=test_mesh_slices'  # disable randomly failing test
   cookbook:
      - '-v'
      - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -203,6 +203,13 @@
             input_ds = input_file
         field_data = input_ds.all_data()
 
+        # temperature field required to calculate voigt profile widths
+        if ('temperature' not in input_ds.derived_field_list) and \
+           (('gas', 'temperature') not in input_ds.derived_field_list):
+            raise RuntimeError(
+                "('gas', 'temperature') field required to be present in %s "
+                "for AbsorptionSpectrum to function." % input_file)
+
         self.tau_field = np.zeros(self.lambda_field.size)
         self.absorbers_list = []
 
@@ -210,6 +217,7 @@
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
 
+        mylog.info("Creating spectrum")
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
@@ -268,47 +276,96 @@
                 redshift_eff = ((1 + redshift) * \
                                 (1 + field_data['redshift_dopp'])) - 1.
 
+        if not use_peculiar_velocity:
+            redshift_eff = redshift
+
         return redshift, redshift_eff
 
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                   observing_redshift=0.):
         """
-        Add continuum features to the spectrum.
+        Add continuum features to the spectrum.  Continua are recorded as
+        a name, associated field, wavelength, normalization value, and index.
+        Continua are applied at and below the denoted wavelength, where the
+        optical depth decreases as a power law of the desired index.  For
+        positive index values, this means optical depth is highest at the
+        denoted wavelength, and it drops with shorter and shorter wavelengths.
+        Consequently, transmitted flux undergoes a discontinuous cutoff at the
+        denoted wavelength, and then slowly increases with decreasing
+        wavelength according to the power law.
         """
         # Change the redshifts of continuum sources to account for the
         # redshift at which the observer sits
         redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                  use_peculiar_velocity, observing_redshift)
 
-        # Only add continuum features down to tau of 1.e-4.
-        min_tau = 1.e-3
+        # min_tau is the minimum optical depth value that warrants
+        # accounting for an absorber.  For a single absorber, noticeable
+        # continuum effects begin at tau = 1e-3 (leading to transmitted
+        # flux of e^-tau ~ 0.999).  But we apply a cutoff to remove
+        # absorbers with insufficient column density to contribute
+        # significantly to a continuum (see below).  Because lots of
+        # low column density absorbers can add up to a significant
+        # continuum effect, we normalize min_tau by n_absorbers.
+        n_absorbers = field_data['dl'].size
+        min_tau = 1.e-3/n_absorbers
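+        # (e.g., with 1e4 absorbers, min_tau becomes 1e-7)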
 
         for continuum in self.continuum_list:
-            column_density = field_data[continuum['field_name']] * field_data['dl']
+
+            # Normalization is in cm**-2, so column density must be as well
+            column_density = (field_data[continuum['field_name']] * 
+                              field_data['dl']).in_units('cm**-2')
+            if (column_density == 0).all():
+                mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
                 delta_lambda = continuum['wavelength'] * redshift_eff
             else:
                 delta_lambda = continuum['wavelength'] * redshift
+
+            # right index of the continuum-affected area is the wavelength itself
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
+            right_index = np.digitize(this_wavelength, 
+                                      self.lambda_field).clip(0, self.n_lambda)
+            # left index of the continuum-affected area is the wavelength at
+            # which optical depth reaches min_tau
             left_index = np.digitize((this_wavelength *
-                                     np.power((min_tau * continuum['normalization'] /
-                                               column_density), (1. / continuum['index']))),
-                                    self.lambda_field).clip(0, self.n_lambda)
+                              np.power((min_tau * continuum['normalization'] /
+                                        column_density),
+                                       (1. / continuum['index']))),
+                              self.lambda_field).clip(0, self.n_lambda)
 
+            # Only calculate the effects of continua where the normalized
+            # column density is greater than min_tau, because lower columns
+            # will not contribute significantly
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
+            if valid_continuua.size == 0:
+                mylog.info("Not adding continuum %s: insufficient column density or out of range" %
+                    continuum['label'])
+                continue
+
             pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
+
+            # Tau value is (wavelength / continuum_wavelength)**index *
+            #              (column_density / normalization),
+            # i.e. a power law decreasing as wavelength decreases
+
+            # Step through the absorber list and add continuum tau for each to
+            # the total optical depth for all wavelengths
             for i, lixel in enumerate(valid_continuua):
-                line_tau = np.power((self.lambda_field[left_index[lixel]:right_index[lixel]] /
-                                     this_wavelength[lixel]), continuum['index']) * \
-                                     column_density[lixel] / continuum['normalization']
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                cont_tau = \
+                    np.power((self.lambda_field[left_index[lixel] :
+                                                right_index[lixel]] /
+                                   this_wavelength[lixel]), \
+                              continuum['index']) * \
+                    (column_density[lixel] / continuum['normalization'])
+                self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau
                 pbar.update(i)
             pbar.finish()
 
@@ -333,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density == 0).all():
+                mylog.info("Not adding line %s: insufficient column density" % line['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
@@ -376,7 +436,10 @@
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            if use_peculiar_velocity:
+                vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            else:
+                vlos = np.zeros(field_data['temperature'].size)
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
@@ -413,6 +476,12 @@
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
 
+                # if there is a ray element with temperature = 0 or column
+                # density = 0, skip it
+                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
+                    pbar.update(i)
+                    continue
+
                 # the virtual window into which the line is deposited initially
                 # spans a region of 2 coarse spectral bins
                 # (one on each side of the center_index) but the window

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -33,7 +33,8 @@
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
 GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
-
+ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
+FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
@@ -145,6 +146,58 @@
     shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
+@requires_answer_testing()
+def test_absorption_spectrum_non_cosmo_novpec():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5', use_peculiar_velocity=False)
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=False)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo_novpec".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
 def test_equivalent_width_conserved():
     """
     This tests that the equivalent width of the optical depth is conserved 
@@ -360,3 +413,146 @@
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+@requires_file(ISO_GALAXY)
+@requires_answer_testing()
+def test_absorption_spectrum_with_continuum():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset and adds Lyman alpha and Lyman continuum to it
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(ISO_GALAXY)
+    lr = LightRay(ds)
+
+    ray_start = ds.domain_left_edge
+    ray_end = ds.domain_right_edge
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    my_label = 'Ly C'
+    field = 'H_number_density'
+    wavelength = 912.323660  # Angstroms
+    normalization = 1.6e17
+    index = 3.0
+
+    sp.add_continuum(my_label, field, wavelength, normalization, index)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_continuum".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_with_continuum.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(FIRE)
+def test_absorption_spectrum_with_zero_field():
+    """
+    This test generates an absorption spectrum from a particle dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(FIRE)
+    lr = LightRay(ds)
+
+    # Define species and associated parameters to add to the spectrum.
+    # These parameters are used both for adding the transition to the
+    # spectrum and for fitting.
+    # Note that for a single species that produces multiple lines
+    # (as in the OVI doublet), 'numLines' will equal the number of
+    # lines, and f, Gamma, and wavelength will have multiple values.
+
+    HI_parameters = {
+        'name': 'HI',
+        'field': 'H_number_density',
+        'f': [.4164],
+        'Gamma': [6.265E8],
+        'wavelength': [1215.67],
+        'mass': 1.00794,
+        'numLines': 1,
+        'maxN': 1E22, 'minN': 1E11,
+        'maxb': 300, 'minb': 1,
+        'maxz': 6, 'minz': 0,
+        'init_b': 30,
+        'init_N': 1E14
+    }
+
+    species_dicts = {'HI': HI_parameters}
+
+
+    # Get all fields that need to be added to the light ray
+    fields = [('gas','temperature')]
+    for s, params in species_dicts.items():
+        fields.append(params['field'])
+
+    # With a single dataset, a start_position and
+    # end_position or trajectory must be given.
+    # Trajectory should be given as (r, theta, phi)
+    lr.make_light_ray(
+        start_position=ds.arr([0., 0., 0.], 'unitary'),
+        end_position=ds.arr([1., 1., 1.], 'unitary'),
+        solution_filename='test_lightraysolution.txt',
+        data_filename='test_lightray.h5',
+        fields=fields)
+    
+    # Create an AbsorptionSpectrum object extending from
+    # lambda = 900 to lambda = 1400, with 50000 pixels
+    sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
+    
+    # Iterate over species
+    for s, params in species_dicts.items():
+        # Iterate over transitions for a single species
+        for i in range(params['numLines']):
+            # Add the lines to the spectrum
+            sp.add_line(
+                s, params['field'],
+                params['wavelength'][i], params['f'][i],
+                params['Gamma'][i], params['mass'],
+                label_threshold=1.e10)
+    
+    
+    # Make and save spectrum
+    wavelength, flux = sp.make_spectrum(
+        'test_lightray.h5',
+        output_file='test_spectrum.h5',
+        line_list_file='test_lines.txt',
+        use_peculiar_velocity=True)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
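
A note on the test pattern used throughout this file: each yielded
GenericArrayTest wraps data[key] in a lambda whose default argument captures
the current loop value (func = lambda x=key: data[x][:]). Without the default
argument, the closure would late-bind `key` and every yielded test would read
the last dataset key. A minimal illustration of the difference (plain Python,
no yt required):

    keys = ['wavelength', 'tau', 'flux']

    # late binding: every closure sees the final loop value
    broken = [lambda: key for key in keys]
    assert all(f() == 'flux' for f in broken)

    # a default argument captures the value at definition time
    fixed = [lambda k=key: k for key in keys]
    assert [f() for f in fixed] == keys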

diff -r 2b577ca93d6ea2a4f43dd8126a7b7dbbb4ee9278 -r d18775c812205191c1b71900cc2fd2d4528357d2 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -21,6 +21,8 @@
 from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.physical_constants import \
+    c
 
 class CosmologySplice(object):
     """
@@ -67,7 +69,11 @@
         max_box_fraction : float
             In terms of the size of the domain, the maximum length a light
             ray segment can be in order to span the redshift interval from
-            one dataset to another.
+            one dataset to another.  If using a zoom-in simulation, this
+            parameter can be set to the length of the high resolution
+            region so as to limit ray segments to that size.  If the
+            high resolution region is not cubical, the smallest side
+            should be used.
             Default: 1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
@@ -115,6 +121,7 @@
                 output['next'] = self.splice_outputs[i + 1]
 
         # Calculate maximum delta z for each data dump.
+        self.max_box_fraction = max_box_fraction
         self._calculate_deltaz_max()
 
         # Calculate minimum delta z for each data dump.
@@ -144,7 +151,7 @@
             self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
             cosmology_splice.append(self.splice_outputs[0])
             z = cosmology_splice[-1]["redshift"]
-            z_target = z - max_box_fraction * cosmology_splice[-1]["dz_max"]
+            z_target = z - cosmology_splice[-1]["dz_max"]
 
             # fill redshift space with datasets
             while ((z_target > near_redshift) and
@@ -172,7 +179,7 @@
 
                 cosmology_splice.append(current_slice)
                 z = current_slice["redshift"]
-                z_target = z - max_box_fraction * current_slice["dz_max"]
+                z_target = z - current_slice["dz_max"]
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
@@ -199,8 +206,8 @@
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
         
-        # change the 'next' and 'previous' pointers to point to the correct outputs for the created
-        # splice
+        # change the 'next' and 'previous' pointers to point to the correct outputs
+        # for the created splice
         for i, output in enumerate(cosmology_splice):
             if len(cosmology_splice) == 1:
                 output['previous'] = None
@@ -264,7 +271,8 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
+                                              self.max_box_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -282,72 +290,23 @@
         from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
+        target_distance = self.simulation.box_size * \
+          self.max_box_fraction
+        for output in self.splice_outputs:
+            output['dz_max'] = self._deltaz_forward(output['redshift'],
+                                                    target_distance)
 
-        target_distance = self.simulation.box_size
-
-        for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of the box
-            # at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.1 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            output['dz_max'] = np.abs(z2 - z)
-            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size / \
           self.simulation.domain_dimensions[0]
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of a
-            # top grid pixel at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.01 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            # Use this calculation or the absolute minimum specified by the user.
-            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
+            zf = self._deltaz_forward(output['redshift'],
+                                      target_distance)
+            output['dz_min'] = max(zf, deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,10 +316,13 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        # Calculate delta z that corresponds to the length of the
-        # box at a given redshift.
         z1 = z
-        z2 = z1 - 0.1 # just an initial guess
+        # Use Hubble's law for initial guess
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
+        v = self.cosmology.hubble_parameter(z) * target_distance
+        v = min(v, 0.9 * c)
+        dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
+        z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1

This diff is so big that we needed to truncate the remainder.
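
The cosmology_splice refactor above folds the two duplicated iteration loops
into _deltaz_forward, rolls max_box_fraction into dz_max itself (so the
z_target updates no longer reapply the multiplier), and seeds the iteration
with a relativistic-Doppler guess, v = H(z) * d capped at 0.9c and
dz = sqrt((1 + v/c) / (1 - v/c)) - 1, instead of a fixed offset. The loop is
a secant iteration (the old comments called it Newton's method, but no
analytic derivative is used). A simplified standalone sketch, with the
cosmology functions passed in as stand-ins for the yt Cosmology methods of
the same names:

    import numpy as np

    C_KMS = 299792.458  # speed of light in km/s

    def deltaz_forward(z, target_distance, comoving_radial_distance,
                       hubble_parameter, tol=1e-4, max_iter=100):
        # target_distance in comoving Mpc; hubble_parameter(z) in km/s/Mpc;
        # comoving_radial_distance(z_lo, z_hi) in comoving Mpc.
        # Relativistic-Doppler initial guess, as in the diff above.
        v = min(hubble_parameter(z) * target_distance, 0.9 * C_KMS)
        dz = np.sqrt((1. + v / C_KMS) / (1. - v / C_KMS)) - 1.
        z1, z2 = z, z - dz
        d1, d2 = 0.0, comoving_radial_distance(z2, z)
        for _ in range(max_iter):
            if abs(d2 - target_distance) / d2 <= tol:
                break
            m = (d2 - d1) / (z2 - z1)              # secant slope
            z1, d1 = z2, d2
            z2 = z2 + (target_distance - d2) / m
            d2 = comoving_radial_distance(z2, z)
        return abs(z2 - z)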

https://bitbucket.org/yt_analysis/yt/commits/97967ff3468e/
Changeset:   97967ff3468e
Branch:      yt
User:        MatthewTurk
Date:        2016-09-30 12:43:36+00:00
Summary:     Fixing style errors
Affected #:  1 file

diff -r d18775c812205191c1b71900cc2fd2d4528357d2 -r 97967ff3468e5365f139b706c162f024ada733e5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1876,7 +1876,7 @@
     def __or__(self, other):
         if not isinstance(other, YTSelectionContainer3D):
             raise YTBooleanObjectError(other)
-        if not self.ds is other.ds:
+        if self.ds is not other.ds:
             raise YTBooleanObjectsWrongDataset()
         # Should maybe do something with field parameters here
         return YTBooleanContainer("OR", self, other, ds = self.ds)
@@ -1889,14 +1889,14 @@
     def __xor__(self, other):
         if not isinstance(other, YTSelectionContainer3D):
             raise YTBooleanObjectError(other)
-        if not self.ds is other.ds:
+        if self.ds is not other.ds:
             raise YTBooleanObjectsWrongDataset()
         return YTBooleanContainer("XOR", self, other, ds = self.ds)
 
     def __and__(self, other):
         if not isinstance(other, YTSelectionContainer3D):
             raise YTBooleanObjectError(other)
-        if not self.ds is other.ds:
+        if self.ds is not other.ds:
             raise YTBooleanObjectsWrongDataset()
         return YTBooleanContainer("AND", self, other, ds = self.ds)
 
@@ -1906,7 +1906,7 @@
     def __sub__(self, other):
         if not isinstance(other, YTSelectionContainer3D):
             raise YTBooleanObjectError(other)
-        if not self.ds is other.ds:
+        if self.ds is not other.ds:
             raise YTBooleanObjectsWrongDataset()
         return YTBooleanContainer("NEG", self, other, ds = self.ds)
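
The underlying pattern being tidied up here is ordinary Python operator
overloading: each dunder method validates its operand and returns a lazily
evaluated boolean container rather than computing a selection eagerly. A
stripped-down sketch of the shape of the pattern (not the actual yt classes):

    class Region:
        """Toy stand-in for YTSelectionContainer3D."""
        def __init__(self, ds):
            self.ds = ds

        def _combine(self, op, other):
            if not isinstance(other, Region):
                raise TypeError(other)
            if self.ds is not other.ds:
                raise ValueError("operands belong to different datasets")
            return BooleanRegion(op, self, other)

        def __or__(self, other):  return self._combine("OR", other)
        def __and__(self, other): return self._combine("AND", other)
        def __xor__(self, other): return self._combine("XOR", other)
        def __sub__(self, other): return self._combine("NEG", other)

    class BooleanRegion(Region):
        """Defers evaluation: records the operator and its operands."""
        def __init__(self, op, a, b):
            super().__init__(a.ds)
            self.op, self.operands = op, (a, b)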
 


https://bitbucket.org/yt_analysis/yt/commits/743f56472b10/
Changeset:   743f56472b10
Branch:      yt
User:        MatthewTurk
Date:        2016-09-30 16:16:01+00:00
Summary:     Changing a few things to sample datasets
Affected #:  2 files

diff -r 97967ff3468e5365f139b706c162f024ada733e5 -r 743f56472b10f18505a7e1c1061b496050e8aaf1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1933,7 +1933,7 @@
     --------
 
     >>> import yt
-    >>> ds = yt.load("RedshiftOutput0005")
+    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
     >>> sp = ds.sphere("c", 0.1)
     >>> dd = ds.r[:,:,:]
     >>> new_obj = sp ^ dd

diff -r 97967ff3468e5365f139b706c162f024ada733e5 -r 743f56472b10f18505a7e1c1061b496050e8aaf1 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -924,7 +924,7 @@
     --------
 
     >>> import yt
-    >>> ds = yt.load("RedshiftOutput0005")
+    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
     >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1)
     >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1)
     >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1)
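
Composed together, the updated docstring examples behave like set algebra on
data objects; for instance (a hypothetical session against the IsolatedGalaxy
sample dataset):

    >>> import yt
    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1)
    >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1)
    >>> # cells in either sphere but not both
    >>> sym_diff = sp1 ^ sp2
    >>> # cells in sp1 that are not in sp2
    >>> only_sp1 = sp1 - sp2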

Repository URL: https://bitbucket.org/yt_analysis/yt/
