[yt-svn] commit/yt: 27 new changesets

commits-noreply at bitbucket.org
Mon Nov 23 13:23:14 PST 2015


27 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/0e2b4a169174/
Changeset:   0e2b4a169174
Branch:      yt
User:        MatthewTurk
Date:        2015-09-19 19:17:10+00:00
Summary:     Initial import of dataset access.
Affected #:  3 files

diff -r f818f29712491ce9f597decaea69297a06603393 -r 0e2b4a16917418247c7935b0574e343a9683ec8f yt/data_objects/dataset_access.py
--- /dev/null
+++ b/yt/data_objects/dataset_access.py
@@ -0,0 +1,104 @@
+"""
+An object that can live on the dataset to facilitate data access.
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import weakref
+import types
+
+import yt.units
+from yt.utilities.exceptions import YTDimensionalityError
+
+class DatasetAccess(object):
+    _all_data = None
+    def __init__(self, ds):
+        self.ds = weakref.proxy(ds)
+
+    @property
+    def all_data(self):
+        if self._all_data is None:
+            self._all_data = self.ds.all_data()
+        return self._all_data
+
+    def __getitem__(self, item):
+        # At first, we will only implement this as accepting a slice that is
+        # (optionally) unitful corresponding to a specific set of coordinates
+        # that result in a rectangular prism or a slice.
+        if isinstance(item, types.StringTypes):
+            # This is some field; we will instead pass this back to the
+            # all_data object.
+            return self.all_data[item]
+        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
+            return self.all_data[item]
+        if len(item) != self.ds.dimensionality:
+            # Not the right specification, and we don't want to do anything
+            # implicitly.
+            raise YTDimensionalityError(len(item), self.ds.dimensionality)
+        if self.ds.dimensionality != 3:
+            # We'll pass on this for the time being.
+            raise RuntimeError
+
+        # OK, now we need to look at our slices.  How many are a specific
+        # coordinate?
+        
+        if not all(isinstance(v, slice) for v in item):
+            return self._create_slice(item)
+        else:
+            return self._create_region(item)
+            
+    def _spec_to_value(self, input_tuple):
+        if not isinstance(input_tuple, tuple):
+            return input_tuple
+        v, u = input_tuple
+        value = self.ds.quan(v, u)
+        return value
+
+    def _create_slice(self, slice_tuple):
+        axis = None
+        new_slice = []
+        for ax, v in enumerate(slice_tuple):
+            if not isinstance(v, slice):
+                if axis is not None: raise RuntimeError
+                axis = ax
+                coord = self._spec_to_value(v)
+                new_slice.append(slice(None, None, None))
+            else:
+                new_slice.append(v)
+        # This new slice doesn't need to be a tuple
+        source = self._create_region(new_slice)
+        sl = self.ds.slice(axis, coord, data_source = source)
+        return sl
+
+    def _slice_to_edges(self, ax, val):
+        if val.start is None:
+            l = self.ds.domain_left_edge[ax]
+        else:
+            l = self._spec_to_value(val.start)
+        if val.stop is None:
+            r = self.ds.domain_right_edge[ax]
+        else:
+            r = self._spec_to_value(val.stop)
+        if val.step is not None:
+            raise NotImplementedError("Step not implemented.")
+        if r < l:
+            raise RuntimeError
+        return l, r
+
+    def _create_region(self, bounds_tuple):
+        left_edge = []
+        right_edge = []
+        for ax, b in enumerate(bounds_tuple):
+            l, r = self._slice_to_edges(ax, b)
+            left_edge.append(l)
+            right_edge.append(r)
+        center = [ (l + r)/2.0 for l, r in zip(left_edge, right_edge)]
+        return self.ds.region(center, left_edge, right_edge)

diff -r f818f29712491ce9f597decaea69297a06603393 -r 0e2b4a16917418247c7935b0574e343a9683ec8f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -48,6 +48,8 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+from yt.data_objects.dataset_access import \
+    DatasetAccess
 
 from yt.geometry.coordinates.api import \
     CoordinateHandler, \
@@ -161,6 +163,7 @@
         self.file_style = file_style
         self.conversion_factors = {}
         self.parameters = {}
+        self.d = DatasetAccess(self)
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}

diff -r f818f29712491ce9f597decaea69297a06603393 -r 0e2b4a16917418247c7935b0574e343a9683ec8f yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -510,3 +510,12 @@
 
     def __str__(self):
         return self.message
+
+class YTDimensionalityError(YTException):
+    def __init__(self, wrong, right):
+        self.wrong = wrong
+        self.right = right
+
+    def __str__(self):
+        return 'Dimensionality specified was %s but we need %s' % (
+            self.wrong, self.right)
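
For context, a minimal usage sketch of the accessor introduced above (a
sketch only; it mirrors the tests added in the next changeset and uses the
fake_amr_ds helper from yt.testing, with an illustrative field name):

    from yt.testing import fake_amr_ds

    ds = fake_amr_ds(fields=["density"])
    # A rectangular prism selected by (optionally unitful) coordinate ranges:
    reg = ds.d[0.2:0.3, 0.4:0.6, :]
    # A single coordinate along one axis yields an axis-aligned slice there:
    sl = ds.d[:, :, (0.25, 'cm')]
    # A bare field name (or a (ftype, fname) tuple) falls through to all_data():
    rho = ds.d["density"]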


https://bitbucket.org/yt_analysis/yt/commits/28ee9e0c1dca/
Changeset:   28ee9e0c1dca
Branch:      yt
User:        MatthewTurk
Date:        2015-09-19 19:41:02+00:00
Summary:     Add tests for dataset_access.
Affected #:  1 file

diff -r 0e2b4a16917418247c7935b0574e343a9683ec8f -r 28ee9e0c1dca863df019036e38ab6f4f624fccff yt/data_objects/tests/test_dataset_access.py
--- /dev/null
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -0,0 +1,34 @@
+from yt.testing import fake_amr_ds, assert_equal
+
+# This will test the "dataset access" method.
+
+def test_region_from_d():
+    ds = fake_amr_ds(fields=["density"])
+    # We'll do a couple here
+
+    # First, no string units
+    reg1 = ds.d[0.2:0.3,0.4:0.6,:]
+    reg2 = ds.region([0.25, 0.5, 0.5], [0.2, 0.4, 0.0], [0.3, 0.6, 1.0])
+    yield assert_equal, reg1["density"], reg2["density"]
+
+    # Now, string units in some -- 1.0 == cm
+    reg1 = ds.d[(0.1, 'cm'):(0.5, 'cm'), :, (0.25, 'cm'): (0.35, 'cm')]
+    reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
+    yield assert_equal, reg1["density"], reg2["density"]
+
+    # And, lots of : usage!
+    reg1 = ds.d[:, :, :]
+    reg2 = ds.all_data()
+    yield assert_equal, reg1["density"], reg2["density"]
+
+def test_accessing_all_data():
+    # This will test first that we can access all_data, and next that we can
+    # access it multiple times and get the *same object*.
+    ds = fake_amr_ds(fields=["density"])
+    dd = ds.all_data()
+    yield assert_equal, ds.d["density"], dd["density"]
+    # Now let's assert that it's the same object
+    rho = ds.d["density"]
+    rho *= 2.0
+    yield assert_equal, dd["density"]*2.0, ds.d["density"]
+    yield assert_equal, dd["gas", "density"]*2.0, ds.d["gas", "density"]


https://bitbucket.org/yt_analysis/yt/commits/63d64d341a31/
Changeset:   63d64d341a31
Branch:      yt
User:        samskillman
Date:        2015-09-20 00:43:46+00:00
Summary:     Add implementations of .sum() and .mean() on YTDataContainer
Affected #:  2 files

diff -r f818f29712491ce9f597decaea69297a06603393 -r 63d64d341a31736bd957a141f671dcafa9173ebe yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -485,6 +485,22 @@
         else:
             data_collection.append(gdata)
 
+    # Numpy-like Operations
+    def mean(self, field, axis=None, weight='ones'):
+        if axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self, weight_field=None)
+        elif axis is None:
+            if weight is None:
+                r = self.quantities.total_quantity(field)
+            else:
+                r = self.quantities.weighted_average_quantity(field, weight)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
+
+    def sum(self, field, axis=None):
+        return self.mean(field, axis=axis, weight=None)
+
     @property
     def _hash(self):
         s = "%s" % self

diff -r f818f29712491ce9f597decaea69297a06603393 -r 63d64d341a31736bd957a141f671dcafa9173ebe yt/data_objects/tests/test_numpy_ops.py
--- /dev/null
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -0,0 +1,50 @@
+from yt.testing import fake_random_ds, assert_equal
+import numpy as np
+
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_mean_and_sum():
+    for nprocs in [1]:
+        ds = fake_random_ds(16, nprocs=nprocs,
+                            fields=("density",))
+        ad = ds.all_data()
+
+        # Sums
+        q = ad.sum('density')
+
+        q1 = ad.quantities.total_quantity('density')
+
+        yield assert_equal, q, q1
+
+        q2 = ad.mean('density', weight=None)
+
+        yield assert_equal, q, q2
+
+        # Weighted Averages
+        w = ad.mean("density")
+
+        w1 = ad.quantities.weighted_average_quantity('density', 'ones')
+
+        yield assert_equal, w, w1
+
+        w = ad.mean("density", weight="density")
+
+        w1 = ad.quantities.weighted_average_quantity('density', 'density')
+
+        yield assert_equal, w, w1
+
+        # Projections
+        p = ad.sum('density', axis=0)
+
+        p1 = ds.proj('density', 0, data_source=ad)
+
+        yield assert_equal, p['density'], p1['density']
+
+
+if __name__ == "__main__":
+    for args in test_mean_and_sum():
+        args[0](*args[1:])
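
A brief usage sketch of the operations added above (illustrative only; the
dataset comes from the yt.testing helper used in the test file):

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16, fields=("density",))
    ad = ds.all_data()
    avg = ad.mean("density")                     # weighted_average_quantity with weight 'ones'
    avg2 = ad.mean("density", weight="density")  # density-weighted average
    tot = ad.sum("density")                      # mean(..., weight=None) -> total_quantity
    prj = ad.sum("density", axis=0)              # projection along the x axis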


https://bitbucket.org/yt_analysis/yt/commits/7a1eeab224ab/
Changeset:   7a1eeab224ab
Branch:      yt
User:        MatthewTurk
Date:        2015-09-19 19:43:38+00:00
Summary:     Merging with Sam
Affected #:  2 files

diff -r 28ee9e0c1dca863df019036e38ab6f4f624fccff -r 7a1eeab224ab8837909af98542dfa01b470bff44 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -485,6 +485,22 @@
         else:
             data_collection.append(gdata)
 
+    # Numpy-like Operations
+    def mean(self, field, axis=None, weight='ones'):
+        if axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self, weight_field=None)
+        elif axis is None:
+            if weight is None:
+                r = self.quantities.total_quantity(field)
+            else:
+                r = self.quantities.weighted_average_quantity(field, weight)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
+
+    def sum(self, field, axis=None):
+        return self.mean(field, axis=axis, weight=None)
+
     @property
     def _hash(self):
         s = "%s" % self

diff -r 28ee9e0c1dca863df019036e38ab6f4f624fccff -r 7a1eeab224ab8837909af98542dfa01b470bff44 yt/data_objects/tests/test_numpy_ops.py
--- /dev/null
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -0,0 +1,50 @@
+from yt.testing import fake_random_ds, assert_equal
+import numpy as np
+
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_mean_and_sum():
+    for nprocs in [1]:
+        ds = fake_random_ds(16, nprocs=nprocs,
+                            fields=("density",))
+        ad = ds.all_data()
+
+        # Sums
+        q = ad.sum('density')
+
+        q1 = ad.quantities.total_quantity('density')
+
+        yield assert_equal, q, q1
+
+        q2 = ad.mean('density', weight=None)
+
+        yield assert_equal, q, q2
+
+        # Weighted Averages
+        w = ad.mean("density")
+
+        w1 = ad.quantities.weighted_average_quantity('density', 'ones')
+
+        yield assert_equal, w, w1
+
+        w = ad.mean("density", weight="density")
+
+        w1 = ad.quantities.weighted_average_quantity('density', 'density')
+
+        yield assert_equal, w, w1
+
+        # Projections
+        p = ad.sum('density', axis=0)
+
+        p1 = ds.proj('density', 0, data_source=ad)
+
+        yield assert_equal, p['density'], p1['density']
+
+
+if __name__ == "__main__":
+    for args in test_mean_and_sum():
+        args[0](*args[1:])


https://bitbucket.org/yt_analysis/yt/commits/9b3840e251cc/
Changeset:   9b3840e251cc
Branch:      yt
User:        MatthewTurk
Date:        2015-09-19 19:53:58+00:00
Summary:     Add some tests, fix some bugs.
Affected #:  3 files

diff -r 7a1eeab224ab8837909af98542dfa01b470bff44 -r 9b3840e251cced5fc3ef67685864b0db4b7c4550 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -488,7 +488,7 @@
     # Numpy-like Operations
     def mean(self, field, axis=None, weight='ones'):
         if axis in self.ds.coordinates.axis_name:
-            r = self.ds.proj(field, axis, data_source=self, weight_field=None)
+            r = self.ds.proj(field, axis, data_source=self, weight_field=weight)
         elif axis is None:
             if weight is None:
                 r = self.quantities.total_quantity(field)

diff -r 7a1eeab224ab8837909af98542dfa01b470bff44 -r 9b3840e251cced5fc3ef67685864b0db4b7c4550 yt/data_objects/dataset_access.py
--- a/yt/data_objects/dataset_access.py
+++ b/yt/data_objects/dataset_access.py
@@ -57,7 +57,8 @@
             
     def _spec_to_value(self, input_tuple):
         if not isinstance(input_tuple, tuple):
-            return input_tuple
+            # We now assume that it's in code_length
+            return self.ds.quan(input_tuple, 'code_length')
         v, u = input_tuple
         value = self.ds.quan(v, u)
         return value

diff -r 7a1eeab224ab8837909af98542dfa01b470bff44 -r 9b3840e251cced5fc3ef67685864b0db4b7c4550 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -16,6 +16,11 @@
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
     yield assert_equal, reg1["density"], reg2["density"]
 
+    # Now, string units in some -- 1.0 == cm
+    reg1 = ds.d[(0.1, 'cm'):(0.5, 'cm'), :, 0.25:0.35]
+    reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
+    yield assert_equal, reg1["density"], reg2["density"]
+
     # And, lots of : usage!
     reg1 = ds.d[:, :, :]
     reg2 = ds.all_data()


https://bitbucket.org/yt_analysis/yt/commits/8a743d5387cb/
Changeset:   8a743d5387cb
Branch:      yt
User:        MatthewTurk
Date:        2015-09-21 19:02:23+00:00
Summary:     Swap out some tests for clarity.
Affected #:  1 file

diff -r 9b3840e251cced5fc3ef67685864b0db4b7c4550 -r 8a743d5387cb0a02eb15be08251ae3aae887f518 yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -20,10 +20,6 @@
 
         yield assert_equal, q, q1
 
-        q2 = ad.mean('density', weight=None)
-
-        yield assert_equal, q, q2
-
         # Weighted Averages
         w = ad.mean("density")
 
@@ -44,6 +40,11 @@
 
         yield assert_equal, p['density'], p1['density']
 
+        # Check by axis-name
+        p = ad.sum('density', axis='x')
+
+        yield assert_equal, p['density'], p1['density']
+
 
 if __name__ == "__main__":
     for args in test_mean_and_sum():


https://bitbucket.org/yt_analysis/yt/commits/2f55ea1cda8c/
Changeset:   2f55ea1cda8c
Branch:      yt
User:        MatthewTurk
Date:        2015-09-22 00:34:16+00:00
Summary:     Adding stubs and changing behavior as per YTEP discussion.
Affected #:  3 files

diff -r 8a743d5387cb0a02eb15be08251ae3aae887f518 -r 2f55ea1cda8ca4886250797e8a6c533e62bf0eb5 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -455,6 +455,12 @@
         pw = self._get_pw(fields, center, width, origin, 'Projection')
         return pw
 
+    def plot(self, fields=None):
+        pw = self._get_pw(fields, self.ds.domain_center, self.ds.domain_width,
+                    'native', 'Projection')
+        pw.show()
+        return pw
+
 class YTCoveringGridBase(YTSelectionContainer3D):
     """A 3D region with all data extracted to a single, specified
     resolution.  Left edge should align with a cell boundary, but

diff -r 8a743d5387cb0a02eb15be08251ae3aae887f518 -r 2f55ea1cda8ca4886250797e8a6c533e62bf0eb5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -485,6 +485,27 @@
         else:
             data_collection.append(gdata)
 
+    def argmax(self, field, axis=None):
+        raise NotImplementedError
+
+    def argmin(self, field, axis=None):
+        raise NotImplementedError
+
+    def max(self, field, axis=None):
+        raise NotImplementedError
+
+    def min(self, field, axis=None):
+        raise NotImplementedError
+
+    def std(self, field, weight=None):
+        raise NotImplementedError
+
+    def ptp(self, field):
+        raise NotImplementedError
+
+    def hist(self, field, weight = None, bins = None):
+        raise NotImplementedError
+
     # Numpy-like Operations
     def mean(self, field, axis=None, weight='ones'):
         if axis in self.ds.coordinates.axis_name:
@@ -499,7 +520,23 @@
         return r
 
     def sum(self, field, axis=None):
-        return self.mean(field, axis=axis, weight=None)
+        # Because we're using ``sum`` to specifically mean a sum or a
+        # projection with the method="sum", we do not utilize the ``mean``
+        # function.
+        if axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self, method="sum")
+        elif axis is None:
+            r = self.quantities.total_quantity(field)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
+
+    def integrate(self, field, axis=None):
+        if axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
 
     @property
     def _hash(self):

diff -r 8a743d5387cb0a02eb15be08251ae3aae887f518 -r 2f55ea1cda8ca4886250797e8a6c533e62bf0eb5 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -295,6 +295,12 @@
         pw = self._get_pw(fields, center, width, origin, 'Slice')
         return pw
 
+    def plot(self, fields=None):
+        pw = self._get_pw(fields, self.ds.domain_center, self.ds.domain_width,
+                    'native', 'Slice')
+        pw.show()
+        return pw
+
 class YTCuttingPlaneBase(YTSelectionContainer2D):
     """
     This is a data object corresponding to an oblique slice through the
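
To make the distinction being introduced explicit, a hedged sketch of the
three operations on a data container (field name illustrative):

    ad = ds.all_data()
    ad.sum("density")                   # scalar total via total_quantity
    ad.sum("density", axis="x")         # method="sum" projection: unweighted sum along x
    ad.integrate("density", axis="x")   # standard projection: line integral along x
    ad.mean("density", axis="x")        # projection weighted by 'ones', i.e. an average along x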


https://bitbucket.org/yt_analysis/yt/commits/93a0e66a54c4/
Changeset:   93a0e66a54c4
Branch:      yt
User:        MatthewTurk
Date:        2015-09-22 02:52:23+00:00
Summary:     Cache if we access ds.d[:,:,:]
Affected #:  1 file

diff -r 2f55ea1cda8ca4886250797e8a6c533e62bf0eb5 -r 93a0e66a54c4ed652d7f16bc445968840d2788a7 yt/data_objects/dataset_access.py
--- a/yt/data_objects/dataset_access.py
+++ b/yt/data_objects/dataset_access.py
@@ -53,6 +53,8 @@
         if not all(isinstance(v, slice) for v in item):
             return self._create_slice(item)
         else:
+            if all(s.start is s.stop is s.step is None for s in item):
+                return self.all_data
             return self._create_region(item)
             
     def _spec_to_value(self, input_tuple):
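
In other words, after this change the fully unbounded case returns the cached
all_data container, so (a one-line sketch):

    ds.d[:, :, :] is ds.d[:, :, :]    # now True; both return the same all_data object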


https://bitbucket.org/yt_analysis/yt/commits/04306ed99bf6/
Changeset:   04306ed99bf6
Branch:      yt
User:        MatthewTurk
Date:        2015-09-22 16:56:34+00:00
Summary:     Setting up sane defaults for slices and projections when plotted.
Affected #:  3 files

diff -r 93a0e66a54c4ed652d7f16bc445968840d2788a7 -r 04306ed99bf648ff7c6d53a52b45904ee2a072a0 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -456,8 +456,21 @@
         return pw
 
     def plot(self, fields=None):
-        pw = self._get_pw(fields, self.ds.domain_center, self.ds.domain_width,
-                    'native', 'Projection')
+        if hasattr(self.data_source, "left_edge") and \
+            hasattr(self.data_source, "right_edge"):
+            left_edge = self.data_source.left_edge
+            right_edge = self.data_source.right_edge
+            center = (left_edge + right_edge)/2.0
+            width = right_edge - left_edge
+            xax = self.ds.coordinates.x_axis[self.axis]
+            yax = self.ds.coordinates.y_axis[self.axis]
+            lx, rx = left_edge[xax], right_edge[xax]
+            ly, ry = left_edge[yax], right_edge[yax]
+            width = (rx-lx), (ry-ly)
+        else:
+            width = self.ds.domain_width
+            center = self.ds.domain_center
+        pw = self._get_pw(fields, center, width, 'native', 'Projection')
         pw.show()
         return pw
 

diff -r 93a0e66a54c4ed652d7f16bc445968840d2788a7 -r 04306ed99bf648ff7c6d53a52b45904ee2a072a0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -485,6 +485,7 @@
         else:
             data_collection.append(gdata)
 
+    # Numpy-like Operations
     def argmax(self, field, axis=None):
         raise NotImplementedError
 
@@ -506,7 +507,6 @@
     def hist(self, field, weight = None, bins = None):
         raise NotImplementedError
 
-    # Numpy-like Operations
     def mean(self, field, axis=None, weight='ones'):
         if axis in self.ds.coordinates.axis_name:
             r = self.ds.proj(field, axis, data_source=self, weight_field=weight)
@@ -524,7 +524,8 @@
         # projection with the method="sum", we do not utilize the ``mean``
         # function.
         if axis in self.ds.coordinates.axis_name:
-            r = self.ds.proj(field, axis, data_source=self, method="sum")
+            with self._field_parameter_state({'axis':axis}):
+                r = self.ds.proj(field, axis, data_source=self, method="sum")
         elif axis is None:
             r = self.quantities.total_quantity(field)
         else:

diff -r 93a0e66a54c4ed652d7f16bc445968840d2788a7 -r 04306ed99bf648ff7c6d53a52b45904ee2a072a0 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -296,8 +296,21 @@
         return pw
 
     def plot(self, fields=None):
-        pw = self._get_pw(fields, self.ds.domain_center, self.ds.domain_width,
-                    'native', 'Slice')
+        if hasattr(self._data_source, "left_edge") and \
+            hasattr(self._data_source, "right_edge"):
+            left_edge = self._data_source.left_edge
+            right_edge = self._data_source.right_edge
+            center = (left_edge + right_edge)/2.0
+            width = right_edge - left_edge
+            xax = self.ds.coordinates.x_axis[self.axis]
+            yax = self.ds.coordinates.y_axis[self.axis]
+            lx, rx = left_edge[xax], right_edge[xax]
+            ly, ry = left_edge[yax], right_edge[yax]
+            width = (rx-lx), (ry-ly)
+        else:
+            width = self.ds.domain_width
+            center = self.ds.domain_center
+        pw = self._get_pw(fields, center, width, 'native', 'Slice')
         pw.show()
         return pw
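
A hedged sketch of the workflow these defaults serve (field name and bounds
are illustrative):

    # A slice built from a bounded region; plot() now centers and sizes the
    # PlotWindow on that region's edges instead of the full domain.
    sl = ds.d[0.5, 0.25:0.75, 0.25:0.75]
    pw = sl.plot("density")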
 


https://bitbucket.org/yt_analysis/yt/commits/df9387eb7c20/
Changeset:   df9387eb7c20
Branch:      yt
User:        MatthewTurk
Date:        2015-09-23 14:22:48+00:00
Summary:     Implement max and min, fix bugs, and add tests.
Affected #:  2 files

diff -r 04306ed99bf648ff7c6d53a52b45904ee2a072a0 -r df9387eb7c201cb0b3dee3e05527bd695cf2d5bb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -492,11 +492,49 @@
     def argmin(self, field, axis=None):
         raise NotImplementedError
 
+    def _compute_extrema(self, field):
+        if self._extrema_cache is None:
+            self._extrema_cache = {}
+        if field not in self._extrema_cache:
+            # Note we still need to call extrema for each field, as of right
+            # now
+            mi, ma = self.quantities.extrema(field)
+            self._extrema_cache[field] = (mi, ma)
+        return self._extrema_cache[field]
+
+    _extrema_cache = None
     def max(self, field, axis=None):
-        raise NotImplementedError
+        if axis is None:
+            rv = ()
+            fields = ensure_list(field)
+            for f in fields:
+                rv += (self._compute_extrema(f)[1],)
+            if len(fields) == 1:
+                return rv[0]
+            else:
+                return rv
+        elif axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self, method="mip")
+            return r
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
 
     def min(self, field, axis=None):
-        raise NotImplementedError
+        if axis is None:
+            rv = ()
+            fields = ensure_list(field)
+            for f in ensure_list(fields):
+                rv += (self._compute_extrema(f)[0],)
+            if len(fields) == 1:
+                return rv[0]
+            else:
+                return rv
+            return rv
+        elif axis in self.ds.coordinates.axis_name:
+            raise NotImplementedError("Minimum intensity projection not"
+                                      " implemented.")
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
 
     def std(self, field, weight=None):
         raise NotImplementedError

diff -r 04306ed99bf648ff7c6d53a52b45904ee2a072a0 -r df9387eb7c201cb0b3dee3e05527bd695cf2d5bb yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -1,4 +1,4 @@
-from yt.testing import fake_random_ds, assert_equal
+from yt.testing import fake_random_ds, fake_amr_ds, assert_equal
 import numpy as np
 
 
@@ -7,10 +7,12 @@
     ytcfg["yt", "__withintesting"] = "True"
 
 
-def test_mean_and_sum():
-    for nprocs in [1]:
-        ds = fake_random_ds(16, nprocs=nprocs,
-                            fields=("density",))
+def test_mean_sum_integrate():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density",))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs, fields=("density",))
         ad = ds.all_data()
 
         # Sums
@@ -36,7 +38,7 @@
         # Projections
         p = ad.sum('density', axis=0)
 
-        p1 = ds.proj('density', 0, data_source=ad)
+        p1 = ds.proj('density', 0, data_source=ad, method="sum")
 
         yield assert_equal, p['density'], p1['density']
 
@@ -45,6 +47,50 @@
 
         yield assert_equal, p['density'], p1['density']
 
+        # Now we check proper projections
+        p = ad.integrate("density", axis=0)
+        p1 = ds.proj("density", 0, data_source=ad)
+
+        yield assert_equal, p['density'], p1['density']
+
+        # Check by axis-name
+        p = ad.integrate('density', axis='x')
+
+        yield assert_equal, p['density'], p1['density']
+
+def test_min_max():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density","temperature"))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs,
+                fields=("density","temperature"))
+
+        ad = ds.all_data()
+
+        q = ad.min("density").v
+        yield assert_equal, q, ad["density"].min()
+
+        q = ad.max("density").v
+        yield assert_equal, q, ad["density"].max()
+
+        p = ad.max("density", axis=1)
+        p1 = ds.proj("density", 1, data_source=ad, method="mip")
+        yield assert_equal, p["density"], p1["density"]
+
+        p = ad.max("density", axis="y")
+        p1 = ds.proj("density", 1, data_source=ad, method="mip")
+        yield assert_equal, p["density"], p1["density"]
+
+        # Test that we can get multiple in a single pass
+
+        qrho, qtemp = ad.max(["density", "temperature"])
+        yield assert_equal, qrho, ad["density"].max()
+        yield assert_equal, qtemp, ad["temperature"].max()
+
+        qrho, qtemp = ad.min(["density", "temperature"])
+        yield assert_equal, qrho, ad["density"].min()
+        yield assert_equal, qtemp, ad["temperature"].min()
 
 if __name__ == "__main__":
     for args in test_mean_and_sum():
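
One detail worth noting in the implementation above: extrema are cached per
field on the container, so asking for both bounds only runs the derived
quantity once (a sketch with an illustrative field name):

    lo = ad.min("density")
    hi = ad.max("density")    # served from the cached (min, max) pair, no second pass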


https://bitbucket.org/yt_analysis/yt/commits/f9124f173e2f/
Changeset:   f9124f173e2f
Branch:      yt
User:        MatthewTurk
Date:        2015-09-29 02:04:44+00:00
Summary:     Add fill_region_float and implement fluid fields for arbitrary_grid.

This is a direct port of the pixelizer code, but in 3D and without
x/y hardcoded into the variable names.

Implemented slicing for arbitrary grids, too.
Affected #:  3 files

diff -r df9387eb7c201cb0b3dee3e05527bd695cf2d5bb -r f9124f173e2f985cc4bd8a943cbd01937e384e27 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -35,7 +35,7 @@
 from yt.utilities.lib.Interpolators import \
     ghost_zone_interpolate
 from yt.utilities.lib.misc_utilities import \
-    fill_region
+    fill_region, fill_region_float
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.data_point_utilities import CombineGrids,\
@@ -803,7 +803,19 @@
         self._setup_data_source()
 
     def _fill_fields(self, fields):
-        raise NotImplementedError
+        fields = [f for f in fields if f not in self.field_data]
+        if len(fields) == 0: return
+        assert(len(fields) == 1)
+        field = fields[0]
+        dest = np.zeros(self.ActiveDimensions, dtype="float64")
+        for chunk in self._data_source.chunks(fields, "io"):
+            fill_region_float(chunk.fcoords, chunk.fwidth, chunk[field],
+                              self.left_edge, self.right_edge, dest, 1,
+                              self.ds.domain_width,
+                              int(any(self.ds.periodicity)))
+        fi = self.ds._get_field_info(field)
+        self[field] = self.ds.arr(dest, fi.units)
+        
 
 class LevelState(object):
     current_dx = None

diff -r df9387eb7c201cb0b3dee3e05527bd695cf2d5bb -r f9124f173e2f985cc4bd8a943cbd01937e384e27 yt/data_objects/dataset_access.py
--- a/yt/data_objects/dataset_access.py
+++ b/yt/data_objects/dataset_access.py
@@ -90,8 +90,6 @@
             r = self.ds.domain_right_edge[ax]
         else:
             r = self._spec_to_value(val.stop)
-        if val.step is not None:
-            raise NotImplementedError("Step not implemented.")
         if r < l:
             raise RuntimeError
         return l, r
@@ -99,9 +97,13 @@
     def _create_region(self, bounds_tuple):
         left_edge = []
         right_edge = []
+        dims = []
         for ax, b in enumerate(bounds_tuple):
             l, r = self._slice_to_edges(ax, b)
             left_edge.append(l)
             right_edge.append(r)
+            dims.append(getattr(b.step, "imag", None))
         center = [ (l + r)/2.0 for l, r in zip(left_edge, right_edge)]
+        if all(d is not None for d in dims):
+            return self.ds.arbitrary_grid(left_edge, right_edge, dims)
         return self.ds.region(center, left_edge, right_edge)

diff -r df9387eb7c201cb0b3dee3e05527bd695cf2d5bb -r f9124f173e2f985cc4bd8a943cbd01937e384e27 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -19,6 +19,7 @@
 cimport cython
 cimport libc.math as math
 from fp_utils cimport fmin, fmax, i64min, i64max
+from yt.geometry.selection_routines cimport _ensure_code
 
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
@@ -679,3 +680,110 @@
                                         ifield[i]
                                     tot += 1
     return tot
+
+ at cython.cdivision(True)
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+def fill_region_float(np.ndarray[np.float64_t, ndim=2] fcoords,
+                      np.ndarray[np.float64_t, ndim=2] fwidth,
+                      np.ndarray[np.float64_t, ndim=1] data,
+                      np.ndarray[np.float64_t, ndim=1] box_left_edge,
+                      np.ndarray[np.float64_t, ndim=1] box_right_edge,
+                      np.ndarray[np.float64_t, ndim=3] dest,
+                      int antialias = 1,
+                      period = None,
+                      int check_period = 1):
+    cdef np.float64_t ds_period[3]
+    cdef np.float64_t box_dds[3], box_idds[3], width[3], LE[3], RE[3]
+    cdef np.int64_t i, j, k, p, xi, yi, ji
+    cdef np.int64_t dims[3], ld[3], ud[3]
+    cdef np.float64_t overlap[3]
+    cdef np.float64_t dsp, osp[3], odsp[3], sp[3], lfd[3], ufd[3]
+    # These are the temp vars we get from the arrays
+    # Some periodicity helpers
+    cdef int diter[3][2]
+    cdef np.float64_t diterv[3][2]
+    if period is not None:
+        for i in range(3):
+            ds_period[i] = period[i]
+    else:
+        ds_period[0] = ds_period[1] = ds_period[2]
+    box_left_edge = _ensure_code(box_left_edge)
+    box_right_edge = _ensure_code(box_right_edge)
+    _ensure_code(fcoords)
+    _ensure_code(fwidth)
+    for i in range(3):
+        LE[i] = box_left_edge[i]
+        RE[i] = box_right_edge[i]
+        width[i] = RE[i] - LE[i]
+        dims[i] = dest.shape[i]
+        box_dds[i] = width[i] / dims[i]
+        box_idds[i] = 1.0/box_dds[i]
+        diter[i][0] = diter[i][1] = 0
+        diterv[i][0] = diterv[i][1] = 0.0
+        overlap[i] = 1.0 
+    with nogil:
+        for p in range(fcoords.shape[0]):
+            for i in range(3):
+               diter[i][1] = 999
+               diterv[i][0] = 0.0
+               odsp[i] = fwidth[p,i]*0.5
+               osp[i] = fcoords[p,i]+0.5*odsp[i]
+               overlap[i] = 1.0
+            dsp = data[p]
+            if check_period == 1:
+                for i in range(3):
+                    if (osp[i] - odsp[i] < LE[i]):
+                        diter[i][1] = +1
+                        diterv[i][1] = ds_period[i]
+                    elif (osp[i] + odsp[i] > RE[i]):
+                        diter[i][1] = -1
+                        diterv[i][1] = -ds_period[i]
+            for xi in range(2):
+                if diter[0][xi] == 999: continue
+                sp[0] = osp[0] + diterv[0][xi]
+                if (sp[0] + odsp[0] < LE[0]) or (sp[0] - odsp[0] > RE[0]): continue
+                for yi in range(2):
+                    if diter[1][yi] == 999: continue
+                    sp[1] = osp[1] + diterv[1][yi]
+                    if (sp[1] + odsp[1] < LE[1]) or (sp[1] - odsp[1] > RE[1]): continue
+                    for zi in range(2):
+                        if diter[2][zi] == 999: continue
+                        sp[2] = osp[2] + diterv[2][yi]
+                        if (sp[2] + odsp[2] < LE[2]) or (sp[2] - odsp[2] > RE[2]): continue
+                        for i in range(3):
+                            ld[i] = <np.int64_t> fmax(((sp[i]-odsp[i]-LE[i])*box_idds[i]),0)
+                            # NOTE: This is a different way of doing it than in the C
+                            # routines.  In C, we were implicitly casting the
+                            # initialization to int, but *not* the conditional, which
+                            # was allowed an extra value:
+                            #     for(j=lc;j<rc;j++)
+                            # here, when assigning lc (double) to j (int) it got
+                            # truncated, but no similar truncation was done in the
+                            # comparison of j to rc (double).  So give ourselves a
+                            # bonus row and bonus column here.
+                            ud[i] = <np.int64_t> fmin(((sp[i]+odsp[i]-LE[i])*box_idds[i] + 1), dims[i])
+                        for i in range(ld[0], ud[0]):
+                            lfd[0] = box_dds[0] * i + LE[0]
+                            ufd[0] = box_dds[0] * (i + 1) + LE[0]
+                            if antialias == 1:
+                                overlap[0] = ((fmin(ufd[0], sp[0]+odsp[0])
+                                           - fmax(lfd[0], (sp[0]-odsp[0])))*box_idds[0])
+                            if overlap[0] < 0.0: continue
+                            for j in range(ld[1], ud[1]):
+                                lfd[1] = box_dds[1] * j + LE[1]
+                                ufd[1] = box_dds[1] * (j + 1) + LE[1]
+                                if antialias == 1:
+                                    overlap[1] = ((fmin(ufd[1], sp[1]+odsp[1])
+                                               - fmax(lfd[1], (sp[1]-odsp[1])))*box_idds[1])
+                                if overlap[1] < 0.0: continue
+                            for k in range(ld[2], ud[2]):
+                                lfd[2] = box_dds[2] * k + LE[2]
+                                ufd[2] = box_dds[2] * (k + 1) + LE[2]
+                                if antialias == 1:
+                                    overlap[2] = ((fmin(ufd[2], sp[2]+odsp[2])
+                                               - fmax(lfd[2], (sp[2]-odsp[2])))*box_idds[2])
+                                    if overlap[2] < 0.0: continue
+                                    dest[i,j,k] += dsp * (overlap[0]*overlap[1]*overlap[2])
+                                else:
+                                    dest[i,j,k] = dsp
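
A hedged sketch of what this enables (resolution and field name are
illustrative):

    # Fluid fields can now be deposited onto a fixed-resolution arbitrary grid:
    ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [64, 64, 64])
    rho = ag["density"]    # filled via fill_region_float
    # Through the accessor, an imaginary slice step requests the same thing:
    ag2 = ds.d[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]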


https://bitbucket.org/yt_analysis/yt/commits/910eb7407d01/
Changeset:   910eb7407d01
Branch:      yt
User:        MatthewTurk
Date:        2015-09-29 20:36:08+00:00
Summary:     Fix errors in arbitrary_grid pixelization and add tests.
Affected #:  2 files

diff -r f9124f173e2f985cc4bd8a943cbd01937e384e27 -r 910eb7407d01937ed2ae39438a01c2efa892ae45 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -106,3 +106,14 @@
             deposited_mass = obj["deposit", "all_density"].sum() * volume
 
             yield assert_equal, deposited_mass, ds.quan(1.0, 'g')
+
+    # Test that we get identical results to the covering grid for unigrid data.
+    # Testing AMR data is much harder.
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(32, nprocs = nprocs)
+        for ref_level in [0, 1, 2]:
+            cg = ds.covering_grid(ref_level, [0.0, 0.0, 0.0],
+                    2**ref_level * ds.domain_dimensions)
+            ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
+                    2**ref_level * ds.domain_dimensions)
+            yield assert_almost_equal, cg["density"], ag["density"]

diff -r f9124f173e2f985cc4bd8a943cbd01937e384e27 -r 910eb7407d01937ed2ae39438a01c2efa892ae45 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -707,7 +707,7 @@
         for i in range(3):
             ds_period[i] = period[i]
     else:
-        ds_period[0] = ds_period[1] = ds_period[2]
+        ds_period[0] = ds_period[1] = ds_period[2] = 0.0
     box_left_edge = _ensure_code(box_left_edge)
     box_right_edge = _ensure_code(box_right_edge)
     _ensure_code(fcoords)
@@ -726,9 +726,8 @@
         for p in range(fcoords.shape[0]):
             for i in range(3):
                diter[i][1] = 999
-               diterv[i][0] = 0.0
                odsp[i] = fwidth[p,i]*0.5
-               osp[i] = fcoords[p,i]+0.5*odsp[i]
+               osp[i] = fcoords[p,i] # already centered
                overlap[i] = 1.0
             dsp = data[p]
             if check_period == 1:
@@ -764,26 +763,26 @@
                             # bonus row and bonus column here.
                             ud[i] = <np.int64_t> fmin(((sp[i]+odsp[i]-LE[i])*box_idds[i] + 1), dims[i])
                         for i in range(ld[0], ud[0]):
-                            lfd[0] = box_dds[0] * i + LE[0]
-                            ufd[0] = box_dds[0] * (i + 1) + LE[0]
                             if antialias == 1:
+                                lfd[0] = box_dds[0] * i + LE[0]
+                                ufd[0] = box_dds[0] * (i + 1) + LE[0]
                                 overlap[0] = ((fmin(ufd[0], sp[0]+odsp[0])
                                            - fmax(lfd[0], (sp[0]-odsp[0])))*box_idds[0])
                             if overlap[0] < 0.0: continue
                             for j in range(ld[1], ud[1]):
-                                lfd[1] = box_dds[1] * j + LE[1]
-                                ufd[1] = box_dds[1] * (j + 1) + LE[1]
                                 if antialias == 1:
+                                    lfd[1] = box_dds[1] * j + LE[1]
+                                    ufd[1] = box_dds[1] * (j + 1) + LE[1]
                                     overlap[1] = ((fmin(ufd[1], sp[1]+odsp[1])
                                                - fmax(lfd[1], (sp[1]-odsp[1])))*box_idds[1])
                                 if overlap[1] < 0.0: continue
-                            for k in range(ld[2], ud[2]):
-                                lfd[2] = box_dds[2] * k + LE[2]
-                                ufd[2] = box_dds[2] * (k + 1) + LE[2]
-                                if antialias == 1:
-                                    overlap[2] = ((fmin(ufd[2], sp[2]+odsp[2])
-                                               - fmax(lfd[2], (sp[2]-odsp[2])))*box_idds[2])
-                                    if overlap[2] < 0.0: continue
-                                    dest[i,j,k] += dsp * (overlap[0]*overlap[1]*overlap[2])
-                                else:
-                                    dest[i,j,k] = dsp
+                                for k in range(ld[2], ud[2]):
+                                    if antialias == 1:
+                                        lfd[2] = box_dds[2] * k + LE[2]
+                                        ufd[2] = box_dds[2] * (k + 1) + LE[2]
+                                        overlap[2] = ((fmin(ufd[2], sp[2]+odsp[2])
+                                                   - fmax(lfd[2], (sp[2]-odsp[2])))*box_idds[2])
+                                        if overlap[2] < 0.0: continue
+                                        dest[i,j,k] += dsp * (overlap[0]*overlap[1]*overlap[2])
+                                    else:
+                                        dest[i,j,k] = dsp


https://bitbucket.org/yt_analysis/yt/commits/6089fb142cef/
Changeset:   6089fb142cef
Branch:      yt
User:        MatthewTurk
Date:        2015-10-03 00:12:41+00:00
Summary:     Add a first pass at allowing stream datasets to be created via load()
Affected #:  1 file

diff -r f9124f173e2f985cc4bd8a943cbd01937e384e27 -r 6089fb142cefeb2577a56d6a9f4e2a7703f9eb2c yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -55,12 +55,21 @@
             from yt.data_objects.time_series import DatasetSeries
             ts = DatasetSeries.from_filenames(*args, **kwargs)
             return ts
-        except YTOutputNotIdentified:
+        except (TypeError, YTOutputNotIdentified):
             pass
-        mylog.error("None of the arguments provided to load() is a valid file")
-        mylog.error("Please check that you have used a correct path")
-        raise YTOutputNotIdentified(args, kwargs)
+        # We check if either the first argument is a dict or list, in which
+        # case we try identifying candidates.
+        if len(args) > 0 and isinstance(args[0], (list, dict)):
+            # This fixes issues where it is assumed the first argument is a
+            # file
+            args = ["nonexistent"] + args
+            # Better way to do this is to override the output_type_registry
+        else:
+            mylog.error("None of the arguments provided to load() is a valid file")
+            mylog.error("Please check that you have used a correct path")
+            raise YTOutputNotIdentified(args, kwargs)
     for n, c in output_type_registry.items():
+        print n
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)
 


https://bitbucket.org/yt_analysis/yt/commits/bbd19a403c46/
Changeset:   bbd19a403c46
Branch:      yt
User:        MatthewTurk
Date:        2015-10-03 04:37:18+00:00
Summary:     Use a better system to avoid spurious load errors
Affected #:  1 file

diff -r 6089fb142cefeb2577a56d6a9f4e2a7703f9eb2c -r bbd19a403c466fc515a0b212e7f17ab2b11159b6 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -50,6 +50,7 @@
                     valid_file.append(False)
         else:
             valid_file.append(False)
+    types_to_check = output_type_registry
     if not any(valid_file):
         try:
             from yt.data_objects.time_series import DatasetSeries
@@ -62,13 +63,14 @@
         if len(args) > 0 and isinstance(args[0], (list, dict)):
             # This fixes issues where it is assumed the first argument is a
             # file
-            args = ["nonexistent"] + args
+            types_to_check = dict((n, v) for n, v in
+                    output_type_registry.items() if n.startswith("stream_"))
             # Better way to do this is to override the output_type_registry
         else:
             mylog.error("None of the arguments provided to load() is a valid file")
             mylog.error("Please check that you have used a correct path")
             raise YTOutputNotIdentified(args, kwargs)
-    for n, c in output_type_registry.items():
+    for n, c in types_to_check.items():
         print n
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)


https://bitbucket.org/yt_analysis/yt/commits/19366e378965/
Changeset:   19366e378965
Branch:      yt
User:        MatthewTurk
Date:        2015-10-03 04:38:21+00:00
Summary:     merge
Affected #:  2 files

diff -r bbd19a403c466fc515a0b212e7f17ab2b11159b6 -r 19366e378965226b483861a61b512a7df663e73e yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -106,3 +106,14 @@
             deposited_mass = obj["deposit", "all_density"].sum() * volume
 
             yield assert_equal, deposited_mass, ds.quan(1.0, 'g')
+
+    # Test that we get identical results to the covering grid for unigrid data.
+    # Testing AMR data is much harder.
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(32, nprocs = nprocs)
+        for ref_level in [0, 1, 2]:
+            cg = ds.covering_grid(ref_level, [0.0, 0.0, 0.0],
+                    2**ref_level * ds.domain_dimensions)
+            ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
+                    2**ref_level * ds.domain_dimensions)
+            yield assert_almost_equal, cg["density"], ag["density"]

diff -r bbd19a403c466fc515a0b212e7f17ab2b11159b6 -r 19366e378965226b483861a61b512a7df663e73e yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -707,7 +707,7 @@
         for i in range(3):
             ds_period[i] = period[i]
     else:
-        ds_period[0] = ds_period[1] = ds_period[2]
+        ds_period[0] = ds_period[1] = ds_period[2] = 0.0
     box_left_edge = _ensure_code(box_left_edge)
     box_right_edge = _ensure_code(box_right_edge)
     _ensure_code(fcoords)
@@ -726,9 +726,8 @@
         for p in range(fcoords.shape[0]):
             for i in range(3):
                diter[i][1] = 999
-               diterv[i][0] = 0.0
                odsp[i] = fwidth[p,i]*0.5
-               osp[i] = fcoords[p,i]+0.5*odsp[i]
+               osp[i] = fcoords[p,i] # already centered
                overlap[i] = 1.0
             dsp = data[p]
             if check_period == 1:
@@ -764,26 +763,26 @@
                             # bonus row and bonus column here.
                             ud[i] = <np.int64_t> fmin(((sp[i]+odsp[i]-LE[i])*box_idds[i] + 1), dims[i])
                         for i in range(ld[0], ud[0]):
-                            lfd[0] = box_dds[0] * i + LE[0]
-                            ufd[0] = box_dds[0] * (i + 1) + LE[0]
                             if antialias == 1:
+                                lfd[0] = box_dds[0] * i + LE[0]
+                                ufd[0] = box_dds[0] * (i + 1) + LE[0]
                                 overlap[0] = ((fmin(ufd[0], sp[0]+odsp[0])
                                            - fmax(lfd[0], (sp[0]-odsp[0])))*box_idds[0])
                             if overlap[0] < 0.0: continue
                             for j in range(ld[1], ud[1]):
-                                lfd[1] = box_dds[1] * j + LE[1]
-                                ufd[1] = box_dds[1] * (j + 1) + LE[1]
                                 if antialias == 1:
+                                    lfd[1] = box_dds[1] * j + LE[1]
+                                    ufd[1] = box_dds[1] * (j + 1) + LE[1]
                                     overlap[1] = ((fmin(ufd[1], sp[1]+odsp[1])
                                                - fmax(lfd[1], (sp[1]-odsp[1])))*box_idds[1])
                                 if overlap[1] < 0.0: continue
-                            for k in range(ld[2], ud[2]):
-                                lfd[2] = box_dds[2] * k + LE[2]
-                                ufd[2] = box_dds[2] * (k + 1) + LE[2]
-                                if antialias == 1:
-                                    overlap[2] = ((fmin(ufd[2], sp[2]+odsp[2])
-                                               - fmax(lfd[2], (sp[2]-odsp[2])))*box_idds[2])
-                                    if overlap[2] < 0.0: continue
-                                    dest[i,j,k] += dsp * (overlap[0]*overlap[1]*overlap[2])
-                                else:
-                                    dest[i,j,k] = dsp
+                                for k in range(ld[2], ud[2]):
+                                    if antialias == 1:
+                                        lfd[2] = box_dds[2] * k + LE[2]
+                                        ufd[2] = box_dds[2] * (k + 1) + LE[2]
+                                        overlap[2] = ((fmin(ufd[2], sp[2]+odsp[2])
+                                                   - fmax(lfd[2], (sp[2]-odsp[2])))*box_idds[2])
+                                        if overlap[2] < 0.0: continue
+                                        dest[i,j,k] += dsp * (overlap[0]*overlap[1]*overlap[2])
+                                    else:
+                                        dest[i,j,k] = dsp


https://bitbucket.org/yt_analysis/yt/commits/e3c9aad9aa58/
Changeset:   e3c9aad9aa58
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 23:46:10+00:00
Summary:     Rename .d to .r and DatasetAccess to RegionExpression.
Affected #:  4 files

diff -r 19366e378965226b483861a61b512a7df663e73e -r e3c9aad9aa584653d4b0de3af22498d7fee85866 yt/data_objects/dataset_access.py
--- a/yt/data_objects/dataset_access.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-An object that can live on the dataset to facilitate data access.
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-import weakref
-import types
-
-import yt.units
-from yt.utilities.exceptions import YTDimensionalityError
-
-class DatasetAccess(object):
-    _all_data = None
-    def __init__(self, ds):
-        self.ds = weakref.proxy(ds)
-
-    @property
-    def all_data(self):
-        if self._all_data is None:
-            self._all_data = self.ds.all_data()
-        return self._all_data
-
-    def __getitem__(self, item):
-        # At first, we will only implement this as accepting a slice that is
-        # (optionally) unitful corresponding to a specific set of coordinates
-        # that result in a rectangular prism or a slice.
-        if isinstance(item, types.StringTypes):
-            # This is some field; we will instead pass this back to the
-            # all_data object.
-            return self.all_data[item]
-        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
-            return self.all_data[item]
-        if len(item) != self.ds.dimensionality:
-            # Not the right specification, and we don't want to do anything
-            # implicitly.
-            raise YTDimensionalityError(len(item), self.ds.dimensionality)
-        if self.ds.dimensionality != 3:
-            # We'll pass on this for the time being.
-            raise RuntimeError
-
-        # OK, now we need to look at our slices.  How many are a specific
-        # coordinate?
-        
-        if not all(isinstance(v, slice) for v in item):
-            return self._create_slice(item)
-        else:
-            if all(s.start is s.stop is s.step is None for s in item):
-                return self.all_data
-            return self._create_region(item)
-            
-    def _spec_to_value(self, input_tuple):
-        if not isinstance(input_tuple, tuple):
-            # We now assume that it's in code_length
-            return self.ds.quan(input_tuple, 'code_length')
-        v, u = input_tuple
-        value = self.ds.quan(v, u)
-        return value
-
-    def _create_slice(self, slice_tuple):
-        axis = None
-        new_slice = []
-        for ax, v in enumerate(slice_tuple):
-            if not isinstance(v, slice):
-                if axis is not None: raise RuntimeError
-                axis = ax
-                coord = self._spec_to_value(v)
-                new_slice.append(slice(None, None, None))
-            else:
-                new_slice.append(v)
-        # This new slice doesn't need to be a tuple
-        source = self._create_region(new_slice)
-        sl = self.ds.slice(axis, coord, data_source = source)
-        return sl
-
-    def _slice_to_edges(self, ax, val):
-        if val.start is None:
-            l = self.ds.domain_left_edge[ax]
-        else:
-            l = self._spec_to_value(val.start)
-        if val.stop is None:
-            r = self.ds.domain_right_edge[ax]
-        else:
-            r = self._spec_to_value(val.stop)
-        if r < l:
-            raise RuntimeError
-        return l, r
-
-    def _create_region(self, bounds_tuple):
-        left_edge = []
-        right_edge = []
-        dims = []
-        for ax, b in enumerate(bounds_tuple):
-            l, r = self._slice_to_edges(ax, b)
-            left_edge.append(l)
-            right_edge.append(r)
-            dims.append(getattr(b.step, "imag", None))
-        center = [ (l + r)/2.0 for l, r in zip(left_edge, right_edge)]
-        if all(d is not None for d in dims):
-            return self.ds.arbitrary_grid(left_edge, right_edge, dims)
-        return self.ds.region(center, left_edge, right_edge)

diff -r 19366e378965226b483861a61b512a7df663e73e -r e3c9aad9aa584653d4b0de3af22498d7fee85866 yt/data_objects/region_expression.py
--- /dev/null
+++ b/yt/data_objects/region_expression.py
@@ -0,0 +1,109 @@
+"""
+An object that can live on the dataset to facilitate data access.
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import weakref
+import types
+
+import yt.units
+from yt.utilities.exceptions import YTDimensionalityError
+
+class RegionExpression(object):
+    _all_data = None
+    def __init__(self, ds):
+        self.ds = weakref.proxy(ds)
+
+    @property
+    def all_data(self):
+        if self._all_data is None:
+            self._all_data = self.ds.all_data()
+        return self._all_data
+
+    def __getitem__(self, item):
+        # At first, we will only implement this as accepting a slice that is
+        # (optionally) unitful corresponding to a specific set of coordinates
+        # that result in a rectangular prism or a slice.
+        if isinstance(item, types.StringTypes):
+            # This is some field; we will instead pass this back to the
+            # all_data object.
+            return self.all_data[item]
+        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
+            return self.all_data[item]
+        if len(item) != self.ds.dimensionality:
+            # Not the right specification, and we don't want to do anything
+            # implicitly.
+            raise YTDimensionalityError(len(item), self.ds.dimensionality)
+        if self.ds.dimensionality != 3:
+            # We'll pass on this for the time being.
+            raise RuntimeError
+
+        # OK, now we need to look at our slices.  How many are a specific
+        # coordinate?
+        
+        if not all(isinstance(v, slice) for v in item):
+            return self._create_slice(item)
+        else:
+            if all(s.start is s.stop is s.step is None for s in item):
+                return self.all_data
+            return self._create_region(item)
+            
+    def _spec_to_value(self, input_tuple):
+        if not isinstance(input_tuple, tuple):
+            # We now assume that it's in code_length
+            return self.ds.quan(input_tuple, 'code_length')
+        v, u = input_tuple
+        value = self.ds.quan(v, u)
+        return value
+
+    def _create_slice(self, slice_tuple):
+        axis = None
+        new_slice = []
+        for ax, v in enumerate(slice_tuple):
+            if not isinstance(v, slice):
+                if axis is not None: raise RuntimeError
+                axis = ax
+                coord = self._spec_to_value(v)
+                new_slice.append(slice(None, None, None))
+            else:
+                new_slice.append(v)
+        # This new slice doesn't need to be a tuple
+        source = self._create_region(new_slice)
+        sl = self.ds.slice(axis, coord, data_source = source)
+        return sl
+
+    def _slice_to_edges(self, ax, val):
+        if val.start is None:
+            l = self.ds.domain_left_edge[ax]
+        else:
+            l = self._spec_to_value(val.start)
+        if val.stop is None:
+            r = self.ds.domain_right_edge[ax]
+        else:
+            r = self._spec_to_value(val.stop)
+        if r < l:
+            raise RuntimeError
+        return l, r
+
+    def _create_region(self, bounds_tuple):
+        left_edge = []
+        right_edge = []
+        dims = []
+        for ax, b in enumerate(bounds_tuple):
+            l, r = self._slice_to_edges(ax, b)
+            left_edge.append(l)
+            right_edge.append(r)
+            dims.append(getattr(b.step, "imag", None))
+        center = [ (l + r)/2.0 for l, r in zip(left_edge, right_edge)]
+        if all(d is not None for d in dims):
+            return self.ds.arbitrary_grid(left_edge, right_edge, dims)
+        return self.ds.region(center, left_edge, right_edge)
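
For reference, a minimal usage sketch of the indexing behavior implemented by
``RegionExpression`` above (exposed as ``ds.r`` in the ``static_output.py`` hunk
below); the dataset path and field names are illustrative assumptions, and the
semantics follow the ``__getitem__`` logic in this changeset:

    import yt
    ds = yt.load("MyDataset/mydataset")      # hypothetical dataset path

    # A bare field name (or a (field type, field name) tuple) falls through to
    # the cached all_data() object.
    rho = ds.r["density"]

    # Plain floats are interpreted as code_length; (value, unit) tuples carry
    # explicit units.  All-slice input produces a rectangular region.
    reg = ds.r[0.2:0.3, 0.4:0.6, :]
    reg_cm = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, :]

    # A scalar entry along one axis produces an axis-aligned slice through a
    # region built from the remaining bounds.
    sl = ds.r[:, :, (0.25, 'cm')]

    # The .imag handling in _create_region suggests that a complex step on
    # every axis requests an arbitrary_grid at that resolution (not exercised
    # by the tests in this changeset).
    grid = ds.r[::64j, ::64j, ::64j]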

diff -r 19366e378965226b483861a61b512a7df663e73e -r e3c9aad9aa584653d4b0de3af22498d7fee85866 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -48,8 +48,8 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
-from yt.data_objects.dataset_access import \
-    DatasetAccess
+from yt.data_objects.region_expression import \
+    RegionExpression
 
 from yt.geometry.coordinates.api import \
     CoordinateHandler, \
@@ -163,7 +163,7 @@
         self.file_style = file_style
         self.conversion_factors = {}
         self.parameters = {}
-        self.d = DatasetAccess(self)
+        self.region_expression = self.r = RegionExpression(self)
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}

diff -r 19366e378965226b483861a61b512a7df663e73e -r e3c9aad9aa584653d4b0de3af22498d7fee85866 yt/data_objects/tests/test_dataset_access.py
--- a/yt/data_objects/tests/test_dataset_access.py
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -7,22 +7,22 @@
     # We'll do a couple here
 
     # First, no string units
-    reg1 = ds.d[0.2:0.3,0.4:0.6,:]
+    reg1 = ds.r[0.2:0.3,0.4:0.6,:]
     reg2 = ds.region([0.25, 0.5, 0.5], [0.2, 0.4, 0.0], [0.3, 0.6, 1.0])
     yield assert_equal, reg1["density"], reg2["density"]
 
     # Now, string units in some -- 1.0 == cm
-    reg1 = ds.d[(0.1, 'cm'):(0.5, 'cm'), :, (0.25, 'cm'): (0.35, 'cm')]
+    reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, (0.25, 'cm'): (0.35, 'cm')]
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
     yield assert_equal, reg1["density"], reg2["density"]
 
     # Now, string units in some -- 1.0 == cm
-    reg1 = ds.d[(0.1, 'cm'):(0.5, 'cm'), :, 0.25:0.35]
+    reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, 0.25:0.35]
     reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
     yield assert_equal, reg1["density"], reg2["density"]
 
     # And, lots of : usage!
-    reg1 = ds.d[:, :, :]
+    reg1 = ds.r[:, :, :]
     reg2 = ds.all_data()
     yield assert_equal, reg1["density"], reg2["density"]
 
@@ -31,9 +31,9 @@
     # access it multiple times and get the *same object*.
     ds = fake_amr_ds(fields=["density"])
     dd = ds.all_data()
-    yield assert_equal, ds.d["density"], dd["density"]
+    yield assert_equal, ds.r["density"], dd["density"]
     # Now let's assert that it's the same object
-    rho = ds.d["density"]
+    rho = ds.r["density"]
     rho *= 2.0
-    yield assert_equal, dd["density"]*2.0, ds.d["density"]
-    yield assert_equal, dd["gas", "density"]*2.0, ds.d["gas", "density"]
+    yield assert_equal, dd["density"]*2.0, ds.r["density"]
+    yield assert_equal, dd["gas", "density"]*2.0, ds.r["gas", "density"]


https://bitbucket.org/yt_analysis/yt/commits/ef2d115e64bd/
Changeset:   ef2d115e64bd
Branch:      yt
User:        MatthewTurk
Date:        2015-10-22 21:55:45+00:00
Summary:     Merging from upstream
Affected #:  189 files

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -10,6 +10,7 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/analysis_modules/ppv_cube/ppv_utils.c
+yt/analysis_modules/photon_simulator/utils.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,10 +1,11 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
-recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
 include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
 include doc/extensions/README doc/Makefile
@@ -12,5 +13,3 @@
 prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
-prune tests
-exclude clean.sh .hgchurn

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -132,7 +132,7 @@
 
 .. code:: python
 
-    apec_model = TableApecModel("atomdb_v2.0.2",
+    apec_model = TableApecModel("$SPECTRAL_DATA/spectral",
                                 0.01, 20.0, 20000,
                                 thermal_broad=False,
                                 apec_vers="2.0.2")

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -23,7 +23,7 @@
 ----------------------
 
 Mesh fields can be filtered by two methods: cut region objects 
-(:class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`) 
+(:class:`~yt.data_objects.selection_data_containers.YTCutRegion`) 
 and NumPy boolean masks.  Boolean masks are simpler, but they only work
 for examining datasets, whereas cut region objects create wholly new
 data objects suitable for full analysis (data examination, image generation, 
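
The hunk above distinguishes NumPy boolean masks from cut region objects; a
minimal hedged sketch of both approaches (dataset path, field names, and
thresholds are illustrative assumptions):

    import yt
    ds = yt.load("MyDataset/mydataset")      # hypothetical dataset path
    ad = ds.all_data()

    # Boolean mask: simple, but only useful for inspecting values.
    dense_values = ad["density"][ad["density"] > 1e-24]

    # Cut region: a new data object that can be reused for further analysis
    # and image generation.
    hot_dense = ad.cut_region(["obj['density'] > 1e-24",
                               "obj['temperature'] > 1e6"])
    print(hot_dense["density"].shape)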

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -173,7 +173,7 @@
 ---------------------------------
 
 To calculate the values along a line connecting two points in a simulation, you
-can use the object :class:`~yt.data_objects.selection_data_containers.YTRayBase`,
+can use the object :class:`~yt.data_objects.selection_data_containers.YTRay`,
 accessible as the ``ray`` property on an index.  (See :ref:`data-objects`
 for more information on this.)  To do so, you can supply two points and access
 fields within the returned object.  For instance, this code will generate a ray
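
A hedged sketch of the ray usage described above (dataset path, endpoints, and
field names are illustrative assumptions; sorting by the parametric "t" field
is a common way to order the samples along the ray, given the cell-ordering
caveat noted in the FAQ hunk later in this changeset):

    import yt
    ds = yt.load("MyDataset/mydataset")      # hypothetical dataset path

    # A ray is defined by arbitrary start and end coordinates (code units here).
    ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])

    # Fields along the ray are available by dictionary access; "t" is the
    # normalized distance along the ray and can be used to sort the samples.
    order = ray["t"].argsort()
    print(ray["density"][order])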

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -1,4 +1,4 @@
-.. _data-objects:
+.. _Data-objects:
 
 Data Objects
 ============
@@ -97,7 +97,7 @@
 """"""""""
 
 **Point** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTPointBase`    
+    | Class :class:`~yt.data_objects.selection_data_containers.YTPoint`    
     | Usage: ``point(coord, ds=None, field_parameters=None, data_source=None)``
     | A point defined by a single cell at specified coordinates.
 
@@ -105,7 +105,7 @@
 """"""""""
 
 **Ray (Axis-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`
     | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) stretching through the full domain 
       aligned with one of the x,y,z axes.  Defined by an axis and a point
@@ -113,7 +113,7 @@
       :ref:`note about ray data value ordering <ray-data-ordering>`.
 
 **Ray (Arbitrarily-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTRayBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTRay`
     | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) defined by arbitrary start and end coordinates. 
       Please see this 
@@ -123,13 +123,13 @@
 """"""""""
 
 **Slice (Axis-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTSliceBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTSlice`
     | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to one of the axes and intersecting a particular 
       coordinate.
 
 **Slice (Arbitrarily-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlaneBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlane`
     | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
@@ -145,7 +145,7 @@
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
 **Box Region** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTRegionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A box-like region aligned with the grid axis orientation.  It is 
@@ -156,14 +156,14 @@
       is assumed to be the midpoint between the left and right edges.
 
 **Disk/Cylinder** 
-    | Class: :class:`~yt.data_objects.selection_data_containers.YTDiskBase`
+    | Class: :class:`~yt.data_objects.selection_data_containers.YTDisk`
     | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A cylinder defined by a point at the center of one of the circular bases,
       a normal vector to it defining the orientation of the length of the
       cylinder, and radius and height values for the cylinder's dimensions.
 
 **Ellipsoid** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoidBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoid`
     | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None, data_source=None)``
     | An ellipsoid with axis magnitudes set by semi_major_axis_length, 
      semi_medium_axis_length, and semi_minor_axis_length.  semi_major_vector 
@@ -171,7 +171,7 @@
      of the semi-medium and semi_minor axes.
 
 **Sphere** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTSphereBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTSphere`
     | Usage: ``sphere(center, radius, ds=None, field_parameters=None, data_source=None)``
     | A sphere defined by a central coordinate and a radius.
 
@@ -194,7 +194,7 @@
     | See :ref:`boolean_data_objects`.
 
 **Filter** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTCutRegion`
     | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
     | A ``cut_region`` is a filter which can be applied to any other data 
       object.  The filter is defined by the conditionals present, which 
@@ -203,7 +203,7 @@
       For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTDataCollectionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTDataCollection`
     | Usage: ``data_collection(center, obj_list, ds=None, field_parameters=None)``
     | A ``data_collection`` is a list of data objects that can be 
       sampled and processed as a whole in a single data object.
@@ -214,13 +214,13 @@
 ^^^^^^^^^^^^^^^^^^^^
 
 **Fixed-Resolution Region** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTCoveringGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTCoveringGrid`
     | Usage: ``covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted to a single, specified resolution.
       See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region with Smoothing** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTSmoothedCoveringGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTSmoothedCoveringGrid`
     | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted and interpolated to a single, 
       specified resolution.  Identical to covering_grid, except that it 
@@ -228,7 +228,7 @@
       :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region for Particle Deposition** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
     | When particles are deposited on to mesh fields, they use the existing
       mesh structure, but this may have too much or too little resolution
@@ -238,7 +238,7 @@
       information.
 
 **Projection** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`
     | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
@@ -248,14 +248,14 @@
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTStreamlineBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTStreamline`
     | Usage: ``streamline(coord_list, length, fields=None, ds=None, field_parameters=None)``
     | A ``streamline`` can be traced out by identifying a starting coordinate (or 
       list of coordinates) and allowing it to trace a vector field, like gas
       velocity.  See :ref:`streamlines` for more information.
 
 **Surface** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTSurfaceBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTSurface`
     | Usage: ``surface(data_source, field, field_value)``
     | The surface defined by an isocontour in any mesh field.  An existing 
       data object must be provided as the source, as well as a mesh field
@@ -358,7 +358,7 @@
 holdover from the time when yt was used exclusively for data that came in
 regularly structured grid patches, and does not necessarily work as well for
 data that is composed of discrete objects like particles.  To augment this, the
-:class:`~yt.data_objects.construction_data_containers.YTArbitraryGridBase` object 
+:class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid` object 
 was created, which enables construction of meshes (onto which particles can be
 deposited or smoothed) in arbitrary regions.  This eliminates any assumptions
 on yt's part about how the data is organized, and will allow for more
@@ -444,7 +444,7 @@
 set of level sets.  The second (``connected_sets``) will be a dict of dicts.
 The key for the first (outer) dict is the level of the contour, corresponding
 to ``contour_values``.  The inner dict returned is keyed by the contour ID.  It
-contains :class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`
+contains :class:`~yt.data_objects.selection_data_containers.YTCutRegion`
 objects.  These can be queried just as any other data object.  The clump finder 
 (:ref:`clump_finding`) differs from the above method in that the contour 
 identification is performed recursively within each individual structure, and 
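
The usage strings listed above map directly onto dataset methods; a brief
hedged sketch (dataset path and all numeric arguments are illustrative only):

    import yt
    ds = yt.load("MyDataset/mydataset")      # hypothetical dataset path

    sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))               # YTSphere
    box = ds.box([0.1, 0.1, 0.1], [0.9, 0.9, 0.9])               # YTRegion
    cg = ds.covering_grid(2, [0.0, 0.0, 0.0], [128, 128, 128])   # YTCoveringGrid
    prj = ds.proj("density", "z")                                # YTQuadTreeProj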

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
    building_the_docs
    testing
    debugdrive
+   releasing
    creating_datatypes
    creating_derived_fields
    creating_derived_quantities

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/developing/releasing.rst
--- /dev/null
+++ b/doc/source/developing/releasing.rst
@@ -0,0 +1,208 @@
+How to Do a Release
+-------------------
+
+Periodically, the yt development community issues new releases. Since yt follows
+`semantic versioning <http://semver.org/>`_, the type of release can be read off
+from the version number used. Version numbers should follow the scheme
+``MAJOR.MINOR.PATCH``. There are three kinds of possible releases:
+
+* Bugfix releases
+
+  These releases are regularly scheduled and will optimally happen approximately
+  once a month. These releases should contain only fixes for bugs discovered in
+  earlier releases and should not contain new features or API changes. Bugfix
+  releases should increment the ``PATCH`` version number. Bugfix releases should
+  *not* be generated by merging from the ``yt`` branch; instead, bugfix pull
+  requests should be manually backported using the PR backport script, described
+  below. Version ``3.2.2`` is a bugfix release.
+
+* Minor releases
+
+  These releases happen when new features are deemed ready to be merged into the
+  ``stable`` branch and should not happen on a regular schedule. Minor releases
+  can also include fixes for bugs if the fix is determined to be too invasive
+  for a bugfix release. Minor releases should *not* include
+  backwards-incompatible changes and should not change APIs.  If an API change
+  is deemed to be necessary, the old API should continue to function but might
+  trigger deprecation warnings. Minor releases should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+  ``MINOR`` version number and reset the ``PATCH`` version number to zero.
+  Version ``3.3.0`` is a minor release.
+
+* Major releases
+
+  These releases happen when the development community decides to make major
+  backwards-incompatible changes. In principle a major version release could
+  include arbitrary changes to the library. Major version releases should only
+  happen after extensive discussion and vetting among the developer and user
+  community. Like minor releases, a major release should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Major releases should increment the
+  ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
+  to zero. If it ever happens, version ``4.0.0`` will be a major release.
+
+The job of doing a release differs depending on the kind of release. Below, we
+describe the necessary steps for each kind of release in detail.
+
+Doing a Bugfix Release
+~~~~~~~~~~~~~~~~~~~~~~
+
+As described above, bugfix releases are regularly scheduled updates for minor
+releases to ensure fixes for bugs make their way out to users in a timely
+manner. Since bugfix releases should not include new features, we do not issue
+bugfix releases by simply merging from the development ``yt`` branch into the
+``stable`` branch.  Instead, we make use of the ``pr_backport.py`` script to
+manually cherry-pick bugfixes from the ``yt`` branch onto the ``stable``
+branch.
+
+The backport script issues interactive prompts to backport individual pull
+requests to the ``stable`` branch in a temporary clone of the main yt mercurial
+repository on bitbucket. The script is written this way to avoid editing
+history in a clone of the repository that a developer uses for day-to-day work
+and to avoid mixing work-in-progress changes with changes that have made their
+way to the "canonical" yt repository on bitbucket.
+
+Rather than automatically manipulating the temporary repository by scripting
+mercurial commands using ``python-hglib``, the script must be "operated" by a
+human who is ready to think carefully about what the script is telling them
+to do. Most operations will merely require copy/pasting a suggested mercurial
+command. However, some changes will require manual backporting.
+
+To run the backport script, first open two terminal windows. The first window
+will be used to run the backport script. The second terminal will be used to
+manipulate a temporary clone of the yt mercurial repository. In the first
+window, navigate to the ``scripts`` directory at the root of the yt repository
+and run the backport script,
+
+.. code-block:: bash
+
+   $ cd $YT_HG/scripts
+   $ python pr_backport.py
+
+You will then need to wait for about a minute (depending on the speed of your
+internet connection and bitbucket's servers) while the script makes a clone of
+the main yt repository and then gathers information about pull requests that
+have been merged since the last tagged release. Once this step finishes, you
+will be prompted to navigate to the temporary folder in a new separate terminal
+session. Do so, and then hit the enter key in the original terminal session.
+
+For each pull request merged since the last tagged release that was
+pointed at the "main" line of development
+(e.g. not the ``experimental`` bookmark), you will be prompted by the script
+with the PR number, title, description, and a suggested mercurial
+command to use to backport the pull request. If the pull request consists of a
+single changeset, you will be prompted to use ``hg graft``. If it contains more
+than one changeset, you will be prompted to use ``hg rebase``. Note that
+``rebase`` is an optional extension for mercurial that is not turned on by
+default. To enable it, add a section like the following in your ``.hgrc`` file:
+
+.. code-block:: none
+
+   [extensions]
+   rebase=
+
+Since ``rebase`` is bundled with core mercurial, you do not need to specify a
+path to the extension; ``rebase=`` on its own is sufficient. Note also that
+mercurial does not automatically update to the tip of the rebased head after
+executing ``hg rebase``, so you will need to manually issue
+``hg update stable`` to move your working directory to the new head of the
+stable branch. The backport script should prompt you with a suggestion to
+update as well.
+
+If the pull request contains merge commits, you must take care to *not* backport
+commits that merge with the main line of development on the ``yt`` branch. Doing
+so may bring unrelated changes, including new features, into a bugfix
+release. If the pull request you'd like to backport contains merge commits, the
+backport script should warn you to be extra careful.
+
+Once you've finished backporting, the script will let you know that you are done
+and warn you to push your work. The temporary repository you have been working
+with will be deleted as soon as the script exits, so take care to push your work
+on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
+fork, you will be able to issue a pull request containing the backported fixes
+just like any other yt pull request.
+
+Doing a Minor or Major Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is much simpler than a bugfix release.  All that needs to happen is that
+the ``yt`` branch gets merged into the ``stable`` branch, and any conflicts
+that arise are resolved, almost certainly in favor of the ``yt`` branch. This
+can happen either by using a merge tool such as ``vimdiff`` or ``kdiff3``, or by telling
+mercurial to write merge markers. If you prefer merge markers, the following
+configuration options should be turned on in your ``hgrc`` to get more detail
+during the merge:
+
+.. code-block:: none
+
+   [ui]
+   merge = internal:merge3
+   mergemarkers = detailed
+
+The first option tells mercurial to write merge markers that show the state of
+the conflicted region of the code on both sides of the merge as well as the
+"base" most recent common ancestor changeset. The second option tells mercurial
+to add extra information about the code near the merge markers.
+
+
+Incrementing Version Numbers and Tagging a Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating the tag for the release, you must increment the version numbers
+that are hard-coded in a few files in the yt source so that version metadata
+for the code is generated correctly. This includes things like ``yt.__version__``
+and the version that gets read by the Python Package Index (PyPI) infrastructure.
+
+The paths relative to the root of the repository for the three files that need
+to be edited are:
+
+* ``doc/source/conf.py``
+
+  The ``version`` and ``release`` variables need to be updated.
+
+* ``setup.py``
+
+  The ``VERSION`` variable needs to be updated.
+
+* ``yt/__init__.py``
+
+  The ``__version__`` variable must be updated.
+
+Once these files have been updated, commit these updates. This is the commit we
+will tag for the release.
+
+To actually create the tag, issue the following command:
+
+.. code-block:: bash
+
+   hg tag <tag-name>
+
+Where ``<tag-name>`` follows the project's naming scheme for tags
+(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
+release to PyPI.
+
+If you are doing a minor or major version number release, you will also need to
+update back to the development branch and update the development version numbers
+in the same files.
+
+
+Uploading to PyPI
+~~~~~~~~~~~~~~~~~
+
+To actually upload the release to the Python Package Index, you just need to
+issue the following command:
+
+.. code-block:: bash
+
+   python setup.py sdist upload -r https://pypi.python.org/pypi
+
+You will be prompted for your PyPI credentials and then the package should
+upload. Note that for this to complete successfully, you will need an account on
+PyPI and that account will need to be registered as an "owner" of the yt
+package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
+Goldbaum.
+
+After the release is uploaded to PyPI, you should send out an announcement
+e-mail to the yt mailing lists as well as other possibly interested mailing
+lists for all but bugfix releases. In addition, you should contact John ZuHone
+about uploading binary wheels to PyPI for Windows and OS X users and contact
+Nathan Goldbaum about getting the Anaconda packages updated.
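
The version-bump step in the new ``releasing.rst`` above names three files and
four variables; as a hedged illustration (``3.2.2`` is only an example release
number), the edits amount to something like:

    # doc/source/conf.py -- Sphinx's short X.Y version and full release string
    version = '3.2'
    release = '3.2.2'

    # setup.py
    VERSION = "3.2.2"

    # yt/__init__.py
    __version__ = "3.2.2"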

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -302,18 +302,19 @@
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy frontends.tipsy
 
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file). To run the tipsy frontend's answer tests using a different
-yt changeset, update to that changeset, recompile if necessary, and run the
-tests using the following command:
+``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+frontend's answer tests using a different yt changeset, update to that
+changeset, recompile if necessary, and run the tests using the following
+command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test frontends.tipsy
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy frontends.tipsy
 
 The results from a nose testing session are pretty straightforward to
 understand, the results for each test are printed directly to STDOUT.  If a test

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -329,8 +329,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Using the Ray objects 
-(:class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase` and 
-:class:`~yt.data_objects.selection_data_containers.YTRayBase`) with AMR data 
+(:class:`~yt.data_objects.selection_data_containers.YTOrthoRay` and 
+:class:`~yt.data_objects.selection_data_containers.YTRay`) with AMR data 
 gives non-contiguous cell information in the Ray's data array. The 
 higher-resolution cells are appended to the end of the array.  Unfortunately, 
 due to how data is loaded by chunks for data containers, there is really no 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -87,17 +87,17 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.selection_data_containers.YTPointBase
-   ~yt.data_objects.selection_data_containers.YTOrthoRayBase
-   ~yt.data_objects.selection_data_containers.YTRayBase
-   ~yt.data_objects.selection_data_containers.YTSliceBase
-   ~yt.data_objects.selection_data_containers.YTCuttingPlaneBase
-   ~yt.data_objects.selection_data_containers.YTDiskBase
-   ~yt.data_objects.selection_data_containers.YTRegionBase
-   ~yt.data_objects.selection_data_containers.YTDataCollectionBase
-   ~yt.data_objects.selection_data_containers.YTSphereBase
-   ~yt.data_objects.selection_data_containers.YTEllipsoidBase
-   ~yt.data_objects.selection_data_containers.YTCutRegionBase
+   ~yt.data_objects.selection_data_containers.YTPoint
+   ~yt.data_objects.selection_data_containers.YTOrthoRay
+   ~yt.data_objects.selection_data_containers.YTRay
+   ~yt.data_objects.selection_data_containers.YTSlice
+   ~yt.data_objects.selection_data_containers.YTCuttingPlane
+   ~yt.data_objects.selection_data_containers.YTDisk
+   ~yt.data_objects.selection_data_containers.YTRegion
+   ~yt.data_objects.selection_data_containers.YTDataCollection
+   ~yt.data_objects.selection_data_containers.YTSphere
+   ~yt.data_objects.selection_data_containers.YTEllipsoid
+   ~yt.data_objects.selection_data_containers.YTCutRegion
    ~yt.data_objects.grid_patch.AMRGridPatch
 
 Construction Objects
@@ -110,12 +110,12 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.construction_data_containers.YTStreamlineBase
-   ~yt.data_objects.construction_data_containers.YTQuadTreeProjBase
-   ~yt.data_objects.construction_data_containers.YTCoveringGridBase
-   ~yt.data_objects.construction_data_containers.YTArbitraryGridBase
-   ~yt.data_objects.construction_data_containers.YTSmoothedCoveringGridBase
-   ~yt.data_objects.construction_data_containers.YTSurfaceBase
+   ~yt.data_objects.construction_data_containers.YTStreamline
+   ~yt.data_objects.construction_data_containers.YTQuadTreeProj
+   ~yt.data_objects.construction_data_containers.YTCoveringGrid
+   ~yt.data_objects.construction_data_containers.YTArbitraryGrid
+   ~yt.data_objects.construction_data_containers.YTSmoothedCoveringGrid
+   ~yt.data_objects.construction_data_containers.YTSurface
 
 Time Series Objects
 ^^^^^^^^^^^^^^^^^^^
@@ -211,8 +211,6 @@
    ~yt.frontends.boxlib.data_structures.OrionDataset
    ~yt.frontends.boxlib.fields.BoxlibFieldInfo
    ~yt.frontends.boxlib.io.IOHandlerBoxlib
-   ~yt.frontends.boxlib.io.IOHandlerCastro
-   ~yt.frontends.boxlib.io.IOHandlerNyx
    ~yt.frontends.boxlib.io.IOHandlerOrion
 
 Chombo

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -22,7 +22,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
+| Castro                |     Y      |     N     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
@@ -42,7 +42,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | MOAB                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Nyx                   |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Orion                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -677,9 +677,13 @@
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.RayCallback`.)
 
-    Adds a line representing the projected path of a ray across the plot.
-    The ray can be either a YTOrthoRayBase, YTRayBase, or a LightRay object.
-    annotate_ray() will properly account for periodic rays across the volume.
+    Adds a line representing the projected path of a ray across the plot.  The
+    ray can be either a
+    :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`,
+    :class:`~yt.data_objects.selection_data_containers.YTRay`, or a
+    :class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+    object.  annotate_ray() will properly account for periodic rays across the
+    volume.
 
 .. python-script::
 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -125,7 +125,7 @@
 This is perhaps the simplest thing to do. yt provides a number of one
 dimensional objects, and these return a 1-D numpy array of their contents with
 direct dictionary access. As a simple example, take a
-:class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase` object, which can be
+:class:`~yt.data_objects.selection_data_containers.YTOrthoRay` object, which can be
 created from an index by calling ``pf.ortho_ray(axis, center)``.
 
 .. python-script::

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -192,7 +192,7 @@
 
 Off axis slice plots can be generated in much the same way as
 grid-aligned slices.  Off axis slices use
-:class:`~yt.data_objects.selection_data_containers.YTCuttingPlaneBase` to slice
+:class:`~yt.data_objects.selection_data_containers.YTCuttingPlane` to slice
 through simulation domains at an arbitrary oblique angle.  A
 :class:`~yt.visualization.plot_window.OffAxisSlicePlot` can be
 instantiated by specifying a dataset, the normal to the cutting
@@ -670,7 +670,7 @@
    plot = yt.ProfilePlot(my_galaxy, "density", ["temperature"])
    plot.save()
 
-This will create a :class:`~yt.data_objects.selection_data_containers.YTDiskBase`
+This will create a :class:`~yt.data_objects.selection_data_containers.YTDisk`
 centered at [0.5, 0.5, 0.5], with a normal vector of [0.0, 0.0, 1.0], radius of
 10 kiloparsecs and height of 3 kiloparsecs and will then make a plot of the
 mass-weighted average temperature as a function of density for all of the gas

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -47,7 +47,7 @@
 both of these operations will run in parallel.  For more information on enabling
 parallelism in yt, see :ref:`parallel-computation`.
 
-Alternatively, you can make an object called ``YTSurfaceBase`` that makes
+Alternatively, you can make an object called ``YTSurface`` that makes
 this process much easier.  You can create one of these objects by specifying a
 source data object and a field over which to identify a surface at a given
 value.  For example:
@@ -101,7 +101,7 @@
 discuss morphological properties of a dataset with collaborators.  It's also
 just plain cool.
 
-The ``YTSurfaceBase`` object includes a method to upload directly to Sketchfab,
+The ``YTSurface`` object includes a method to upload directly to Sketchfab,
 but it requires that you get an API key first.  You can get this API key by
 creating an account and then going to your "dashboard," where it will be listed
 on the right hand side.  Once you've obtained it, put it into your

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -19,7 +19,7 @@
 returned a set of 3D positions that can, in turn, be used to visualize
 the 3D path of the streamlines.  Additionally, individual streamlines
 can be converted into
-:class:`~yt.data_objects.construction_data_containers.YTStreamlineBase` objects,
+:class:`~yt.data_objects.construction_data_containers.YTStreamline` objects,
 and queried for all the available fields along the streamline.
 
 The implementation of streamlining  in yt is described below.
@@ -100,7 +100,7 @@
     let us know on the yt-dev mailing list.
 
 Once the streamlines are found, a
-:class:`~yt.data_objects.construction_data_containers.YTStreamlineBase` object can
+:class:`~yt.data_objects.construction_data_containers.YTStreamline` object can
 be created using the
 :meth:`~yt.visualization.streamlines.Streamlines.path` function, which
 takes as input the index of the streamline requested. This conversion

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -295,7 +295,7 @@
 Previously, projections were inconsistent with the other data objects.
 (The API for Plot Windows is the same.)  The argument order is now ``field``
 then ``axis`` as seen here: 
-:class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`.
+:class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`.
 
 Field Parameters
 ^^^^^^^^^^^^^^^^

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e scripts/pr_backport.py
--- /dev/null
+++ b/scripts/pr_backport.py
@@ -0,0 +1,311 @@
+import hglib
+import requests
+import shutil
+import tempfile
+
+from datetime import datetime
+from distutils.version import LooseVersion
+from time import strptime, mktime
+
+MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
+                      "yt/pullrequests/?state=MERGED")
+
+YT_REPO = "https://bitbucket.org/yt_analysis/yt"
+
+
+def clone_new_repo(source=None):
+    """Clones a new copy of yt_analysis/yt and returns a path to it"""
+    path = tempfile.mkdtemp()
+    dest_repo_path = path+'/yt-backport'
+    if source is None:
+        source = YT_REPO
+    hglib.clone(source=source, dest=dest_repo_path)
+    with hglib.open(dest_repo_path) as client:
+        # Changesets that are on the yt branch but aren't topological ancestors
+        # of whichever changeset the experimental bookmark is pointing at
+        client.update('heads(branch(yt) - ::bookmark(experimental))')
+    return dest_repo_path
+
+
+def get_first_commit_after_last_major_release(repo_path):
+    """Returns the SHA1 hash of the first commit to the yt branch that wasn't
+    included in the last tagged release.
+    """
+    with hglib.open(repo_path) as client:
+        tags = client.log("reverse(tag())")
+        tags = sorted([LooseVersion(t[2]) for t in tags])
+        for t in tags[::-1]:
+            if t.version[0:2] != ['yt', '-']:
+                continue
+            if len(t.version) == 4 or t.version[4] == 0:
+                last_major_tag = t
+                break
+        last_before_release = client.log(
+            "last(ancestors(%s) and branch(yt))" % str(last_major_tag))
+        first_after_release = client.log(
+            "first(descendants(%s) and branch(yt) and not %s)"
+            % (last_before_release[0][1], last_before_release[0][1]))
+    return str(first_after_release[0][1][:12])
+
+
+def get_branch_tip(repo_path, branch, exclude=None):
+    """Returns the SHA1 hash of the most recent commit on the given branch"""
+    revset = "head() and branch(%s)" % branch
+    if exclude is not None:
+        revset += " and not %s" % exclude
+    with hglib.open(repo_path) as client:
+        change = client.log(revset)[0][1][:12]
+    return change
+
+
+def get_lineage_between_release_and_tip(repo_path, first, last):
+    """Returns the lineage of changesets that were at one point the public tip"""
+    with hglib.open(repo_path) as client:
+        lineage = client.log("'%s'::'%s' and p1('%s'::'%s') + '%s'"
+                             % (first, last, first, last, last))
+        return lineage
+
+
+def get_pull_requests_since_last_release(repo_path):
+    """Returns a list of pull requests made since the last tagged release"""
+    r = requests.get(MERGED_PR_ENDPOINT)
+    done = False
+    merged_prs = []
+    with hglib.open(repo_path) as client:
+        last_tag = client.log("reverse(tag())")[0]
+    while not done:
+        if r.status_code != 200:
+            raise RuntimeError
+        data = r.json()
+        prs = data['values']
+        for pr in prs:
+            activity = requests.get(pr['links']['activity']['href']).json()
+            merge_date = None
+            for action in activity['values']:
+                if 'update' in action and action['update']['state'] == 'MERGED':
+                    merge_date = action['update']['date']
+                    merge_date = merge_date.split('.')[0]
+                    timestamp = mktime(strptime(merge_date, "%Y-%m-%dT%H:%M:%S"))
+                    merge_date = datetime.fromtimestamp(timestamp)
+                    break
+            if merge_date is None:
+                break
+            if merge_date < last_tag[6]:
+                done = True
+                break
+            merged_prs.append(pr)
+        r = requests.get(data['next'])
+    return merged_prs
+
+
+def cache_commit_data(prs):
+    """Avoid repeated calls to bitbucket API to get the list of commits per PR"""
+    commit_data = {}
+    for pr in prs:
+        data = requests.get(pr['links']['commits']['href']).json()
+        if data.keys() == [u'error']:
+            # this happens when commits have been stripped, e.g.
+            # https://bitbucket.org/yt_analysis/yt/pull-requests/1641
+            continue
+        done = False
+        commits = []
+        while not done:
+            commits.extend(data['values'])
+            if 'next' not in data:
+                done = True
+            else:
+                data = requests.get(data['next']).json()
+        commit_data[pr['id']] = commits
+    return commit_data
+
+
+def find_commit_in_prs(needle, commit_data, prs):
+    """Finds the PR that contains the commit `needle` in the commit_data dictionary
+
+    If found, returns the pr the needle commit is in. If the commit was not
+    part of the PRs in the dictionary, returns None.
+    """
+    for pr_id in commit_data:
+        commits = commit_data[pr_id]
+        for commit in commits:
+            if commit['hash'] == needle[1]:
+                pr = [pr for pr in prs if pr['id'] == pr_id][0]
+                return pr
+    return None
+
+
+def find_merge_commit_in_prs(needle, prs):
+    """Find the merge commit `needle` in the list of `prs`
+
+    If found, returns the pr the merge commit comes from. If not found, return
+    None
+    """
+    for pr in prs[::-1]:
+        if pr['merge_commit'] is not None:
+            if pr['merge_commit']['hash'] == needle[1][:12]:
+                return pr
+    return None
+
+
+def create_commits_to_prs_mapping(lineage, prs):
+    """create a mapping from commits to the pull requests that the commit is
+    part of
+    """
+    commits_to_prs = {}
+    # make a copy of this list to avoid side effects from calling this function
+    my_prs = list(prs)
+    commit_data = cache_commit_data(my_prs)
+    for commit in lineage:
+        cset_hash = commit[1]
+        message = commit[5]
+        if message.startswith('Merged in') and '(pull request #' in message:
+            pr = find_merge_commit_in_prs(commit, my_prs)
+            if pr is None:
+                continue
+            commits_to_prs[cset_hash] = pr
+            # Since we know this PR won't have another commit associated with it,
+            # remove from global list to reduce number of network accesses
+            my_prs.remove(commits_to_prs[cset_hash])
+        else:
+            pr = find_commit_in_prs(commit, commit_data, my_prs)
+            commits_to_prs[cset_hash] = pr
+    return commits_to_prs
+
+
+def invert_commits_to_prs_mapping(commits_to_prs):
+    """invert the mapping from individual commits to pull requests"""
+    inv_map = {}
+    for k, v in commits_to_prs.iteritems():
+        # can't save v itself in inv_map since it's an unhashable dictionary
+        if v is not None:
+            created_date = v['created_on'].split('.')[0]
+            timestamp = mktime(strptime(created_date, "%Y-%m-%dT%H:%M:%S"))
+            created_date = datetime.fromtimestamp(timestamp)
+            pr_desc = (v['id'], v['title'], created_date,
+                       v['links']['html']['href'], v['description'])
+        else:
+            pr_desc = None
+        inv_map[pr_desc] = inv_map.get(pr_desc, [])
+        inv_map[pr_desc].append(k)
+    return inv_map
+
+
+def get_last_descendant(repo_path, commit):
+    """get the most recent descendant of a commit"""
+    with hglib.open(repo_path) as client:
+        com = client.log('last(%s::)' % commit)
+    return com[0][1][:12]
+
+def screen_already_backported(repo_path, inv_map):
+    with hglib.open(repo_path) as client:
+        tags = client.log("reverse(tag())")
+        major_tags = [t for t in tags if t[2].endswith('.0')]
+        most_recent_major_tag_name = major_tags[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_major_tag_name)
+        prs_to_screen = []
+        for pr in inv_map:
+            for commit in lineage:
+                if commit[5].startswith('Backporting PR #%s' % pr[0]):
+                    prs_to_screen.append(pr)
+        for pr in prs_to_screen:
+            del inv_map[pr]
+        return inv_map
+
+def commit_already_on_stable(repo_path, commit):
+    with hglib.open(repo_path) as client:
+        commit_info = client.log(commit)[0]
+        most_recent_tag_name = client.log("reverse(tag())")[0][2]
+        lineage = client.log(
+            "descendants(%s) and branch(stable)" % most_recent_tag_name)
+        # if there is a stable commit with the same commit message,
+        # it's been grafted
+        if any([commit_info[5] == c[5] for c in lineage]):
+            return True
+        return False
+
+def backport_pr_commits(repo_path, inv_map, last_stable, prs):
+    """backports pull requests to the stable branch.
+
+    Accepts a dictionary mapping pull requests to a list of commits that
+    are in the pull request.
+    """
+    pr_list = inv_map.keys()
+    pr_list = sorted(pr_list, key=lambda x: x[2])
+    for pr_desc in pr_list:
+        merge_warn = False
+        merge_commits = []
+        pr = [pr for pr in prs if pr['id'] == pr_desc[0]][0]
+        data = requests.get(pr['links']['commits']['href']).json()
+        commits = data['values']
+        while 'next' in data:
+            data = requests.get(data['next']).json()
+            commits.extend(data['values'])
+        commits = [com['hash'][:12] for com in commits]
+        with hglib.open(repo_path) as client:
+            for com in commits:
+                if client.log('merge() and %s' % com) != []:
+                    merge_warn = True
+                    merge_commits.append(com)
+        if len(commits) > 1:
+            revset = " | ".join(commits)
+            revset = '"%s"' % revset
+            message = "Backporting PR #%s %s" % \
+                (pr['id'], pr['links']['html']['href'])
+            dest = get_last_descendant(repo_path, last_stable)
+            message = \
+                "hg rebase -r %s --keep --collapse -m \"%s\" -d %s\n" % \
+                (revset, message, dest)
+            message += "hg update stable\n\n"
+            if merge_warn is True:
+                if len(merge_commits) > 1:
+                    merge_commits = ", ".join(merge_commits)
+                else:
+                    merge_commits = merge_commits[0]
+                message += \
+                    "WARNING, PULL REQUEST CONTAINS MERGE COMMITS, CONSIDER\n" \
+                    "BACKPORTING BY HAND TO AVOID BACKPORTING UNWANTED CHANGES\n"
+                message += \
+                    "Merge commits are %s\n\n" % merge_commits
+        else:
+            if commit_already_on_stable(repo_path, commits[0]) is True:
+                continue
+            message = "hg graft %s\n" % commits[0]
+        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
+        print "To backport, issue the following command(s):\n"
+        print message
+        raw_input('Press any key to continue')
+
+
+if __name__ == "__main__":
+    print ""
+    print "Gathering PR information, this may take a minute."
+    print "Don't worry, yt loves you."
+    print ""
+    repo_path = clone_new_repo()
+    try:
+        last_major_release = get_first_commit_after_last_major_release(repo_path)
+        last_dev = get_branch_tip(repo_path, 'yt', 'experimental')
+        last_stable = get_branch_tip(repo_path, 'stable')
+        lineage = get_lineage_between_release_and_tip(
+            repo_path, last_major_release, last_dev)
+        prs = get_pull_requests_since_last_release(repo_path)
+        commits_to_prs = create_commits_to_prs_mapping(lineage, prs)
+        inv_map = invert_commits_to_prs_mapping(commits_to_prs)
+        # for now, ignore commits that aren't part of a pull request since
+        # the last bugfix release. These are mostly commits in pull requests
+        # from before the last bugfix release but might include commits that
+        # were pushed directly to the repo.
+        del inv_map[None]
+
+        inv_map = screen_already_backported(repo_path, inv_map)
+        print "In another terminal window, navigate to the following path:"
+        print "%s" % repo_path
+        raw_input("Press any key to continue")
+        backport_pr_commits(repo_path, inv_map, last_stable, prs)
+        raw_input(
+            "Now you need to push your backported changes. The temporary\n"
+            "repository currently being used will be deleted as soon as you\n"
+            "press any key.")
+    finally:
+        shutil.rmtree(repo_path)
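
``pr_backport.py`` above walks Bitbucket's 2.0 API by accumulating each page's
``values`` and following the ``next`` link; a minimal standalone sketch of that
pagination pattern (simplified: it visits every page rather than stopping at
the last tagged release the way the script does):

    import requests

    MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/"
                          "yt_analysis/yt/pullrequests/?state=MERGED")

    def iter_paginated(url):
        # Yield every item from a paginated Bitbucket 2.0 API endpoint.
        while url is not None:
            r = requests.get(url)
            if r.status_code != 200:
                raise RuntimeError("unexpected status: %s" % r.status_code)
            data = r.json()
            for item in data.get('values', []):
                yield item
            url = data.get('next')  # absent on the last page

    for pr in iter_paginated(MERGED_PR_ENDPOINT):
        print("#%s: %s" % (pr['id'], pr['title']))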

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e scripts/yt_lodgeit.py
--- a/scripts/yt_lodgeit.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-    LodgeIt!
-    ~~~~~~~~
-
-    A script that pastes stuff into the yt-project pastebin on
-    paste.yt-project.org.
-
-    Modified (very, very slightly) from the original script by the authors
-    below.
-
-    .lodgeitrc / _lodgeitrc
-    -----------------------
-
-    Under UNIX create a file called ``~/.lodgeitrc``, under Windows
-    create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
-
-        language=default_language
-        clipboard=true/false
-        open_browser=true/false
-        encoding=fallback_charset
-
-    :authors: 2007-2008 Georg Brandl <georg at python.org>,
-              2006 Armin Ronacher <armin.ronacher at active-4.com>,
-              2006 Matt Good <matt at matt-good.net>,
-              2005 Raphael Slinckx <raphael at slinckx.net>
-"""
-import os
-import sys
-from optparse import OptionParser
-
-
-SCRIPT_NAME = os.path.basename(sys.argv[0])
-VERSION = '0.3'
-SERVICE_URL = 'http://paste.yt-project.org/'
-SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
-                'open_browser']
-
-# global server proxy
-_xmlrpc_service = None
-
-
-def fail(msg, code):
-    """Bail out with an error message."""
-    print >> sys.stderr, 'ERROR: %s' % msg
-    sys.exit(code)
-
-
-def load_default_settings():
-    """Load the defaults from the lodgeitrc file."""
-    settings = {
-        'language':     None,
-        'clipboard':    True,
-        'open_browser': False,
-        'encoding':     'iso-8859-15'
-    }
-    rcfile = None
-    if os.name == 'posix':
-        rcfile = os.path.expanduser('~/.lodgeitrc')
-    elif os.name == 'nt' and 'APPDATA' in os.environ:
-        rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
-    if rcfile:
-        try:
-            f = open(rcfile)
-            for line in f:
-                if line.strip()[:1] in '#;':
-                    continue
-                p = line.split('=', 1)
-                if len(p) == 2:
-                    key = p[0].strip().lower()
-                    if key in settings:
-                        if key in ('clipboard', 'open_browser'):
-                            settings[key] = p[1].strip().lower() in \
-                                            ('true', '1', 'on', 'yes')
-                        else:
-                            settings[key] = p[1].strip()
-            f.close()
-        except IOError:
-            pass
-    settings['tags'] = []
-    settings['title'] = None
-    return settings
-
-
-def make_utf8(text, encoding):
-    """Convert a text to UTF-8, brute-force."""
-    try:
-        u = unicode(text, 'utf-8')
-        uenc = 'utf-8'
-    except UnicodeError:
-        try:
-            u = unicode(text, encoding)
-            uenc = 'utf-8'
-        except UnicodeError:
-            u = unicode(text, 'iso-8859-15', 'ignore')
-            uenc = 'iso-8859-15'
-    try:
-        import chardet
-    except ImportError:
-        return u.encode('utf-8')
-    d = chardet.detect(text)
-    if d['encoding'] == uenc:
-        return u.encode('utf-8')
-    return unicode(text, d['encoding'], 'ignore').encode('utf-8')
-
-
-def get_xmlrpc_service():
-    """Create the XMLRPC server proxy and cache it."""
-    global _xmlrpc_service
-    import xmlrpclib
-    if _xmlrpc_service is None:
-        try:
-            _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
-                                                    allow_none=True)
-        except Exception, err:
-            fail('Could not connect to Pastebin: %s' % err, -1)
-    return _xmlrpc_service
-
-
-def copy_url(url):
-    """Copy the url into the clipboard."""
-    # try windows first
-    try:
-        import win32clipboard
-    except ImportError:
-        # then give pbcopy a try.  do that before gtk because
-        # gtk might be installed on os x but nobody is interested
-        # in the X11 clipboard there.
-        from subprocess import Popen, PIPE
-        try:
-            client = Popen(['pbcopy'], stdin=PIPE)
-        except OSError:
-            try:
-                import pygtk
-                pygtk.require('2.0')
-                import gtk
-                import gobject
-            except ImportError:
-                return
-            gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
-            gobject.idle_add(gtk.main_quit)
-            gtk.main()
-        else:
-            client.stdin.write(url)
-            client.stdin.close()
-            client.wait()
-    else:
-        win32clipboard.OpenClipboard()
-        win32clipboard.EmptyClipboard()
-        win32clipboard.SetClipboardText(url)
-        win32clipboard.CloseClipboard()
-
-
-def open_webbrowser(url):
-    """Open a new browser window."""
-    import webbrowser
-    webbrowser.open(url)
-
-
-def language_exists(language):
-    """Check if a language alias exists."""
-    xmlrpc = get_xmlrpc_service()
-    langs = xmlrpc.pastes.getLanguages()
-    return language in langs
-
-
-def get_mimetype(data, filename):
-    """Try to get MIME type from data."""
-    try:
-        import gnomevfs
-    except ImportError:
-        from mimetypes import guess_type
-        if filename:
-            return guess_type(filename)[0]
-    else:
-        if filename:
-            return gnomevfs.get_mime_type(os.path.abspath(filename))
-        return gnomevfs.get_mime_type_for_data(data)
-
-
-def print_languages():
-    """Print a list of all supported languages, with description."""
-    xmlrpc = get_xmlrpc_service()
-    languages = xmlrpc.pastes.getLanguages().items()
-    languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
-    print 'Supported Languages:'
-    for alias, name in languages:
-        print '    %-30s%s' % (alias, name)
-
-
-def download_paste(uid):
-    """Download a paste given by ID."""
-    xmlrpc = get_xmlrpc_service()
-    paste = xmlrpc.pastes.getPaste(uid)
-    if not paste:
-        fail('Paste "%s" does not exist.' % uid, 5)
-    print paste['code'].encode('utf-8')
-
-
-def create_paste(code, language, filename, mimetype, private):
-    """Create a new paste."""
-    xmlrpc = get_xmlrpc_service()
-    rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
-                                private)
-    if not rv:
-        fail('Could not create paste. Something went wrong '
-             'on the server side.', 4)
-    return rv
-
-
-def compile_paste(filenames, langopt):
-    """Create a single paste out of zero, one or multiple files."""
-    def read_file(f):
-        try:
-            return f.read()
-        finally:
-            f.close()
-    mime = ''
-    lang = langopt or ''
-    if not filenames:
-        data = read_file(sys.stdin)
-        if not langopt:
-            mime = get_mimetype(data, '') or ''
-        fname = ""
-    elif len(filenames) == 1:
-        fname = filenames[0]
-        data = read_file(open(filenames[0], 'rb'))
-        if not langopt:
-            mime = get_mimetype(data, filenames[0]) or ''
-    else:
-        result = []
-        for fname in filenames:
-            data = read_file(open(fname, 'rb'))
-            if langopt:
-                result.append('### %s [%s]\n\n' % (fname, langopt))
-            else:
-                result.append('### %s\n\n' % fname)
-            result.append(data)
-            result.append('\n\n')
-        data = ''.join(result)
-        lang = 'multi'
-    return data, lang, fname, mime
-
-
-def main():
-    """Main script entry point."""
-
-    usage = ('Usage: %%prog [options] [FILE ...]\n\n'
-             'Read the files and paste their contents to %s.\n'
-             'If no file is given, read from standard input.\n'
-             'If multiple files are given, they are put into a single paste.'
-             % SERVICE_URL)
-    parser = OptionParser(usage=usage)
-
-    settings = load_default_settings()
-
-    parser.add_option('-v', '--version', action='store_true',
-                      help='Print script version')
-    parser.add_option('-L', '--languages', action='store_true', default=False,
-                      help='Retrieve a list of supported languages')
-    parser.add_option('-l', '--language', default=settings['language'],
-                      help='Used syntax highlighter for the file')
-    parser.add_option('-e', '--encoding', default=settings['encoding'],
-                      help='Specify the encoding of a file (default is '
-                           'utf-8 or guessing if available)')
-    parser.add_option('-b', '--open-browser', dest='open_browser',
-                      action='store_true',
-                      default=settings['open_browser'],
-                      help='Open the paste in a web browser')
-    parser.add_option('-p', '--private', action='store_true', default=False,
-                      help='Paste as private')
-    parser.add_option('--no-clipboard', dest='clipboard',
-                      action='store_false',
-                      default=settings['clipboard'],
-                      help="Don't copy the url into the clipboard")
-    parser.add_option('--download', metavar='UID',
-                      help='Download a given paste')
-
-    opts, args = parser.parse_args()
-
-    # special modes of operation:
-    # - paste script version
-    if opts.version:
-        print '%s: version %s' % (SCRIPT_NAME, VERSION)
-        sys.exit()
-    # - print list of languages
-    elif opts.languages:
-        print_languages()
-        sys.exit()
-    # - download Paste
-    elif opts.download:
-        download_paste(opts.download)
-        sys.exit()
-
-    # check language if given
-    if opts.language and not language_exists(opts.language):
-        fail('Language %s is not supported.' % opts.language, 3)
-
-    # load file(s)
-    try:
-        data, language, filename, mimetype = compile_paste(args, opts.language)
-    except Exception, err:
-        fail('Error while reading the file(s): %s' % err, 2)
-    if not data:
-        fail('Aborted, no content to paste.', 4)
-
-    # create paste
-    code = make_utf8(data, opts.encoding)
-    pid = create_paste(code, language, filename, mimetype, opts.private)
-    url = '%sshow/%s/' % (SERVICE_URL, pid)
-    print url
-    if opts.open_browser:
-        open_webbrowser(url)
-    if opts.clipboard:
-        copy_url(url)
-
-
-if __name__ == '__main__':
-    sys.exit(main())

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e setup.py
--- a/setup.py
+++ b/setup.py
@@ -164,7 +164,7 @@
     config.make_config_py()
     # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
-    config.add_scripts("scripts/*")
+    config.add_scripts("scripts/iyt")
 
     return config
 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -15,7 +15,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from .absorption_line import tau_profile
@@ -159,7 +159,9 @@
         field_data = {}
         if use_peculiar_velocity:
             input_fields.append('velocity_los')
+            input_fields.append('redshift_eff')
             field_units["velocity_los"] = "cm/s"
+            field_units["redshift_eff"] = ""
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -204,11 +206,11 @@
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
-            delta_lambda = continuum['wavelength'] * field_data['redshift']
+            # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                # include factor of (1 + z) because our velocity is in proper frame.
-                delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['velocity_los'] / speed_of_light_cgs
+                delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+            else:
+                delta_lambda = continuum['wavelength'] * field_data['redshift']
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -242,11 +244,11 @@
 
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
-            delta_lambda = line['wavelength'] * field_data['redshift']
+            # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                # include factor of (1 + z) because our velocity is in proper frame.
-                delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
-                    field_data['velocity_los'] / speed_of_light_cgs
+                delta_lambda = line['wavelength'] * field_data['redshift_eff']
+            else:
+                delta_lambda = line['wavelength'] * field_data['redshift']
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                   field_data['temperature']) /
                                   line['atomic_mass'])

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1,4 +1,4 @@
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.analysis_modules.absorption_spectrum.absorption_line import \

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 
 from yt.analysis_modules.cosmological_observation.cosmology_splice import \
@@ -29,6 +29,7 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import speed_of_light_cgs
 
 class LightRay(CosmologySplice):
     """
@@ -365,7 +366,7 @@
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y',
-                               'velocity_z', 'velocity_los'])
+                               'velocity_z', 'velocity_los', 'redshift_eff'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -457,6 +458,28 @@
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
+            # When velocity_los is present, add an effective redshift
+            # (redshift_eff) field by combining the cosmological redshift and
+            # the Doppler redshift.
+
+            # First convert los velocities to the comoving frame (i.e. multiply
+            # by (1+z)), then calculate the Doppler redshift:
+            # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+
+            # Then, to combine the cosmological and Doppler redshifts, follow
+            # eqn 3.75 in Peacock's Cosmological Physics:
+            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
+            # Alternatively, see eqn 5.49 in Peebles for a similar result.
+            if get_los_velocity:
+
+                velocity_los_cm = (1 + sub_data['redshift']) * \
+                                  sub_data['velocity_los']
+                redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
+                                (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
+                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
+                                           (1 + sub_data['redshift'])) - 1
+                del velocity_los_cm, redshift_dopp
+
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()
             for field in all_fields:

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
@@ -34,7 +34,7 @@
 
 
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import glob
 import os
 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 import gc
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 import math
 import numpy as np
 import glob
@@ -299,7 +299,7 @@
 
         Returns
         -------
-        sphere : `yt.data_objects.api.YTSphereBase`
+        sphere : `yt.data_objects.api.YTSphere`
             The empty data source.
 
         Examples
@@ -668,7 +668,7 @@
 
         Returns
         -------
-        ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
+        ellipsoid : `yt.data_objects.data_containers.YTEllipsoid`
             The ellipsoidal data object.
 
         Examples
@@ -861,7 +861,7 @@
 
         Returns
         -------
-        ellipsoid : `yt.data_objects.data_containers.YTEllipsoidBase`
+        ellipsoid : `yt.data_objects.data_containers.YTEllipsoid`
             The ellipsoidal data object.
 
         Examples
@@ -890,7 +890,7 @@
 
         Returns
         -------
-        sphere : `yt.data_objects.api.YTSphereBase`
+        sphere : `yt.data_objects.api.YTSphere`
             The empty data source.
 
         Examples

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -22,7 +22,7 @@
 from collections import OrderedDict
 
 import numpy as np
-import h5py
+from yt.utilities.on_demand_imports import _h5py as h5py
 
 class ParticleTrajectories(object):
     r"""A collection of particle trajectories in time over a series of

diff -r e3c9aad9aa584653d4b0de3af22498d7fee85866 -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -199,15 +199,24 @@
                 ei = start_e
                 for cn, Z in zip(number_of_photons[ibegin:iend], metalZ[ibegin:iend]):
                     if cn == 0: continue
+                    # The rather verbose form of the next few statements is a
+                    # result of code optimization and shouldn't be changed
+                    # without checking for performance degradation. See
+                    # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
+                    # for details.
                     if self.method == "invert_cdf":
-                        cumspec = cumspec_c + Z*cumspec_m
-                        cumspec /= cumspec[-1]
+                        cumspec = cumspec_c
+                        cumspec += Z * cumspec_m
+                        norm_factor = 1.0 / cumspec[-1]
+                        cumspec *= norm_factor
                         randvec = np.random.uniform(size=cn)
                         randvec.sort()
                         cell_e = np.interp(randvec, cumspec, ebins)
                     elif self.method == "accept_reject":
-                        tot_spec = cspec.d+Z*mspec.d
-                        tot_spec /= tot_spec.sum()
+                        tot_spec = cspec.d
+                        tot_spec += Z * mspec.d
+                        norm_factor = 1.0 / tot_spec.sum()
+                        tot_spec *= norm_factor
                         eidxs = np.random.choice(nchan, size=cn, p=tot_spec)
                         cell_e = emid[eidxs]
                     energies[ei:ei+cn] = cell_e

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/1b0244a45b8b/
Changeset:   1b0244a45b8b
Branch:      yt
User:        MatthewTurk
Date:        2015-11-11 14:41:02+00:00
Summary:     Merging with mainline development.
Affected #:  180 files

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -32,6 +32,7 @@
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/element_mappings.c
 yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -23,7 +23,7 @@
 DEST_SUFFIX="yt-conda"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
-INST_YT_SOURCE=1 # Do we do a source install of yt?
+INST_YT_SOURCE=0 # Do we do a source install of yt?
 
 ##################################################################
 #                                                                #
@@ -37,7 +37,7 @@
 # ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
-MINICONDA_VERSION="1.9.1"
+MINICONDA_VERSION="latest"
 YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
 
 function do_exit
@@ -61,12 +61,14 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-function get_ytproject
-{
-    [ -e $1 ] && return
-    echo "Downloading $1 from yt-project.org"
-    ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+# These are needed to prevent pushd and popd from printing to stdout
+
+function pushd () {
+    command pushd "$@" > /dev/null
+}
+
+function popd () {
+    command popd "$@" > /dev/null
 }
 
 function get_ytdata
@@ -101,122 +103,125 @@
 echo "This will install Miniconda from Continuum Analytics, the necessary"
 echo "packages to run yt, and create a self-contained environment for you to"
 echo "use yt.  Additionally, Conda itself provides the ability to install"
-echo "many other packages that can be used for other purposes."
+echo "many other packages that can be used for other purposes using the"
+echo "'conda install' command."
 echo
 MYOS=`uname -s`       # A guess at the OS
-if [ "${MYOS##Darwin}" != "${MYOS}" ]
+if [ $INST_YT_SOURCE -ne 0 ]
 then
-  echo "Looks like you're running on Mac OSX."
-  echo
-  echo "NOTE: you must have the Xcode command line tools installed."
-  echo
-  echo "The instructions for obtaining these tools varies according"
-  echo "to your exact OS version.  On older versions of OS X, you"
-  echo "must register for an account on the apple developer tools"
-  echo "website: https://developer.apple.com/downloads to obtain the"
-  echo "download link."
-  echo
-  echo "We have gathered some additional instructions for each"
-  echo "version of OS X below. If you have trouble installing yt"
-  echo "after following these instructions, don't hesitate to contact"
-  echo "the yt user's e-mail list."
-  echo
-  echo "You can see which version of OSX you are running by clicking"
-  echo "'About This Mac' in the apple menu on the left hand side of"
-  echo "menu bar.  We're assuming that you've installed all operating"
-  echo "system updates; if you have an older version, we suggest"
-  echo "running software update and installing all available updates."
-  echo
-  echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-  echo "Apple developer tools website."
-  echo
-  echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-  echo "developer tools website.  You can either download the"
-  echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-  echo "Software Update to update to XCode 3.2.6 or"
-  echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-  echo "bundle (4.1 GB)."
-  echo
-  echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-  echo "(search for Xcode)."
-  echo "Alternatively, download the Xcode command line tools from"
-  echo "the Apple developer tools website."
-  echo
-  echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
-  echo "(search for Xcode)."
-  echo "Additionally, you will have to manually install the Xcode"
-  echo "command line tools, see:"
-  echo "http://stackoverflow.com/questions/9353444"
-  echo "Alternatively, download the Xcode command line tools from"
-  echo "the Apple developer tools website."
-  echo
-  echo "NOTE: It's possible that the installation will fail, if so,"
-  echo "please set the following environment variables, remove any"
-  echo "broken installation tree, and re-run this script verbatim."
-  echo
-  echo "$ export CC=gcc"
-  echo "$ export CXX=g++"
-  echo
-  MINICONDA_OS="MacOSX-x86_64"
+    if [ "${MYOS##Darwin}" != "${MYOS}" ]
+    then
+        echo "Looks like you're running on Mac OSX."
+        echo
+        echo "NOTE: you must have the Xcode command line tools installed."
+        echo
+        echo "The instructions for obtaining these tools varies according"
+        echo "to your exact OS version.  On older versions of OS X, you"
+        echo "must register for an account on the apple developer tools"
+        echo "website: https://developer.apple.com/downloads to obtain the"
+        echo "download link."
+        echo
+        echo "We have gathered some additional instructions for each"
+        echo "version of OS X below. If you have trouble installing yt"
+        echo "after following these instructions, don't hesitate to contact"
+        echo "the yt user's e-mail list."
+        echo
+        echo "You can see which version of OSX you are running by clicking"
+        echo "'About This Mac' in the apple menu on the left hand side of"
+        echo "menu bar.  We're assuming that you've installed all operating"
+        echo "system updates; if you have an older version, we suggest"
+        echo "running software update and installing all available updates."
+        echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+        echo "Apple developer tools website."
+        echo
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+        echo "developer tools website.  You can either download the"
+        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+        echo "Software Update to update to XCode 3.2.6 or"
+        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+        echo "bundle (4.1 GB)."
+        echo
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+        echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
+        echo
+        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
+        echo "download the appropriate version of Xcode from the"
+        echo "mac app store (search for Xcode)."
+        echo
+        echo "Additionally, you will have to manually install the Xcode"
+        echo "command line tools."
+        echo
+        echo "For OS X 10.8, see:"
+        echo "http://stackoverflow.com/questions/9353444"
+        echo
+        echo "For OS X 10.9 and newer the command line tools can be installed"
+        echo "with the following command:"
+        echo "    xcode-select --install"
+    fi
+    if [ "${MYOS##Linux}" != "${MYOS}" ]
+    then
+        echo "Looks like you're on Linux."
+        echo
+        echo "Please make sure you have the developer tools for your OS "
+        echo "installed."
+        echo
+        if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+        then
+            echo "Looks like you're on an OpenSUSE-compatible machine."
+            echo
+            echo "You need to have these packages installed:"
+            echo
+            echo "  * devel_C_C++"
+            echo "  * libuuid-devel"
+            echo "  * gcc-c++"
+            echo "  * chrpath"
+            echo
+            echo "You can accomplish this by executing:"
+            echo
+            echo "$ sudo zypper install -t pattern devel_C_C++"
+            echo "$ sudo zypper install gcc-c++ libuuid-devel zip"
+            echo "$ sudo zypper install chrpath"
+        fi
+        if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+        then
+            echo "Looks like you're on an Ubuntu-compatible machine."
+            echo
+            echo "You need to have these packages installed:"
+            echo
+            echo "  * libssl-dev"
+            echo "  * build-essential"
+            echo "  * libncurses5"
+            echo "  * libncurses5-dev"
+            echo "  * uuid-dev"
+            echo "  * chrpath"
+            echo
+            echo "You can accomplish this by executing:"
+            echo
+            echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
+            echo
+        fi
+        echo
+        echo "If you are running on a supercomputer or other module-enabled"
+        echo "system, please make sure that the GNU module has been loaded."
+        echo
+    fi
 fi
-if [ "${MYOS##Linux}" != "${MYOS}" ]
+if [ "${MYOS##x86_64}" != "${MYOS}" ]
 then
-  echo "Looks like you're on Linux."
-  echo
-  echo "Please make sure you have the developer tools for your OS installed."
-  echo
-  if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
-  then
-    echo "Looks like you're on an OpenSUSE-compatible machine."
-    echo
-    echo "You need to have these packages installed:"
-    echo
-    echo "  * devel_C_C++"
-    echo "  * libopenssl-devel"
-    echo "  * libuuid-devel"
-    echo "  * zip"
-    echo "  * gcc-c++"
-    echo "  * chrpath"
-    echo
-    echo "You can accomplish this by executing:"
-    echo
-    echo "$ sudo zypper install -t pattern devel_C_C++"
-    echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
-    echo "$ sudo zypper install chrpath"
-  fi
-  if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
-  then
-    echo "Looks like you're on an Ubuntu-compatible machine."
-    echo
-    echo "You need to have these packages installed:"
-    echo
-    echo "  * libssl-dev"
-    echo "  * build-essential"
-    echo "  * libncurses5"
-    echo "  * libncurses5-dev"
-    echo "  * zip"
-    echo "  * uuid-dev"
-    echo "  * chrpath"
-    echo
-    echo "You can accomplish this by executing:"
-    echo
-    echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
-    echo
-  fi
-  echo
-  echo "If you are running on a supercomputer or other module-enabled"
-  echo "system, please make sure that the GNU module has been loaded."
-  echo
-  if [ "${MYOS##x86_64}" != "${MYOS}" ]
-  then
     MINICONDA_OS="Linux-x86_64"
-  elif [ "${MYOS##i386}" != "${MYOS}" ]
-  then
+elif [ "${MYOS##i386}" != "${MYOS}" ]
+then
     MINICONDA_OS="Linux-x86"
-  else
-    echo "Not sure which type of Linux you're on.  Going with x86_64."
+elif [ "${MYOS##Darwin}" != "${MYOS}" ]
+then
+     MINICONDA_OS="MacOSX-x86_64"
+else
+    echo "Not sure which Linux distro you are running."
+    echo "Going with x86_64 architecture."
     MINICONDA_OS="Linux-x86_64"
-  fi
 fi
 echo
 echo "If you'd rather not continue, hit Ctrl-C."
@@ -233,7 +238,7 @@
 if type -P wget &>/dev/null
 then
     echo "Using wget"
-    export GETFILE="wget -nv"
+    export GETFILE="wget -nv -nc"
 else
     echo "Using curl"
     export GETFILE="curl -sSO"
@@ -250,9 +255,6 @@
 
 log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
 
-# I don't think we need OR want this anymore:
-#export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-
 # This we *do* need.
 export PATH=${DEST_DIR}/bin:$PATH
 
@@ -261,51 +263,40 @@
 
 declare -a YT_DEPS
 YT_DEPS+=('python')
-YT_DEPS+=('distribute')
-YT_DEPS+=('libpng')
+YT_DEPS+=('setuptools')
 YT_DEPS+=('numpy')
-YT_DEPS+=('pygments')
-YT_DEPS+=('jinja2')
-YT_DEPS+=('tornado')
-YT_DEPS+=('pyzmq')
+YT_DEPS+=('jupyter')
 YT_DEPS+=('ipython')
 YT_DEPS+=('sphinx')
 YT_DEPS+=('h5py')
 YT_DEPS+=('matplotlib')
 YT_DEPS+=('cython')
 YT_DEPS+=('nose')
+YT_DEPS+=('conda-build')
+YT_DEPS+=('mercurial')
+YT_DEPS+=('sympy')
 
 # Here is our dependency list for yt
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/dev
-log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/gpl
 log_cmd conda update --yes conda
 
-echo "Current dependencies: ${YT_DEPS[@]}"
 log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
-log_cmd conda install --yes ${YT_DEPS[@]}
-
-echo "Installing mercurial."
-get_ytrecipe mercurial
+for YT_DEP in "${YT_DEPS[@]}"; do
+    echo "Installing $YT_DEP"
+    log_cmd conda install --yes ${YT_DEP}
+done
 
 if [ $INST_YT_SOURCE -eq 0 ]
 then
-  echo "Installing yt as a package."
-  get_ytrecipe yt
+  echo "Installing yt"
+  log_cmd conda install --yes yt
 else
-  # We do a source install.
-  YT_DIR="${DEST_DIR}/src/yt-hg"
-  export PNG_DIR=${DEST_DIR}
-  export FTYPE_DIR=${DEST_DIR}
-  export HDF5_DIR=${DEST_DIR}
-  log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-  pushd ${YT_DIR}
-  log_cmd python setup.py develop
-  popd
-  log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate 
-  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate
-  log_cmd cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh
-  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
+    # We do a source install.
+    echo "Installing yt from source"
+    YT_DIR="${DEST_DIR}/src/yt-hg"
+    log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+    pushd ${YT_DIR}
+    log_cmd python setup.py develop
+    popd
 fi
 
 echo
@@ -314,34 +305,26 @@
 echo
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
-if [ $INST_YT_SOURCE -eq 0 ]
-then
-  echo "You must now modify your PATH variable by prepending:"
-  echo 
-  echo "   $DEST_DIR/bin"
-  echo
-  echo "For example, if you use bash, place something like this at the end"
-  echo "of your ~/.bashrc :"
-  echo
-  echo "   export PATH=$DEST_DIR/bin:$PATH"
-else
-  echo "To run from this new installation, use the activate script for this "
-  echo "environment."
-  echo
-  echo "    $ source $DEST_DIR/bin/activate"
-  echo
-  echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
-  echo "LD_LIBRARY_PATH to match your new yt install.  If you use csh, just"
-  echo "append .csh to the above."
-fi
+echo "You must now modify your PATH variable by prepending:"
+echo 
+echo "   $DEST_DIR/bin"
+echo
+echo "On Bash-style shells you can copy/paste the following command to "
+echo "temporarily activate the yt installtion:"
+echo
+echo "    export PATH=$DEST_DIR/bin:\$PATH"
+echo
+echo "and on csh-style shells"
+echo
+echo "    setenv PATH $DEST_DIR/bin:\$PATH"
+echo
+echo "You can also the init file appropriate for your shell to include the same"
+echo "command."
 echo
 echo "To get started with yt, check out the orientation:"
 echo
 echo "    http://yt-project.org/doc/orientation/"
 echo
-echo "or just activate your environment and run 'yt serve' to bring up the"
-echo "yt GUI."
-echo
 echo "For support, see the website and join the mailing list:"
 echo
 echo "    http://yt-project.org/"

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -233,53 +233,61 @@
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-	echo "The instructions for obtaining these tools varies according"
-	echo "to your exact OS version.  On older versions of OS X, you"
-	echo "must register for an account on the apple developer tools"
-	echo "website: https://developer.apple.com/downloads to obtain the"
-	echo "download link."
-	echo
-	echo "We have gathered some additional instructions for each"
-	echo "version of OS X below. If you have trouble installing yt"
-	echo "after following these instructions, don't hesitate to contact"
-	echo "the yt user's e-mail list."
-	echo
-	echo "You can see which version of OSX you are running by clicking"
-	echo "'About This Mac' in the apple menu on the left hand side of"
-	echo "menu bar.  We're assuming that you've installed all operating"
-	echo "system updates; if you have an older version, we suggest"
-	echo "running software update and installing all available updates."
-	echo
+        echo "The instructions for obtaining these tools varies according"
+        echo "to your exact OS version.  On older versions of OS X, you"
+        echo "must register for an account on the apple developer tools"
+        echo "website: https://developer.apple.com/downloads to obtain the"
+        echo "download link."
+        echo
+        echo "We have gathered some additional instructions for each"
+        echo "version of OS X below. If you have trouble installing yt"
+        echo "after following these instructions, don't hesitate to contact"
+        echo "the yt user's e-mail list."
+        echo
+        echo "You can see which version of OSX you are running by clicking"
+        echo "'About This Mac' in the apple menu on the left hand side of"
+        echo "menu bar.  We're assuming that you've installed all operating"
+        echo "system updates; if you have an older version, we suggest"
+        echo "running software update and installing all available updates."
+        echo
         echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
-	echo "Apple developer tools website."
+        echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
-	echo "developer tools website.  You can either download the"
-	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or"
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
-	echo "bundle (4.1 GB)."
+        echo "developer tools website.  You can either download the"
+        echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+        echo "Software Update to update to XCode 3.2.6 or"
+        echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+        echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
-	echo "(search for Xcode)."
+        echo "(search for Xcode)."
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.4, 10.9, and 10.10: download the appropriate version of"
-	echo "Xcode from the mac app store (search for Xcode)."
-    echo
-	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools."
-    echo
-    echo "For OS X 10.8, see:"
-   	echo "http://stackoverflow.com/questions/9353444"
-	echo
-    echo "For OS X 10.9 and 10.10, the command line tools can be installed"
-    echo "with the following command:"
-    echo "    xcode-select --install"
-    echo
-    OSX_VERSION=`sw_vers -productVersion`
-    if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+        echo "OS X 10.8.4, 10.9, 10.10, and 10.11:"
+        echo "download the appropriate version of Xcode from the"
+        echo "mac app store (search for Xcode)."
+        echo
+        echo "Additionally, you will have to manually install the Xcode"
+        echo "command line tools."
+        echo
+        echo "For OS X 10.8, see:"
+        echo "http://stackoverflow.com/questions/9353444"
+        echo
+        echo "For OS X 10.9 and newer the command line tools can be installed"
+        echo "with the following command:"
+        echo "    xcode-select --install"
+        echo
+        echo "For OS X 10.11, you will additionally need to install the OpenSSL"
+        echo "library using a package manager like homebrew or macports."
+        echo "If you install fails with a message like"
+        echo "    ImportError: cannot import HTTPSHandler"
+        echo "then you do not have the OpenSSL headers available in a location"
+        echo "visible to your C compiler. Consider installing yt using the"
+        echo "get_yt.sh script instead, as that bundles OpenSSL."
+        OSX_VERSION=`sw_vers -productVersion`
+        if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
             MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
@@ -358,17 +366,17 @@
     fi
     if [ $INST_SCIPY -eq 1 ]
     then
-	echo
-	echo "Looks like you've requested that the install script build SciPy."
-	echo
-	echo "If the SciPy build fails, please uncomment one of the the lines"
-	echo "at the top of the install script that sets NUMPY_ARGS, delete"
-	echo "any broken installation tree, and re-run the install script"
-	echo "verbatim."
-	echo
-	echo "If that doesn't work, don't hesitate to ask for help on the yt"
-	echo "user's mailing list."
-	echo
+    echo
+    echo "Looks like you've requested that the install script build SciPy."
+    echo
+    echo "If the SciPy build fails, please uncomment one of the the lines"
+    echo "at the top of the install script that sets NUMPY_ARGS, delete"
+    echo "any broken installation tree, and re-run the install script"
+    echo "verbatim."
+    echo
+    echo "If that doesn't work, don't hesitate to ask for help on the yt"
+    echo "user's mailing list."
+    echo
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -490,9 +498,9 @@
 
 if [ $INST_PY3 -eq 1 ]
 then
-	 PYTHON_EXEC='python3.4'
+     PYTHON_EXEC='python3.4'
 else 
-	 PYTHON_EXEC='python2.7'
+     PYTHON_EXEC='python2.7'
 fi
 
 function do_setup_py
@@ -899,28 +907,28 @@
 else
     if [ ! -e $SCIPY/done ]
     then
-	if [ ! -e BLAS/done ]
-	then
-	    tar xfz blas.tar.gz
-	    echo "Building BLAS"
-	    cd BLAS
-	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
-	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
-	    rm -rf *.o
-	    touch done
-	    cd ..
-	fi
-	if [ ! -e $LAPACK/done ]
-	then
-	    tar xfz $LAPACK.tar.gz
-	    echo "Building LAPACK"
-	    cd $LAPACK/
-	    cp INSTALL/make.inc.gfortran make.inc
-	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
-	    touch done
-	    cd ..
-	fi
+    if [ ! -e BLAS/done ]
+    then
+        tar xfz blas.tar.gz
+        echo "Building BLAS"
+        cd BLAS
+        gfortran -O2 -fPIC -fno-second-underscore -c *.f
+        ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+        ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
+        rm -rf *.o
+        touch done
+        cd ..
+    fi
+    if [ ! -e $LAPACK/done ]
+    then
+        tar xfz $LAPACK.tar.gz
+        echo "Building LAPACK"
+        cd $LAPACK/
+        cp INSTALL/make.inc.gfortran make.inc
+        ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
     export LAPACK=$PWD/$LAPACK/liblapack.a
@@ -1030,7 +1038,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -1,3 +1,5 @@
+.. _photon_simulator:
+
 Constructing Mock X-ray Observations
 ------------------------------------
 
@@ -98,9 +100,8 @@
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
    data package (see the ``xray_data`` `README <xray_data_README.html>`_ 
-   for details on the latter). Make sure that
-   in what follows you specify the full path to the locations of these
-   files.
+   for details on the latter). Make sure that in what follows you 
+   specify the full path to the locations of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could
@@ -197,7 +198,7 @@
 
 .. code:: python
 
-    A = 6000.
+    A = 3000.
     exp_time = 4.0e5
     redshift = 0.05
     cosmo = Cosmology()
@@ -298,7 +299,7 @@
 
 The second option, ``TableAbsorbModel``, takes as input an HDF5 file
 containing two datasets, ``"energy"`` (in keV), and ``"cross_section"``
-(in cm2), and the Galactic column density :math:`N_H`:
+(in :math:`cm^2`), and the Galactic column density :math:`N_H`:
 
 .. code:: python
 
@@ -307,7 +308,7 @@
 Now we're ready to project the photons. First, we choose a line-of-sight
 vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
-specify a ``sky_center`` in RA,DEC on the sky in degrees.
+specify a ``sky_center`` in RA and DEC on the sky in degrees.
 
 Also, we're going to convolve the photons with instrument ``responses``.
 For this, you need a ARF/RMF pair with matching energy bins. This is of
@@ -322,8 +323,8 @@
 
 .. code:: python
 
-    ARF = "chandra_ACIS-S3_onaxis_arf.fits"
-    RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
+    ARF = "acisi_aimpt_cy17.arf"
+    RMF = "acisi_aimpt_cy17.rmf"
     normal = [0.0,0.0,1.0]
     events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
                                      absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
@@ -540,7 +541,7 @@
 
    sphere = ds.sphere("c", (1.0,"Mpc"))
        
-   A = 6000.
+   A = 3000.
    exp_time = 2.0e5
    redshift = 0.05
    cosmo = Cosmology()
@@ -555,7 +556,8 @@
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
-                                    responses=["sim_arf.fits","sim_rmf.fits"], 
+                                    responses=["acisi_aimpt_cy17.arf",
+                                               "acisi_aimpt_cy17.rmf"], 
                                     absorb_model=abs_model,
                                     north_vector=[0.0,1.0,0.0])
 

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -374,6 +374,17 @@
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
 
+Other smoothing kernels besides the cubic spline one are available through a
+keyword argument ``kernel_name`` of the method ``add_smoothed_particle_field``.
+Currently available kernel names include:
+
+* ``cubic``, ``quartic``, and ``quintic`` - spline kernels.
+* ``wendland2``, ``wendland4``, and ``wendland6`` - Wendland kernels.
+
+The added smoothed particle field can be accessed by
+``("deposit", "particletype_kernelname_smoothed_fieldname")`` (except for the
+cubic spline kernel, which obeys the naming scheme given above).
+
 Computing the Nth Nearest Neighbor
 ----------------------------------
 

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -54,10 +54,13 @@
  
 .. code-block:: python
 
-   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.save_as_dataset("my_images.h5", fields=["density","temperature"])
    frb.export_fits("my_images.fits", fields=["density","temperature"],
                    clobber=True, units="kpc")
 
+In the HDF5 case, the created file can be reloaded just like a regular dataset with
+``yt.load`` and will, itself, be a first-class dataset.  For more information on
+this, see :ref:`saving-grid-data-containers`.
 In the FITS case, there is an option for setting the ``units`` of the coordinate system in
 the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
 

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -20,5 +20,6 @@
    units/index
    filtering
    generating_processed_data
+   saving_data
    time_series_analysis
    parallel_computation

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -457,69 +457,9 @@
 ---------------------------
 
 Often, when operating interactively or via the scripting interface, it is
-convenient to save an object or multiple objects out to disk and then restart
-the calculation later.  For example, this is useful after clump finding 
-(:ref:`clump_finding`), which can be very time consuming.  
-Typically, the save and load operations are used on 3D data objects.  yt
-has a separate set of serialization operations for 2D objects such as
-projections.
-
-yt will save out objects to disk under the presupposition that the
-construction of the objects is the difficult part, rather than the generation
-of the data -- this means that you can save out an object as a description of
-how to recreate it in space, but not the actual data arrays affiliated with
-that object.  The information that is saved includes the dataset off of
-which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a dataset
-from whatever limited information it has in the save file.
-
-You can save objects to an output file using the function 
-:func:`~yt.data_objects.index.save_object`: 
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
-   sp.save_object("sphere_name", "save_file.cpkl")
-
-This will store the object as ``sphere_name`` in the file
-``save_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  
-
-To re-load an object saved this way, you can use the shelve module directly:
-
-.. code-block:: python
-
-   import yt
-   import shelve
-   ds = yt.load("my_data") 
-   saved_fn = shelve.open("save_file.cpkl")
-   ds, sp = saved_fn["sphere_name"]
-
-Additionally, we can store multiple objects in a single shelve file, so we 
-have to call the sphere by name.
-
-For certain data objects such as projections, serialization can be performed
-automatically if ``serialize`` option is set to ``True`` in :ref:`the
-configuration file <configuration-file>` or set directly in the script:
-
-.. code-block:: python
-
-   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"
-
-.. note:: Use serialization with caution. Enabling serialization means that
-   once a projection of a dataset has been created (and stored in the .yt file
-   in the same directory), any subsequent changes to that dataset will be
-   ignored when attempting to create the same projection. So if you take a
-   density projection of your dataset in the 'x' direction, then somehow tweak
-   that dataset significantly, and take the density projection again, yt will
-   default to finding the original projection and 
-   :ref:`not your new one <faq-old-data>`.
-
-.. note:: It's also possible to use the standard :mod:`cPickle` module for
-          loading and storing objects -- so in theory you could even save a
-          list of objects!
-
-This method works for clumps, as well, and the entire clump index will be
-stored and restored upon load.
+convenient to save an object to disk and then restart the calculation later or
+transfer the data from a container to another filesystem.  This can be
+particularly useful when working with extremely large datasets.  Field data
+can be saved to disk in a format that allows for it to be reloaded just like
+a regular dataset.  For information on how to do this, see
+:ref:`saving-data-containers`.

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/analyzing/saving_data.rst
--- /dev/null
+++ b/doc/source/analyzing/saving_data.rst
@@ -0,0 +1,243 @@
+.. _saving_data:
+
+Saving Reloadable Data
+======================
+
+Most of the data loaded into or generated with yt can be saved to a
+format that can be reloaded as a first-class dataset.  This includes
+the following:
+
+  * geometric data containers (regions, spheres, disks, rays, etc.)
+
+  * grid data containers (covering grids, arbitrary grids, fixed
+    resolution buffers)
+
+  * spatial plots (projections, slices, cutting planes)
+
+  * profiles
+
+  * generic array data
+
+In the case of projections, slices, and profiles, reloaded data can be
+used to remake plots.  For information on this, see :ref:`remaking-plots`.
+
+.. _saving-data-containers:
+
+Geometric Data Containers
+-------------------------
+
+Data from geometric data containers can be saved with the
+:func:`~yt.data_objects.data_containers.save_as_dataset` function.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   sphere = ds.sphere([0.5]*3, (10, "Mpc"))
+   fn = sphere.save_as_dataset(fields=["density", "particle_mass"])
+   print (fn)
+
+This function will return the name of the file to which the dataset
+was saved.  The filename will be a combination of the name of the
+original dataset and the type of data container.  Optionally, a
+specific filename can be given with the ``filename`` keyword.  If no
+fields are given, the fields that have previously been queried will
+be saved.
+
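+As an illustrative sketch (the filename here is hypothetical), a custom
+filename can be supplied through the ``filename`` keyword:
+
+.. code-block:: python
+
+   # "my_sphere.h5" is just an example name; any filename may be used
+   fn = sphere.save_as_dataset(filename="my_sphere.h5",
+                               fields=["density", "particle_mass"])
+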
+The newly created dataset can be loaded like all other supported
+data through ``yt.load``.  Once loaded, field data can be accessed
+through the traditional data containers or through the ``data``
+attribute, which will be a data container configured like the
+original data container used to make the dataset.  Grid data is
+accessed by the ``grid`` data type and particle data is accessed
+with the original particle type.  As with the original dataset, grid
+positions and cell sizes are accessible with, for example,
+("grid", "x") and ("grid", "dx").  Particle positions are
+accessible as (<particle_type>, "particle_position_x").  All original
+simulation parameters are accessible in the ``parameters``
+dictionary, normally associated with all datasets.
+
+.. code-block:: python
+
+   sphere_ds = yt.load("DD0046_sphere.h5")
+
+   # use the original data container
+   print (sphere_ds.data["grid", "density"])
+
+   # create a new data container
+   ad = sphere_ds.all_data()
+
+   # grid data
+   print (ad["grid", "density"])
+   print (ad["grid", "x"])
+   print (ad["grid", "dx"])
+
+   # particle data
+   print (ad["all", "particle_mass"])
+   print (ad["all", "particle_position_x"])
+
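+The ``parameters`` dictionary mentioned above can be inspected directly; as
+a brief sketch (the available keys depend on the original dataset):
+
+.. code-block:: python
+
+   # simulation parameters carried over from the original dataset
+   print (sphere_ds.parameters)
+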
+Note that because field data queried from geometric containers is
+returned as unordered 1D arrays, data container datasets are treated,
+effectively, as particle data.  Thus, 3D indexing of grid data from
+these datasets is not possible.
+
+.. _saving-grid-data-containers:
+
+Grid Data Containers
+--------------------
+
+Data containers that return field data as multidimensional arrays
+can be saved so as to preserve this type of access.  This includes
+covering grids, arbitrary grids, and fixed resolution buffers.
+Saving data from these containers works just as with geometric data
+containers.  Once reloaded, field data can still be accessed through
+geometric data containers.
+
+.. code-block:: python
+
+   cg = ds.covering_grid(level=0, left_edge=[0.25]*3, dims=[16]*3)
+   fn = cg.save_as_dataset(fields=["density", "particle_mass"])
+
+   cg_ds = yt.load(fn)
+   ad = cg_ds.all_data()
+   print (ad["grid", "density"])
+
+Multidimensional indexing of field data is also available through
+the ``data`` attribute.
+
+.. code-block:: python
+
+   print (cg_ds.data["grid", "density"])
+
+Fixed resolution buffers work just the same.
+
+.. code-block:: python
+
+   my_proj = ds.proj("density", "x", weight_field="density")
+   frb = my_proj.to_frb(1.0, (800, 800))
+   fn = frb.save_as_dataset(fields=["density"])
+   frb_ds = yt.load(fn)
+   print (frb_ds.data["density"])
+
+.. _saving-spatial-plots:
+
+Spatial Plots
+-------------
+
+Spatial plots, such as projections, slices, and off-axis slices
+(cutting planes) can also be saved and reloaded.
+
+.. code-block:: python
+
+   proj = ds.proj("density", "x", weight_field="density")
+   proj.save_as_dataset()
+
+Once reloaded, they can be handed to their associated plotting
+functions to make images.
+
+.. code-block:: python
+
+   proj_ds = yt.load("DD0046_proj.h5")
+   p = yt.ProjectionPlot(proj_ds, "x", "density",
+                         weight_field="density")
+   p.save()
+
+.. _saving-profile-data:
+
+Profiles
+--------
+
+Profiles created with :func:`~yt.data_objects.profiles.create_profile`,
+:class:`~yt.visualization.profile_plotter.ProfilePlot`, and
+:class:`~yt.visualization.profile_plotter.PhasePlot` can be saved with
+the :func:`~yt.data_objects.profiles.save_as_dataset` function, which
+works just as above.  Profile datasets are a type of non-spatial grid
+dataset.  Geometric selection is not possible, but data can be
+accessed through the ``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+
+   profile_2d = yt.create_profile(ad, ["density", "temperature"],
+                                  "cell_mass", weight_field=None,
+                                  n_bins=(128, 128))
+   profile_2d.save_as_dataset()
+
+   prof_2d_ds = yt.load("DD0046_Profile2D.h5")
+   print (prof_2d_ds.data["cell_mass"])
+
+The x, y (if at least 2D), and z (if 3D) bin fields can be accessed as 1D
+arrays with "x", "y", and "z".
+
+.. code-block:: python
+
+   print (prof_2d_ds.data["x"])
+
+The bin fields can also be returned with the same shape as the profile
+data by accessing them with their original names.  This allows for
+boolean masking of profile data using the bin fields.
+
+.. code-block:: python
+
+   # density is the x bin field
+   print (prof_2d_ds.data["density"])
+
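+As a brief sketch of the boolean masking mentioned above (the threshold here
+is chosen arbitrarily as the mean bin density):
+
+.. code-block:: python
+
+   # select only bins denser than the mean bin density
+   dens = prof_2d_ds.data["density"]
+   mask = dens > dens.mean()
+   print (prof_2d_ds.data["cell_mass"][mask])
+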
+For 1, 2, and 3D profile datasets, a fake profile object will be
+constructed by accessing the ``.profile`` attribute.  This is used
+primarily in the case of 1 and 2D profiles to create figures using
+:class:`~yt.visualization.profile_plotter.ProfilePlot` and
+:class:`~yt.visualization.profile_plotter.PhasePlot`.
+
+.. code-block:: python
+
+   p = yt.PhasePlot(prof_2d_ds.data, "density", "temperature",
+                    "cell_mass", weight_field=None)
+   p.save()
+
+.. _saving-array-data:
+
+Generic Array Data
+------------------
+
+Generic arrays can be saved and reloaded as non-spatial data using
+the :func:`~yt.frontends.ytdata.utilities.save_as_dataset` function,
+also available as ``yt.save_as_dataset``.  As with profiles, geometric
+selection is not possible, but the data can be accessed through the
+``.data`` attribute.
+
+.. notebook-cell::
+
+   import yt
+   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
+
+   region = ds.box([0.25]*3, [0.75]*3)
+   sphere = ds.sphere(ds.domain_center, (10, "Mpc"))
+   my_data = {}
+   my_data["region_density"] = region["density"]
+   my_data["sphere_density"] = sphere["density"]
+   yt.save_as_dataset(ds, "test_data.h5", my_data)
+
+   array_ds = yt.load("test_data.h5")
+   print (array_ds.data["region_density"])
+   print (array_ds.data["sphere_density"])
+
+Array data can be saved with or without a dataset loaded.  If no
+dataset has been loaded, a fake dataset can be provided as a
+dictionary.
+
+.. notebook-cell::
+
+   import numpy as np
+   import yt
+
+   my_data = {"density": yt.YTArray(np.random.random(10), "g/cm**3"),
+              "temperature": yt.YTArray(np.random.random(10), "K")}
+   fake_ds = {"current_time": yt.YTQuantity(10, "Myr")}
+   yt.save_as_dataset(fake_ds, "random_data.h5", my_data)
+
+   new_ds = yt.load("random_data.h5")
+   print (new_ds.data["density"])

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -4,6 +4,13 @@
 # In this example we will show how to use the AMRKDTree to take a simulation
 # with 8 levels of refinement and only use levels 0-3 to render the dataset.
 
+# Currently this cookbook is flawed in that the data that is covered by the
+# higher resolution data gets masked during the rendering.  This should be
+# fixed by changing either the data source or the code in
+# yt/utilities/amr_kdtree.py where data is being masked for the partitioned
+# grid.  Right now the quick fix is to create a data_collection, but this
+# will only work for patch based simulations that have ds.index.grids.
+
 # We begin by loading up yt, and importing the AMRKDTree
 import numpy as np
 
@@ -12,58 +19,58 @@
 
 # Load up a dataset and define the kdtree
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-kd = AMRKDTree(ds)
+im, sc = yt.volume_render(ds, 'density', fname='v0.png')
+sc.camera.set_width(ds.arr(100, 'kpc'))
+render_source = sc.get_source(0)
+kd=render_source.volume
 
 # Print out specifics of KD Tree
 print("Total volume of all bricks = %i" % kd.count_volume())
 print("Total number of cells = %i" % kd.count_cells())
 
-# Define a camera and take an volume rendering.
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
-                  tf, volume=kd)
-tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
-
-# This rendering is okay, but lets say I'd like to improve it, and I don't want
-# to spend the time rendering the high resolution data.  What we can do is
-# generate a low resolution version of the AMRKDTree and pass that in to the
-# camera.  We do this by specifying a maximum refinement level of 6.
-
-kd_low_res = AMRKDTree(ds, max_level=6)
+new_source = ds.all_data()
+new_source.max_level=3
+kd_low_res = AMRKDTree(ds, data_source=new_source)
 print(kd_low_res.count_volume())
 print(kd_low_res.count_cells())
 
 # Now we pass this in as the volume to our camera, and render the snapshot
 # again.
 
-cam.volume = kd_low_res
-cam.snapshot("v4.png", clip_ratio=6.0)
+render_source.set_volume(kd_low_res)
+render_source.set_fields('density')
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
 
+tf = render_source.transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-cam.snapshot("v4.png", clip_ratio=6.0)
-
-# That seemed to pick out som interesting structures.  Now let's bump up the
-# opacity.
-
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
+#
+## That seemed to pick out some interesting structures.  Now let's bump up the
+## opacity.
+#
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v3.png", clip_ratio=6.0)
-
-# This looks pretty good, now lets go back to the full resolution AMRKDTree
-
-cam.volume = kd
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
+#
+## This looks pretty good, now let's go back to the full resolution AMRKDTree
+#
+render_source.set_volume(kd)
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # This looks great!

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -3,40 +3,29 @@
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-
-# Set up transfer function
-tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
-tf.add_layers(6, w=0.05)
-
-# Set up camera paramters
-c = [0.5, 0.5, 0.5]  # Center
-L = [1, 1, 1]  # Normal Vector
-W = 1.0  # Width
-Nvec = 512  # Pixels on a side
-
-# Specify a north vector, which helps with rotations.
-north_vector = [0., 0., 1.]
+sc = yt.create_scene(ds)
+cam = sc.camera
+cam.resolution = (512, 512)
+cam.set_width(ds.domain_width/20.0)
 
 # Find the maximum density location, store it in max_c
 v, max_c = ds.find_max('density')
 
-# Initialize the Camera
-cam = ds.camera(c, L, W, (Nvec, Nvec), tf, north_vector=north_vector)
 frame = 0
-
-# Do a rotation over 5 frames
-for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
-    frame += 1
-
 # Move to the maximum density location over 5 frames
-for i, snapshot in enumerate(cam.move_to(max_c, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.iter_move(max_c, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
-for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.iter_zoom(10.0, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
+
+# Do a rotation over 5 frames
+for _ in cam.iter_rotate(np.pi, 5):
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    frame += 1

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -196,10 +196,41 @@
 
 In this recipe, we move a camera through a domain and take multiple volume
 rendering snapshots.
-See :ref:`volume_rendering` for more information.
+See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py
 
+Volume Rendering with Custom Camera
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use customized camera properties. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_camera_volume_rendering.py
+
+.. _cookbook-custom-transfer-function:
+
+Volume Rendering with a Custom Transfer Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use a customized transfer function. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_transfer_function_volume_rendering.py
+
+.. _cookbook-sigma_clip:
+
+Volume Rendering with Sigma Clipping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we output several images with different values of sigma_clip
+set in order to change the contrast of the resulting image.  See 
+:ref:`sigma_clip` for more information.
+
+.. yt_cookbook:: sigma_clip.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 
@@ -212,6 +243,15 @@
 
 .. yt_cookbook:: zoomin_frames.py
 
+.. _cookbook-various_lens:
+
+Various Lens Types for Volume Rendering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This example illustrates the usage and features of different lenses for volume rendering.
+
+.. yt_cookbook:: various_lens.py
+
 .. _cookbook-opaque_rendering:
 
 Opaque Volume Rendering
@@ -220,7 +260,7 @@
 This recipe demonstrates how to make semi-opaque volume renderings, but also
 how to step through and try different things to identify the type of volume
 rendering you want.
-See :ref:`volume_rendering` for more information.
+See :ref:`opaque_rendering` for more information.
 
 .. yt_cookbook:: opaque_rendering.py
 
@@ -235,23 +275,27 @@
 
 .. yt_cookbook:: amrkdtree_downsampling.py
 
+.. _cookbook-volume_rendering_annotations:
+
 Volume Rendering with Bounding Box and Overlaid Grids
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to overplot a bounding box on a volume rendering
 as well as overplotting grids representing the level of refinement achieved
 in different regions of the code.
-See :ref:`volume_rendering` for more information.
+See :ref:`volume_rendering_annotations` for more information.
 
 .. yt_cookbook:: rendering_with_box_and_grids.py
 
 Volume Rendering with Annotation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to write the simulation time, show an
 axis triad indicating the direction of the coordinate system, and show
-the transfer function on a volume rendering.
-See :ref:`volume_rendering` for more information.
+the transfer function on a volume rendering.  Please note that this 
+recipe relies on the old volume rendering interface.  While one can
+continue to use this interface, it may be incompatible with some of the
+new developments and the infrastructure described in :ref:`volume_rendering`.
 
 .. yt_cookbook:: vol-annotated.py
 

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/custom_camera_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -0,0 +1,22 @@
+import yt
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Now increase the resolution
+sc.camera.resolution = (1024, 1024)
+
+# Set the camera focus to a position that is offset from the center of
+# the domain
+sc.camera.focus = ds.arr([0.3, 0.3, 0.3], 'unitary')
+
+# Move the camera position to the other side of the dataset
+sc.camera.position = ds.arr([0, 0, 0], 'unitary')
+
+# Save to disk with a custom filename and apply sigma clipping to eliminate
+# very bright pixels, producing an image with better contrast.
+sc.render()
+sc.save('custom.png', sigma_clip=4)

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -0,0 +1,24 @@
+import yt
+import numpy as np
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Modify the transfer function
+
+# First get the render source, in this case the entire domain, with field ('gas','density')
+render_source = sc.get_source(0)
+
+# Clear the transfer function
+render_source.transfer_function.clear()
+
+# Map a range of density values (in log space) to the Reds_r colormap
+render_source.transfer_function.map_to_colormap(
+    np.log10(ds.quan(5.0e-31, 'g/cm**3')),
+    np.log10(ds.quan(1.0e-29, 'g/cm**3')),
+    scale=30.0, colormap='RdBu_r')
+
+sc.save('new_tf.png')

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -2,27 +2,14 @@
 # volume renderings, to pngs with varying backgrounds.
 
 # First we use the simple_volume_rendering.py recipe from above to generate
-# a standard volume rendering.  The only difference is that we use 
-# grey_opacity=True with our TransferFunction, as the colored background 
-# functionality requires images with an opacity between 0 and 1. 
-
-# We have removed all the comments from the volume rendering recipe for 
-# brevity here, but consult the recipe for more details.
+# a standard volume rendering.
 
 import yt
 import numpy as np
 
 ds = yt.load("Enzo_64/DD0043/data0043")
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)), grey_opacity=True)
-tf.add_layers(5, w=0.02, colormap="spectral")
-c = [0.5, 0.5, 0.5]
-L = [0.5, 0.2, 0.7]
-W = 1.0
-Npixels = 512
-cam = ds.camera(c, L, W, Npixels, tf)
-im = cam.snapshot("original.png" % ds, clip_ratio=8.0)
+im, sc = yt.volume_render(ds, 'density')
+im.write_png("original.png", sigma_clip=8.0)
 
 # Our image array can now be transformed to include different background
 # colors.  By default, the background color is black.  The following
@@ -35,10 +22,10 @@
 # None  (0.,0.,0.,0.) <-- Transparent!
 # any rgba list/array: [r,g,b,a], bounded by 0..1
 
-# We include the clip_ratio=8 keyword here to bring out more contrast between
+# We include the sigma_clip=8 keyword here to bring out more contrast between
 # the background and foreground, but it is entirely optional.
 
-im.write_png('black_bg.png', background='black', clip_ratio=8.0)
-im.write_png('white_bg.png', background='white', clip_ratio=8.0)
-im.write_png('green_bg.png', background=[0.,1.,0.,1.], clip_ratio=8.0)
-im.write_png('transparent_bg.png', background=None, clip_ratio=8.0)
+im.write_png('black_bg.png', background='black', sigma_clip=8.0)
+im.write_png('white_bg.png', background='white', sigma_clip=8.0)
+im.write_png('green_bg.png', background=[0.,1.,0.,1.], sigma_clip=8.0)
+im.write_png('transparent_bg.png', background=None, sigma_clip=8.0)

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -44,8 +44,10 @@
    embedded_webm_animation
    gadget_notebook
    owls_notebook
+   ../visualizing/transfer_function_helper
    ../analyzing/analysis_modules/sunyaev_zeldovich
    fits_radio_cubes
    fits_xray_images
    tipsy_notebook
    halo_analysis_example
+   ../visualizing/volume_rendering_tutorial

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -11,7 +11,7 @@
 # objects, you could set it the way you would a cutting plane -- but for this
 # dataset, we'll just choose an off-axis value at random.  This gets normalized
 # automatically.
-L = [0.5, 0.4, 0.7]
+L = [1.0, 0.0, 0.0]
 
 # Our "width" is the width of the image plane as well as the depth.
 # The first element is the left to right width, the second is the
@@ -26,7 +26,7 @@
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
 # slightly lower quality image.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name
 # relating to what our dataset is called.

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/offaxis_projection_colorbar.py
--- a/doc/source/cookbook/offaxis_projection_colorbar.py
+++ b/doc/source/cookbook/offaxis_projection_colorbar.py
@@ -32,7 +32,7 @@
 # Also note that we set the field which we want to project as "density", but
 # really we could use any arbitrary field like "temperature", "metallicity"
 # or whatever.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Image is now an NxN array representing the intensities of the various pixels.
 # And now, we call our direct image saver.  We save the log of the result.

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -3,44 +3,51 @@
 
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# We start by building a transfer function, and initializing a camera.
+# We start by building a default volume rendering scene 
 
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", sigma_clip=6.0)
 
-# Now let's add some isocontours, and take a snapshot.
-
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
+sc.camera.set_width(ds.arr(0.1,'code_length'))
+tf = sc.get_source(0).transfer_function 
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+        alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
+tf = sc.get_source(0).transfer_function 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured.
 
 tf.grey_opacity = True
-cam.snapshot("v3.png", clip_ratio=6.0)
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v5.png", clip_ratio=6.0)
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -48,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v6.png", clip_ratio=6.0)
+sc.render()
+sc.save("v6.png", sigma_clip=6.0)
 
 # That is very opaque!  Now let's go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-cam.snapshot("v7.png", clip_ratio=6.0)
+sc.render()
+sc.save("v7.png", sigma_clip=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,61 +1,22 @@
 import yt
 import numpy as np
+from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
+sc = yt.create_scene(ds, ('gas','density'))
+sc.get_source(0).transfer_function.grey_opacity=True
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+sc.annotate_domain(ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
-
-# Add three Gaussians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# gist_stern colormap.
-tf.add_layers(3, w=0.02, colormap="gist_stern")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-im = cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
-
-# Add the domain edges, with an alpha blending of 0.3:
-nim = cam.draw_domain(im, alpha=0.3)
-nim.write_png('%s_vr_domain.png' % ds)
-
-# Add the grids, colored by the grid level with the algae colormap
-nim = cam.draw_grids(im, alpha=0.3, cmap='algae')
-nim.write_png('%s_vr_grids.png' % ds)
+sc.annotate_grids(ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
-cam.draw_coordinate_vectors(nim)
-nim.write_png("%s_vr_vectors.png" % ds)
+sc.annotate_axes()
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/sigma_clip.py
--- /dev/null
+++ b/doc/source/cookbook/sigma_clip.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
+
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
+
+# Render and save output images with different levels of sigma clipping.
+# Sigma clipping removes the highest intensity pixels in a volume render, 
+# which affects the overall contrast of the image.
+sc = yt.create_scene(ds, field=('gas', 'density'))
+sc.render()
+sc.save('clip_0.png')
+sc.save('clip_2.png', sigma_clip=2)
+sc.save('clip_4.png', sigma_clip=4)
+sc.save('clip_6.png', sigma_clip=6)

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -1,48 +1,10 @@
 import yt
-import numpy as np
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)))
-
-# Add five Gaussians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# spectral colormap.
-tf.add_layers(5, w=0.02, colormap="spectral")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
+# This will save a file named 'data0043_Render_density.png' to disk.
+im, sc = yt.volume_render(ds, field=('gas', 'density'))

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/various_lens.py
--- /dev/null
+++ b/doc/source/cookbook/various_lens.py
@@ -0,0 +1,120 @@
+import yt
+from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+import numpy as np
+
+field = ("gas", "density")
+
+# normal_vector points from camera to the center of the final projection.
+# Now we look at the positive x direction.
+normal_vector = [1., 0., 0.]
+# north_vector defines the "top" direction of the projection, which is
+# positive z direction here.
+north_vector = [0., 0., 1.]
+
+# Follow the simple_volume_rendering cookbook for the first part of this.
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+sc = Scene()
+vol = VolumeSource(ds, field=field)
+tf = vol.transfer_function
+tf.grey_opacity = True
+
+# Plane-parallel lens
+cam = Camera(ds, lens_type='plane-parallel')
+# Set the resolution of the final projection.
+cam.resolution = [250, 250]
+# Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
+# For plane-parallel lens, the location info along the normal_vector (here
+# is x=0.2) is ignored. 
+cam.position = ds.arr(np.array([0.2, 0.5, 0.5]), 'code_length')
+# Set the orientation of the camera.
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# Set the width of the camera, where width[0] and width[1] specify the length and
+# height of final projection, while width[2] in plane-parallel lens is not used.
+cam.set_width(ds.domain_width * 0.5)
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_plane-parallel.png', sigma_clip=6.0)
+
+# Perspective lens
+cam = Camera(ds, lens_type='perspective')
+cam.resolution = [250, 250]
+# Standing at (x=0.2, y=0.5, z=0.5), we look at the area of x>0.2 (with some open angle
+# specified by camera width) along the positive x direction.
+cam.position = ds.arr([0.2, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# Set the width of the camera, where width[0] and width[1] specify the length and
+# height of the final projection, while width[2] specifies the distance between the
+# camera and the final image.
+cam.set_width(ds.domain_width * 0.5)
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_perspective.png', sigma_clip=6.0)
+
+# Stereo-perspective lens
+cam = Camera(ds, lens_type='stereo-perspective')
+# Set the size ratio of the final projection to be 2:1, since stereo-perspective lens
+# will generate the final image with the left-eye and right-eye views joined together.
+cam.resolution = [500, 250]
+cam.position = ds.arr([0.2, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+cam.set_width(ds.domain_width*0.5)
+# Set the distance between left-eye and right-eye.
+cam.lens.disparity = ds.domain_width[0] * 1.e-3
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
+
+# Fisheye lens
+dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
+cam = Camera(dd, lens_type='fisheye')
+cam.resolution = [250, 250]
+v, c = ds.find_max(field)
+cam.set_position(c - 0.0005 * ds.domain_width)
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+cam.set_width(ds.domain_width)
+cam.lens.fov = 360.0
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_fisheye.png', sigma_clip=6.0)
+
+# Spherical lens
+cam = Camera(ds, lens_type='spherical')
+# Set the size ratio of the final projection to be 2:1, since spherical lens
+# will generate the final image with length of 2*pi and height of pi.
+cam.resolution = [500, 250]
+# Standing at (x=0.4, y=0.5, z=0.5), we look in all the radial directions
+# from this point in spherical coordinate.
+cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# In (stereo)spherical camera, camera width is not used since the entire volume
+# will be rendered
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_spherical.png', sigma_clip=6.0)
+
+# Stereo-spherical lens
+cam = Camera(ds, lens_type='stereo-spherical')
+# Set the size ratio of the final projection to be 4:1, since the stereo-spherical lens
+# will generate the final image with the left-eye and right-eye views joined together.
+cam.resolution = [1000, 250]
+cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
+cam.switch_orientation(normal_vector=normal_vector,
+                       north_vector=north_vector)
+# In (stereo)spherical camera, camera width is not used since the entire volume
+# will be rendered
+# Set the distance between left-eye and right-eye.
+cam.lens.disparity = ds.domain_width[0] * 1.e-3
+sc.camera = cam
+sc.add_source(vol)
+sc.render()
+sc.save('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -4,7 +4,7 @@
 import pylab
 
 import yt
-import yt.visualization.volume_rendering.api as vr
+import yt.visualization.volume_rendering.old_camera as vr
 
 ds = yt.load("maestro_subCh_plt00248")
 
@@ -17,11 +17,11 @@
 # centered on these with width sigma        
 vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
 sigma = 2.e5
-        
+
 mi, ma = min(vals), max(vals)
 
 # Instantiate the ColorTransferfunction.
-tf =  vr.ColorTransferFunction((mi, ma))
+tf =  yt.ColorTransferFunction((mi, ma))
 
 for v in vals:
     tf.sample_colormap(v, sigma**2, colormap="coolwarm")
@@ -69,7 +69,7 @@
 
 # tell the camera to use our figure
 cam._render_figure = f
-    
+
 # save annotated -- this added the transfer function values, 
 # and the clear_fig=False ensures it writes onto our existing figure.
 cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1026,6 +1026,60 @@
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
 
+Unstructured Grid Data
+----------------------
+
+See :ref:`loading-numpy-array`,
+:func:`~yt.frontends.stream.data_structures.load_unstructured_mesh` for
+more detail.
+
+In addition to the above grid types, you can also load data stored on
+unstructured meshes. This type of mesh is used, for example, in many
+finite element calculations. Currently, hexahedral, tetrahedral, and
+wedge-shaped mesh elements are supported.
+
+To load an unstructured mesh, you need to specify the following. First,
+you need to have a coordinates array, which should be an (L, 3) array
+that stores the (x, y, z) positions of all of the vertices in the mesh.
+Second, you need to specify a connectivity array, which describes how
+those vertices are connected into mesh elements. The connectivity array
+should be (N, M), where N is the number of elements and M is the
+connectivity length, i.e. the number of vertices per element. Finally,
+you must also specify a data dictionary, where the keys should be
+the names of the fields and the values should be numpy arrays that
+contain the field data. These arrays can either supply the cell-averaged
+data for each element, in which case they would be (N, 1), or they
+can have node-centered data, in which case they would also be (N, M).
+
+Here is an example of how to load an in-memory, unstructured mesh dataset:
+
+.. code-block:: python
+
+   import yt
+   import numpy
+   from yt.utilities.exodusII_reader import get_data
+
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+
+This uses a publicly available `MOOSE <http://mooseframework.org/>`_
+dataset along with the ``get_data`` function to parse the coords, connectivity,
+and data. Then, these can be loaded as an in-memory dataset as follows:
+
+.. code-block:: python
+
+    mesh_id = 0
+    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+Note that ``load_unstructured_mesh`` can take either a single mesh or a list of meshes.
+Here, we have selected only the first mesh to load.
+
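+As a minimal, hypothetical sketch (not part of the MOOSE sample data), a
+single tetrahedral element can also be assembled by hand, assuming
+zero-based connectivity indices and node-centered field data:
+
+.. code-block:: python
+
+    import numpy as np
+    import yt
+
+    # four vertices of one tetrahedron: an (L, 3) coordinates array
+    coords = np.array([[0.0, 0.0, 0.0],
+                       [1.0, 0.0, 0.0],
+                       [0.0, 1.0, 0.0],
+                       [0.0, 0.0, 1.0]])
+
+    # one element (N = 1) with connectivity length M = 4
+    connectivity = np.array([[0, 1, 2, 3]])
+
+    # node-centered data with shape (N, M)
+    data = {"density": np.array([[1.0, 2.0, 3.0, 4.0]])}
+
+    ds = yt.load_unstructured_mesh(data, connectivity, coords)
+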
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Integration is not implemented.
+* Some functions may behave oddly or not work at all.
+* Data must already reside in memory.
+
 Generic Particle Data
 ---------------------
 

diff -r ef2d115e64bd2311e0c8709b9d6041a8d3ade95e -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -290,14 +290,14 @@
 
 .. code-block:: bash
 
-  $ pip install -r requirements.txt
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
 
 .. code-block:: bash
 
-  $ pip install -r optional-requirements.txt
+  $ pip install ipython[notebook]
 
 From here, you can use ``pip`` (which comes with ``Python``) to install the latest
 stable version of yt:

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/838745f7506c/
Changeset:   838745f7506c
Branch:      yt
User:        MatthewTurk
Date:        2015-11-18 03:03:09+00:00
Summary:     Adding narrative docs for selecting data the new way
Affected #:  1 file

diff -r 1b0244a45b8ba6360559a1ddb55599b7b5ccc8f0 -r 838745f7506c177314320a988bac4fc01b0436d2 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -63,6 +63,110 @@
    for i in range(sp["temperature"].size):
        print "(%f,  %f,  %f)    %f" % (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i])
 
+.. _quickly-selecting-data:
+
+Slicing Syntax for Selecting Data
+---------------------------------
+
+yt provides a mechanism for rapidly selecting data from a dataset.  This allows
+for region selection based on the full domain of the object.  Selecting in this
+manner is exposed through a slice-like syntax.  All of these attributes are
+exposed through the ``RegionExpression`` object, which is an attribute of a
+``DataSet`` object, called ``r``.
+
+Getting All The Data
+^^^^^^^^^^^^^^^^^^^^
+
+The ``.r`` attribute serves as a persistent means of accessing the full data
+from a dataset.  You can access this shorthand operation by querying any field
+on the ``.r`` object, like so:
+
+.. code-block:: python
+
+   ds = yt.load("RedshiftOutput0005")
+   rho = ds.r["density"]
+
+This will return a *flattened* array of data.  The region expression object
+(``r``) doesn't have any derived quantities on it.
+
+.. warning::
+
+   One thing to keep in mind with accessing data in this way is that it is
+   *persistent*.  It is loaded into memory, and then retained until the dataset
+   is deleted or garbage collected.
+
+Selecting Multiresolution Regions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To select rectilinear regions, where the data is selected the same way that it
+is selected in a :ref:`region-reference`, you can utilize slice-like syntax,
+supplying start and stop, but not supplying a step argument.  This requires
+that all three components of the slice be specified.  These take a start and a
+stop, and are for the three axes in simulation order (if your data is ordered
+z, y, x for instance, this would be in z, y, x order).
+
+The slices can have both position and, optionally, unit values.  These define
+the value with respect to the ``domain_left_edge`` of the dataset.  So for
+instance, you could specify it like so::
+
+   ds.r[(100, 'kpc'):(200,'kpc'),:,:]
+
+This would return a region that included everything between 100 kpc from the
+left edge of the dataset to 200 kpc from the left edge of the dataset in the
+first dimension, and which spans the entire dataset in the second and third
+dimensions.  By default, if the units are unspecified, they are in the "native"
+code units of the dataset.
+
+This works in all types of datasets, as well.  For instance, if you have a
+geographic dataset (which is usually ordered latitude, longitude, altitude) you
+can easily select, for instance, one hemisphere with a region selection::
+
+   ds.r[:,-180:0,:]
+
+Selecting Fixed Resolution Regions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+yt also provides functionality for selecting regions that have been turned into
+voxels.  This returns an :ref:`arbitrary-grid` object.  It can be created by
+specifying a complex slice "step", where the start and stop follow the same
+rules as above.  This is similar to how the numpy ``mgrid`` operation works.
+For instance, this code block will generate a grid covering the full domain,
+but converted to 21x35x100 dimensions::
+
+  region = ds.r[::21j, ::35j, ::100j]
+
+The left and right edges, as above, can be specified to provide bounds as well.
+For instance, to select a 10 meter cube, with 24 cells in each dimension, we
+could supply::
+
+  region = ds.r[(20,'m'):(30,'m'):24j, (30,'m'):(40,'m'):24j,
+                (7,'m'):(17,'m'):24j]
+
+This can select both particles and mesh fields.  Mesh fields will be 3D arrays,
+and generated through volume-weighted overlap calculations.
+
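+For example (a brief sketch, reusing the ``region`` defined above)::
+
+  rho = region["density"]           # 3D mesh field, shape (24, 24, 24)
+  pmass = region["particle_mass"]   # flat array of particles in the region
+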
+Selecting Slices
+^^^^^^^^^^^^^^^^
+
+If one dimension is specified as a single value, that will be the dimension
+along which a slice is made.  This provides a simple means of generating a
+slice from a subset of the data.  For instance, to create a slice of a dataset,
+you can very simply specify the full domain along two axes::
+
+   sl = ds.r[:,:,0.25]
+
+This can also be very easily plotted::
+
+   sl = ds.r[:,:,0.25]
+   sl.plot()
+
+This syntax accepts unitful arguments in the same way::
+
+   sl = ds.r[(20.1, 'km'):(31.0, 'km'), (504.143,'m'):(1000.0,'m'),
+             (900.1, 'm')]
+   sl.plot()
+
 .. _available-objects:
 
 Available Objects
@@ -144,6 +248,8 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
+.. _region-reference:
+
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
@@ -227,15 +333,15 @@
       interpolates as necessary from coarse regions to fine.  See 
       :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
-**Fixed-Resolution Region for Particle Deposition** 
+**Fixed-Resolution Region**
     | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
     | When particles are deposited on to mesh fields, they use the existing
       mesh structure, but this may have too much or too little resolution
       relative to the particle locations (or it may not exist at all!).  An
       `arbitrary_grid` provides a means for generating a new independent mesh 
-      structure for particle deposition.  See :ref:`arbitrary-grid` for more 
-      information.
+      structure for particle deposition and simple mesh field interpolation.
+      See :ref:`arbitrary-grid` for more information.
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`
@@ -279,6 +385,67 @@
    sp = ds.sphere('c', (10, 'kpc'))
    print sp.quantities.angular_momentum_vector()
 
+Quickly Processing Data
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Most data objects now have multiple numpy-like methods that allow you to
+quickly process data.  More of these methods will be added over time and added
+to this list.  Most, if not all, of these map to other yt operations and are
+designed as syntactic sugar to slightly simplify otherwise somewhat obtuse
+pipelines.
+
+These operations are parallelized.
+
+You can compute the extrema of a field by using the ``max`` or ``min``
+functions.  This will cache the extrema in between, so calling ``min`` right
+after ``max`` will be considerably faster.  Here is an example::
+
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  reg = ds.r[0.3:0.6, 0.2:0.4, 0.9:0.95]
+  min_rho = reg.min("density")
+  max_rho = reg.max("density")
+
+The ``max`` operation can also compute the maximum intensity projection::
+
+  proj = reg.max("density", axis="x")
+  proj.plot()
+
+The ``min`` operator does not do this, however, as a minimum intensity
+projection is not currently implemented.
+
+You can also compute the ``mean`` value, which accepts a field, an axis, and a
+weight.  If the axis is not specified, it will return the average value of
+the specified field, weighted by the weight argument.  The weight argument
+defaults to ``ones``, which performs an arithmetic average.  For instance::
+
+  mean_rho = reg.mean("density")
+  rho_by_vol = reg.mean("density", weight="cell_volume")
+
+If an axis is provided, it will project along that axis and return it to you::
+
+  rho_proj = reg.mean("temperature", axis="y", weight="density")
+  rho_proj.plot()
+
+The ``sum`` function will add all the values in the data object.  It accepts a
+field and, optionally, an axis.  If the axis is left unspecified, it will sum
+the values in the object::
+
+  vol = reg.sum("cell_volume")
+
+If the axis is specified, it will compute a projection using the method ``sum``
+(which does *not* take into account varying path length!) and return that to
+you::
+
+  cell_count = reg.sum("ones", axis="z")
+  cell_count.plot()
+
+To compute a projection where the path length *is* taken into account, you can
+use the ``integrate`` function::
+
+  proj = reg.integrate("density", "x")
+
+All of these projections supply the data object as their base input.
+
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -350,8 +517,8 @@
 
 .. _arbitrary-grid:
 
-Arbitrary Grids Objects for Particle Deposition
------------------------------------------------
+Arbitrary Grids Objects
+-----------------------
 
 The covering grid and smoothed covering grid objects mandate that they be
 exactly aligned with the mesh.  This is a
@@ -379,6 +546,13 @@
 While these cannot yet be used as input to projections or slices, slices and
 projections can be taken of the data in them and visualized by hand.
 
+These objects, as of yt 3.3, are now also able to "voxelize" mesh fields.  This
+means that you can query the "density" field and it will return the density
+field as deposited, identically to how it would be deposited in a fixed
+resolution buffer.  Note that this means that contributions from misaligned or
+partially-overlapping cells are added in a volume-weighted way, which makes it
+inappropriate for some types of analysis.
+
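+As a brief illustrative sketch (the dimensions below are arbitrary)::
+
+  ag = ds.arbitrary_grid(ds.domain_left_edge, ds.domain_right_edge,
+                         [128, 128, 128])
+  print ag["density"]
+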
 .. _boolean_data_objects:
 
 Combining Objects: Boolean Data Objects


https://bitbucket.org/yt_analysis/yt/commits/0defa67e4faa/
Changeset:   0defa67e4faa
Branch:      yt
User:        MatthewTurk
Date:        2015-11-18 04:55:54+00:00
Summary:     Adding docstrings
Affected #:  1 file

diff -r 838745f7506c177314320a988bac4fc01b0436d2 -r 0defa67e4faa97ddabd245c0dc2549f9549cb3e3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -622,6 +622,30 @@
 
     _extrema_cache = None
     def max(self, field, axis=None):
+        r"""Compute the maximum of a field, optionally along an axis.
+
+        This will, in a parallel-aware fashion, compute the maximum of the
+        given field.  Supplying an axis will result in a return value of a
+        YTProjection, with method 'mip' for maximum intensity.  If the max has
+        already been requested, it will use the cached extrema value.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to maximize.
+        axis : string, optional
+            If supplied, the axis to project the maximum along.
+
+        Returns
+        -------
+        Either a scalar or a YTProjection.
+
+        Examples
+        --------
+
+        >>> max_temp = reg.max("temperature")
+        >>> max_temp_proj = reg.max("temperature", axis="x")
+        """
         if axis is None:
             rv = ()
             fields = ensure_list(field)
@@ -638,6 +662,28 @@
             raise NotImplementedError("Unknown axis %s" % axis)
 
     def min(self, field, axis=None):
+        r"""Compute the minimum of a field.
+
+        This will, in a parallel-aware fashion, compute the minimum of the
+        given field.  Supplying an axis is not currently supported.  If the max
+        has already been requested, it will use the cached extrema value.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to minimize.
+        axis : string, optional
+            If supplied, the axis to compute the minimum along.
+
+        Returns
+        -------
+        Scalar.
+
+        Examples
+        --------
+
+        >>> min_temp = reg.min("temperature")
+        """
         if axis is None:
             rv = ()
             fields = ensure_list(field)
@@ -664,6 +710,34 @@
         raise NotImplementedError
 
     def mean(self, field, axis=None, weight='ones'):
+        r"""Compute the mean of a field, optionally along an axis, with a
+        weight.
+
+        This will, in a parallel-aware fashion, compute the mean of the
+        given field.  If an axis is supplied, it will return a projection,
+        where the weight is also supplied.  By default the weight is "ones",
+        resulting in a strict average.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to average.
+        axis : string, optional
+            If supplied, the axis to compute the mean along (i.e., to project
+            along)
+        weight : string, optional
+            The field to use as a weight.
+
+        Returns
+        -------
+        Scalar or YTProjection.
+
+        Examples
+        --------
+
+        >>> avg_rho = reg.mean("density", weight="cell_volume")
+        >>> rho_weighted_T = reg.mean("temperature", axis="y", weight="density")
+        """
         if axis in self.ds.coordinates.axis_name:
             r = self.ds.proj(field, axis, data_source=self, weight_field=weight)
         elif axis is None:
@@ -676,6 +750,30 @@
         return r
 
     def sum(self, field, axis=None):
+        r"""Compute the sum of a field, optionally along an axis.
+
+        This will, in a parallel-aware fashion, compute the sum of the given
+        field.  If an axis is specified, it will return a projection (using
+        method type "sum", which does not take into account path length) along
+        that axis.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to sum.
+        axis : string, optional
+            If supplied, the axis to sum along.
+
+        Returns
+        -------
+        Either a scalar or a YTProjection.
+
+        Examples
+        --------
+
+        >>> total_vol = reg.sum("cell_volume")
+        >>> cell_count = reg.sum("ones", axis="x")
+        """
         # Because we're using ``sum`` to specifically mean a sum or a
         # projection with the method="sum", we do not utilize the ``mean``
         # function.
@@ -689,6 +787,26 @@
         return r
 
     def integrate(self, field, axis=None):
+        r"""Compute the integral (projection) of a field along an axis.
+
+        This projects a field along an axis.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to project.
+        axis : string
+            The axis to project along.
+
+        Returns
+        -------
+        YTProjection
+
+        Examples
+        --------
+
+        >>> column_density = reg.integrate("density", axis="z")
+        """
         if axis in self.ds.coordinates.axis_name:
             r = self.ds.proj(field, axis, data_source=self)
         else:


https://bitbucket.org/yt_analysis/yt/commits/20fc1a7a6fce/
Changeset:   20fc1a7a6fce
Branch:      yt
User:        MatthewTurk
Date:        2015-11-18 13:58:08+00:00
Summary:     Fixing style issues, including one I don't know why broke is a problem.
Affected #:  3 files

diff -r 0defa67e4faa97ddabd245c0dc2549f9549cb3e3 -r 20fc1a7a6fce1e7e37345ff4abc3a81249a357cb yt/data_objects/region_expression.py
--- a/yt/data_objects/region_expression.py
+++ b/yt/data_objects/region_expression.py
@@ -11,11 +11,9 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
 import weakref
 import types
 
-import yt.units
 from yt.utilities.exceptions import YTDimensionalityError
 
 class RegionExpression(object):
@@ -103,7 +101,7 @@
             left_edge.append(l)
             right_edge.append(r)
             dims.append(getattr(b.step, "imag", None))
-        center = [ (l + r)/2.0 for l, r in zip(left_edge, right_edge)]
+        center = [ (cl + cr)/2.0 for cl, cr in zip(left_edge, right_edge)]
         if all(d is not None for d in dims):
             return self.ds.arbitrary_grid(left_edge, right_edge, dims)
         return self.ds.region(center, left_edge, right_edge)
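
The tail of ``_create_region`` shown here chooses between a regular region
and an ``arbitrary_grid`` based on whether every slice carried an imaginary
step (its ``imag`` part becomes the cell count along that axis).  A rough,
hypothetical sketch of what that lets a user write -- not taken from the
changeset itself, and the dataset path is a placeholder:

    import yt

    ds = yt.load("RedshiftOutput0005")       # placeholder dataset path
    reg = ds.r[0.2:0.8, 0.2:0.8, 0.2:0.8]    # plain slices -> ds.region(...)
    grid = ds.r[::64j, ::64j, ::64j]         # imaginary steps -> arbitrary_grid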

diff -r 0defa67e4faa97ddabd245c0dc2549f9549cb3e3 -r 20fc1a7a6fce1e7e37345ff4abc3a81249a357cb yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -116,4 +116,4 @@
                     2**ref_level * ds.domain_dimensions)
             ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
                     2**ref_level * ds.domain_dimensions)
-            yield assert_almost_equal, cg["density"], ag["density"]
+            yield assert_rel_equal, cg["density"], ag["density"], 7

diff -r 0defa67e4faa97ddabd245c0dc2549f9549cb3e3 -r 20fc1a7a6fce1e7e37345ff4abc3a81249a357cb yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -1,5 +1,4 @@
 from yt.testing import fake_random_ds, fake_amr_ds, assert_equal
-import numpy as np
 
 
 def setup():
@@ -91,7 +90,3 @@
         qrho, qtemp = ad.min(["density", "temperature"])
         yield assert_equal, qrho, ad["density"].min()
         yield assert_equal, qtemp, ad["temperature"].min()
-
-if __name__ == "__main__":
-    for args in test_mean_and_sum():
-        args[0](*args[1:])


https://bitbucket.org/yt_analysis/yt/commits/3105e0285651/
Changeset:   3105e0285651
Branch:      yt
User:        MatthewTurk
Date:        2015-11-19 19:24:44+00:00
Summary:     Comment from Nathan
Affected #:  1 file

diff -r 20fc1a7a6fce1e7e37345ff4abc3a81249a357cb -r 3105e02856510624784bc76913afdda594d09312 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -68,11 +68,11 @@
 Slicing Syntax for Selecting Data
 ---------------------------------
 
-yt provides a mechanism for rapidly selecting data from a dataset.  This allows
-for region selection based on the full domain of the object.  Selecting in this
-manner is exposed through a slice-like syntax.  All of these attributes are
-exposed through the ``RegionExpression`` object, which is an attribute of a
-``DataSet`` object, called ``r``.
+yt provides a mechanism for easily selecting data while doing interactive work
+on the command line.  This allows for region selection based on the full domain
+of the object.  Selecting in this manner is exposed through a slice-like
+syntax.  All of these attributes are exposed through the ``RegionExpression``
+object, which is an attribute of a ``DataSet`` object, called ``r``.
 
 Getting All The Data
 ^^^^^^^^^^^^^^^^^^^^
@@ -87,7 +87,14 @@
    rho = ds.r["density"]
 
 This will return a *flattened* array of data.  The region expression object
-(``r``) doesn't have any derived quantities on it.
+(``r``) doesn't have any derived quantities on it.  This is completely
+equivalent to this set of statements:
+
+.. code-block:: python
+
+   ds = yt.load("RedshiftOutput0005")
+   dd = ds.all_data()
+   rho = dd["density"]
 
 .. warning::
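
The equivalence spelled out in this hunk can be checked directly; a small
illustrative snippet (the dataset name repeats the docs' own example and is
otherwise a placeholder):

    import yt

    ds = yt.load("RedshiftOutput0005")
    rho_r = ds.r["density"]            # flattened array via the region expression
    rho_ad = ds.all_data()["density"]  # the explicit all_data() route
    assert (rho_r == rho_ad).all()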
 


https://bitbucket.org/yt_analysis/yt/commits/d0b23c46ae65/
Changeset:   d0b23c46ae65
Branch:      yt
User:        MatthewTurk
Date:        2015-11-19 19:33:15+00:00
Summary:     Add some equivalencies.
Affected #:  1 file

diff -r 3105e02856510624784bc76913afdda594d09312 -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -412,11 +412,20 @@
   min_rho = reg.min("density")
   max_rho = reg.max("density")
 
+This is equivalent to:::
+
+  min_rho, max_rho = reg.quantities.extrema("density")
+
 The ``max`` operation can also compute the maximum intensity projection:::
 
   proj = reg.max("density", axis="x")
   proj.plot()
 
+This is equivalent to:::
+
+  proj = ds.proj("density", "x", data_source=reg, method="mip")
+  proj.plot()
+
 The ``min`` operator does not do this, however, as a minimum intensity
 projection is not currently implemented.
 
@@ -428,6 +437,12 @@
   mean_rho = reg.mean("density")
   rho_by_vol = reg.mean("density", weight="cell_volume")
 
+This is equivalent to:::
+
+  mean_rho = reg.quantities.weighted_average("density", weight_field="ones")
+  rho_by_vol = reg.quantities.weighted_average("density",
+                    weight_field="cell_volume")
+
 If an axis is provided, it will project along that axis and return it to you:::
 
   rho_proj = reg.mean("temperature", axis="y", weight="density")
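
A short sketch tying together the equivalences documented in this hunk,
assuming ``reg`` is any data object (the dataset path and sphere parameters
below are placeholders):

    import yt

    ds = yt.load("RedshiftOutput0005")
    reg = ds.sphere("c", (100.0, "kpc"))          # placeholder region

    # Extrema via the shorthand or the derived quantity:
    min_rho, max_rho = reg.quantities.extrema("density")
    assert max_rho == reg.max("density")

    # Maximum intensity projection, equivalent to ds.proj(..., method="mip"):
    proj = reg.max("density", axis="x")
    proj.plot()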


https://bitbucket.org/yt_analysis/yt/commits/8628bface7a4/
Changeset:   8628bface7a4
Branch:      yt
User:        MatthewTurk
Date:        2015-11-19 19:33:41+00:00
Summary:     Merging with tip
Affected #:  176 files

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 coding_styleguide.txt
--- /dev/null
+++ b/coding_styleguide.txt
@@ -0,0 +1,101 @@
+Style Guide for Coding in yt
+============================
+
+Coding Style Guide
+------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
+API Guide
+---------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options make for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ``ds.refine_by``.
+
+    - ``RefineBy`` => ``refine_by``
+    - ``TopGridRank`` => ``dimensionality``
+    - ``TopGridDimensions`` => ``domain_dimensions``
+    - ``InitialTime`` => ``current_time``
+    - ``DomainLeftEdge`` => ``domain_left_edge``
+    - ``DomainRightEdge`` => ``domain_right_edge``
+    - ``CurrentTimeIdentifier`` => ``unique_identifier``
+    - ``CosmologyCurrentRedshift`` => ``current_redshift``
+    - ``ComovingCoordinates`` => ``cosmological_simulation``
+    - ``CosmologyOmegaMatterNow`` => ``omega_matter``
+    - ``CosmologyOmegaLambdaNow`` => ``omega_lambda``
+    - ``CosmologyHubbleConstantNow`` => ``hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!
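
To make a few of the guidelines above concrete, here is a small,
self-contained illustration (not part of the changeset) of the preferred
in-place and comparison idioms:

    import numpy as np

    a = np.arange(12, dtype="float64")

    # Avoid copying memory: reshape and multiply in place.
    a.shape = (3, 4)        # rather than a = a.reshape(3, 4)
    np.multiply(a, 3, a)    # rather than a = a * 3

    # Compare with None via identity, not equality.
    result = None
    if result is None:      # rather than result == None
        result = a

    # Refer to a boolean array directly instead of comparing with True.
    mask = a > 10
    if np.all(mask):        # rather than np.all(mask == True)
        print("every element exceeds 10")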

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ConjoinedCapitals, methods and functions are
-   lowercase_with_underscores.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  __metaclass__ is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  if((something) and
-   (something_else)) should be rewritten as if something and something_else.
-   Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do 
-   "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "np.multiply(a, 3, a)".
- * In general, avoid all double-underscore method names: __something is usually
-   unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
-   "SpecialGrid.__init__()".
- * Doc strings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file `doc/docstring_example.txt` for a
-   fiducial example of a docstring.
-
-API Guide
----------
-
- * Do not import "*" from anything other than "yt.funcs".
- * Internally, only import from source files directly -- instead of:
-
-   from yt.visualization.api import ProjectionPlot
-
-   do:
-
-   from yt.visualization.plot_window import ProjectionPlot
-
- * Numpy is to be imported as "np", after a long time of using "na".
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in __init__ and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  def something(a,b,c)
-   should be def something(a, b, c).
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-
- * Avoid Enzo-isms.  This includes but is not limited to:
-   * Hard-coding parameter names that are the same as those in Enzo.  The
-     following translation table should be of some help.  Note that the
-     parameters are now properties on a Dataset subclass: you access them
-     like ds.refine_by .
-     * RefineBy => refine_by
-     * TopGridRank => dimensionality
-     * TopGridDimensions => domain_dimensions
-     * InitialTime => current_time
-     * DomainLeftEdge => domain_left_edge
-     * DomainRightEdge => domain_right_edge
-     * CurrentTimeIdentifier => unique_identifier
-     * CosmologyCurrentRedshift => current_redshift
-     * ComovingCoordinates => cosmological_simulation
-     * CosmologyOmegaMatterNow => omega_matter
-     * CosmologyOmegaLambdaNow => omega_lambda
-     * CosmologyHubbleConstantNow => hubble_constant
-   * Do not assume that the domain runs from 0 .. 1.  This is not true
-     everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -314,12 +314,12 @@
 echo
 echo "    export PATH=$DEST_DIR/bin:\$PATH"
 echo
-echo "and on csh-style shells"
+echo "and on csh-style shells:"
 echo
 echo "    setenv PATH $DEST_DIR/bin:\$PATH"
 echo
-echo "You can also the init file appropriate for your shell to include the same"
-echo "command."
+echo "You can also update the init file appropriate for your shell to include"
+echo "the same command."
 echo
 echo "To get started with yt, check out the orientation:"
 echo

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -59,7 +59,7 @@
   from yt.analysis_modules.halo_finding.api import *
 
   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(ds)
+  halo_list = HaloFinder(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -501,11 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative.  It has been found that :func:`parallelHF` needs
-roughly 1 MB of memory per 5,000 particles, although recent work has improved
-this and the memory requirement is now smaller than this. But this is a good
-starting point for beginning to calculate the memory required for halo-finding.
-For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo_finding`.
 
 **Volume Rendering**
 

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,7 +67,7 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3'
+version = '3.3-dev'
 # The full version, including alpha/beta/rc tags.
 release = '3.3-dev'
 

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -494,80 +494,4 @@
 
 .. _code-style-guide:
 
-Code Style Guide
-----------------
-
-To keep things tidy, we try to stick with a couple simple guidelines.
-
-General Guidelines
-++++++++++++++++++
-
-* In general, follow `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_ guidelines.
-* Classes are ConjoinedCapitals, methods and functions are
-  ``lowercase_with_underscores.``
-* Use 4 spaces, not tabs, to represent indentation.
-* Line widths should not be more than 80 characters.
-* Do not use nested classes unless you have a very good reason to, such as
-  requiring a namespace or class-definition modification.  Classes should live
-  at the top level.  ``__metaclass__`` is exempt from this.
-* Do not use unnecessary parentheses in conditionals.  ``if((something) and
-  (something_else))`` should be rewritten as ``if something and
-  something_else``.  Python is more forgiving than C.
-* Avoid copying memory when possible. For example, don't do ``a =
-  a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-  ``np.multiply(a, 3, a)``.
-* In general, avoid all double-underscore method names: ``__something`` is
-  usually unnecessary.
-* Doc strings should describe input, output, behavior, and any state changes
-  that occur on an object.  See the file `doc/docstring_example.txt` for a
-  fiducial example of a docstring.
-
-API Guide
-+++++++++
-
-* Do not import "*" from anything other than ``yt.funcs``.
-* Internally, only import from source files directly; instead of: ``from
-  yt.visualization.api import SlicePlot`` do
-  ``from yt.visualization.plot_window import SlicePlot``.
-* Numpy is to be imported as ``np``.
-* Do not use too many keyword arguments.  If you have a lot of keyword
-  arguments, then you are doing too much in ``__init__`` and not enough via
-  parameter setting.
-* In function arguments, place spaces before commas.  ``def something(a,b,c)``
-  should be ``def something(a, b, c)``.
-* Don't create a new class to replicate the functionality of an old class --
-  replace the old class.  Too many options makes for a confusing user
-  experience.
-* Parameter files external to yt are a last resort.
-* The usage of the ``**kwargs`` construction should be avoided.  If they
-  cannot be avoided, they must be explained, even if they are only to be
-  passed on to a nested function.
-* Constructor APIs should be kept as *simple* as possible.
-* Variable names should be short but descriptive.
-* No global variables!
-
-Variable Names and Enzo-isms
-++++++++++++++++++++++++++++
-
-* Avoid Enzo-isms.  This includes but is not limited to:
-
-  + Hard-coding parameter names that are the same as those in Enzo.  The
-    following translation table should be of some help.  Note that the
-    parameters are now properties on a Dataset subclass: you access them
-    like ``ds.refine_by`` .
-
-    - ``RefineBy `` => `` refine_by``
-    - ``TopGridRank `` => `` dimensionality``
-    - ``TopGridDimensions `` => `` domain_dimensions``
-    - ``InitialTime `` => `` current_time``
-    - ``DomainLeftEdge `` => `` domain_left_edge``
-    - ``DomainRightEdge `` => `` domain_right_edge``
-    - ``CurrentTimeIdentifier `` => `` unique_identifier``
-    - ``CosmologyCurrentRedshift `` => `` current_redshift``
-    - ``ComovingCoordinates `` => `` cosmological_simulation``
-    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
-    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
-    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
-
-  + Do not assume that the domain runs from 0 to 1.  This is not true
-    for many codes and datasets.
+.. include:: ../../../coding_styleguide.txt

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -6,10 +6,10 @@
 Beginning with version 3.3, yt has the ability to volume render unstructured
 meshes from, for example, finite element calculations. In order to use this
 capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, `embree <https://embree.github.io>`
+when you run the install script. First, `embree <https://embree.github.io>`_
 (a fast software ray-tracing library from Intel) must be installed, either
 by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>` page. Once
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
 Embree is installed, you must also create a symlink next to the library. For
 example, if the libraries were installed at /usr/local/lib/, you must do
 
@@ -18,7 +18,7 @@
     sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
 
 Second, the python bindings for embree (called 
-`pyembree <https://github.com/scopatz/pyembree>`) must also be installed. To
+`pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
 do so, first obtain a copy, by e.g. cloning the repo:
 
 .. code-block:: bash

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -23,7 +23,12 @@
     with hglib.open(dest_repo_path) as client:
         # Changesets that are on the yt branch but aren't topological ancestors
         # of whichever changeset the experimental bookmark is pointing at
-        client.update('heads(branch(yt) - ::bookmark(experimental))')
+        bookmarks, _ = client.bookmarks()
+        bookmark_names = [b[0] for b in bookmarks]
+        if 'experimental' in bookmark_names:
+            client.update('heads(branch(yt) - ::bookmark(experimental))')
+        else:
+            client.update('heads(branch(yt))')
     return dest_repo_path
 
 
@@ -51,9 +56,13 @@
 def get_branch_tip(repo_path, branch, exclude=None):
     """Returns the SHA1 hash of the most recent commit on the given branch"""
     revset = "head() and branch(%s)" % branch
-    if exclude is not None:
-        revset += "and not %s" % exclude
     with hglib.open(repo_path) as client:
+        if exclude is not None:
+            try:
+                client.log(exclude)
+                revset += "and not %s" % exclude
+            except hglib.error.CommandError:
+                pass
         change = client.log(revset)[0][1][:12]
     return change
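
Both hunks in this file follow the same defensive pattern: probe the
repository with hglib before building a revset that assumes a bookmark or
revision exists.  A minimal standalone sketch of that pattern, using only
the hglib calls already visible in the diff (the function name and arguments
are illustrative):

    import hglib

    def safe_revset(repo_path, base_revset, exclude=None):
        """Append an exclusion to a revset only if hg knows the revision."""
        with hglib.open(repo_path) as client:
            if exclude is not None:
                try:
                    client.log(exclude)  # raises CommandError for unknown revs
                    base_revset += " and not %s" % exclude
                except hglib.error.CommandError:
                    pass
        return base_revset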
 

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,11 @@
 with-xunit=1
 
 [flake8]
-# if we include api.py files, we get tons of spurious "imported but unused" errors
-exclude = */api.py,*/__config__.py,yt/visualization/_mpl_imports.py
+# we exclude:
+#      api.py and __init__.py files to avoid spurious unused import errors
+#      _mpl_imports.py for the same reason
+#      autogenerated __config__.py files
+#      vendored libraries
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E401,E502,E701,E703,W291,W293,W391
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W293,W391,W503
\ No newline at end of file

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/README
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,3 @@
+This directory contains two tiny Enzo cosmological datasets.
+
+They were added a long time ago and are provided for testing purposes.
\ No newline at end of file

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.boolean_region_tests import \
-    TestBooleanANDGridQuantity, TestBooleanORGridQuantity, \
-    TestBooleanNOTGridQuantity, TestBooleanANDParticleQuantity, \
-    TestBooleanORParticleQuantity, TestBooleanNOTParticleQuantity
-
-create_test(TestBooleanANDGridQuantity, "BooleanANDGrid")
-
-create_test(TestBooleanORGridQuantity, "BooleanORGrid")
-
-create_test(TestBooleanNOTGridQuantity, "BooleanNOTGrid")
-
-create_test(TestBooleanANDParticleQuantity, "BooleanANDParticle")
-
-create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
-
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# We want to test several things.  We need to be able to run the
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
-    "z-velocity",
-    # Now some derived fields
-    "Pressure", "SoundSpeed", "particle_density", "Entropy",
-    # Ghost zones
-    "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/halos.py
--- a/tests/halos.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
-
-create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
-
-create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
-
-create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-
-class HierarchyConsistency(YTDatasetTest):
-    name = "index_consistency"
-
-    def run(self):
-        self.result = \
-            all(g in ensure_list(c.Parent) for g in self.ds.index.grids
-                                            for c in g.Children)
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-
-class GridLocationsProperties(YTDatasetTest):
-    name = "level_consistency"
-
-    def run(self):
-        self.result = dict(grid_left_edge=self.ds.grid_left_edge,
-                           grid_right_edge=self.ds.grid_right_edge,
-                           grid_levels=self.ds.grid_levels,
-                           grid_particle_count=self.ds.grid_particle_count,
-                           grid_dimensions=self.ds.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow now difference between these values
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-
-class GridRelationships(YTDatasetTest):
-
-    name = "grid_relationships"
-
-    def run(self):
-        self.result = [[p.id for p in ensure_list(g.Parent) \
-            if g.Parent is not None]
-            for g in self.ds.index.grids]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
-                raise GridRelationshipsChanged()
-
-
-class GridGlobalIndices(YTDatasetTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.ds.index.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/object_field_values.py
--- a/tests/object_field_values.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException, create_test
-from yt.funcs import ensure_list, iterable
-from fields_to_test import field_list, particle_field_list
-
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-known_objects = {}
-
-
-def register_object(func):
-    known_objects[func.func_name] = func
-    return func
-
-
- at register_object
-def centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center, width / 0.25)
-
-
- at register_object
-def off_centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center - 0.25 * width, width / 0.25)
-
-
- at register_object
-def corner_sphere(tobj):
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(tobj.ds.domain_left_edge, width / 0.25)
-
-
- at register_object
-def disk(self):
-    center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-    radius = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    height = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    normal = na.array([1.] * 3)
-    self.data_object = self.ds.disk(center, normal, radius, height)
-
-
- at register_object
-def all_data(self):
-    self.data_object = self.ds.all_data()
-
-_new_known_objects = {}
-for field in ["Density"]:  # field_list:
-    for object_name in known_objects:
-
-        def _rfunc(oname, fname):
-
-            def func(tobj):
-                known_objects[oname](tobj)
-                tobj.orig_data_object = tobj.data_object
-                avg_value = tobj.orig_data_object.quantities[
-                        "WeightedAverageQuantity"](fname, "Density")
-                tobj.data_object = tobj.orig_data_object.cut_region(
-                        ["grid['%s'] > %s" % (fname, avg_value)])
-            return func
-        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
-                _rfunc(object_name, field)
-known_objects.update(_new_known_objects)
-
-
-class YTFieldValuesTest(YTDatasetTest):
-
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-
-class YTExtractIsocontoursTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        rset = self.data_object.extract_isocontours("Density",
-            val, rescale=False, sample_values="Temperature")
-        self.result = rset
-
-    def compare(self, old_result):
-        if self.result[0].size == 0 and old_result[0].size == 0:
-            return True
-        self.compare_array_delta(self.result[0].ravel(),
-                                 old_result[0].ravel(), 1e-7)
-        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
-
-
-class YTIsocontourFluxTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        flux = self.data_object.calculate_isocontour_flux(
-           "Density", val, "x-velocity", "y-velocity", "z-velocity")
-        self.result = flux
-
-    def compare(self, old_result):
-        self.compare_value_delta(self.result, old_result, 1e-7)
-
-for object_name in known_objects:
-    for field in field_list + particle_field_list:
-        if "cut_region" in object_name and field in particle_field_list:
-            continue
-        create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field=field, object_name=object_name)
-    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name=object_name)
-    create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name=object_name)
-
-
-class YTDerivedQuantityTest(YTDatasetTest):
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-    def compare(self, old_result):
-        if hasattr(self.result, 'tostring'):
-            self.compare_array_delta(self.result, old_result, 1e-7)
-            return
-        elif iterable(self.result):
-            a1 = na.array(self.result)
-            a2 = na.array(old_result)
-            self.compare_array_delta(a1, a2, 1e-7)
-        else:
-            if self.result != old_result: raise FieldHashesDontMatch
-
-    def run(self):
-        # This only works if it takes no arguments
-        self.result = self.data_object.quantities[self.dq_name]()
-
-dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
-            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
-
-# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
-# MinLocation
-
-for object_name in known_objects:
-    for dq in dq_names:
-        # Some special exceptions
-        if "cut_region" in object_name and (
-            "SpinParameter" in dq or
-            "TotalMass" in dq):
-            continue
-        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name=dq, object_name=object_name)
-
-
-class YTDerivedQuantityTestField(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities[self.dq_name](
-            self.field_name)
-
-for object_name in known_objects:
-    for field in field_list:
-        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
-            create_test(YTDerivedQuantityTestField,
-                        "%s_%s" % (object_name, field),
-                        field_name=field, dq_name=dq,
-                        object_name=object_name)
-
-
-class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities["WeightedAverageQuantity"](
-            self.field_name, weight="CellMassMsun")
-
-for object_name in known_objects:
-    for field in field_list:
-        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
-                    "%s_%s" % (object_name, field),
-                    field_name=field,
-                    object_name=object_name)

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/projections.py
--- a/tests/projections.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestSlice, \
-    TestRay, TestGasDistribution, Test2DGasDistribution
-
-from fields_to_test import field_list
-
-for field in field_list:
-    create_test(TestRay, "%s" % field, field=field)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestSlice, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-        create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
-                field=field, axis=axis)
-    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
-                field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    if field != "Density":
-        create_test(TestGasDistribution, "density_%s" % field,
-                    field_x="Density", field_y=field)
-    if field not in ("x-velocity", "Density"):
-        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                    field_x="Density", field_y="x-velocity", field_z=field,
-                    weight="CellMassMsun")

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/runall.py
--- a/tests/runall.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-from yt.config import ytcfg
-ytcfg["yt", "loglevel"] = "50"
-ytcfg["yt", "serialize"] = "False"
-
-from yt.utilities.answer_testing.api import \
-    RegressionTestRunner, clear_registry, create_test, \
-    TestFieldStatistics, TestAllProjections, registry_entries, \
-    Xunit
-from yt.utilities.command_line import get_yt_version
-
-from yt.mods import *
-import fnmatch
-import imp
-import optparse
-import itertools
-import time
-
-#
-# We assume all tests are to be run, unless explicitly given the name of a
-# single test or something that can be run through fnmatch.
-#
-# Keep in mind that we use a different nomenclature here than is used in the
-# Enzo testing system.  Our 'tests' are actually tests that are small and that
-# run relatively quickly on a single dataset; in Enzo's system, a 'test'
-# encompasses both the creation and the examination of data.  Here we assume
-# the data is kept constant.
-#
-
-cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
-
-
-def load_tests(iname, idir):
-    f, filename, desc = imp.find_module(iname, [idir])
-    tmod = imp.load_module(iname, f, filename, desc)
-    return tmod
-
-
-def find_and_initialize_tests():
-    mapping = {}
-    for f in glob.glob(os.path.join(cwd, "*.py")):
-        clear_registry()
-        iname = os.path.basename(f[:-3])
-        try:
-            load_tests(iname, cwd)
-            mapping[iname] = registry_entries()
-            #print "Associating %s with" % (iname)
-            #print "\n    ".join(registry_entries())
-        except ImportError:
-            pass
-    return mapping
-
-if __name__ == "__main__":
-    clear_registry()
-    mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
-    try:
-        my_hash = get_yt_version()
-    except:
-        my_hash = "UNKNOWN%s" % (time.time())
-    parser = optparse.OptionParser()
-    parser.add_option("-f", "--parameter-file", dest="parameter_file",
-        default=os.path.join(cwd, "DD0010/moving7_0010"),
-        help="The parameter file value to feed to 'load' to test against")
-    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-        default=False, help="List all tests and then exit")
-    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-        help="The test name pattern to match.  Can include wildcards.")
-    parser.add_option("-o", "--output", dest="storage_dir",
-        default=test_storage_directory,
-        help="Base directory for storing test output.")
-    parser.add_option("-c", "--compare", dest="compare_name",
-        default=None,
-        help="The name against which we will compare")
-    parser.add_option("-n", "--name", dest="this_name",
-        default=my_hash,
-        help="The name we'll call this set of tests")
-    opts, args = parser.parse_args()
-
-    if opts.list_tests:
-        tests_to_run = []
-        for m, vals in mapping.items():
-            new_tests = fnmatch.filter(vals, opts.test_pattern)
-            if len(new_tests) == 0: continue
-            load_tests(m, cwd)
-            keys = set(registry_entries())
-            tests_to_run += [t for t in new_tests if t in keys]
-        tests = list(set(tests_to_run))
-        print ("\n    ".join(tests))
-        sys.exit(0)
-
-    # Load the test ds and make sure it's good.
-    ds = load(opts.parameter_file)
-    if ds is None:
-        print "Couldn't load the specified parameter file."
-        sys.exit(1)
-
-    # Now we modify our compare name and self name to include the ds.
-    compare_id = opts.compare_name
-    watcher = None
-    if compare_id is not None:
-        compare_id += "_%s_%s" % (ds, ds._hash())
-        watcher = Xunit()
-    this_id = opts.this_name + "_%s_%s" % (ds, ds._hash())
-
-    rtr = RegressionTestRunner(this_id, compare_id,
-                               results_path=opts.storage_dir,
-                               compare_results_path=opts.storage_dir,
-                               io_log=[opts.parameter_file])
-
-    rtr.watcher = watcher
-    tests_to_run = []
-    for m, vals in mapping.items():
-        new_tests = fnmatch.filter(vals, opts.test_pattern)
-
-        if len(new_tests) == 0: continue
-        load_tests(m, cwd)
-        keys = set(registry_entries())
-        tests_to_run += [t for t in new_tests if t in keys]
-    for test_name in sorted(tests_to_run):
-        print "RUNNING TEST", test_name
-        rtr.run_test(test_name)
-    if watcher is not None:
-        rtr.watcher.report()
-    failures = 0
-    passes = 1
-    for test_name, result in sorted(rtr.passed_tests.items()):
-        if not result:
-            print "TEST %s: %s" % (test_name, result)
-            print "    %s" % rtr.test_messages[test_name]
-        if result: passes += 1
-        else: failures += 1
-    print "Number of passes  : %s" % passes
-    print "Number of failures: %s" % failures

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from yt.mods import *
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class VolumeRenderingInconsistent(RegressionTestException):
-    pass
-
-
-class VolumeRenderingConsistency(YTDatasetTest):
-    name = "volume_rendering_consistency"
-
-    def run(self):
-        c = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-        W = na.sqrt(3.) * (self.ds.domain_right_edge - \
-            self.ds.domain_left_edge)
-        N = 512
-        n_contours = 5
-        cmap = 'algae'
-        field = 'Density'
-        mi, ma = self.ds.all_data().quantities['Extrema'](field)[0]
-        mi, ma = na.log10(mi), na.log10(ma)
-        contour_width = (ma - mi) / 100.
-        L = na.array([1.] * 3)
-        tf = ColorTransferFunction((mi - 2, ma + 2))
-        tf.add_layers(n_contours, w=contour_width,
-                      col_bounds=(mi * 1.001, ma * 0.999),
-                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
-        cam = self.ds.camera(c, L, W, (N, N), transfer_function=tf,
-            no_ghost=True)
-        image = cam.snapshot()
-        # image = cam.snapshot('test_rendering_%s.png'%field)
-        self.result = image
-
-    def compare(self, old_result):
-        # Compare the deltas; give a leeway of 1e-8
-        delta = na.nanmax(na.abs(self.result - old_result) /
-                                 (self.result + old_result))
-        if delta > 1e-9: raise VolumeRenderingInconsistent()

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -121,7 +121,6 @@
     derived_field
 
 from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     DatasetSeries, ImageArray, \
     particle_filter, add_particle_filter, \
     create_profile, Profile1D, Profile2D, Profile3D, \

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -116,7 +116,8 @@
 
     def make_spectrum(self, input_file, output_file="spectrum.h5",
                       line_list_file="lines.txt",
-                      use_peculiar_velocity=True, njobs="auto"):
+                      use_peculiar_velocity=True, 
+                      subgrid_resolution=10, njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -141,6 +142,17 @@
         use_peculiar_velocity : optional, bool
            if True, include line of sight velocity for shifting lines.
            Default: True
+        subgrid_resolution : optional, int
+           When a line is being added that is unresolved (i.e., its thermal
+           width is less than the spectral bin width), the Voigt profile of
+           the line is deposited into an array of virtual bins at higher
+           resolution.  The optical depth from these virtual bins is integrated
+           and then added to the coarser spectral bin.  The subgrid_resolution
+           value determines the ratio between the thermal width and the 
+           bin width of the virtual bins.  Increasing this value yields smaller
+           virtual bins, which increases accuracy, but is more expensive.
+           A value of 10 yields accuracy to the 4th significant digit.
+           Default: 10
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
@@ -182,7 +194,9 @@
             njobs = min(comm.size, len(self.line_list))
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
-                                    line_list_file is not None, njobs=njobs)
+                                    line_list_file is not None, 
+                                    subgrid_resolution=subgrid_resolution,
+                                    njobs=njobs)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
         self.flux_field = np.exp(-self.tau_field)
@@ -204,7 +218,7 @@
         Add continuum features to the spectrum.
         """
         # Only add continuum features down to tau of 1.e-4.
-        tau_min = 1.e-4
+        min_tau = 1.e-3
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
@@ -216,12 +230,12 @@
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
-                                     np.power((tau_min * continuum['normalization'] /
+                                     np.power((min_tau * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
             valid_continuua = np.where(((column_density /
-                                         continuum['normalization']) > tau_min) &
+                                         continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
@@ -235,98 +249,155 @@
             pbar.finish()
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
-                               save_line_list, njobs=-1):
+                               save_line_list, subgrid_resolution=10, njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
-        # Only make voigt profile for slice of spectrum that is 10 times the line width.
-        spectrum_bin_ratio = 5
-        # Widen wavelength window until optical depth reaches a max value at the ends.
-        max_tau = 0.001
+        # Widen wavelength window until optical depth falls below this tau 
+        # value at the ends to assure that the wings of a line have been 
+        # fully resolved.
+        min_tau = 1e-3
 
+        # step through each ionic transition (e.g. HI, HII, MgII) specified
+        # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+
             # redshift_eff field combines cosmological and velocity redshifts
+            # so delta_lambda gives the offset in angstroms from the rest frame
+            # wavelength to the observed wavelength of the transition 
             if use_peculiar_velocity:
                 delta_lambda = line['wavelength'] * field_data['redshift_eff']
             else:
                 delta_lambda = line['wavelength'] * field_data['redshift']
+            # lambda_obs is central wavelength of line after redshift
+            lambda_obs = line['wavelength'] + delta_lambda
+            # bin index in lambda_bins of central wavelength of line after z
+            center_index = np.digitize(lambda_obs, self.lambda_bins)
+
+            # thermal broadening b parameter
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                   field_data['temperature']) /
                                   line['atomic_mass'])
-            center_bins = np.digitize((delta_lambda + line['wavelength']),
-                                      self.lambda_bins)
 
-            # ratio of line width to bin width
-            width_ratio = ((line['wavelength'] + delta_lambda) * \
-                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d
+            # the actual thermal width of the lines
+            thermal_width = (lambda_obs * thermal_b / 
+                             speed_of_light_cgs).convert_to_units("angstrom")
 
-            if (width_ratio < 1.0).any():
-                mylog.warn(("%d out of %d line components are unresolved, " +
-                            "consider increasing spectral resolution.") %
-                           ((width_ratio < 1.0).sum(), width_ratio.size))
+            # Sanitize units for faster runtime of the tau_profile machinery.
+            lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
+            lambda_1 = lambda_obs.d # line's observed frame; angstroms
+            cdens = column_density.in_units("cm**-2").d # cm**-2
+            thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
+            dlambda = delta_lambda.d  # lambda offset; angstroms
+            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
 
-            # do voigt profiles for a subset of the full spectrum
-            left_index  = (center_bins -
-                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
-            right_index = (center_bins +
-                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
+            # When we actually deposit the voigt profile, sometimes we will
+            # have underresolved lines (ie lines with smaller widths than
+            # the spectral bin size).  Here, we create virtual bins small
+            # enough in width to well resolve each line, deposit the voigt 
+            # profile into them, then numerically integrate their tau values
+            # and sum them to redeposit them into the actual spectral bins.
 
-            # loop over all lines wider than the bin width
-            valid_lines = np.where((width_ratio >= 1.0) &
-                                   (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
-                            valid_lines.size)
+            # virtual bins (vbins) will be:
+            # 1) <= the bin_width; assures at least as good as spectral bins
+            # 2) <= 1/10th the thermal width; assures resolving voigt profiles
+            #   (actually 1/subgrid_resolution value, default is 1/10)
+            # 3) a bin width will be divisible by vbin_width times a power of 
+            #    10; this will assure we don't get spikes in the deposited
+            #    spectra from uneven numbers of vbins per bin
+            resolution = thermal_width / self.bin_width 
+            vbin_width = self.bin_width / \
+                         10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            vbin_width = vbin_width.in_units('angstrom').d
 
-            # Sanitize units here
-            column_density.convert_to_units("cm ** -2")
-            lbins = self.lambda_bins.d  # Angstroms
-            lambda_0 = line['wavelength'].d  # Angstroms
-            v_doppler = thermal_b.in_cgs().d  # cm / s
-            cdens = column_density.d
-            dlambda = delta_lambda.d  # Angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d
+            # the virtual window into which the line is deposited initially 
+            # spans a region of 5 thermal_widths, but this may expand
+            n_vbins = np.ceil(5*thermal_width.d/vbin_width)
+            vbin_window_width = n_vbins*vbin_width
 
-            for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
-                my_bin_ratio = spectrum_bin_ratio
+            if (thermal_width < self.bin_width).any():
+                mylog.info(("%d out of %d line components will be " + \
+                            "deposited as unresolved lines.") %
+                           ((thermal_width < self.bin_width).sum(), 
+                            thermal_width.size))
+
+            valid_lines = np.arange(len(thermal_width))
+            pbar = get_pbar("Adding line - %s [%f A]: " % \
+                            (line['label'], line['wavelength']),
+                            thermal_width.size)
+
+            # for a given transition, step through each location in the 
+            # observed spectrum where it occurs and deposit a voigt profile
+            for i in parallel_objects(valid_lines, njobs=-1):
+                my_vbin_window_width = vbin_window_width[i]
+                my_n_vbins = n_vbins[i]
+                my_vbin_width = vbin_width[i]
 
                 while True:
-                    lambda_bins, line_tau = \
+                    vbins = \
+                        np.linspace(lambda_1[i]-my_vbin_window_width/2.,
+                                    lambda_1[i]+my_vbin_window_width/2., 
+                                    my_n_vbins, endpoint=False)
+
+                    vbins, vtau = \
                         tau_profile(
-                            lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
-                            cdens[lixel], delta_lambda=dlambda[lixel],
-                            lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+                            lambda_0, line['f_value'], line['gamma'], thermb[i],
+                            cdens[i], delta_lambda=dlambda[i],
+                            lambda_bins=vbins)
 
-                    # Widen wavelength window until optical depth reaches a max value at the ends.
-                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
-                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
+                    # If tau has not dropped below the min_tau threshold at
+                    # the edges (i.e., the wings), then widen the wavelength
+                    # window and repeat the process.
+                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
-                    my_bin_ratio *= 2
-                    left_index[lixel]  = (center_bins[lixel] -
-                                          my_bin_ratio *
-                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
-                    right_index[lixel] = (center_bins[lixel] +
-                                          my_bin_ratio *
-                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+                    my_vbin_window_width *= 2
+                    my_n_vbins *= 2
 
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                # identify the extrema of the vbin_window so as to speed
+                # up searching over the entire lambda_bins array
+                bins_from_center = np.ceil((my_vbin_window_width/2.) / \
+                                           self.bin_width.d) + 1
+                left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
+                right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
+                window_width = right_index - left_index
+
+                # run digitize to identify which vbins are deposited into which
+                # global lambda bins.
+                # shift global lambda bins over by half a bin width; 
+                # this has the effect of assuring np.digitize will place 
+                # the vbins in the closest bin center.
+                binned = np.digitize(vbins, 
+                                     self.lambda_bins[left_index:right_index] \
+                                     + (0.5 * self.bin_width))
+
+                # numerically integrate the virtual bins to calculate a
+                # virtual equivalent width; then sum the virtual equivalent
+                # widths and deposit into each spectral bin
+                vEW = vtau * my_vbin_width
+                EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
+                EW = np.array(EW)/self.bin_width.d
+                self.tau_field[left_index:right_index] += EW
+
                 if save_line_list and line['label_threshold'] is not None and \
-                        cdens[lixel] >= line['label_threshold']:
+                        cdens[i] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = vlos[lixel]
+                        peculiar_velocity = vlos[i]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
-                                                    'wavelength': (lambda_0 + dlambda[lixel]),
-                                                    'column_density': column_density[lixel],
-                                                    'b_thermal': thermal_b[lixel],
-                                                    'redshift': field_data['redshift'][lixel],
+                                                    'wavelength': (lambda_0 + dlambda[i]),
+                                                    'column_density': column_density[i],
+                                                    'b_thermal': thermal_b[i],
+                                                    'redshift': field_data['redshift'][i],
                                                     'v_pec': peculiar_velocity})
                 pbar.update(i)
             pbar.finish()
 
-            del column_density, delta_lambda, thermal_b, \
-                center_bins, width_ratio, left_index, right_index
+            del column_density, delta_lambda, lambda_obs, center_index, \
+                thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
+                vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
+                valid_lines, vbins, vtau, vEW
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")
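
The scheme described in the comments above can be summarized outside of yt in a
few lines.  This is a minimal sketch, assuming a scalar thermal width, a 1D
numpy array of coarse bin centers, and a caller-supplied profile function; the
name deposit_line and its signature are illustrative only, not part of the
changeset:

import numpy as np

def deposit_line(profile, lambda_bins, bin_width, thermal_width,
                 subgrid_resolution=10):
    # Pick a virtual bin width no larger than the spectral bin width and at
    # least subgrid_resolution times finer than the thermal width, rounded so
    # that bin_width / vbin_width is an exact power of ten.
    resolution = thermal_width / bin_width
    vbin_width = bin_width / 10**np.ceil(
        np.log10(subgrid_resolution / resolution)).clip(0, np.inf)

    # Evaluate the optical depth profile on the fine (virtual) grid.
    vbins = np.arange(lambda_bins[0], lambda_bins[-1] + bin_width, vbin_width)
    vtau = profile(vbins)

    # Assign each virtual bin to the nearest coarse bin center, sum the
    # virtual equivalent widths, and convert back to an optical depth.
    binned = np.digitize(vbins, lambda_bins + 0.5 * bin_width)
    vEW = vtau * vbin_width
    EW = np.array([vEW[binned == j].sum() for j in range(lambda_bins.size)])
    return EW / bin_width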

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -281,8 +281,6 @@
         errSq=sum(dif**2)
 
         if any(linesP[:,1]==speciesDict['init_b']):
-         #   linesP = prevLinesP
-
             flag = True
             break
             

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -12,24 +12,33 @@
 
 import numpy as np
 from yt.testing import \
-    assert_allclose_units, requires_file, requires_module
+    assert_allclose_units, requires_file, requires_module, \
+    assert_almost_equal, assert_array_almost_equal
 from yt.analysis_modules.absorption_spectrum.absorption_line import \
     voigt_old, voigt_scipy
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.cosmological_observation.api import LightRay
+from yt.config import ytcfg
 import tempfile
 import os
 import shutil
+from yt.utilities.on_demand_imports import \
+    _h5py as h5
+
+test_dir = ytcfg.get("yt", "test_data_dir")
 
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
-
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+HI_SPECTRUM_COSMO = "absorption_spectrum_data/enzo_lyman_alpha_cosmo_spec.h5"
+HI_SPECTRUM_COSMO_FILE = os.path.join(test_dir, HI_SPECTRUM_COSMO)
+HI_SPECTRUM = "absorption_spectrum_data/enzo_lyman_alpha_spec.h5"
+HI_SPECTRUM_FILE = os.path.join(test_dir, HI_SPECTRUM)
 
 @requires_file(COSMO_PLUS)
-def test_absorption_spectrum():
+@requires_file(HI_SPECTRUM_COSMO)
+def test_absorption_spectrum_cosmo():
     """
-    This test is simply following the description in the docs for how to
-    generate an absorption spectrum from a cosmological light ray for one
-    of the sample datasets
+    This test generates an absorption spectrum from a cosmological light ray
     """
 
     # Set up in a temp dir
@@ -37,7 +46,7 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
@@ -65,22 +74,30 @@
     sp.add_continuum(my_label, field, wavelength, normalization, index)
 
     wavelength, flux = sp.make_spectrum('lightray.h5',
-                                        output_file='spectrum.txt',
+                                        output_file='spectrum.h5',
                                         line_list_file='lines.txt',
                                         use_peculiar_velocity=True)
 
+    # load just-generated hdf5 file of spectral data (for consistency)
+    f_new = h5.File('spectrum.h5', 'r')
+
+    # load standard data for comparison
+    f_old = h5.File(HI_SPECTRUM_COSMO_FILE, 'r')
+
+    # compare between standard data and current data for each array saved 
+    # (wavelength, flux, tau)
+    for key in f_old.keys():
+        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
-
-@requires_file(COSMO_PLUS)
-@requires_module("astropy")
-def test_absorption_spectrum_fits():
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_file(HI_SPECTRUM)
+def test_absorption_spectrum_non_cosmo():
     """
-    This test is simply following the description in the docs for how to
-    generate an absorption spectrum from a cosmological light ray for one
-    of the sample datasets.  Outputs to fits file if astropy is installed.
+    This test generates an absorption spectrum from a non-cosmological light ray
     """
 
     # Set up in a temp dir
@@ -88,11 +105,114 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+    lr = LightRay(COSMO_PLUS_SINGLE)
 
-    lr.make_light_ray(seed=1234567,
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    f_new = h5.File('spectrum.h5', 'r')
+
+    # load standard data for comparison
+    f_old = h5.File(HI_SPECTRUM_FILE, 'r')
+
+    # compare between standard data and current data for each array saved 
+    # (wavelength, flux, tau)
+    for key in f_old.keys():
+        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_equivalent_width_conserved():
+    """
+    This tests that the equivalent width of the optical depth is conserved 
+    regardless of the bin width employed in wavelength space.
+    Unresolved lines should still deposit optical depth into the spectrum.
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wave = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    lambda_min= 1200
+    lambda_max= 1300
+    lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
+    total_tau = []
+
+    for lambda_bin_width in lambda_bin_widths:
+        n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
+        sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max, 
+                                n_lambda=n_lambda)
+        sp.add_line(my_label, field, wave, f_value, gamma, mass)
+        wavelength, flux = sp.make_spectrum('lightray.h5')
+        total_tau.append((lambda_bin_width * sp.tau_field).sum())
+        
+    # assure that the total tau values are all within 1e-5 of each other
+    for tau in total_tau:
+        assert_almost_equal(tau, total_tau[0], 5)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_module("astropy")
+def test_absorption_spectrum_fits():
+    """
+    This test generates an absorption spectrum and saves it as a fits file.
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
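
The property exercised by test_equivalent_width_conserved can also be checked
with plain numpy.  The following standalone snippet (not part of the changeset;
the profile shape and numbers are arbitrary) integrates the same Gaussian
optical depth on grids of different spacing and confirms the bin-width-weighted
sum is unchanged, which is the analogue of summing
lambda_bin_width * sp.tau_field in the test:

import numpy as np

def total_tau(bin_width, center=1215.67, sigma=0.1, amplitude=5.0):
    # Riemann-sum "equivalent width" of a Gaussian tau profile.
    lam = np.arange(1200.0, 1300.0, bin_width)
    tau = amplitude * np.exp(-0.5 * ((lam - center) / sigma)**2)
    return (tau * bin_width).sum()

totals = [total_tau(w) for w in (1e-3, 1e-2, 1e-1)]
assert np.allclose(totals, totals[0], rtol=1e-3)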

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -64,12 +64,12 @@
         Default: None
     near_redshift : optional, float
         The near (lowest) redshift for a light ray containing multiple
-        datasets.  Do not use is making a light ray from a single
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
         The far (highest) redshift for a light ray containing multiple
-        datasets.  Do not use is making a light ray from a single
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -168,9 +168,9 @@
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
-            if len(self.light_ray_solution) > 1:
-                raise RuntimeError("LightRay Error: cannot specify start_position " + \
-                                   "if light ray uses more than one dataset.")
+            if self.near_redshift is not None or self.far_redshift is not None:
+                raise RuntimeError("LightRay Error: cannot specify both " + \
+                                   "start_position and a redshift range.")
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -30,10 +30,10 @@
     get_rotation_matrix, \
     periodic_dist
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2, \
     TINY
-from yt.utilities.physical_ratios import \
-    rho_crit_g_cm3_h2
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -14,9 +14,7 @@
     XSpecThermalModel, XSpecAbsorbModel, \
     ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.utilities.answer_testing.framework import \
-    requires_module
-from yt.testing import requires_file
+from yt.testing import requires_file, requires_module
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams
@@ -43,7 +41,7 @@
 @requires_file(rmf)
 def test_beta_model():
     import xspec
-    
+
     xspec.Fit.statMethod = "cstat"
     xspec.Xset.addModelString("APECTHERMAL","yes")
     xspec.Fit.query = "yes"
@@ -119,7 +117,7 @@
     norm_sim = float(norm_sim.in_cgs())
 
     events = photons.project_photons("z", responses=[arf,rmf],
-                                     absorb_model=abs_model, 
+                                     absorb_model=abs_model,
                                      convolve_energies=True, prng=my_prng)
     events.write_spectrum("beta_model_evt.pi", clobber=True)
 
@@ -143,7 +141,7 @@
     xspec.Fit.renorm()
     xspec.Fit.nIterations = 100
     xspec.Fit.perform()
-    
+
     kT  = m.bapec.kT.values[0]
     mu = (m.bapec.Redshift.values[0]-redshift)*ckms
     Z = m.bapec.Abundanc.values[0]
@@ -156,10 +154,8 @@
     dsigma = m.bapec.Velocity.sigma
     dnorm = m.bapec.norm.sigma
 
-    print kT, kT_sim, dkT
-
     assert np.abs(mu-mu_sim) < dmu
-    assert np.abs(kT-kT_sim) < dkT    
+    assert np.abs(kT-kT_sim) < dkT
     assert np.abs(Z-Z_sim) < dZ
     assert np.abs(sigma-sigma_sim) < dsigma
     assert np.abs(norm-norm_sim) < dnorm

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -17,7 +17,6 @@
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-import numpy as np
 from numpy.random import RandomState
 import os
 

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -1,9 +1,8 @@
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, XSpecThermalModel
-import numpy as np
 from yt.testing import requires_module, fake_random_ds
 from yt.utilities.answer_testing.framework import \
-    GenericArrayTest, data_dir_load
+    GenericArrayTest
 from yt.config import ytcfg
 
 def setup():

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.units.yt_array import YTQuantity
-from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.visualization.volume_rendering.api import off_axis_projection
+from yt.funcs import fix_axis, mylog, get_pbar
+from yt.visualization.volume_rendering.off_axis_projection import \
+    off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_root_only
+    communication_system, parallel_root_only
 from yt import units
 from yt.utilities.on_demand_imports import _astropy
 

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -12,8 +12,17 @@
 
 from yt.frontends.stream.api import load_uniform_grid
 from yt.funcs import get_pbar
-from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
-    mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.utilities.physical_ratios import \
+    cm_per_kpc, \
+    K_per_keV, \
+    cm_per_km
+from yt.utilities.physical_constants import \
+    mh, \
+    kboltz, \
+    Tcmb, \
+    hcgs, \
+    clight, \
+    sigma_thompson
 from yt.testing import requires_module, assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load, GenericImageTest

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -26,7 +26,9 @@
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
-import math, inspect, time
+import math
+import inspect
+import time
 from collections import defaultdict
 
 sep = 12

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,7 +16,6 @@
 #-----------------------------------------------------------------------------
 
 import os
-import types
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -13,16 +13,19 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import os, os.path, types
+import os
 
 # Named imports
 from yt.extern.six import string_types
-from yt.funcs import *
 from yt.config import ytcfg
+from yt.funcs import mylog
 from yt.utilities.parameter_file_storage import \
     output_type_registry, \
     simulation_time_series_registry, \
     EnzoRunDatabase
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
+    YTSimulationNotIdentified
 from yt.utilities.hierarchy_inspection import find_lowest_subclasses
 
 def load(*args ,**kwargs):

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -15,7 +15,6 @@
 
 import inspect
 
-from yt.funcs import *
 from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
@@ -23,7 +22,7 @@
 class RegisteredTask(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
-        if hasattr(cls, "skip") and cls.skip == False:
+        if hasattr(cls, "skip") and cls.skip is False:
             return
         analysis_task_registry[cls.__name__] = cls
 

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,6 @@
     particle_handler_registry
 
 from .profiles import \
-    YTEmptyProfileData, \
-    BinnedProfile, \
-    BinnedProfile1D, \
-    BinnedProfile2D, \
-    BinnedProfile3D, \
     create_profile, \
     Profile1D, \
     Profile2D, \

diff -r d0b23c46ae6596ddd42a86e9b4ca70c945e27e9e -r 8628bface7a435be45095e546f91d795f7cedfc6 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -15,21 +15,29 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import math
-import weakref
-import itertools
-import shelve
 from functools import wraps
 import fileinput
 from re import finditer
+from tempfile import TemporaryFile
 import os
+import zipfile
 
 from yt.config import ytcfg
-from yt.funcs import *
-from yt.utilities.logger import ytLogger
-from .data_containers import \
-    YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, \
-    restore_field_information_state, YTFieldData
+from yt.data_objects.data_containers import \
+    YTSelectionContainer1D, \
+    YTSelectionContainer2D, \
+    YTSelectionContainer3D, \
+    YTFieldData
+from yt.funcs import \
+    ensure_list, \
+    mylog, \
+    get_memory_usage, \
+    iterable, \
+    only_on_root
+from yt.utilities.exceptions import \
+    YTParticleDepositionNotImplemented, \
+    YTNoAPIKey, \
+    YTTooManyVertices
 from yt.utilities.lib.QuadTree import \
     QuadTree
 from yt.utilities.lib.Interpolators import \
@@ -38,8 +46,6 @@
     fill_region, fill_region_float
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.data_point_utilities import CombineGrids,\
-    DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -47,16 +53,10 @@
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
+from yt.fields.field_exceptions import \
+    NeedsOriginalGrid
 from yt.frontends.stream.api import load_uniform_grid
 
-from yt.fields.field_exceptions import \
-    NeedsGridType,\
-    NeedsOriginalGrid,\
-    NeedsDataField,\
-    NeedsProperty,\
-    NeedsParameter
-from yt.fields.derived_field import \
-    TranslationFunc
 
 class YTStreamline(YTSelectionContainer1D):
     """
@@ -369,14 +369,13 @@
         data['pdy'] = self.ds.arr(pdy, code_length)
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
-        fd = data['fields']
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
-            finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             input_units = self._projected_units[field]
             self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
-        for i in list(data.keys()): self[i] = data.pop(i)
+        for i in list(data.keys()):
+            self[i] = data.pop(i)
         mylog.info("Projection completed")
         self.tree = tree
 
@@ -970,7 +969,6 @@
         ls.current_level += 1
         ls.current_dx = ls.base_dx / \
             self.ds.relative_refinement(0, ls.current_level)
-        LL = ls.left_edge - ls.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex, end_index, ls.current_dims = \
             self._minimal_box(ls.current_dx)
@@ -1540,11 +1538,8 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        import io
-        from sys import version
         if plot_index is None:
             plot_index = 0
-            vmax=0
         ftype = [("cind", "uint8"), ("emit", "float")]
         vtype = [("x","float"),("y","float"), ("z","float")]
         #(0) formulate vertices
@@ -1583,7 +1578,7 @@
                 tmp = self.vertices[i,:]
                 np.divide(tmp, dist_fac, tmp)
                 v[ax][:] = tmp
-        return  v, lut, transparency, emiss, f['cind']
+        return v, lut, transparency, emiss, f['cind']
 
 
     def export_ply(self, filename, bounds = None, color_field = None,
@@ -1765,8 +1760,6 @@
         api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
         if api_key in (None, "None"):
             raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
-        import zipfile, json
-        from tempfile import TemporaryFile
 
         ply_file = TemporaryFile()
         self.export_ply(ply_file, bounds, color_field, color_map, color_log,

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/300cb06d1d8d/
Changeset:   300cb06d1d8d
Branch:      yt
User:        MatthewTurk
Date:        2015-11-19 23:28:18+00:00
Summary:     Fixing import and going back to almost instead of rel.
Affected #:  1 file

diff -r 8628bface7a435be45095e546f91d795f7cedfc6 -r 300cb06d1d8d4ff472b3827f0fbcdd75c6f7178d yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -3,7 +3,8 @@
 from yt.frontends.stream.data_structures import load_particles
 from yt.testing import \
     fake_random_ds, \
-    assert_equal
+    assert_equal, \
+    assert_almost_equal
 
 def setup():
     from yt.config import ytcfg
@@ -118,4 +119,4 @@
                     2**ref_level * ds.domain_dimensions)
             ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
                     2**ref_level * ds.domain_dimensions)
-            yield assert_rel_equal, cg["density"], ag["density"], 7
+            yield assert_almost_equal, cg["density"], ag["density"]


https://bitbucket.org/yt_analysis/yt/commits/5d10327e9828/
Changeset:   5d10327e9828
Branch:      yt
User:        MatthewTurk
Date:        2015-11-23 21:16:19+00:00
Summary:     Typo, thanks Andrew!
Affected #:  1 file

diff -r 300cb06d1d8d4ff472b3827f0fbcdd75c6f7178d -r 5d10327e9828ae1b6c84b56b415000bd0c8aeedd doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -98,7 +98,7 @@
 
 .. warning::
 
-   One thing to keep in mind with access data in this way is that it is
+   One thing to keep in mind with accessing data in this way is that it is
    *persistent*.  It is loaded into memory, and then retained until the dataset
    is deleted or garbage collected.
 


https://bitbucket.org/yt_analysis/yt/commits/eb45e6e815fb/
Changeset:   eb45e6e815fb
Branch:      yt
User:        atmyers
Date:        2015-11-23 21:23:01+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1763)

Numpy-like operations
Affected #:  12 files

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -63,6 +63,117 @@
    for i in range(sp["temperature"].size):
        print "(%f,  %f,  %f)    %f" % (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i])
 
+.. _quickly-selecting-data:
+
+Slicing Syntax for Selecting Data
+---------------------------------
+
+yt provides a mechanism for easily selecting data while doing interactive work
+on the command line.  This allows for region selection based on the full domain
+of the object.  Selecting in this manner is exposed through a slice-like
+syntax.  This functionality is exposed through the ``RegionExpression``
+object, which is available as the ``r`` attribute of a ``Dataset`` object.
+
+Getting All The Data
+^^^^^^^^^^^^^^^^^^^^
+
+The ``.r`` attribute serves as a persistent means of accessing the full data
+from a dataset.  You can access this shorthand operation by querying any field
+on the ``.r`` object, like so:
+
+.. code-block:: python
+
+   ds = yt.load("RedshiftOutput0005")
+   rho = ds.r["density"]
+
+This will return a *flattened* array of data.  The region expression object
+(``r``) doesn't have any derived quantities on it.  This is completely
+equivalent to this set of statements:
+
+.. code-block:: python
+
+   ds = yt.load("RedshiftOutput0005")
+   dd = ds.all_data()
+   rho = dd["density"]
+
+.. warning::
+
+   One thing to keep in mind with accessing data in this way is that it is
+   *persistent*.  It is loaded into memory, and then retained until the dataset
+   is deleted or garbage collected.
+
+Selecting Multiresolution Regions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To select rectilinear regions, where the data is selected the same way that it
+is selected in a :ref:`region-reference`, you can use slice-like syntax,
+supplying a start and stop but no step argument.  All three components of the
+slice must be specified, one per axis, in simulation order (if your data is
+ordered z, y, x, for instance, the slices are given in z, y, x order).
+
+Each slice bound can be a plain value or, optionally, a (value, unit) tuple.
+Values are interpreted with respect to the ``domain_left_edge`` of the dataset.
+So, for instance, you could specify it like so::
+
+   ds.r[(100, 'kpc'):(200,'kpc'),:,:]
+
+This would return a region that included everything between 100 kpc from the
+left edge of the dataset to 200 kpc from the left edge of the dataset in the
+first dimension, and which spans the entire dataset in the second and third
+dimensions.  By default, if the units are unspecified, they are in the "native"
+code units of the dataset.
+
+This works in all types of datasets as well.  For instance, if you have a
+geographic dataset (which is usually ordered latitude, longitude, altitude) you
+can easily select one hemisphere with a region selection::
+
+   ds.r[:,-180:0,:]
+
+Selecting Fixed Resolution Regions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+yt also provides functionality for selecting regions that have been turned into
+voxels.  This returns an :ref:`arbitrary-grid` object.  It can be created by
+specifying a complex slice "step", where the start and stop follow the same
+rules as above.  This is similar to how the numpy ``mgrid`` operation works.
+For instance, this code block will generate a grid covering the full domain,
+but resampled to 21x35x100 cells::
+
+  region = ds.r[::21j, ::35j, ::100j]
+
+The left and right edges, as above, can be specified to provide bounds as well.
+For instance, to select a 10 meter cube, with 24 cells in each dimension, we
+could supply::
+
+  region = ds.r[(20,'m'):(30,'m'):24j, (30,'m'):(40,'m'):24j,
+                (7,'m'):(17,'m'):24j]
+
+This can select both particles and mesh fields.  Mesh fields will be 3D arrays,
+and generated through volume-weighted overlap calculations.
+
+Selecting Slices
+^^^^^^^^^^^^^^^^
+
+If one dimension is specified as a single value, that will be the dimension
+along which a slice is made.  This provides a simple means of generating a
+slice from a subset of the data.  For instance, to create a slice of a dataset,
+you can very simply specify the full domain along two axes::
+
+   sl = ds.r[:,:,0.25]
+
+This can also be very easily plotted::
+
+   sl = ds.r[:,:,0.25]
+   sl.plot()
+
+This accepts arguments the same way::
+
+   sl = ds.r[(20.1, 'km'):(31.0, 'km'), (504.143,'m'):(1000.0,'m'),
+             (900.1, 'm')]
+   sl.plot()
+
 .. _available-objects:
 
 Available Objects
@@ -144,6 +255,8 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
+.. _region-reference:
+
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
@@ -227,15 +340,15 @@
       interpolates as necessary from coarse regions to fine.  See 
       :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
-**Fixed-Resolution Region for Particle Deposition** 
+**Fixed-Resolution Region**
     | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
     | When particles are deposited on to mesh fields, they use the existing
       mesh structure, but this may have too much or too little resolution
       relative to the particle locations (or it may not exist at all!).  An
       `arbitrary_grid` provides a means for generating a new independent mesh 
-      structure for particle deposition.  See :ref:`arbitrary-grid` for more 
-      information.
+      structure for particle deposition and simple mesh field interpolation.
+      See :ref:`arbitrary-grid` for more information.
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`
@@ -279,6 +392,82 @@
    sp = ds.sphere('c', (10, 'kpc'))
    print sp.quantities.angular_momentum_vector()
 
+Quickly Processing Data
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Most data objects now have multiple numpy-like methods that allow you to
+quickly process data.  More of these methods will be added over time and
+documented in this list.  Most, if not all, of them map to other yt operations
+and are designed as syntactic sugar to simplify otherwise somewhat obtuse
+pipelines.
+
+These operations are parallelized.
+
+You can compute the extrema of a field by using the ``max`` or ``min``
+functions.  This will cache the extrema in between, so calling ``min`` right
+after ``max`` will be considerably faster.  Here is an example::
+
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  reg = ds.r[0.3:0.6, 0.2:0.4, 0.9:0.95]
+  min_rho = reg.min("density")
+  max_rho = reg.max("density")
+
+This is equivalent to::
+
+  min_rho, max_rho = reg.quantities.extrema("density")
+
+The ``max`` operation can also compute the maximum intensity projection::
+
+  proj = reg.max("density", axis="x")
+  proj.plot()
+
+This is equivalent to::
+
+  proj = ds.proj("density", "x", data_source=reg, method="mip")
+  proj.plot()
+
+The ``min`` operator does not do this, however, as a minimum intensity
+projection is not currently implemented.
+
+You can also compute the ``mean`` value, which accepts a field, an axis, and a
+weight.  If the axis is not specified, it will return the average value of
+the specified field, weighted by the weight argument.  The weight argument
+defaults to ``ones``, which performs an arithmetic average.  For instance::
+
+  mean_rho = reg.mean("density")
+  rho_by_vol = reg.mean("density", weight="cell_volume")
+
+This is equivalent to::
+
+  mean_rho = reg.quantities.weighted_average_quantity("density", "ones")
+  rho_by_vol = reg.quantities.weighted_average_quantity("density",
+                                                        "cell_volume")
+
+If an axis is provided, it will project along that axis and return it to you::
+
+  temp_proj = reg.mean("temperature", axis="y", weight="density")
+  temp_proj.plot()
+
+The ``sum`` function will add all the values in the data object.  It accepts a
+field and, optionally, an axis.  If the axis is left unspecified, it will sum
+the values in the object::
+
+  vol = reg.sum("cell_volume")
+
+If the axis is specified, it will compute a projection using the method ``sum``
+(which does *not* take into account varying path length!) and return that to
+you::
+
+  cell_count = reg.sum("ones", axis="z")
+  cell_count.plot()
+
+To compute a projection where the path length *is* taken into account, you can
+use the ``integrate`` function::
+
+  proj = reg.integrate("density", "x")
+
+All of these projections use the calling data object as their ``data_source``.
+
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -350,8 +539,8 @@
 
 .. _arbitrary-grid:
 
-Arbitrary Grids Objects for Particle Deposition
------------------------------------------------
+Arbitrary Grid Objects
+-----------------------
 
 The covering grid and smoothed covering grid objects mandate that they be
 exactly aligned with the mesh.  This is a
@@ -379,6 +568,13 @@
 While these cannot yet be used as input to projections or slices, slices and
 projections can be taken of the data in them and visualized by hand.
 
+These objects, as of yt 3.3, are now also able to "voxelize" mesh fields.  This
+means that you can query the "density" field and it will return the density
+field as deposited, identically to how it would be deposited in a fixed
+resolution buffer.  Note that this means that contributions from misaligned or
+partially-overlapping cells are added in a volume-weighted way, which makes it
+inappropriate for some types of analysis.
+
 .. _boolean_data_objects:
 
 Combining Objects: Boolean Data Objects
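
Taken together, the numpy-like methods documented in this file compose into
short pipelines.  A usage sketch, assuming the IsolatedGalaxy sample dataset
referenced in the examples above is available:

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
reg = ds.r[0.3:0.6, 0.2:0.4, 0.9:0.95]

min_rho, max_rho = reg.min("density"), reg.max("density")  # cached extrema
mean_rho = reg.mean("density", weight="cell_volume")       # weighted average
total_vol = reg.sum("cell_volume")                         # plain sum
column = reg.integrate("density", axis="z")                # path-length projection
column.plot()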

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -54,17 +54,28 @@
                     valid_file.append(False)
         else:
             valid_file.append(False)
+    types_to_check = output_type_registry
     if not any(valid_file):
         try:
             from yt.data_objects.time_series import DatasetSeries
             ts = DatasetSeries.from_filenames(*args, **kwargs)
             return ts
-        except YTOutputNotIdentified:
+        except (TypeError, YTOutputNotIdentified):
             pass
-        mylog.error("None of the arguments provided to load() is a valid file")
-        mylog.error("Please check that you have used a correct path")
-        raise YTOutputNotIdentified(args, kwargs)
-    for n, c in output_type_registry.items():
+        # We check if either the first argument is a dict or list, in which
+        # case we try identifying candidates.
+        if len(args) > 0 and isinstance(args[0], (list, dict)):
+            # This fixes issues where it is assumed the first argument is a
+            # file
+            types_to_check = dict((n, v) for n, v in
+                    output_type_registry.items() if n.startswith("stream_"))
+            # Better way to do this is to override the output_type_registry
+        else:
+            mylog.error("None of the arguments provided to load() is a valid file")
+            mylog.error("Please check that you have used a correct path")
+            raise YTOutputNotIdentified(args, kwargs)
+    for n, c in types_to_check.items():
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)
 

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -43,7 +43,7 @@
 from yt.utilities.lib.Interpolators import \
     ghost_zone_interpolate
 from yt.utilities.lib.misc_utilities import \
-    fill_region
+    fill_region, fill_region_float
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.minimal_representation import \
@@ -454,6 +454,25 @@
         pw = self._get_pw(fields, center, width, origin, 'Projection')
         return pw
 
+    def plot(self, fields=None):
+        if hasattr(self.data_source, "left_edge") and \
+            hasattr(self.data_source, "right_edge"):
+            left_edge = self.data_source.left_edge
+            right_edge = self.data_source.right_edge
+            center = (left_edge + right_edge)/2.0
+            width = right_edge - left_edge
+            xax = self.ds.coordinates.x_axis[self.axis]
+            yax = self.ds.coordinates.y_axis[self.axis]
+            lx, rx = left_edge[xax], right_edge[xax]
+            ly, ry = left_edge[yax], right_edge[yax]
+            width = (rx-lx), (ry-ly)
+        else:
+            width = self.ds.domain_width
+            center = self.ds.domain_center
+        pw = self._get_pw(fields, center, width, 'native', 'Projection')
+        pw.show()
+        return pw
+
 class YTCoveringGrid(YTSelectionContainer3D):
     """A 3D region with all data extracted to a single, specified
     resolution.  Left edge should align with a cell boundary, but
@@ -783,7 +802,19 @@
         self._setup_data_source()
 
     def _fill_fields(self, fields):
-        raise NotImplementedError
+        fields = [f for f in fields if f not in self.field_data]
+        if len(fields) == 0: return
+        assert(len(fields) == 1)
+        field = fields[0]
+        dest = np.zeros(self.ActiveDimensions, dtype="float64")
+        for chunk in self._data_source.chunks(fields, "io"):
+            fill_region_float(chunk.fcoords, chunk.fwidth, chunk[field],
+                              self.left_edge, self.right_edge, dest, 1,
+                              self.ds.domain_width,
+                              int(any(self.ds.periodicity)))
+        fi = self.ds._get_field_info(field)
+        self[field] = self.ds.arr(dest, fi.units)
+        
 
 class LevelState(object):
     current_dx = None
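
The _fill_fields implementation above hands the actual volume-weighted overlap
deposition to fill_region_float.  The one-dimensional toy below (purely
illustrative, not the library routine) shows the underlying idea: each source
cell's value contributes to a target bin in proportion to the fraction of that
bin it overlaps:

import numpy as np

def deposit_1d(cell_edges, cell_values, grid_edges):
    # Length-weighted overlap of source cells onto target bins.
    out = np.zeros(len(grid_edges) - 1)
    for (cl, cr), val in zip(cell_edges, cell_values):
        for j, (gl, gr) in enumerate(zip(grid_edges[:-1], grid_edges[1:])):
            overlap = max(0.0, min(cr, gr) - max(cl, gl))
            out[j] += val * overlap / (gr - gl)
    return out

# Two coarse cells deposited onto four finer bins gives [1., 1., 3., 3.]
print(deposit_1d([(0.0, 0.5), (0.5, 1.0)], [1.0, 3.0],
                 np.linspace(0.0, 1.0, 5)))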

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -611,6 +611,216 @@
         else:
             data_collection.append(gdata)
 
+    # Numpy-like Operations
+    def argmax(self, field, axis=None):
+        raise NotImplementedError
+
+    def argmin(self, field, axis=None):
+        raise NotImplementedError
+
+    def _compute_extrema(self, field):
+        if self._extrema_cache is None:
+            self._extrema_cache = {}
+        if field not in self._extrema_cache:
+            # Note we still need to call extrema for each field, as of right
+            # now
+            mi, ma = self.quantities.extrema(field)
+            self._extrema_cache[field] = (mi, ma)
+        return self._extrema_cache[field]
+
+    _extrema_cache = None
+    def max(self, field, axis=None):
+        r"""Compute the maximum of a field, optionally along an axis.
+
+        This will, in a parallel-aware fashion, compute the maximum of the
+        given field.  Supplying an axis will result in a return value of a
+        YTProjection, with method 'mip' for maximum intensity.  If the max has
+        already been requested, it will use the cached extrema value.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to maximize.
+        axis : string, optional
+            If supplied, the axis to project the maximum along.
+
+        Returns
+        -------
+        Either a scalar or a YTProjection.
+
+        Examples
+        --------
+
+        >>> max_temp = reg.max("temperature")
+        >>> max_temp_proj = reg.max("temperature", axis="x")
+        """
+        if axis is None:
+            rv = ()
+            fields = ensure_list(field)
+            for f in fields:
+                rv += (self._compute_extrema(f)[1],)
+            if len(fields) == 1:
+                return rv[0]
+            else:
+                return rv
+        elif axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self, method="mip")
+            return r
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+
+    def min(self, field, axis=None):
+        r"""Compute the minimum of a field.
+
+        This will, in a parallel-aware fashion, compute the minimum of the
+        given field.  Supplying an axis is not currently supported.  If the
+        extrema have already been requested, it will use the cached values.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to minimize.
+        axis : string, optional
+            If supplied, the axis to compute the minimum along.
+
+        Returns
+        -------
+        Scalar.
+
+        Examples
+        --------
+
+        >>> min_temp = reg.min("temperature")
+        """
+        if axis is None:
+            rv = ()
+            fields = ensure_list(field)
+            for f in fields:
+                rv += (self._compute_extrema(f)[0],)
+            if len(fields) == 1:
+                return rv[0]
+            else:
+                return rv
+        elif axis in self.ds.coordinates.axis_name:
+            raise NotImplementedError("Minimum intensity projection not"
+                                      " implemented.")
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+
+    def std(self, field, weight=None):
+        raise NotImplementedError
+
+    def ptp(self, field):
+        raise NotImplementedError
+
+    def hist(self, field, weight = None, bins = None):
+        raise NotImplementedError
+
+    def mean(self, field, axis=None, weight='ones'):
+        r"""Compute the mean of a field, optionally along an axis, with a
+        weight.
+
+        This will, in a parallel-aware fashion, compute the mean of the
+        given field.  If an axis is supplied, it will return a projection,
+        where the weight is also supplied.  By default the weight is "ones",
+        resulting in a strict average.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to average.
+        axis : string, optional
+            If supplied, the axis to compute the mean along (i.e., to project
+            along)
+        weight : string, optional
+            The field to use as a weight.
+
+        Returns
+        -------
+        Scalar or YTProjection.
+
+        Examples
+        --------
+
+        >>> avg_rho = reg.mean("density", weight="cell_volume")
+        >>> rho_weighted_T = reg.mean("temperature", axis="y", weight="density")
+        """
+        if axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self, weight_field=weight)
+        elif axis is None:
+            if weight is None:
+                r = self.quantities.total_quantity(field)
+            else:
+                r = self.quantities.weighted_average_quantity(field, weight)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
+
+    def sum(self, field, axis=None):
+        r"""Compute the sum of a field, optionally along an axis.
+
+        This will, in a parallel-aware fashion, compute the sum of the given
+        field.  If an axis is specified, it will return a projection (using
+        method type "sum", which does not take into account path length) along
+        that axis.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to sum.
+        axis : string, optional
+            If supplied, the axis to sum along.
+
+        Returns
+        -------
+        Either a scalar or a YTProjection.
+
+        Examples
+        --------
+
+        >>> total_vol = reg.sum("cell_volume")
+        >>> cell_count = reg.sum("ones", axis="x")
+        """
+        # Because we're using ``sum`` to specifically mean a sum or a
+        # projection with the method="sum", we do not utilize the ``mean``
+        # function.
+        if axis in self.ds.coordinates.axis_name:
+            with self._field_parameter_state({'axis':axis}):
+                r = self.ds.proj(field, axis, data_source=self, method="sum")
+        elif axis is None:
+            r = self.quantities.total_quantity(field)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
+
+    def integrate(self, field, axis=None):
+        r"""Compute the integral (projection) of a field along an axis.
+
+        This projects a field along an axis.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to project.
+        axis : string
+            The axis to project along.
+
+        Returns
+        -------
+        YTProjection
+
+        Examples
+        --------
+
+        >>> column_density = reg.integrate("density", axis="z")
+        """
+        if axis in self.ds.coordinates.axis_name:
+            r = self.ds.proj(field, axis, data_source=self)
+        else:
+            raise NotImplementedError("Unknown axis %s" % axis)
+        return r
+
     @property
     def _hash(self):
         s = "%s" % self

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/region_expression.py
--- /dev/null
+++ b/yt/data_objects/region_expression.py
@@ -0,0 +1,107 @@
+"""
+An object that can live on the dataset to facilitate data access.
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import weakref
+import types
+
+from yt.utilities.exceptions import YTDimensionalityError
+
+class RegionExpression(object):
+    _all_data = None
+    def __init__(self, ds):
+        self.ds = weakref.proxy(ds)
+
+    @property
+    def all_data(self):
+        if self._all_data is None:
+            self._all_data = self.ds.all_data()
+        return self._all_data
+
+    def __getitem__(self, item):
+        # At first, we will only implement this as accepting a slice that is
+        # (optionally) unitful corresponding to a specific set of coordinates
+        # that result in a rectangular prism or a slice.
+        if isinstance(item, types.StringTypes):
+            # This is some field; we will instead pass this back to the
+            # all_data object.
+            return self.all_data[item]
+        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
+            return self.all_data[item]
+        if len(item) != self.ds.dimensionality:
+            # Not the right specification, and we don't want to do anything
+            # implicitly.
+            raise YTDimensionalityError(len(item), self.ds.dimensionality)
+        if self.ds.dimensionality != 3:
+            # We'll pass on this for the time being.
+            raise RuntimeError
+
+        # OK, now we need to look at our slices.  How many are a specific
+        # coordinate?
+        
+        if not all(isinstance(v, slice) for v in item):
+            return self._create_slice(item)
+        else:
+            if all(s.start is s.stop is s.step is None for s in item):
+                return self.all_data
+            return self._create_region(item)
+            
+    def _spec_to_value(self, input_tuple):
+        if not isinstance(input_tuple, tuple):
+            # We now assume that it's in code_length
+            return self.ds.quan(input_tuple, 'code_length')
+        v, u = input_tuple
+        value = self.ds.quan(v, u)
+        return value
+
+    def _create_slice(self, slice_tuple):
+        axis = None
+        new_slice = []
+        for ax, v in enumerate(slice_tuple):
+            if not isinstance(v, slice):
+                if axis is not None: raise RuntimeError
+                axis = ax
+                coord = self._spec_to_value(v)
+                new_slice.append(slice(None, None, None))
+            else:
+                new_slice.append(v)
+        # This new slice doesn't need to be a tuple
+        source = self._create_region(new_slice)
+        sl = self.ds.slice(axis, coord, data_source = source)
+        return sl
+
+    def _slice_to_edges(self, ax, val):
+        if val.start is None:
+            l = self.ds.domain_left_edge[ax]
+        else:
+            l = self._spec_to_value(val.start)
+        if val.stop is None:
+            r = self.ds.domain_right_edge[ax]
+        else:
+            r = self._spec_to_value(val.stop)
+        if r < l:
+            raise RuntimeError
+        return l, r
+
+    def _create_region(self, bounds_tuple):
+        left_edge = []
+        right_edge = []
+        dims = []
+        for ax, b in enumerate(bounds_tuple):
+            l, r = self._slice_to_edges(ax, b)
+            left_edge.append(l)
+            right_edge.append(r)
+            dims.append(getattr(b.step, "imag", None))
+        center = [ (cl + cr)/2.0 for cl, cr in zip(left_edge, right_edge)]
+        if all(d is not None for d in dims):
+            return self.ds.arbitrary_grid(left_edge, right_edge, dims)
+        return self.ds.region(center, left_edge, right_edge)
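
For reference on how __getitem__ above interprets its slice components: a
(value, unit) tuple becomes a ds.quan, a bare number is taken as code_length,
and an imaginary slice step (as in numpy's mgrid) requests a fixed number of
cells, in which case _create_region returns an arbitrary_grid instead of a
region.  A small standalone illustration of the parsing (not part of the
changeset):

import numpy as np

spec = np.s_[(20, 'm'):(30, 'm'):24j, :, 0.25:0.75]
for ax, component in enumerate(spec):
    ncells = getattr(component.step, "imag", None)  # 24.0 for axis 0, else None
    print(ax, component.start, component.stop, ncells)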

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -295,6 +295,25 @@
         pw = self._get_pw(fields, center, width, origin, 'Slice')
         return pw
 
+    def plot(self, fields=None):
+        if hasattr(self._data_source, "left_edge") and \
+            hasattr(self._data_source, "right_edge"):
+            left_edge = self._data_source.left_edge
+            right_edge = self._data_source.right_edge
+            center = (left_edge + right_edge)/2.0
+            width = right_edge - left_edge
+            xax = self.ds.coordinates.x_axis[self.axis]
+            yax = self.ds.coordinates.y_axis[self.axis]
+            lx, rx = left_edge[xax], right_edge[xax]
+            ly, ry = left_edge[yax], right_edge[yax]
+            width = (rx-lx), (ry-ly)
+        else:
+            width = self.ds.domain_width
+            center = self.ds.domain_center
+        pw = self._get_pw(fields, center, width, 'native', 'Slice')
+        pw.show()
+        return pw
+
 class YTCuttingPlane(YTSelectionContainer2D):
     """
     This is a data object corresponding to an oblique slice through the

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -59,6 +59,8 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+from yt.data_objects.region_expression import \
+    RegionExpression
 
 from yt.geometry.coordinates.api import \
     CoordinateHandler, \
@@ -202,6 +204,7 @@
         self.file_style = file_style
         self.conversion_factors = {}
         self.parameters = {}
+        self.region_expression = self.r = RegionExpression(self)
         self.known_filters = self.known_filters or {}
         self.particle_unions = self.particle_unions or {}
         self.field_units = self.field_units or {}

diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -3,7 +3,8 @@
 from yt.frontends.stream.data_structures import load_particles
 from yt.testing import \
     fake_random_ds, \
-    assert_equal
+    assert_equal, \
+    assert_almost_equal
 
 def setup():
     from yt.config import ytcfg
@@ -108,3 +109,14 @@
             deposited_mass = obj["deposit", "all_density"].sum() * volume
 
             yield assert_equal, deposited_mass, ds.quan(1.0, 'g')
+
+    # Test that we get identical results to the covering grid for unigrid data.
+    # Testing AMR data is much harder.
+    for nprocs in [1, 2, 4, 8]:
+        ds = fake_random_ds(32, nprocs = nprocs)
+        for ref_level in [0, 1, 2]:
+            cg = ds.covering_grid(ref_level, [0.0, 0.0, 0.0],
+                    2**ref_level * ds.domain_dimensions)
+            ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
+                    2**ref_level * ds.domain_dimensions)
+            yield assert_almost_equal, cg["density"], ag["density"]

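The equivalence the new test leans on, spelled out: on unigrid data, a covering grid at refinement level L spanning the domain and an arbitrary grid over the unit cube with the same cell counts sample the same cell centers, so their fields should agree to floating-point precision (hence assert_almost_equal rather than assert_equal).

    # Both describe the same 2**L * domain_dimensions sampling of a unigrid
    # dataset, so cg["density"] and ag["density"] should match closely.
    cg = ds.covering_grid(L, [0.0, 0.0, 0.0], 2**L * ds.domain_dimensions)
    ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
                           2**L * ds.domain_dimensions)
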
diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/tests/test_dataset_access.py
--- /dev/null
+++ b/yt/data_objects/tests/test_dataset_access.py
@@ -0,0 +1,39 @@
+from yt.testing import fake_amr_ds, assert_equal
+
+# This will test the "dataset access" method.
+
+def test_region_from_d():
+    ds = fake_amr_ds(fields=["density"])
+    # We'll do a couple here
+
+    # First, no string units
+    reg1 = ds.r[0.2:0.3,0.4:0.6,:]
+    reg2 = ds.region([0.25, 0.5, 0.5], [0.2, 0.4, 0.0], [0.3, 0.6, 1.0])
+    yield assert_equal, reg1["density"], reg2["density"]
+
+    # Now, string units in some -- 1.0 == cm
+    reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, (0.25, 'cm'): (0.35, 'cm')]
+    reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
+    yield assert_equal, reg1["density"], reg2["density"]
+
+    # Now, mix string units with plain code-unit floats -- 1.0 == cm
+    reg1 = ds.r[(0.1, 'cm'):(0.5, 'cm'), :, 0.25:0.35]
+    reg2 = ds.region([0.3, 0.5, 0.3], [0.1, 0.0, 0.25], [0.5, 1.0, 0.35])
+    yield assert_equal, reg1["density"], reg2["density"]
+
+    # And, lots of : usage!
+    reg1 = ds.r[:, :, :]
+    reg2 = ds.all_data()
+    yield assert_equal, reg1["density"], reg2["density"]
+
+def test_accessing_all_data():
+    # This will test first that we can access all_data, and next that we can
+    # access it multiple times and get the *same object*.
+    ds = fake_amr_ds(fields=["density"])
+    dd = ds.all_data()
+    yield assert_equal, ds.r["density"], dd["density"]
+    # Now let's assert that it's the same object
+    rho = ds.r["density"]
+    rho *= 2.0
+    yield assert_equal, dd["density"]*2.0, ds.r["density"]
+    yield assert_equal, dd["gas", "density"]*2.0, ds.r["gas", "density"]

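What the second test is pinning down, in isolation: ds.r keeps a single cached all_data container, so repeated field access hands back the same array object. A brief sketch (field name illustrative):

    rho = ds.r["density"]      # array held by the cached all_data container
    rho *= 2.0                 # in-place edit mutates that cached array
    # ds.r["density"] now reflects the doubling, while a separately created
    # ds.all_data() container still returns the original values, which is
    # exactly the relation the assertions above check.
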
diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/data_objects/tests/test_numpy_ops.py
--- /dev/null
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -0,0 +1,92 @@
+from yt.testing import fake_random_ds, fake_amr_ds, assert_equal
+
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_mean_sum_integrate():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density",))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs, fields=("density",))
+        ad = ds.all_data()
+
+        # Sums
+        q = ad.sum('density')
+
+        q1 = ad.quantities.total_quantity('density')
+
+        yield assert_equal, q, q1
+
+        # Weighted Averages
+        w = ad.mean("density")
+
+        w1 = ad.quantities.weighted_average_quantity('density', 'ones')
+
+        yield assert_equal, w, w1
+
+        w = ad.mean("density", weight="density")
+
+        w1 = ad.quantities.weighted_average_quantity('density', 'density')
+
+        yield assert_equal, w, w1
+
+        # Projections
+        p = ad.sum('density', axis=0)
+
+        p1 = ds.proj('density', 0, data_source=ad, method="sum")
+
+        yield assert_equal, p['density'], p1['density']
+
+        # Check by axis-name
+        p = ad.sum('density', axis='x')
+
+        yield assert_equal, p['density'], p1['density']
+
+        # Now we check proper projections
+        p = ad.integrate("density", axis=0)
+        p1 = ds.proj("density", 0, data_source=ad)
+
+        yield assert_equal, p['density'], p1['density']
+
+        # Check by axis-name
+        p = ad.integrate('density', axis='x')
+
+        yield assert_equal, p['density'], p1['density']
+
+def test_min_max():
+    for nprocs in [-1, 1, 2, 16]:
+        if nprocs == -1:
+            ds = fake_amr_ds(fields=("density","temperature"))
+        else:
+            ds = fake_random_ds(32, nprocs=nprocs,
+                fields=("density","temperature"))
+
+        ad = ds.all_data()
+
+        q = ad.min("density").v
+        yield assert_equal, q, ad["density"].min()
+
+        q = ad.max("density").v
+        yield assert_equal, q, ad["density"].max()
+
+        p = ad.max("density", axis=1)
+        p1 = ds.proj("density", 1, data_source=ad, method="mip")
+        yield assert_equal, p["density"], p1["density"]
+
+        p = ad.max("density", axis="y")
+        p1 = ds.proj("density", 1, data_source=ad, method="mip")
+        yield assert_equal, p["density"], p1["density"]
+
+        # Test that we can get multiple in a single pass
+
+        qrho, qtemp = ad.max(["density", "temperature"])
+        yield assert_equal, qrho, ad["density"].max()
+        yield assert_equal, qtemp, ad["temperature"].max()
+
+        qrho, qtemp = ad.min(["density", "temperature"])
+        yield assert_equal, qrho, ad["density"].min()
+        yield assert_equal, qtemp, ad["temperature"].min()

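A compact cheat sheet for the convenience wrappers exercised above, written the way the tests read them (no API beyond the calls already present in the test):

    ad = ds.all_data()
    ad.sum("density")                       # total_quantity over the selection
    ad.mean("density", weight="density")    # weighted_average_quantity
    ad.sum("density", axis="x")             # projection with method="sum"
    ad.integrate("density", axis="x")       # standard line-of-sight projection
    ad.max("density")                       # field extremum; with axis=... it is
                                            # a maximum-intensity ("mip") projection
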
diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -510,3 +510,12 @@
 
     def __str__(self):
         return self.message
+
+class YTDimensionalityError(YTException):
+    def __init__(self, wrong, right):
+        self.wrong = wrong
+        self.right = right
+
+    def __str__(self):
+        return 'Dimensionality specified was %s but we need %s' % (
+            self.wrong, self.right)

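For context, a hedged sketch of where the new exception is expected to fire: the region expression raises it when the number of entries handed to ds.r does not match the dataset dimensionality (here, two slices against a 3D dataset).

    from yt.utilities.exceptions import YTDimensionalityError

    try:
        ds.r[0.2:0.3, :]                 # only two entries for a 3D dataset
    except YTDimensionalityError as err:
        print(err)                       # "Dimensionality specified was 2 but we need 3"
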
diff -r 5b5389c150b9d9ae33a8e36b03f6626f78b8da53 -r eb45e6e815fb3f781e57ebf9dd941b404da4e030 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -20,6 +20,7 @@
 cimport libc.math as math
 from libc.math cimport abs
 from fp_utils cimport fmin, fmax, i64min, i64max
+from yt.geometry.selection_routines cimport _ensure_code
 
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
@@ -839,3 +840,109 @@
                                         ifield[i]
                                     tot += 1
     return tot
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def fill_region_float(np.ndarray[np.float64_t, ndim=2] fcoords,
+                      np.ndarray[np.float64_t, ndim=2] fwidth,
+                      np.ndarray[np.float64_t, ndim=1] data,
+                      np.ndarray[np.float64_t, ndim=1] box_left_edge,
+                      np.ndarray[np.float64_t, ndim=1] box_right_edge,
+                      np.ndarray[np.float64_t, ndim=3] dest,
+                      int antialias = 1,
+                      period = None,
+                      int check_period = 1):
+    cdef np.float64_t ds_period[3]
+    cdef np.float64_t box_dds[3], box_idds[3], width[3], LE[3], RE[3]
+    cdef np.int64_t i, j, k, p, xi, yi, zi
+    cdef np.int64_t dims[3], ld[3], ud[3]
+    cdef np.float64_t overlap[3]
+    cdef np.float64_t dsp, osp[3], odsp[3], sp[3], lfd[3], ufd[3]
+    # These are the temp vars we get from the arrays
+    # Some periodicity helpers
+    cdef int diter[3][2]
+    cdef np.float64_t diterv[3][2]
+    if period is not None:
+        for i in range(3):
+            ds_period[i] = period[i]
+    else:
+        ds_period[0] = ds_period[1] = ds_period[2] = 0.0
+    box_left_edge = _ensure_code(box_left_edge)
+    box_right_edge = _ensure_code(box_right_edge)
+    _ensure_code(fcoords)
+    _ensure_code(fwidth)
+    for i in range(3):
+        LE[i] = box_left_edge[i]
+        RE[i] = box_right_edge[i]
+        width[i] = RE[i] - LE[i]
+        dims[i] = dest.shape[i]
+        box_dds[i] = width[i] / dims[i]
+        box_idds[i] = 1.0/box_dds[i]
+        diter[i][0] = diter[i][1] = 0
+        diterv[i][0] = diterv[i][1] = 0.0
+        overlap[i] = 1.0 
+    with nogil:
+        for p in range(fcoords.shape[0]):
+            for i in range(3):
+                diter[i][1] = 999
+                odsp[i] = fwidth[p,i]*0.5
+                osp[i] = fcoords[p,i] # already centered
+                overlap[i] = 1.0
+            dsp = data[p]
+            if check_period == 1:
+                for i in range(3):
+                    if (osp[i] - odsp[i] < LE[i]):
+                        diter[i][1] = +1
+                        diterv[i][1] = ds_period[i]
+                    elif (osp[i] + odsp[i] > RE[i]):
+                        diter[i][1] = -1
+                        diterv[i][1] = -ds_period[i]
+            for xi in range(2):
+                if diter[0][xi] == 999: continue
+                sp[0] = osp[0] + diterv[0][xi]
+                if (sp[0] + odsp[0] < LE[0]) or (sp[0] - odsp[0] > RE[0]): continue
+                for yi in range(2):
+                    if diter[1][yi] == 999: continue
+                    sp[1] = osp[1] + diterv[1][yi]
+                    if (sp[1] + odsp[1] < LE[1]) or (sp[1] - odsp[1] > RE[1]): continue
+                    for zi in range(2):
+                        if diter[2][zi] == 999: continue
+                        sp[2] = osp[2] + diterv[2][zi]
+                        if (sp[2] + odsp[2] < LE[2]) or (sp[2] - odsp[2] > RE[2]): continue
+                        for i in range(3):
+                            ld[i] = <np.int64_t> fmax(((sp[i]-odsp[i]-LE[i])*box_idds[i]),0)
+                            # NOTE: This is a different way of doing it than in the C
+                            # routines.  In C, we were implicitly casting the
+                            # initialization to int, but *not* the conditional, which
+                            # was allowed an extra value:
+                            #     for(j=lc;j<rc;j++)
+                            # here, when assigning lc (double) to j (int) it got
+                            # truncated, but no similar truncation was done in the
+                            # comparison of j to rc (double).  So give ourselves a
+                            # bonus row and bonus column here.
+                            ud[i] = <np.int64_t> fmin(((sp[i]+odsp[i]-LE[i])*box_idds[i] + 1), dims[i])
+                        for i in range(ld[0], ud[0]):
+                            if antialias == 1:
+                                lfd[0] = box_dds[0] * i + LE[0]
+                                ufd[0] = box_dds[0] * (i + 1) + LE[0]
+                                overlap[0] = ((fmin(ufd[0], sp[0]+odsp[0])
+                                           - fmax(lfd[0], (sp[0]-odsp[0])))*box_idds[0])
+                            if overlap[0] < 0.0: continue
+                            for j in range(ld[1], ud[1]):
+                                if antialias == 1:
+                                    lfd[1] = box_dds[1] * j + LE[1]
+                                    ufd[1] = box_dds[1] * (j + 1) + LE[1]
+                                    overlap[1] = ((fmin(ufd[1], sp[1]+odsp[1])
+                                               - fmax(lfd[1], (sp[1]-odsp[1])))*box_idds[1])
+                                if overlap[1] < 0.0: continue
+                                for k in range(ld[2], ud[2]):
+                                    if antialias == 1:
+                                        lfd[2] = box_dds[2] * k + LE[2]
+                                        ufd[2] = box_dds[2] * (k + 1) + LE[2]
+                                        overlap[2] = ((fmin(ufd[2], sp[2]+odsp[2])
+                                                   - fmax(lfd[2], (sp[2]-odsp[2])))*box_idds[2])
+                                        if overlap[2] < 0.0: continue
+                                        dest[i,j,k] += dsp * (overlap[0]*overlap[1]*overlap[2])
+                                    else:
+                                        dest[i,j,k] = dsp

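The antialiasing weight in fill_region_float, reduced to a single axis in plain Python as a sketch of the idea (not the Cython code itself): each deposit into destination cell i is scaled by the fraction of that cell covered by the source zone [sp - odsp, sp + odsp].

    def overlap_fraction(i, box_dds, LE, sp, odsp):
        # Edges of destination cell i along this axis.
        lfd = box_dds * i + LE
        ufd = box_dds * (i + 1) + LE
        # Covered length of the cell, normalized by the cell width; negative
        # values mean no overlap, and such cells are skipped in the loop above.
        return (min(ufd, sp + odsp) - max(lfd, sp - odsp)) / box_dds
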
Repository URL: https://bitbucket.org/yt_analysis/yt/
