[yt-svn] commit/yt: 27 new changesets

commits-noreply at bitbucket.org
Fri Sep 30 10:54:31 PDT 2016


27 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/147bff72d48d/
Changeset:   147bff72d48d
Branch:      yt
User:        MatthewTurk
Date:        2016-06-19 00:28:55+00:00
Summary:     Replace arrays with memoryviews
Affected #:  1 file

diff -r 27783a435b496c47db10e1a1f5f5f1485ab2bcf1 -r 147bff72d48d86547072fce84411937704e8c3da yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -53,11 +53,11 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_cartesian(np.ndarray[np.float64_t, ndim=1] px,
-                       np.ndarray[np.float64_t, ndim=1] py,
-                       np.ndarray[np.float64_t, ndim=1] pdx,
-                       np.ndarray[np.float64_t, ndim=1] pdy,
-                       np.ndarray[np.float64_t, ndim=1] data,
+def pixelize_cartesian(np.float64_t[:] px,
+                       np.float64_t[:] py,
+                       np.float64_t[:] pdx,
+                       np.float64_t[:] pdy,
+                       np.float64_t[:] data,
                        int cols, int rows, bounds,
                        int antialias = 1,
                        period = None,
@@ -296,12 +296,12 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_cylinder(np.ndarray[np.float64_t, ndim=1] radius,
-                      np.ndarray[np.float64_t, ndim=1] dradius,
-                      np.ndarray[np.float64_t, ndim=1] theta,
-                      np.ndarray[np.float64_t, ndim=1] dtheta,
+def pixelize_cylinder(np.float64_t[:] radius,
+                      np.float64_t[:] dradius,
+                      np.float64_t[:] theta,
+                      np.float64_t[:] dtheta,
                       buff_size,
-                      np.ndarray[np.float64_t, ndim=1] field,
+                      np.float64_t[:] field,
                       extents, input_img = None):
 
     cdef np.ndarray[np.float64_t, ndim=2] img
@@ -389,12 +389,12 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_aitoff(np.ndarray[np.float64_t, ndim=1] theta,
-                    np.ndarray[np.float64_t, ndim=1] dtheta,
-                    np.ndarray[np.float64_t, ndim=1] phi,
-                    np.ndarray[np.float64_t, ndim=1] dphi,
+def pixelize_aitoff(np.float64_t[:] theta,
+                    np.float64_t[:] dtheta,
+                    np.float64_t[:] phi,
+                    np.float64_t[:] dphi,
                     buff_size,
-                    np.ndarray[np.float64_t, ndim=1] field,
+                    np.float64_t[:] field,
                     extents, input_img = None,
                     np.float64_t theta_offset = 0.0,
                     np.float64_t phi_offset = 0.0):
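
A note on the change above: typed memoryviews (np.float64_t[:]) accept any
C-contiguous float64 buffer, not just np.ndarray, and can be indexed inside
nogil blocks. A minimal caller-side sketch against the signature as of this
changeset; the array contents are illustrative and the extension module must
be built:

    import numpy as np
    from yt.utilities.lib.pixelization_routines import pixelize_cartesian

    n = 16
    px, py = np.random.random(n), np.random.random(n)  # projected centers
    pdx = pdy = np.full(n, 0.05)    # cell half-widths (kernel spans xsp +/- dxsp)
    data = np.random.random(n)      # field values
    # cols/rows still set the image resolution at this point in the series
    img = pixelize_cartesian(px, py, pdx, pdy, data, 64, 64,
                             (0.0, 1.0, 0.0, 1.0),     # bounds
                             0)                        # antialias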


https://bitbucket.org/yt_analysis/yt/commits/8872ce7f0c93/
Changeset:   8872ce7f0c93
Branch:      yt
User:        MatthewTurk
Date:        2016-06-19 01:32:57+00:00
Summary:     Change pixelize_cartesian to use input buffer
Affected #:  4 files

diff -r 147bff72d48d86547072fce84411937704e8c3da -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -128,11 +128,12 @@
         period[1] = self.period[self.y_axis[dim]]
         if hasattr(period, 'in_units'):
             period = period.in_units("code_length").d
-        buff = pixelize_cartesian(data_source['px'], data_source['py'],
+        buff = np.zeros(size, dtype="f8")
+        pixelize_cartesian(buff, data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
-                             data_source[field], size[0], size[1],
+                             data_source[field],
                              bounds, int(antialias),
-                             period, int(periodic)).transpose()
+                             period, int(periodic))
         return buff
 
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):

diff -r 147bff72d48d86547072fce84411937704e8c3da -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -113,11 +113,12 @@
         period[1] = self.period[self.y_axis[dim]]
         if hasattr(period, 'in_units'):
             period = period.in_units("code_length").d
-        buff = pixelize_cartesian(data_source['px'], data_source['py'],
+        buff = np.zeros(size, dtype="f8")
+        pixelize_cartesian(buff, data_source['px'], data_source['py'],
                                   data_source['pdx'], data_source['pdy'],
-                                  data_source[field], size[0], size[1],
+                                  data_source[field],
                                   bounds, int(antialias),
-                                  period, int(periodic)).transpose()
+                                  period, int(periodic))
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):

diff -r 147bff72d48d86547072fce84411937704e8c3da -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -53,12 +53,13 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_cartesian(np.float64_t[:] px,
+def pixelize_cartesian(np.float64_t[:,:] buff,
+                       np.float64_t[:] px,
                        np.float64_t[:] py,
                        np.float64_t[:] pdx,
                        np.float64_t[:] pdy,
                        np.float64_t[:] data,
-                       int cols, int rows, bounds,
+                       bounds,
                        int antialias = 1,
                        period = None,
                        int check_period = 1,
@@ -77,7 +78,6 @@
     cdef int yiter[2]
     cdef np.float64_t xiterv[2]
     cdef np.float64_t yiterv[2]
-    cdef np.ndarray[np.float64_t, ndim=2] my_array
     if period is not None:
         period_x = period[0]
         period_y = period[1]
@@ -87,18 +87,15 @@
     y_max = bounds[3]
     width = x_max - x_min
     height = y_max - y_min
-    px_dx = width / (<np.float64_t> rows)
-    px_dy = height / (<np.float64_t> cols)
+    px_dx = width / (<np.float64_t> buff.shape[0])
+    px_dy = height / (<np.float64_t> buff.shape[1])
     ipx_dx = 1.0 / px_dx
     ipx_dy = 1.0 / px_dy
-    if rows == 0 or cols == 0:
-        raise YTPixelizeError("Cannot scale to zero size")
     if px.shape[0] != py.shape[0] or \
        px.shape[0] != pdx.shape[0] or \
        px.shape[0] != pdy.shape[0] or \
        px.shape[0] != data.shape[0]:
         raise YTPixelizeError("Arrays are not of correct shape.")
-    my_array = np.zeros((rows, cols), "float64")
     xiter[0] = yiter[0] = 0
     xiterv[0] = yiterv[0] = 0.0
     # Here's a basic outline of what we're going to do here.  The xiter and
@@ -160,8 +157,8 @@
                     # truncated, but no similar truncation was done in the
                     # comparison of j to rc (double).  So give ourselves a
                     # bonus row and bonus column here.
-                    rc = <int> fmin(((xsp+dxsp-x_min)*ipx_dx + 1), rows)
-                    rr = <int> fmin(((ysp+dysp-y_min)*ipx_dy + 1), cols)
+                    rc = <int> fmin(((xsp+dxsp-x_min)*ipx_dx + 1), buff.shape[0])
+                    rr = <int> fmin(((ysp+dysp-y_min)*ipx_dy + 1), buff.shape[1])
                     for i in range(lr, rr):
                         lypx = px_dy * i + y_min
                         rypx = px_dy * (i+1) + y_min
@@ -186,7 +183,7 @@
                                             fabs(cy - (ysp-dysp)))
                                 ld_y *= ipx_dy
                                 if ld_x <= line_width or ld_y <= line_width:
-                                    my_array[j,i] = 1.0
+                                    buff[i,j] = 1.0
                             elif antialias == 1:
                                 overlap1 = ((fmin(rxpx, xsp+dxsp)
                                            - fmax(lxpx, (xsp-dxsp)))*ipx_dx)
@@ -199,11 +196,9 @@
                                 # This will reduce artifacts if we ever move to
                                 # compositing instead of replacing bitmaps.
                                 if overlap1 * overlap2 == 0.0: continue
-                                my_array[j,i] += (dsp * overlap1) * overlap2
+                                buff[i,j] += (dsp * overlap1) * overlap2
                             else:
-                                my_array[j,i] = dsp
-                            
-    return my_array
+                                buff[i,j] = dsp
 
 @cython.cdivision(True)
 @cython.boundscheck(False)
@@ -311,7 +306,7 @@
     cdef np.float64_t costheta, sintheta
     cdef int i, pi, pj
     
-    imax = radius.argmax()
+    cdef int imax = radius.argmax()
     rmax = radius[imax] + dradius[imax]
           
     if input_img is None:

diff -r 147bff72d48d86547072fce84411937704e8c3da -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -395,18 +395,18 @@
         if self.bv_y != 0.0:
             # Workaround for 0.0 without units
             fv_y -= self.bv_y
-        pixX = pixelize_cartesian(plot.data['px'], plot.data['py'],
+        pixX = np.zeros((nx, ny), dtype="f8")
+        pixY = np.zeros((nx, ny), dtype="f8")
+        pixelize_cartesian(pixX, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
-                                  fv_x, int(nx), int(ny),
+                                  fv_x,
                                   (x0, x1, y0, y1), 0, # bounds, antialias
-                                  (period_x, period_y), periodic,
-                                  ).transpose()
-        pixY = pixelize_cartesian(plot.data['px'], plot.data['py'],
+                                  (period_x, period_y), periodic)
+        pixelize_cartesian(pixY, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
-                                  fv_y, int(nx), int(ny),
+                                  fv_y,
                                   (x0, x1, y0, y1), 0, # bounds, antialias
-                                  (period_x, period_y), periodic,
-                                  ).transpose()
+                                  (period_x, period_y), periodic)
         X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
                           np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
@@ -702,27 +702,29 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0] / self.factor
         ny = plot.image._A.shape[1] / self.factor
-        pixX = pixelize_cartesian(plot.data['px'], plot.data['py'],
+        pixX = np.zeros((nx, ny), dtype="f8")
+        pixY = np.zeros((nx, ny), dtype="f8")
+        pixelize_cartesian(pixX, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
                                   plot.data[self.field_x],
-                                  int(nx), int(ny),
-                                  (x0, x1, y0, y1),).transpose()
-        pixY = pixelize_cartesian(plot.data['px'], plot.data['py'],
+                                  (x0, x1, y0, y1))
+        pixelize_cartesian(pixY, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
                                   plot.data[self.field_y],
-                                  int(nx), int(ny),
-                                  (x0, x1, y0, y1),).transpose()
+                                  (x0, x1, y0, y1))
         if self.field_color:
-            self.field_color = pixelize_cartesian(
+            field_colors = np.zeros((nx, ny), dtype="f8")
+            pixelize_cartesian(field_colors,
                         plot.data['px'], plot.data['py'],
                         plot.data['pdx'], plot.data['pdy'],
-                        plot.data[self.field_color], int(nx), int(ny),
-                        (x0, x1, y0, y1),).transpose()
-
+                        plot.data[self.field_color],
+                        (x0, x1, y0, y1))
+        else:
+            field_colors = None
         X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
                np.linspace(yy0,yy1,ny,endpoint=True))
         streamplot_args = {'x': X, 'y': Y, 'u':pixX, 'v': pixY,
-                           'density': self.dens, 'color':self.field_color}
+                           'density': self.dens, 'color':field_colors}
         streamplot_args.update(self.plot_args)
         plot._axes.streamplot(**streamplot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -934,12 +936,12 @@
             xf_copy = clump[xf].copy().in_units("code_length")
             yf_copy = clump[yf].copy().in_units("code_length")
 
-            temp = pixelize_cartesian(xf_copy, yf_copy,
+            temp = np.zeros((nx, ny), dtype="f8")
+            pixelize_cartesian(temp, xf_copy, yf_copy,
                                  clump[dxf].in_units("code_length")/2.0,
                                  clump[dyf].in_units("code_length")/2.0,
                                  clump[dxf].d*0.0+i+1, # inits inside Pixelize
-                                 int(nx), int(ny),
-                             (x0, x1, y0, y1), 0).transpose()
+                             (x0, x1, y0, y1), 0)
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, np.unique(buff),
                                      extent=extent, **self.plot_args)
@@ -2485,14 +2487,14 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[0]
         ny = plot.image._A.shape[1]
-        im = pixelize_cartesian(plot.data['px'],
+        im = np.zeros((nx, ny), dtype="f8")
+        pixelize_cartesian(im, plot.data['px'],
                                 plot.data['py'],
                                 plot.data['pdx'],
                                 plot.data['pdy'],
                                 plot.data['px'], # dummy field
-                                int(nx), int(ny),
                                 (x0, x1, y0, y1),
-                                line_width=self.line_width).transpose()
+                                line_width=self.line_width)
         # New image:
         im_buffer = np.zeros((nx, ny, 4), dtype="uint8")
         im_buffer[im>0,3] = 255
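
The net effect of this changeset: the caller now allocates the output buffer
and the kernel fills it in place, reading the image shape from buff.shape
instead of the removed cols/rows arguments. A sketch of the updated call,
mirroring the callers in the diff (values illustrative):

    import numpy as np
    from yt.utilities.lib.pixelization_routines import pixelize_cartesian

    n = 16
    px, py = np.random.random(n), np.random.random(n)
    pdx = pdy = np.full(n, 0.05)
    data = np.random.random(n)

    buff = np.zeros((64, 64), dtype="f8")     # caller-owned output buffer
    pixelize_cartesian(buff, px, py, pdx, pdy, data,
                       (0.0, 1.0, 0.0, 1.0),  # bounds
                       0)                     # antialias
    # buff holds the image; there is no return value, and no .transpose()
    # because the kernel now writes buff[i, j] rather than my_array[j, i].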


https://bitbucket.org/yt_analysis/yt/commits/db6296e3235c/
Changeset:   db6296e3235c
Branch:      yt
User:        MatthewTurk
Date:        2016-06-19 02:12:19+00:00
Summary:     Change pixelize_cylinder to use input buffer
Affected #:  5 files

diff -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 -r db6296e3235c2314349eb2da35dd11f49dcf45df yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -122,11 +122,13 @@
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = pixelize_cylinder(data_source['px'],
-                                 data_source['pdx'],
-                                 data_source['py'],
-                                 data_source['pdy'],
-                                 size, data_source[field], bounds)
+        buff = np.zeros(size, dtype="f8")
+        pixelize_cylinder(buff,
+                          data_source['px'],
+                          data_source['pdx'],
+                          data_source['py'],
+                          data_source['pdy'],
+                          data_source[field], bounds)
         return buff
 
     _x_pairs = (('r', 'theta'), ('z', 'r'), ('theta', 'r'))

diff -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 -r db6296e3235c2314349eb2da35dd11f49dcf45df yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -185,9 +185,9 @@
         else:
             # We should never get here!
             raise NotImplementedError
-        buff = pixelize_cylinder(r, data_source['pdy'],
-                                 px, pdx,
-                                 size, data_source[field], bounds)
+        buff = np.zeros(size, dtype="f8")
+        pixelize_cylinder(buff, r, data_source['pdy'],
+                          px, pdx, data_source[field], bounds)
         if do_transpose:
             buff = buff.transpose()
         return buff

diff -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 -r db6296e3235c2314349eb2da35dd11f49dcf45df yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -123,18 +123,21 @@
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
         name = self.axis_name[dimension]
+        buff = np.zeros(size, dtype="f8")
         if name == 'theta':
-            buff = pixelize_cylinder(data_source['px'],
-                                     data_source['pdx'],
-                                     data_source['py'],
-                                     data_source['pdy'],
-                                     size, data_source[field], bounds)
+            pixelize_cylinder(buff,
+                              data_source['px'],
+                              data_source['pdx'],
+                              data_source['py'],
+                              data_source['pdy'],
+                              data_source[field], bounds)
         elif name == 'phi':
-            buff = pixelize_cylinder(data_source['px'],
-                                     data_source['pdx'],
-                                     data_source['py'],
-                                     data_source['pdy'],
-                                     size, data_source[field], bounds)
+            pixelize_cylinder(buff,
+                              data_source['px'],
+                              data_source['pdx'],
+                              data_source['py'],
+                              data_source['pdy'],
+                              data_source[field], bounds)
             buff = buff.transpose()
         else:
             raise RuntimeError

diff -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 -r db6296e3235c2314349eb2da35dd11f49dcf45df yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -291,32 +291,26 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_cylinder(np.float64_t[:] radius,
+def pixelize_cylinder(np.float64_t[:,:] buff,
+                      np.float64_t[:] radius,
                       np.float64_t[:] dradius,
                       np.float64_t[:] theta,
                       np.float64_t[:] dtheta,
-                      buff_size,
                       np.float64_t[:] field,
-                      extents, input_img = None):
+                      extents):
 
-    cdef np.ndarray[np.float64_t, ndim=2] img
     cdef np.float64_t x, y, dx, dy, r0, theta0
     cdef np.float64_t rmax, x0, y0, x1, y1
     cdef np.float64_t r_i, theta_i, dr_i, dtheta_i, dthetamin
     cdef np.float64_t costheta, sintheta
     cdef int i, pi, pj
     
-    cdef int imax = radius.argmax()
+    cdef int imax = np.asarray(radius).argmax()
     rmax = radius[imax] + dradius[imax]
           
-    if input_img is None:
-        img = np.zeros((buff_size[0], buff_size[1]))
-        img[:] = np.nan
-    else:
-        img = input_img
     x0, x1, y0, y1 = extents
-    dx = (x1 - x0) / img.shape[0]
-    dy = (y1 - y0) / img.shape[1]
+    dx = (x1 - x0) / buff.shape[0]
+    dy = (y1 - y0) / buff.shape[1]
     cdef np.float64_t rbounds[2]
     cdef np.float64_t corners[8]
     # Find our min and max r
@@ -365,16 +359,12 @@
                 x = r_i * sintheta
                 pi = <int>((x - x0)/dx)
                 pj = <int>((y - y0)/dy)
-                if pi >= 0 and pi < img.shape[0] and \
-                   pj >= 0 and pj < img.shape[1]:
-                    if img[pi, pj] != img[pi, pj]:
-                        img[pi, pj] = 0.0
-                    img[pi, pj] = field[i]
-                r_i += 0.5*dx 
+                if pi >= 0 and pi < buff.shape[0] and \
+                   pj >= 0 and pj < buff.shape[1]:
+                    buff[pi, pj] = field[i]
+                r_i += 0.5*dx
             theta_i += dthetamin
 
-    return img
-
 cdef void aitoff_thetaphi_to_xy(np.float64_t theta, np.float64_t phi,
                                 np.float64_t *x, np.float64_t *y):
     cdef np.float64_t z = math.sqrt(1 + math.cos(phi) * math.cos(theta / 2.0))

diff -r 8872ce7f0c93ef24f67119efec62cdb45aa57782 -r db6296e3235c2314349eb2da35dd11f49dcf45df yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -515,10 +515,10 @@
 
     def __getitem__(self, item) :
         if item in self.data: return self.data[item]
-        buff = pixelize_cylinder(self.data_source["r"], self.data_source["dr"],
-                                 self.data_source["theta"], self.data_source["dtheta"],
-                                 self.buff_size, self.data_source[item].astype("float64"),
-                                 self.radius)
+        buff = np.zeros(self.buff_size, dtype="f8")
+        pixelize_cylinder(buff, self.data_source["r"], self.data_source["dr"],
+                          self.data_source["theta"], self.data_source["dtheta"],
+                          self.data_source[item].astype("float64"), self.radius)
         self[item] = buff
         return buff
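
pixelize_cylinder follows the same pattern: buff_size and input_img are gone,
and the caller's array serves as both. One behavioral difference visible in
the diff: the old routine initialized a fresh image to NaN, whereas callers
now pass np.zeros, so pixels the annuli never touch read 0.0 rather than NaN.
A sketch of the updated call (values illustrative):

    import numpy as np
    from yt.utilities.lib.pixelization_routines import pixelize_cylinder

    n = 16
    radius = np.linspace(0.1, 0.9, n)        # annulus centers
    dradius = np.full(n, 0.05)               # radial half-widths
    theta = np.linspace(0.0, 2.0 * np.pi, n)
    dtheta = np.full(n, 0.2)
    field = np.random.random(n)

    buff = np.zeros((64, 64), dtype="f8")    # replaces buff_size/input_img
    pixelize_cylinder(buff, radius, dradius, theta, dtheta,
                      field, (-1.0, 1.0, -1.0, 1.0))  # extents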
 


https://bitbucket.org/yt_analysis/yt/commits/940316ab1ed5/
Changeset:   940316ab1ed5
Branch:      yt
User:        MatthewTurk
Date:        2016-06-19 02:24:29+00:00
Summary:     Changing pixelize_off_axis to use input buffer
Affected #:  4 files

diff -r db6296e3235c2314349eb2da35dd11f49dcf45df -r 940316ab1ed56d9f2fc01feb34956b648c033ce8 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -138,13 +138,14 @@
 
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
         indices = np.argsort(data_source['dx'])[::-1]
-        buff = pixelize_off_axis_cartesian(
+        buff = np.zeros(size, dtype="f8")
+        pixelize_off_axis_cartesian(buff,
                               data_source['x'], data_source['y'],
                               data_source['z'], data_source['px'],
                               data_source['py'], data_source['pdx'],
                               data_source['pdy'], data_source['pdz'],
                               data_source.center, data_source._inv_mat, indices,
-                              data_source[field], size[0], size[1], bounds).transpose()
+                              data_source[field], bounds)
         return buff
 
     def convert_from_cartesian(self, coord):

diff -r db6296e3235c2314349eb2da35dd11f49dcf45df -r 940316ab1ed56d9f2fc01feb34956b648c033ce8 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -204,6 +204,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def pixelize_off_axis_cartesian(
+                       np.float64_t[:,:] buff,
                        np.float64_t[:] x,
                        np.float64_t[:] y,
                        np.float64_t[:] z,
@@ -216,7 +217,7 @@
                        np.float64_t[:,:] inv_mat,
                        np.int64_t[:] indices,
                        np.float64_t[:] data,
-                       int cols, int rows, bounds):
+                       bounds):
     cdef np.float64_t x_min, x_max, y_min, y_max
     cdef np.float64_t width, height, px_dx, px_dy, ipx_dx, ipx_dy, md
     cdef int i, j, p, ip
@@ -225,7 +226,6 @@
     cdef np.float64_t xsp, ysp, zsp, dxsp, dysp, dzsp, dsp
     cdef np.float64_t pxsp, pysp, cxpx, cypx, cx, cy, cz
     # Some periodicity helpers
-    cdef np.ndarray[np.float64_t, ndim=2] my_array
     cdef np.ndarray[np.int64_t, ndim=2] mask
     x_min = bounds[0]
     x_max = bounds[1]
@@ -233,12 +233,10 @@
     y_max = bounds[3]
     width = x_max - x_min
     height = y_max - y_min
-    px_dx = width / (<np.float64_t> rows)
-    px_dy = height / (<np.float64_t> cols)
+    px_dx = width / (<np.float64_t> buff.shape[0])
+    px_dy = height / (<np.float64_t> buff.shape[1])
     ipx_dx = 1.0 / px_dx
     ipx_dy = 1.0 / px_dy
-    if rows == 0 or cols == 0:
-        raise YTPixelizeError("Cannot scale to zero size")
     if px.shape[0] != py.shape[0] or \
        px.shape[0] != pdx.shape[0] or \
        px.shape[0] != pdy.shape[0] or \
@@ -246,8 +244,7 @@
        px.shape[0] != indices.shape[0] or \
        px.shape[0] != data.shape[0]:
         raise YTPixelizeError("Arrays are not of correct shape.")
-    my_array = np.zeros((rows, cols), "float64")
-    mask = np.zeros((rows, cols), "int64")
+    mask = np.zeros((buff.shape[0], buff.shape[1]), "int64")
     with nogil:
         for ip in range(indices.shape[0]):
             p = indices[ip]
@@ -269,8 +266,8 @@
                 continue
             lc = <int> fmax(((pxsp - md - x_min)*ipx_dx),0)
             lr = <int> fmax(((pysp - md - y_min)*ipx_dy),0)
-            rc = <int> fmin(((pxsp + md - x_min)*ipx_dx + 1), rows)
-            rr = <int> fmin(((pysp + md - y_min)*ipx_dy + 1), cols)
+            rc = <int> fmin(((pxsp + md - x_min)*ipx_dx + 1), buff.shape[0])
+            rr = <int> fmin(((pysp + md - y_min)*ipx_dy + 1), buff.shape[1])
             for i in range(lr, rr):
                 cypx = px_dy * (i + 0.5) + y_min
                 for j in range(lc, rc):
@@ -283,10 +280,11 @@
                        fabs(zsp - cz) * 0.99 > dzsp:
                         continue
                     mask[i, j] += 1
-                    my_array[i, j] += dsp
-    my_array /= mask
-    return my_array.T
-
+                    buff[i, j] += dsp
+    for i in range(buff.shape[0]):
+        for j in range(buff.shape[1]):
+            if mask[i,j] == 0: continue
+            buff[i,j] /= mask[i,j]
 
 @cython.cdivision(True)
 @cython.boundscheck(False)

diff -r db6296e3235c2314349eb2da35dd11f49dcf45df -r 940316ab1ed56d9f2fc01feb34956b648c033ce8 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -536,14 +536,14 @@
             if hasattr(b, "in_units"):
                 b = float(b.in_units("code_length"))
             bounds.append(b)
-        buff = pixelize_off_axis_cartesian(
+        buff = np.zeros(self.buff_size, dtype="f8")
+        pixelize_off_axis_cartesian(buff,
                                self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
                                self.data_source['px'],  self.data_source['py'],
                                self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],
                                self.data_source.center, self.data_source._inv_mat, indices,
                                self.data_source[item],
-                               self.buff_size[0], self.buff_size[1],
-                               bounds).transpose()
+                               bounds)
         ia = ImageArray(buff, input_units=self.data_source[item].units,
                         info=self._get_info(item))
         self[item] = ia

diff -r db6296e3235c2314349eb2da35dd11f49dcf45df -r 940316ab1ed56d9f2fc01feb34956b648c033ce8 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -867,21 +867,21 @@
         ny = plot.image._A.shape[1] / self.factor
         indices = np.argsort(plot.data['dx'])[::-1]
 
-        pixX = pixelize_off_axis_cartesian(
+        pixX = np.zeros((nx, ny), dtype="f8")
+        pixY = np.zeros((nx, ny), dtype="f8")
+        pixelize_off_axis_cartesian(pixX,
                                plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
                                plot.data.center, plot.data._inv_mat, indices,
                                plot.data[self.field_x],
-                               int(nx), int(ny),
                                (x0, x1, y0, y1)).transpose()
-        pixY = pixelize_off_axis_cartesian(
+        pixelize_off_axis_cartesian(pixY,
                                plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
                                plot.data.center, plot.data._inv_mat, indices,
                                plot.data[self.field_y],
-                               int(nx), int(ny),
                                (x0, x1, y0, y1)).transpose()
         X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
                           np.linspace(yy0,yy1,ny,endpoint=True))
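
Besides threading the buffer through, this changeset tightens the off-axis
normalization: the old my_array /= mask divided every pixel, so pixels with
mask == 0 became 0/0 = NaN, while the new loop divides only where at least
one cell contributed. A NumPy sketch of the same guard (toy values):

    import numpy as np

    buff = np.array([[2.0, 0.0], [6.0, 3.0]])  # accumulated dsp sums
    mask = np.array([[2, 0], [3, 1]])          # contribution counts
    nz = mask > 0
    buff[nz] /= mask[nz]   # untouched pixels stay 0.0 instead of NaN
    # buff -> [[1., 0.], [2., 3.]]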


https://bitbucket.org/yt_analysis/yt/commits/5e5de6bc4605/
Changeset:   5e5de6bc4605
Branch:      yt
User:        MatthewTurk
Date:        2016-06-20 08:03:26+00:00
Summary:     Removing transpose call
Affected #:  1 file

diff -r 940316ab1ed56d9f2fc01feb34956b648c033ce8 -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -875,14 +875,14 @@
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
                                plot.data.center, plot.data._inv_mat, indices,
                                plot.data[self.field_x],
-                               (x0, x1, y0, y1)).transpose()
+                               (x0, x1, y0, y1))
         pixelize_off_axis_cartesian(pixY,
                                plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
                                plot.data.center, plot.data._inv_mat, indices,
                                plot.data[self.field_y],
-                               (x0, x1, y0, y1)).transpose()
+                               (x0, x1, y0, y1))
         X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
                           np.linspace(yy0,yy1,ny,endpoint=True))
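
The equivalence this cleanup relies on: writing my_array[j, i] into a
(rows, cols) array and transposing gives the same matrix as writing
buff[i, j] into a (cols, rows) array, which is what the kernels do since the
earlier changesets. A short demonstration:

    import numpy as np

    rows, cols = 3, 4
    old = np.zeros((rows, cols))
    old[1, 2] = 7.0                  # old kernel: my_array[j, i]
    new = np.zeros((cols, rows))
    new[2, 1] = 7.0                  # new kernel: buff[i, j]
    assert np.array_equal(old.T, new)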
 


https://bitbucket.org/yt_analysis/yt/commits/56b867b075cd/
Changeset:   56b867b075cd
Branch:      yt
User:        MatthewTurk
Date:        2016-07-25 21:18:50+00:00
Summary:     Merging with upstream
Affected #:  186 files

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,9 +37,11 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
 yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_construction.cpp
 yt/utilities/lib/mesh_intersection.cpp
@@ -49,6 +51,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
 yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
@@ -62,6 +65,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
 syntax: glob
 *.pyc
 *.pyd

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,4 +5160,38 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
+882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
+a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
+41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
+3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
+bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
+34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
+2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4
+21c0314cee16242b6685e42a74d16f7a993c9a88 yt-2.5.5
+053487f48672b8fd5c43af992e92bc2f2499f31f yt-2.6
+d43ff9d8e20f2d2b8f31f4189141d2521deb341b yt-2.6.1
+f1e22ef9f3a225f818c43262e6ce9644e05ffa21 yt-2.6.2
+816186f16396a16853810ac9ebcde5057d8d5b1a yt-2.6.3
 f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+73a9f749157260c8949f05c07715305aafa06408 yt-3.0.0
+0cf350f11a551f5a5b4039a70e9ff6d98342d1da yt-3.0.1
+511887af4c995a78fe606e58ce8162c88380ecdc yt-3.0.2
+fd7cdc4836188a3badf81adb477bcc1b9632e485 yt-3.1.0
+28733726b2a751e774c8b7ae46121aa57fd1060f yt-3.2
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+f7ca21c7b3fdf25d2ccab139849ae457597cfd5c yt-3.2.1
+a7896583c06585be66de8404d76ad5bc3d2caa9a yt-3.2.2
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
+7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
+9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
+include README* CREDITS COPYING.txt CITATION  setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/helper_scripts/generate_doap.py
--- /dev/null
+++ b/doc/helper_scripts/generate_doap.py
@@ -0,0 +1,141 @@
+import os
+import hglib
+import pkg_resources
+from email.utils import parseaddr
+
+templates = {"header": r"""<Project xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#" xmlns="http://usefulinc.com/ns/doap#" xmlns:foaf="http://xmlns.com/foaf/0.1/" xmlns:admin="http://webns.net/mvcb/">
+ <name>The yt Project</name>
+ <shortname>yt</shortname>
+ <shortdesc>Multi-resolution volumetric analysis</shortdesc>
+ <description>yt is a python package for analyzing and visualizing volumetric, multi-resolution data.  Originally developed for astrophysical simulations, it is flexible enough to work with data from other domains such as weather, nuclear engineering, seismology and molecular dynamics.</description>
+ <homepage rdf:resource="http://yt-project.org/" />
+ <download-page rdf:resource="https://pypi.python.org/pypi/yt/" />
+ <download-mirror rdf:resource="http://bitbucket.org/yt_analysis/yt/" />
+ <bug-database rdf:resource="http://bitbucket.org/yt_analysis/yt/issues" />
+ <programming-language>python</programming-language>
+ <programming-language>cython</programming-language>
+ <license rdf:resource="http://usefulinc.com/doap/licenses/bsd" />
+ """,
+ "foaf": r"""<foaf:Person>
+     <foaf:name>%(realname)s</foaf:name>
+  </foaf:Person>
+  """,
+  "release": r"""
+	<release>
+		<Version>
+			<name>%(name)s</name>
+			<created>%(date)s</created>
+			<revision>%(revision)s</revision>
+		</Version>
+	</release>
+   """,
+  "footer": r"""
+ <repository> 
+   <HgRepository>
+     <browse rdf:resource='https://bitbucket.org/yt_analysis/yt/src' />
+     <location rdf:resource='https://bitbucket.org/yt_analysis/yt' />
+   </HgRepository>
+ </repository> 
+</Project>
+"""
+}
+
+known_releases = [
+    ("0.3"  , "2007-12-17"),
+    ("1.0.1", "2008-10-25"),
+    ("1.5"  , "2009-11-04"),
+    ("1.6"  , "2010-01-22"),
+    ("1.6.1", "2010-02-11"),
+    ("1.7"  , "2010-06-27"),
+]
+
+yt_provider = pkg_resources.get_provider("yt")
+yt_path = os.path.dirname(yt_provider.module_path)
+
+name_mappings = {
+        # Sometimes things get filtered out by hgchurn pointing elsewhere.
+        # So we can add them back in manually
+        "andrew.wetzel at yale.edu" : "Andrew Wetzel",
+        "df11c at my.fsu.edu": "Daniel Fenn",
+        "dnarayan at haverford.edu": "Desika Narayanan",
+        "jmtomlinson95 at gmail.com": "Joseph Tomlinson",
+        "kaylea.nelson at yale.edu": "Kaylea Nelson",
+        "tabel at slac.stanford.edu": "Tom Abel",
+        "pshriwise": "Patrick Shriwise",
+        "jnaiman": "Jill Naiman",
+        "gsiisg": "Geoffrey So",
+        "dcollins4096 at gmail.com": "David Collins",
+        "bcrosby": "Brian Crosby",
+        "astrugarek": "Antoine Strugarek",
+        "AJ": "Allyson Julian",
+}
+
+name_ignores = ["convert-repo"]
+
+lastname_sort = lambda a: a.rsplit(None, 1)[-1]
+
+def get_release_tags():
+    c = hglib.open(yt_path)
+    releases = {}
+    for name, rev, node, islocal in c.tags():
+        if name.startswith("yt-"):
+            releases[name] = node
+    rr = []
+    for name, node in sorted(releases.items()):
+        date = c.log(node)[-1][-1]
+        rr.append((date, name[3:]))
+    rr.sort()
+    return [(_[1], _[0].strftime("%Y-%m-%d")) for _ in rr]
+
+def developer_names():
+    cmd = hglib.util.cmdbuilder("churn", "-c")
+    c = hglib.open(yt_path)
+    emails = set([])
+    for dev in c.rawcommand(cmd).split("\n"):
+        if len(dev.strip()) == 0: continue
+        emails.add(dev.rsplit(None, 2)[0])
+    print("Generating real names for {0} emails".format(len(emails)))
+    names = set([])
+    for email in sorted(emails):
+        if email in name_ignores:
+            continue
+        if email in name_mappings:
+            names.add(name_mappings[email])
+            continue
+        cset = c.log(revrange="last(author('%s'))" % email)
+        if len(cset) == 0:
+            print("Error finding {0}".format(email))
+            realname = email
+        else:
+            realname, addr = parseaddr(cset[0][4])
+        if realname == '':
+            realname = email
+        if realname in name_mappings:
+            names.add(name_mappings[realname])
+            continue
+        realname = realname.decode('utf-8')
+        realname = realname.encode('ascii', 'xmlcharrefreplace')
+        names.add(realname)
+    #with open("devs.txt", "w") as f:
+    #    for name in sorted(names, key=lastname_sort):
+    #        f.write("%s\n" % name)
+    devs = list(names)
+    devs.sort(key=lastname_sort)
+    return devs
+
+def generate_doap():
+    dev_names = developer_names()
+    with open("doap.xml", "w") as f:
+        f.write(templates["header"])
+        for dev_name in dev_names:
+            f.write("<developer>\n")
+            f.write(templates["foaf"] % {'realname': dev_name})
+            f.write("</developer>\n")
+        for release in known_releases + get_release_tags():
+            f.write(templates["release"] % {
+                'name': "yt " + release[0], 'revision': release[0], 'date': release[1]}
+            )
+        f.write(templates["footer"])
+
+if __name__ == "__main__":
+    generate_doap()

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -119,6 +119,24 @@
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
+    if [ -n "${PYTHONPATH}" ]
+    then
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        echo
+        echo "The PYTHONPATH environment variable is set to:"
+        echo
+        echo "    $PYTHONPATH"
+        echo
+        echo "If dependencies of yt (numpy, scipy, matplotlib) are installed"
+        echo "to this path, this may cause issues. Exit the install script"
+        echo "with Ctrl-C and unset PYTHONPATH if you are unsure."
+        echo "Hit enter to continue."
+        echo
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        read -p "[hit enter]"
+    fi
 else
     if [ $INST_YT_SOURCE -eq 0 ]
     then
@@ -524,11 +542,11 @@
 echo
 
 printf "%-18s = %s so I " "INST_CONDA" "${INST_CONDA}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_CONDA}
 echo "be installing a conda-based python environment"
 
 printf "%-18s = %s so I " "INST_YT_SOURCE" "${INST_YT_SOURCE}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_YT_SOURCE}
 echo "be compiling yt from source"
 
 printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}"
@@ -744,6 +762,12 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function test_install
+{
+    echo "Testing that yt can be imported"
+    ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import yt" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -1238,6 +1262,8 @@
     ( cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh 2>&1 ) 1>> ${LOG_FILE}
     sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
 
+    test_install
+
     function print_afterword
     {
         echo
@@ -1463,7 +1489,7 @@
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install --yes yt
+        log_cmd conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
@@ -1478,10 +1504,12 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE}
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 
+    test_install
+
     echo
     echo
     echo "========================================================================"

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,10 +4,9 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso at physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the yt source code,
-   so the program may be bug ridden.  Please send yt-dev an email and
-   address to Geoffrey So if you discover something wrong with this
-   portion of the code.
+.. warning:: This functionality is currently broken and needs to
+   be updated to make use of the :ref:`halo_catalog` framework.
+   Anyone interested in doing so should contact the yt-dev list.
 
 Purpose
 -------

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -3,14 +3,16 @@
 Halo Analysis
 =============
 
-Using halo catalogs, understanding the different halo finding methods,
-and using the halo mass function.
+This section covers halo finding, performing extra analysis on halos,
+and the halo mass function calculator.  If you already have halo
+catalogs and simply want to load them into yt, see
+:ref:`halo-catalog-data`.
 
 .. toctree::
    :maxdepth: 2
 
+   halo_catalogs
+   halo_mass_function
    halo_transition
-   halo_catalogs
-   halo_finders
-   halo_mass_function
    halo_merger_tree
+   ellipsoid_analysis

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,42 @@
 .. _halo_catalog:
 
-Halo Catalogs
-=============
+Halo Finding and Analysis
+=========================
 
-Creating Halo Catalogs
-----------------------
+In yt-3.x, halo finding and analysis are combined into a single
+framework called the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+This framework is substantially different from the halo analysis
+machinery available in yt-2.x and is entirely backward incompatible.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.x, see :ref:`halo-transition`.
 
-In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought
-together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is
-entirely backward incompatible.
-For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo-transition`.
+.. _halo_catalog_finding:
 
-A catalog of halos can be created from any initial dataset given to halo
-catalog through data_ds. These halos can be found using friends-of-friends,
-HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
-For more details on the relative differences between these halo finders see
-:ref:`halo_finding`.
+Halo Finding
+------------
 
-The class which holds all of the halo information is the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+If you already have a halo catalog, either produced by one of the methods
+below or in a format described in :ref:`halo-catalog-data`, and want to
+perform further analysis, skip to :ref:`halo_catalog_analysis`.
+
+Three halo finding methods exist within yt.  These are:
+
+* :ref:`fof_finding`: a basic friends-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+  <http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
+  <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
+  scales well and does substructure finding (`Behroozi et al.
+  2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
+
+Halo finding is performed through the creation of a
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+object.  The dataset on which halo finding is to be performed should
+be loaded and given to the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+along with the ``finder_method`` keyword to specify the method to be
+used.
 
 .. code-block:: python
 
@@ -31,28 +45,195 @@
 
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
 
-A halo catalog may also be created from already run rockstar outputs.
-This method is not implemented for previously run friends-of-friends or
-HOP finders. Even though rockstar creates one file per processor,
-specifying any one file allows the full catalog to be loaded. Here we
-only specify the file output by the processor with ID 0. Note that the
-argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
+The ``finder_method`` options should be given as "fof", "hop", or
+"rockstar".  Each of these methods has its own set of keyword
+arguments to control functionality.  These can be specified in the form
+of a dictionary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_ds=halos_ds)
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-Although supplying only the binary output of the rockstar halo finder
-is sufficient for creating a halo catalog, it is not possible to find
-any new information about the identified halos. To associate the halos
-with the dataset from which they were found, supply arguments to both
-halos_ds and data_ds.
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={"ptype": "stars",
+                                   "padding": 0.02})
+   hc.create()
+
+For a full list of keywords for each halo finder, see
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
+and
+:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+
+.. _fof_finding:
+
+FOF
+^^^
+
+This is a basic friends-of-friends algorithm.  See
+`Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_ for more
+details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+
+.. _hop_finding:
+
+HOP
+^^^
+
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
+groups in this fashion:
+
+#. Estimates the local density at each particle using a
+   smoothing kernel.
+
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
+   its own densest neighbor is the end of the chain.
+
+#. All chains that share the same densest particle are
+   grouped together.
+
+#. Groups are included, linked together, or discarded
+   depending on the user-supplied overdensity
+   threshold parameter. The default is 160.0.
+
+See the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
+.. _rockstar_finding:
+
+Rockstar
+^^^^^^^^
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and
+noise-resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
+
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
+simulations, but does make it less useful for finding halos from the stellar
+mass. In simulations where the highest-resolution particles all have the
+same mass (i.e. zoom-in, grid-based simulations), one can set up a particle
+filter to select the lowest mass particles and perform the halo finding
+only on those.  See this cookbook recipe for an example:
+:ref:`cookbook-rockstar-nested-grid`.
+
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+The script above configures the Halo finder, launches a server process which
+disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the
+halo catalog through the ``finder_kwargs`` argument:
+
+* ``dm_type``, the index of the dark matter particle. Default is 1.
+* ``outbase``, This is where the out*list files that Rockstar makes should be
+  placed. Default is 'rockstar_halos'.
+* ``num_readers``, the number of reader tasks (which are idle most of the
+  time.) Default is 1.
+* ``num_writers``, the number of writer tasks (which are fed particles and
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
+  configured from the number of available MPI tasks.
+* ``force_res``, the resolution that Rockstar uses for various calculations
+  and smoothing lengths. This is in units of Mpc/h.
+  If no value is provided, this parameter is automatically set to
+  the width of the smallest grid element in the simulation from the
+  last data snapshot (i.e. the one where time has evolved the
+  longest) in the time series:
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
+* ``total_particles``, if supplied, this is a pre-calculated
+  total number of dark matter
+  particles present in the simulation. For example, this is useful
+  when analyzing a series of snapshots where the number of dark
+  matter particles should not change and this will save some disk
+  access time. If left unspecified, it will
+  be calculated automatically. Default: ``None``.
+* ``dm_only``, if set to ``True``, it will be assumed that there are
+  only dark matter particles present in the simulation.
+  This option does not modify the halos found by Rockstar, however
+  this option can save disk access time if there are no star particles
+  (or other non-dark matter particles) in the simulation. Default: ``False``.
+
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+.. _rockstar-installation:
+
+Installing Rockstar
+"""""""""""""""""""
+
+Because of changes in the Rockstar API over time, yt only currently works with
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  # or use your favorite editor; set INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.
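+
+As a quick sanity check that the library was built (a sketch; the import
+path below is assumed to be the Rockstar interface bundled with yt), the
+following should succeed without an ``ImportError``:
+
+.. code-block:: python
+
+   # If INST_ROCKSTAR=1 took effect, the compiled interface is importable.
+   from yt.analysis_modules.halo_finding.rockstar import rockstar_interface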
+
+.. _halo_catalog_analysis:
+
+Extra Halo Analysis
+-------------------
+
+As a reminder, all halo catalogs created by the methods outlined in
+:ref:`halo_catalog_finding` as well as those in the formats discussed in
+:ref:`halo-catalog-data` can be loaded into yt as first-class datasets.
+Once a halo catalog has been created, further analysis can be performed
+by providing both the halo catalog and the original simulation dataset to
+the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
@@ -60,24 +241,28 @@
 associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
-Analysis Using Halo Catalogs
-----------------------------
-
-Analysis is done by adding actions to the
+The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+allows the user to create a pipeline of analysis actions that will be
+performed on all halos in the existing catalog.  The analysis can be
+performed in parallel, with separate processors or groups of processors
+allocated to run the entire pipeline on individual halos.
+The pipeline is set up by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
 
-* Filters
-* Quantities
-* Callbacks
-* Recipes
+* :ref:`halo_catalog_filters`
+* :ref:`halo_catalog_quantities`
+* :ref:`halo_catalog_callbacks`
+* :ref:`halo_catalog_recipes`
 
 A list of all available filters, quantities, and callbacks can be found in
 :ref:`halo_analysis_ref`.
 All interaction with this analysis can be performed by importing from
 halo_analysis.
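+
+A minimal end-to-end sketch (assuming a Rockstar catalog already on disk;
+the action names used here are built-ins described in the sections below,
+and the mass threshold is an arbitrary assumption):
+
+.. code-block:: python
+
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+
+   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
+   hc.add_filter('quantity_value', 'particle_mass', '>', 1e13, 'Msun')
+   hc.add_callback('sphere', factor=2.0)
+   hc.add_quantity('center_of_mass')
+   hc.create()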
 
+.. _halo_catalog_filters:
+
 Filters
 ^^^^^^^
 
@@ -118,6 +303,8 @@
    # ... Later on in your script
    hc.add_filter("my_filter")
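+
+A sketch of how ``my_filter`` might be defined and registered (the mass
+threshold is an arbitrary assumption for illustration):
+
+.. code-block:: python
+
+   from yt.analysis_modules.halo_analysis.api import add_filter
+   from yt.units import Msun
+
+   # Keep only halos above a (hypothetical) mass threshold.
+   def my_filter(halo):
+       return halo.quantities['particle_mass'] > 1e13 * Msun
+
+   add_filter('my_filter', my_filter)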
 
+.. _halo_catalog_quantities:
+
 Quantities
 ^^^^^^^^^^
 
@@ -176,6 +363,8 @@
    # ... Anywhere after "my_quantity" has been called
    hc.add_callback("print_quantity")
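+
+A sketch of how a custom quantity might be defined (assuming a ``sphere``
+callback has already attached ``halo.data_object``):
+
+.. code-block:: python
+
+   from yt.analysis_modules.halo_analysis.api import add_quantity
+
+   # A hypothetical quantity: the mass-weighted average temperature
+   # of the sphere attached to the halo.
+   def my_quantity(halo):
+       sphere = halo.data_object
+       return sphere.quantities.weighted_average_quantity(
+           'temperature', 'cell_mass')
+
+   add_quantity('my_quantity', my_quantity)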
 
+.. _halo_catalog_callbacks:
+
 Callbacks
 ^^^^^^^^^
 
@@ -214,6 +403,8 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
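+
+A sketch of how ``my_callback`` might be defined and registered (the body
+here is a trivial hypothetical example):
+
+.. code-block:: python
+
+   from yt.analysis_modules.halo_analysis.api import add_callback
+
+   # A hypothetical callback: print a stored quantity for each halo.
+   def my_callback(halo, field='particle_mass'):
+       print(halo.quantities[field])
+
+   add_callback('my_callback', my_callback)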
 
+.. _halo_catalog_recipes:
+
 Recipes
 ^^^^^^^
 
@@ -258,8 +449,8 @@
 object as the first argument, recipe functions should take a ``HaloCatalog``
 object as the first argument.
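+
+A sketch of a recipe bundling a filter and a quantity (the names and the
+mass limit are illustrative assumptions):
+
+.. code-block:: python
+
+   from yt.analysis_modules.halo_analysis.api import add_recipe
+
+   def my_recipe(hc, mass_limit=1e13):
+       hc.add_filter('quantity_value', 'particle_mass', '>',
+                     mass_limit, 'Msun')
+       hc.add_quantity('center_of_mass')
+
+   add_recipe('my_recipe', my_recipe)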
 
-Running Analysis
-----------------
+Running the Pipeline
+--------------------
 
 After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
@@ -290,7 +481,7 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to load. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
@@ -303,8 +494,8 @@
                    filename="virial_profiles")
    hc.load()
 
-Worked Example of Halo Catalog in Action
-----------------------------------------
+Halo Catalog in Action
+----------------------
 
 For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. _halo_finding:
-
-Halo Finding
-============
-
-There are three methods of finding particle haloes in yt. The
-default method is called HOP, a method described
-in `Eisenstein and Hut (1998)
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
-friends-of-friends (e.g. `Efstathiou et al. (1985)
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
-finder is also implemented. Finally, Rockstar (`Behroozi et al.
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
-a 6D phase-space halo finder developed by Peter Behroozi that
-excels in finding subhalos and substructure, but does not allow
-multiple particle masses.
-
-.. _hop:
-
-HOP
----
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
-   smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
-   particle to its densest neighbor. A particle which is
-   its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
-   grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied overdensity
-   threshold parameter. The default is 160.0.
-
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
-documentation.
-
-.. _fof:
-
-FOF
----
-
-A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
-documentation.
-
-.. _rockstar:
-
-Rockstar Halo Finding
----------------------
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the
-same mass (i.e., zoom-in grid-based simulations), one can set up a particle
-filter to select the lowest-mass particles and perform the halo finding
-only on those.  See this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
-
-To run Rockstar halo finding, you must launch Python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
-   connected by an Infiniband network can be problematic. Therefore, for now
-   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
-   using this flag: ``--mca btl ^openib``.
-   For example, here is how Rockstar might be called using 24 cores:
-   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-The script above configures the halo finder and launches a server process,
-which disseminates run information and coordinates the reader and writer
-processes. Afterwards, it launches reader and writer tasks, filling the
-available MPI slots; the readers load particle information and the writers
-analyze it for halo content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument:
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, the directory where the out*list files that Rockstar makes
-  are placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time). Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1.
-  If left undefined, the above options are automatically
-  configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
-  and smoothing lengths. This is in units of Mpc/h.
-  If no value is provided, this parameter is automatically set to
-  the width of the smallest grid element in the simulation from the
-  last data snapshot (i.e. the one where time has evolved the
-  longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
-  total number of dark matter
-  particles present in the simulation. For example, this is useful
-  when analyzing a series of snapshots where the number of dark
-  matter particles should not change and this will save some disk
-  access time. If left unspecified, it will
-  be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
-  only dark matter particles present in the simulation.
-  This option does not modify the halos found by Rockstar; however,
-  it can save disk access time if there are no star particles
-  (or other non-dark matter particles) in the simulation. Default: ``False``.
-
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-For more information, see the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
-
-.. _parallel-hop-and-fof:
-
-Parallel HOP and FOF
---------------------
-
-Both the HOP and FoF halo finders can run in parallel using simple
-spatial decomposition. In order to run them in parallel it is helpful
-to understand how it works. Below, the first plot (i) shows a simplified
-depiction of three haloes labeled 1, 2, and 3:
-
-.. image:: _images/ParallelHaloFinder.png
-   :width: 500
-
-Halo 3 is twice reflected around the periodic boundary conditions.
-
-In (ii), the volume has been sub-divided into four equal subregions,
-A, B, C, and D, shown with dotted lines. Notice that halo 2 is now in
-two different subregions, C and D, and that halo 3 is now in three,
-A, B, and D. If the halo finder is run on these four separate subregions,
-halo 1 is identified as a single halo, but haloes 2 and 3 are split
-up into multiple haloes, which is incorrect. The solution is to give
-each subregion padding to oversample into neighboring regions.
-
-In (iii), subregion C has oversampled into the other three regions,
-with the periodic boundary conditions taken into account, shown by
-dot-dashed lines. The other subregions oversample in a similar way.
-
-The halo finder is then run on each padded subregion independently
-and simultaneously. By oversampling like this, haloes 2 and 3 will
-both be enclosed fully in at least one subregion and identified
-completely.
-
-Haloes identified with centers of mass inside the padded part of a
-subregion are thrown out, eliminating the problem of halo duplication.
-The centers for the three haloes are shown with stars. Halo 1 will
-belong to subregion A, 2 to C and 3 to B.
-
-To run with parallel halo finding, you must supply a value for
-padding in the ``finder_kwargs`` argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. This parameter sets how
-much padding is added to each of the six sides of a subregion.
-This value should be 2x-3x larger than the largest expected halo
-in the simulation. It is unlikely, of course, that the largest
-object in the simulation will be on a subregion boundary, but there
-is no way of knowing before the halo finder is run.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import *
-  ds = yt.load("data0001")
-
-  hc = HaloCatalog(data_ds = ds, finder_method = 'hop', finder_kwargs={'padding':0.02})
-  # --or--
-  hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
-
-In general, a little bit of padding goes a long way, and too much
-just slows down the analysis and doesn't improve the answer (but
-doesn't change it).  It may be worth your time to run the parallel
-halo finder at a few paddings to find the right amount, especially
-if you're analyzing many similar datasets.
-
-.. _rockstar-installation:
-
-Rockstar Installation
----------------------
-
-Because of changes in the Rockstar API over time, yt currently works only with
-a slightly older version of Rockstar.  This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy.  The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-    cd $YT_DEST
-    cd ..
-    vi install_script.sh  # or use your favorite editor; set INST_ROCKSTAR=1
-    bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.  You should now
-be able to use Rockstar and yt together.

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,11 +1,12 @@
 .. _halo-transition:
 
-Getting up to Speed with Halo Analysis in yt-3.0
-================================================
+Transitioning From yt-2 to yt-3
+===============================
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure.
+the new halo catalog infrastructure.  If you're starting
+from scratch, see :ref:`halo_catalog`.
 
 Finding Halos
 -------------

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -19,4 +19,3 @@
    two_point_functions
    clump_finding
    particle_trajectories
-   ellipsoid_analysis

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived field is added
-using ``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : int
-    The resolution of column density calculation as performed by
-    HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer
-    This number controls the gridding of the HEALPix projection onto
-    the spherical surfaces. Higher numbers mean higher quality.
-    Default = 800j.
-

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -131,6 +131,16 @@
 
    ds.r[:,-180:0,:]
 
+If you specify a single slice, it will be repeated along all three dimensions.
+For instance, this will give all data::
+
+   ds.r[:]
+
+And this will select a box running from 0.4 to 0.6 along all three
+dimensions::
+
+   ds.r[0.4:0.6]
+
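+As a quick consistency sketch (assuming the standard ``ds.all_data()``
+semantics, so this is illustrative rather than normative), the full-domain
+selection above should contain the same cells as ``all_data``::
+
+   import numpy as np
+
+   ad = ds.all_data()
+   assert np.array_equal(np.sort(ds.r[:]['density']),
+                         np.sort(ad['density']))
+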
 Selecting Fixed Resolution Regions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,7 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'numpydoc', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')
@@ -67,9 +67,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3-dev'
+version = '3.4-dev'
 # The full version, including alpha/beta/rc tags.
-release = '3.3-dev'
+release = '3.4-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -38,7 +38,7 @@
 # again.
 
 render_source.set_volume(kd_low_res)
-render_source.set_fields('density')
+render_source.set_field('density')
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,6 +56,16 @@
 
 .. yt_cookbook:: simulation_analysis.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 
 .. _cookbook-time-series-analysis:
 
@@ -93,16 +103,6 @@
 
 .. yt_cookbook:: hse_field.py
 
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/colormaps.py
--- a/doc/source/cookbook/colormaps.py
+++ b/doc/source/cookbook/colormaps.py
@@ -7,11 +7,11 @@
 p = yt.ProjectionPlot(ds, "z", "density", width=(100, 'kpc'))
 p.save()
 
-# Change the colormap to 'jet' and save again.  We must specify
+# Change the colormap to 'dusk' and save again.  We must specify
 # a different filename here or it will save it over the top of
 # our first projection.
-p.set_cmap(field="density", cmap='jet')
-p.save('proj_with_jet_cmap.png')
+p.set_cmap(field="density", cmap='dusk')
+p.save('proj_with_dusk_cmap.png')
 
 # Change the colormap to 'hot' and save again.
 p.set_cmap(field="density", cmap='hot')

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -8,9 +8,12 @@
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
-# Trajectory should be given as (r, theta, phi)
-lr.make_light_ray(start_position=[0., 0., 0.],
-                  end_position=[1., 1., 1.],
+# These positions can be defined as xyz coordinates,
+# but here we just use the two opposite corners of the
+# simulation box.  Alternatively, a trajectory can
+# be given as (r, theta, phi).
+lr.make_light_ray(start_position=ds.domain_left_edge,
+                  end_position=ds.domain_right_edge,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=['temperature', 'density'])

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
 
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
 
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
 
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
 
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
 
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
 
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/developing/extensions.rst
--- /dev/null
+++ b/doc/source/developing/extensions.rst
@@ -0,0 +1,54 @@
+.. _extensions:
+
+Extension Packages
+==================
+
+.. note:: For some additional discussion, see `YTEP-0029
+          <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
+          this plan was designed.
+
+As of version 3.3 of yt, we have put into place new methods for easing the
+process of developing "extensions" to yt.  Extensions might be analysis
+packages, visualization tools, or other software projects that use yt as a base
+engine but that are versioned, developed and distributed separately.  This
+brings with it the advantage of retaining control over the versioning,
+contribution guidelines, scope, etc., while also providing a mechanism for
+disseminating information about it, and potentially a method of interacting
+with other extensions.
+
+We have created a few pieces of infrastructure for developing extensions,
+making them discoverable, and distributing them to collaborators.
+
+If you have a module you would like to retain some external control over, or
+that you don't feel would fit into yt, we encourage you to build it as an
+extension module and distribute and version it independently.
+
+Hooks for Extensions
+--------------------
+
+Starting with version 3.3 of yt, any package named with the prefix ``yt_`` is
+importable from the namespace ``yt.extensions``.  For instance, the
+``yt_interaction`` package ( https://bitbucket.org/data-exp-lab/yt_interaction
+) is importable as ``yt.extensions.interaction``.
+
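+As a sketch of this convention (``yt_interaction`` is the example package
+named above; any installed package with the ``yt_`` prefix should behave the
+same way):
+
+.. code-block:: python
+
+   # Both forms resolve to the installed yt_interaction package.
+   import yt.extensions.interaction
+   from yt.extensions import interaction
+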
+In subsequent versions, we plan to include in yt a catalog of known extensions
+and where to find them; this will put discoverability directly into the code
+base.
+
+Extension Template
+------------------
+
+A template for starting an extension module (or converting an existing set of
+code to an extension module) can be found at
+https://bitbucket.org/yt_analysis/yt_extension_template .
+
+To get started, download a zipfile of the template (
+https://bitbucket.org/yt_analysis/yt_extension_template/get/tip.zip ) and
+follow the directions in ``README.md`` to modify the metadata.
+
+Distributing Extensions
+-----------------------
+
+We encourage you to version your extension on your choice of hosting platform
+(Bitbucket, GitHub, etc.) and to distribute it widely.  We are presently
+working on deploying a method for listing extension modules on the yt webpage.

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -19,6 +19,7 @@
    developing
    building_the_docs
    testing
+   extensions
    debugdrive
    releasing
    creating_datatypes

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -103,7 +103,7 @@
    accept no arguments. The test function should do some work that tests some
    functionality and should also verify that the results are correct using
    assert statements or functions.  
-# Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
    ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
    captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
@@ -487,7 +487,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require a definition of which set of tests constitutes a given
 answer. Configuration for the integration server is stored in

diff -r 5e5de6bc460555e1cd34d61568eda45ea3d8615a -r 56b867b075cdb30433a6de0684eacc619f337895 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,7 +41,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import yt\n",
@@ -58,7 +60,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -74,7 +78,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -118,7 +124,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -140,7 +148,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -167,7 +177,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -193,7 +205,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -213,7 +227,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -229,7 +245,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -246,7 +264,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -256,7 +276,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -273,7 +295,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -299,7 +323,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -313,29 +339,15 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Create a Transfer Function that goes from the minimum to the maximum of the data:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
     "Define the properties and size of the `camera` viewport:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -358,24 +370,41 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
-    "                north_vector=[0,0,1], steady_north=True, \n",
-    "                sub_samples=5, log_fields=[False])\n",
+    "sc = yt.create_scene(ds, 'Temperature')\n",
+    "dd = ds.all_data()\n",
     "\n",
-    "cam.transfer_function.map_to_colormap(mi,ma, \n",
-    "                                      scale=15.0, colormap='algae')"
+    "source = sc[0]\n",
+    "\n",
+    "source.log_field = False\n",
+    "\n",
+    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)\n",
+    "tf.map_to_colormap(mi, ma, scale=15.0, colormap='algae')\n",
+    "\n",
+    "source.set_transfer_function(tf)\n",
+    "\n",
+    "sc.add_source(source)\n",
+    "\n",
+    "cam = sc.add_camera()\n",
+    "cam.width = W\n",
+    "cam.center = c\n",
+    "cam.normal_vector = L\n",
+    "cam.north_vector = [0, 0, 1]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam.show()"
+    "sc.show(sigma_clip=4)"
    ]
   },
   {
@@ -395,7 +424,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -412,7 +443,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -429,7 +462,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -449,7 +484,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -467,7 +504,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -495,7 +534,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -520,7 +561,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -538,7 +581,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -561,7 +606,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -577,7 +624,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -613,7 +662,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3.0
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -625,4 +674,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
\ No newline at end of file
+}

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b5111ca0993f/
Changeset:   b5111ca0993f
Branch:      yt
User:        MatthewTurk
Date:        2016-07-26 15:50:03+00:00
Summary:     Adding a long note about rows and columns, and fix the buffer size
Affected #:  1 file

diff -r 56b867b075cdb30433a6de0684eacc619f337895 -r b5111ca0993f4d285a7bdd4a82cba7f760ec7265 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -86,8 +86,8 @@
     y_max = bounds[3]
     width = x_max - x_min
     height = y_max - y_min
-    px_dx = width / (<np.float64_t> buff.shape[0])
-    px_dy = height / (<np.float64_t> buff.shape[1])
+    px_dx = width / (<np.float64_t> buff.shape[1])
+    px_dy = height / (<np.float64_t> buff.shape[0])
     ipx_dx = 1.0 / px_dx
     ipx_dy = 1.0 / px_dy
     if px.shape[0] != py.shape[0] or \
@@ -115,6 +115,31 @@
     # (lr) and then iterate up to "right column" (rc) and "upper row" (rr),
     # depositing into them the data value.  Overlap computes the relative
     # overlap of a data value with a pixel.
+    # 
+    # NOTE ON ROWS AND COLUMNS:
+    #
+    #   The way that images are plotted in matplotlib is somewhat different
+    #   from what most might expect.  The first axis of the array plotted is
+    #   what varies along the y axis.  So for instance, if you supply
+    #   origin='lower' and plot the results of an mgrid operation, at a fixed
+    #   'y' value you will see the results of that array held constant in the
+    #   first dimension.  Here is some example code:
+    #
+    #   import matplotlib.pyplot as plt
+    #   import numpy as np
+    #   x, y = np.mgrid[0:1:100j,0:1:100j]
+    #   plt.imshow(x, interpolation='nearest', origin='lower')
+    #   plt.imshow(y, interpolation='nearest', origin='lower')
+    #
+    #   The values in the image:
+    #       lower left:  arr[0,0]
+    #       lower right: arr[0,-1]
+    #       upper left:  arr[-1,0]
+    #       upper right: arr[-1,-1]
+    #
+    #   So what we want here is to fill an array such that we fill:
+    #       first axis : y_min .. y_max
+    #       second axis: x_min .. x_max
     with nogil:
         for p in range(px.shape[0]):
             xiter[1] = yiter[1] = 999
@@ -156,8 +181,10 @@
                     # truncated, but no similar truncation was done in the
                     # comparison of j to rc (double).  So give ourselves a
                     # bonus row and bonus column here.
-                    rc = <int> fmin(((xsp+dxsp-x_min)*ipx_dx + 1), buff.shape[0])
-                    rr = <int> fmin(((ysp+dysp-y_min)*ipx_dy + 1), buff.shape[1])
+                    rc = <int> fmin(((xsp+dxsp-x_min)*ipx_dx + 1), buff.shape[1])
+                    rr = <int> fmin(((ysp+dysp-y_min)*ipx_dy + 1), buff.shape[0])
+                    # Note that we're iterating here over *y* in the i
+                    # direction.  See the note above about this.
                     for i in range(lr, rr):
                         lypx = px_dy * i + y_min
                         rypx = px_dy * (i+1) + y_min
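
For context, here is a small standalone NumPy sketch of the (rows, columns)
convention the note above describes (the 400x800 buffer and bounds are
arbitrary illustrative values):

    import numpy as np

    x_min, x_max, y_min, y_max = 0.0, 1.0, 0.0, 0.5
    buff = np.zeros((400, 800))              # (rows, cols) = (ny, nx)
    px_dx = (x_max - x_min) / buff.shape[1]  # width is spread over columns
    px_dy = (y_max - y_min) / buff.shape[0]  # height is spread over rows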


https://bitbucket.org/yt_analysis/yt/commits/ba94e78a1648/
Changeset:   ba94e78a1648
Branch:      yt
User:        MatthewTurk
Date:        2016-07-26 19:06:44+00:00
Summary:     Reversing size for pixelize_cartesian
Affected #:  1 file

diff -r b5111ca0993f4d285a7bdd4a82cba7f760ec7265 -r ba94e78a1648156e1f966c16e7e402091abb5bdb yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -128,7 +128,7 @@
         period[1] = self.period[self.y_axis[dim]]
         if hasattr(period, 'in_units'):
             period = period.in_units("code_length").d
-        buff = np.zeros(size, dtype="f8")
+        buff = np.zeros((size[1], size[0]), dtype="f8")
         pixelize_cartesian(buff, data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
                              data_source[field],


https://bitbucket.org/yt_analysis/yt/commits/11ada4479c24/
Changeset:   11ada4479c24
Branch:      yt
User:        MatthewTurk
Date:        2016-07-26 20:12:09+00:00
Summary:     Updating off axis fixed resolution work.
Affected #:  4 files

diff -r ba94e78a1648156e1f966c16e7e402091abb5bdb -r 11ada4479c24df67b36fa7f9802a327371954d02 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -137,8 +137,8 @@
         return buff
 
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
-        indices = np.argsort(data_source['dx'])[::-1]
-        buff = np.zeros(size, dtype="f8")
+        indices = np.argsort(data_source['pdx'])[::-1]
+        buff = np.zeros((size[1], size[0]), dtype="f8")
         pixelize_off_axis_cartesian(buff,
                               data_source['x'], data_source['y'],
                               data_source['z'], data_source['px'],

diff -r ba94e78a1648156e1f966c16e7e402091abb5bdb -r 11ada4479c24df67b36fa7f9802a327371954d02 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -259,8 +259,8 @@
     y_max = bounds[3]
     width = x_max - x_min
     height = y_max - y_min
-    px_dx = width / (<np.float64_t> buff.shape[0])
-    px_dy = height / (<np.float64_t> buff.shape[1])
+    px_dx = width / (<np.float64_t> buff.shape[1])
+    px_dy = height / (<np.float64_t> buff.shape[0])
     ipx_dx = 1.0 / px_dx
     ipx_dy = 1.0 / px_dy
     if px.shape[0] != py.shape[0] or \
@@ -292,8 +292,8 @@
                 continue
             lc = <int> fmax(((pxsp - md - x_min)*ipx_dx),0)
             lr = <int> fmax(((pysp - md - y_min)*ipx_dy),0)
-            rc = <int> fmin(((pxsp + md - x_min)*ipx_dx + 1), buff.shape[0])
-            rr = <int> fmin(((pysp + md - y_min)*ipx_dy + 1), buff.shape[1])
+            rc = <int> fmin(((pxsp + md - x_min)*ipx_dx + 1), buff.shape[1])
+            rr = <int> fmin(((pysp + md - y_min)*ipx_dy + 1), buff.shape[0])
             for i in range(lr, rr):
                 cypx = px_dy * (i + 0.5) + y_min
                 for j in range(lc, rc):

diff -r ba94e78a1648156e1f966c16e7e402091abb5bdb -r 11ada4479c24df67b36fa7f9802a327371954d02 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -524,30 +524,10 @@
 
 class ObliqueFixedResolutionBuffer(FixedResolutionBuffer):
     """
-    This object is a subclass of
-    :class:`yt.visualization.fixed_resolution.FixedResolutionBuffer`
-    that supports non-aligned input data objects, primarily cutting planes.
+    This object is a deprecated subclass of
+    :class:`yt.visualization.fixed_resolution.FixedResolutionBuffer`.  All
+    necessary behavior is now in the superclass.
     """
-    def __getitem__(self, item):
-        if item in self.data: return self.data[item]
-        indices = np.argsort(self.data_source['dx'])[::-1]
-        bounds = []
-        for b in self.bounds:
-            if hasattr(b, "in_units"):
-                b = float(b.in_units("code_length"))
-            bounds.append(b)
-        buff = np.zeros(self.buff_size, dtype="f8")
-        pixelize_off_axis_cartesian(buff,
-                               self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
-                               self.data_source['px'],  self.data_source['py'],
-                               self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],
-                               self.data_source.center, self.data_source._inv_mat, indices,
-                               self.data_source[item],
-                               bounds)
-        ia = ImageArray(buff, input_units=self.data_source[item].units,
-                        info=self._get_info(item))
-        self[item] = ia
-        return ia
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):

diff -r ba94e78a1648156e1f966c16e7e402091abb5bdb -r 11ada4479c24df67b36fa7f9802a327371954d02 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -422,6 +422,13 @@
 
     assert_array_almost_equal(sl_on.frb['density'], sl_off.frb['density'])
 
+    sl_on.set_buff_size((800, 400))
+    sl_on._recreate_frb()
+    sl_off.set_buff_size((800, 400))
+    sl_off._recreate_frb()
+
+    assert_array_almost_equal(sl_on.frb['density'], sl_off.frb['density'])
+
 def test_plot_particle_field_error():
     ds = fake_random_ds(32, particles=100)
 


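The shape swaps in this changeset follow the NumPy convention that a 2D image
buffer is indexed as (row, column), i.e. (ny, nx): the pixel width along x
comes from the column count and the width along y from the row count.  A
minimal sketch of that convention (the sizes and bounds here are made up;
this is not code from the changeset):

.. code-block:: python

    import numpy as np

    nx, ny = 800, 400                        # requested buffer size (x, y)
    buff = np.zeros((ny, nx), dtype="f8")    # rows correspond to y, columns to x
    x_min, x_max, y_min, y_max = 0.0, 1.0, 0.0, 0.5
    px_dx = (x_max - x_min) / buff.shape[1]  # x spacing from the column count
    px_dy = (y_max - y_min) / buff.shape[0]  # y spacing from the row count
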
https://bitbucket.org/yt_analysis/yt/commits/71a5d2abedd8/
Changeset:   71a5d2abedd8
Branch:      yt
User:        MatthewTurk
Date:        2016-07-26 20:19:52+00:00
Summary:     Removing ObliqueFixedResolutionBuffer.
Affected #:  7 files

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -26,13 +26,16 @@
 sizes into a fixed-size array that appears like an image.  This process is that
 of pixelization, which yt handles transparently internally.  You can access
 this functionality by constructing a
-:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` (or
-:class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`) and
-supplying to it your :class:`~yt.data_objects.data_containers.YTSelectionContainer2D`
+:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` and supplying
+to it your :class:`~yt.data_objects.data_containers.YTSelectionContainer2D`
 object, as well as some information about how you want the final image to look.
 You can specify both the bounds of the image (in the appropriate x-y plane) and
-the resolution of the output image.  You can then have yt pixelize any
-field you like.
+the resolution of the output image.  You can then have yt pixelize any field
+you like.
+
+.. note:: In previous versions of yt, there was a special class of
+          FixedResolutionBuffer for off-axis slices.  This is no longer
+          necessary.
 
 To create :class:`~yt.data_objects.data_containers.YTSelectionContainer2D` objects, you can
 access them as described in :ref:`data-objects`, specifically the section

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -49,7 +49,6 @@
    ~yt.visualization.fixed_resolution.FixedResolutionBuffer
    ~yt.visualization.fixed_resolution.ParticleImageBuffer
    ~yt.visualization.fixed_resolution.CylindricalFixedResolutionBuffer
-   ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
    ~yt.visualization.fixed_resolution.OffAxisProjectionFixedResolutionBuffer
 
 Data Sources

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -148,8 +148,7 @@
 
 # Now individual component imports from the visualization API
 from yt.visualization.api import \
-    FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
-    write_bitmap, write_image, \
+    FixedResolutionBuffer, write_bitmap, write_image, \
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -453,12 +453,12 @@
         self.fields = ensure_list(fields) + [k for k in self.field_data.keys()
                                              if k not in self._key_fields]
         from yt.visualization.plot_window import get_oblique_window_parameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
         (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, self.ds)
         pw = PWViewerMPL(
             self, bounds, fields=self.fields, origin='center-window', 
             periodic=False, oblique=True,
-            frb_generator=ObliqueFixedResolutionBuffer, 
+            frb_generator=FixedResolutionBuffer, 
             plot_type='OffAxisSlice')
         if axes_unit is not None:
             pw.set_axes_unit(axes_unit)
@@ -466,8 +466,8 @@
         return pw
 
     def to_frb(self, width, resolution, height=None, periodic=False):
-        r"""This function returns an ObliqueFixedResolutionBuffer generated
-        from this object.
+        r"""This function returns a FixedResolutionBuffer generated from this
+        object.
 
         A FixedResolutionBuffer is an object that accepts a
         variable-resolution 2D object and transforms it into an NxM bitmap that
@@ -516,9 +516,9 @@
             height = self.ds.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
         bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution,
+        frb = FixedResolutionBuffer(self, bounds, resolution,
                                            periodic=periodic)
         return frb
 

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -26,7 +26,6 @@
 
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer, \
     ParticleImageBuffer
 
 from .image_writer import \

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -51,9 +51,8 @@
     Parameters
     ----------
     data_source : :class:`yt.data_objects.construction_data_containers.YTQuadTreeProj` or :class:`yt.data_objects.selection_data_containers.YTSlice`
-        This is the source to be pixelized, which can be a projection or a
-        slice.  (For cutting planes, see
-        `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
+        This is the source to be pixelized, which can be a projection, slice or
+        cutting plane.
     bounds : sequence of floats
         Bounds are the min and max in the image plane that we want our
         image to cover.  It's in the order of (xmin, xmax, ymin, ymax),
@@ -67,12 +66,6 @@
         This can be true or false, and governs whether the pixelization
         will span the domain boundaries.
 
-    See Also
-    --------
-    :class:`yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer` : A similar object,
-                                                     used for cutting
-                                                     planes.
-
     Examples
     --------
     To make a projection and then several images, you can generate a
@@ -522,14 +515,6 @@
         self[item] = buff
         return buff
 
-class ObliqueFixedResolutionBuffer(FixedResolutionBuffer):
-    """
-    This object is a deprecated subclass of
-    :class:`yt.visualization.fixed_resolution.FixedResolutionBuffer`.  All
-    necessary behavior is now in the superclass.
-    """
-
-
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
     """
     This object is a subclass of

diff -r 11ada4479c24df67b36fa7f9802a327371954d02 -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,7 +26,6 @@
 from .base_plot_types import ImagePlotMPL
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer, \
     OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import callback_registry
 from .plot_container import \
@@ -157,9 +156,8 @@
     ----------
 
     data_source : :class:`yt.data_objects.construction_data_containers.YTQuadTreeProj` or :class:`yt.data_objects.selection_data_containers.YTSlice`
-        This is the source to be pixelized, which can be a projection or a
-        slice.  (For cutting planes, see
-        `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
+        This is the source to be pixelized, which can be a projection, a slice,
+        or a cutting plane.
     bounds : sequence of floats
         Bounds are the min and max in the image plane that we want our
         image to cover.  It's in the order of (xmin, xmax, ymin, ymax),
@@ -263,8 +261,6 @@
             bounds = self.xlim+self.ylim+self.zlim
         else:
             bounds = self.xlim+self.ylim
-        if self._frb_generator is ObliqueFixedResolutionBuffer:
-            bounds = np.array([b.in_units('code_length') for b in bounds])
 
         # Generate the FRB
         self._frb = self._frb_generator(self.data_source, bounds,
@@ -1507,7 +1503,7 @@
     """
 
     _plot_type = 'OffAxisSlice'
-    _frb_generator = ObliqueFixedResolutionBuffer
+    _frb_generator = FixedResolutionBuffer
 
     def __init__(self, ds, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,


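With ObliqueFixedResolutionBuffer gone, a cutting plane pixelizes through the
plain FixedResolutionBuffer.  A hedged usage sketch (the dataset path is a
placeholder, and a ``density`` field is assumed to exist):

.. code-block:: python

    import yt

    ds = yt.load("path/to/dataset")            # placeholder dataset
    cut = ds.cutting([0.2, 0.3, 0.5], ds.domain_center)
    frb = cut.to_frb((1.0, "unitary"), 512)    # now a plain FixedResolutionBuffer
    image = frb["density"]                     # pixelized on demand by __getitem__
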
https://bitbucket.org/yt_analysis/yt/commits/ce87f3b9ad8e/
Changeset:   ce87f3b9ad8e
Branch:      yt
User:        MatthewTurk
Date:        2016-07-26 20:41:03+00:00
Summary:     Fixing orders of all the callback pixelizations
Affected #:  1 file

diff -r 71a5d2abedd8f50e04dfe760a211b42b7a3cbcf1 -r ce87f3b9ad8e20e7ee653dc8249f6a71606130c9 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -391,8 +391,10 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0] / self.factor
-        ny = plot.image._A.shape[1] / self.factor
+        # See the note about rows/columns in the pixelizer for more information
+        # on why we choose the bounds we do
+        nx = plot.image._A.shape[1] / self.factor
+        ny = plot.image._A.shape[0] / self.factor
         # periodicity
         ax = plot.data.axis
         ds = plot.data.ds
@@ -409,8 +411,8 @@
         if self.bv_y != 0.0:
             # Workaround for 0.0 without units
             fv_y -= self.bv_y
-        pixX = np.zeros((nx, ny), dtype="f8")
-        pixY = np.zeros((nx, ny), dtype="f8")
+        pixX = np.zeros((ny, nx), dtype="f8")
+        pixY = np.zeros((ny, nx), dtype="f8")
         pixelize_cartesian(pixX, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
                                   fv_x,
@@ -480,8 +482,10 @@
 
         plot._axes.hold(True)
 
-        numPoints_x = plot.image._A.shape[0]
-        numPoints_y = plot.image._A.shape[1]
+        # See the note about rows/columns in the pixelizer for more information
+        # on why we choose the bounds we do
+        numPoints_x = plot.image._A.shape[1]
+        numPoints_y = plot.image._A.shape[0]
 
         # Multiply by dx and dy to go from data->plot
         dx = (xx1 - xx0) / (x1-x0)
@@ -605,7 +609,7 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         (dx, dy) = self.pixel_scale(plot)
-        (xpix, ypix) = plot.image._A.shape
+        (ypix, xpix) = plot.image._A.shape
         ax = plot.data.axis
         px_index = plot.data.ds.coordinates.x_axis[ax]
         py_index = plot.data.ds.coordinates.y_axis[ax]
@@ -715,10 +719,12 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0] / self.factor
-        ny = plot.image._A.shape[1] / self.factor
-        pixX = np.zeros((nx, ny), dtype="f8")
-        pixY = np.zeros((nx, ny), dtype="f8")
+        # See the note about rows/columns in the pixelizer for more information
+        # on why we choose the bounds we do
+        nx = plot.image._A.shape[1] / self.factor
+        ny = plot.image._A.shape[0] / self.factor
+        pixX = np.zeros((ny, nx), dtype="f8")
+        pixY = np.zeros((ny, nx), dtype="f8")
         pixelize_cartesian(pixX, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
                                   plot.data[self.field_x],
@@ -728,7 +734,7 @@
                                   plot.data[self.field_y],
                                   (x0, x1, y0, y1))
         if self.field_color:
-            field_colors = np.zeros((nx, ny), dtype="f8")
+            field_colors = np.zeros((ny, nx), dtype="f8")
             pixelize_cartesian(field_colors,
                         plot.data['px'], plot.data['py'],
                         plot.data['pdx'], plot.data['pdy'],
@@ -878,12 +884,12 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0] / self.factor
-        ny = plot.image._A.shape[1] / self.factor
+        nx = plot.image._A.shape[1] / self.factor
+        ny = plot.image._A.shape[0] / self.factor
         indices = np.argsort(plot.data['dx'])[::-1]
 
-        pixX = np.zeros((nx, ny), dtype="f8")
-        pixY = np.zeros((nx, ny), dtype="f8")
+        pixX = np.zeros((ny, nx), dtype="f8")
+        pixY = np.zeros((ny, nx), dtype="f8")
         pixelize_off_axis_cartesian(pixX,
                                plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
@@ -943,7 +949,7 @@
         dxf = "d%s" % xf
         dyf = "d%s" % yf
 
-        nx, ny = plot.image._A.shape
+        ny, nx = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.info("Pixelizing contour %s", i)
@@ -951,7 +957,7 @@
             xf_copy = clump[xf].copy().in_units("code_length")
             yf_copy = clump[yf].copy().in_units("code_length")
 
-            temp = np.zeros((nx, ny), dtype="f8")
+            temp = np.zeros((ny, nx), dtype="f8")
             pixelize_cartesian(temp, xf_copy, yf_copy,
                                  clump[dxf].in_units("code_length")/2.0,
                                  clump[dyf].in_units("code_length")/2.0,
@@ -2408,8 +2414,10 @@
         extent = [xx0,xx1,yy0,yy1]
 
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        # We are feeding this size into the pixelizer, where it will properly
+        # set it in reverse order
+        nx = plot.image._A.shape[1]
+        ny = plot.image._A.shape[0]
         pixX = plot.data.ds.coordinates.pixelize(plot.data.axis,
                                                  plot.data,
                                                  self.field_x,
@@ -2439,14 +2447,15 @@
 
         if self.const_alpha:
             plot._axes.imshow(lic_data_clip, extent=extent, cmap=self.cmap,
-                              alpha=self.alpha)
+                              alpha=self.alpha, origin='lower')
         else:
             lic_data_rgba = cm.ScalarMappable(norm=None, cmap=self.cmap).\
                             to_rgba(lic_data_clip)
             lic_data_clip_rescale = (lic_data_clip - self.lim[0]) \
                                     / (self.lim[1] - self.lim[0])
             lic_data_rgba[...,3] = lic_data_clip_rescale * self.alpha
-            plot._axes.imshow(lic_data_rgba, extent=extent, cmap=self.cmap)
+            plot._axes.imshow(lic_data_rgba, extent=extent, cmap=self.cmap,
+                              origin='lower')
         plot._axes.hold(False)
 
         return plot
@@ -2494,9 +2503,9 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
-        im = np.zeros((nx, ny), dtype="f8")
+        nx = plot.image._A.shape[1]
+        ny = plot.image._A.shape[0]
+        im = np.zeros((ny, nx), dtype="f8")
         pixelize_cartesian(im, plot.data['px'],
                                 plot.data['py'],
                                 plot.data['pdx'],
@@ -2505,7 +2514,7 @@
                                 (x0, x1, y0, y1),
                                 line_width=self.line_width)
         # New image:
-        im_buffer = np.zeros((nx, ny, 4), dtype="uint8")
+        im_buffer = np.zeros((ny, nx, 4), dtype="uint8")
         im_buffer[im>0,3] = 255
         im_buffer[im>0,:3] = self.color
         plot._axes.imshow(im_buffer, origin='lower',


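The same (row, column) convention applies to matplotlib's image array:
``plot.image._A.shape`` is (ny, nx).  A minimal sketch of the downsampled
buffer allocation the callbacks now share (the shape and factor values are
made up):

.. code-block:: python

    import numpy as np

    ny_img, nx_img = 800, 1200    # stand-ins for plot.image._A.shape
    factor = 16
    nx = nx_img // factor         # columns give the number of x samples
    ny = ny_img // factor         # rows give the number of y samples
    pixX = np.zeros((ny, nx), dtype="f8")
    pixY = np.zeros((ny, nx), dtype="f8")
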
https://bitbucket.org/yt_analysis/yt/commits/0adffb2ed05d/
Changeset:   0adffb2ed05d
Branch:      yt
User:        MatthewTurk
Date:        2016-07-26 20:49:42+00:00
Summary:     Deprecating ObliqueFixedResolutionBuffer
Affected #:  1 file

diff -r ce87f3b9ad8e20e7ee653dc8249f6a71606130c9 -r 0adffb2ed05d1bad721d5df6c12fd13dacfd28a9 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -18,7 +18,8 @@
 from yt.funcs import \
     get_output_filename, \
     mylog, \
-    ensure_list
+    ensure_list, \
+    deprecate
 from .volume_rendering.api import off_axis_projection
 from .fixed_resolution_filters import apply_filter, filter_registry
 from yt.data_objects.image_array import ImageArray
@@ -487,6 +488,11 @@
             self.__dict__['apply_' + filtername] = \
                 types.MethodType(filt, self)
 
+class ObliqueFixedResolutionBuffer(FixedResolutionBuffer):
+    @deprecate("FixedResolutionBuffer")
+    def __init__(self, *args, **kwargs):
+        super(ObliqueFixedResolutionBuffer, self).__init__(*args, **kwargs)
+
 class CylindricalFixedResolutionBuffer(FixedResolutionBuffer):
     """
     This object is a subclass of


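A hedged sketch of the deprecation pattern used above; this is not yt's
actual ``deprecate`` implementation, only the general shape such a decorator
takes:

.. code-block:: python

    import warnings
    from functools import wraps

    def deprecate(replacement):
        # Wrap a callable so that calling it emits a DeprecationWarning
        # naming the replacement, then defers to the original.
        def real_deprecate(func):
            @wraps(func)
            def run_func(*args, **kwargs):
                warnings.warn("%s is deprecated; use %s instead"
                              % (func.__name__, replacement),
                              DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)
            return run_func
        return real_deprecate
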
https://bitbucket.org/yt_analysis/yt/commits/0e54390725f6/
Changeset:   0e54390725f6
Branch:      yt
User:        MatthewTurk
Date:        2016-07-27 03:42:40+00:00
Summary:     Fix flake8 and re-add import
Affected #:  2 files

diff -r 0adffb2ed05d1bad721d5df6c12fd13dacfd28a9 -r 0e54390725f6284e9ce31633099dbb8853a74056 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -148,7 +148,8 @@
 
 # Now individual component imports from the visualization API
 from yt.visualization.api import \
-    FixedResolutionBuffer, write_bitmap, write_image, \
+    FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    write_bitmap, write_image, \
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \

diff -r 0adffb2ed05d1bad721d5df6c12fd13dacfd28a9 -r 0e54390725f6284e9ce31633099dbb8853a74056 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -24,7 +24,7 @@
 from .fixed_resolution_filters import apply_filter, filter_registry
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.pixelization_routines import \
-    pixelize_cylinder, pixelize_off_axis_cartesian
+    pixelize_cylinder
 from yt.utilities.lib.api import add_points_to_greyscale_image
 from yt.frontends.stream.api import load_uniform_grid
 


https://bitbucket.org/yt_analysis/yt/commits/0199fc7a4502/
Changeset:   0199fc7a4502
Branch:      yt
User:        MatthewTurk
Date:        2016-07-27 03:42:46+00:00
Summary:     Increment tests
Affected #:  1 file

diff -r 0e54390725f6284e9ce31633099dbb8853a74056 -r 0199fc7a45028569d437bcccd77a6d881dea2116 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_001:
+  local_pw_002:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes


https://bitbucket.org/yt_analysis/yt/commits/24a49a436381/
Changeset:   24a49a436381
Branch:      yt
User:        MatthewTurk
Date:        2016-07-27 04:54:03+00:00
Summary:     Starting to work through the cylindrical coordinate pixelizers
Affected #:  4 files

diff -r 0199fc7a45028569d437bcccd77a6d881dea2116 -r 24a49a4363812fb07acf37c8adbc549c76c6c025 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -122,7 +122,7 @@
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = np.zeros(size, dtype="f8")
+        buff = np.zeros((size[1], size[0]), dtype="f8")
         pixelize_cylinder(buff,
                           data_source['px'],
                           data_source['pdx'],

diff -r 0199fc7a45028569d437bcccd77a6d881dea2116 -r 24a49a4363812fb07acf37c8adbc549c76c6c025 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -203,7 +203,7 @@
         else:
             # We should never get here!
             raise NotImplementedError
-        buff = np.zeros(size, dtype="f8")
+        buff = np.zeros((size[1], size[0]), dtype="f8")
         pixelize_cylinder(buff, r, data_source['pdy'],
                           px, pdx, data_source[field], bounds)
         if do_transpose:

diff -r 0199fc7a45028569d437bcccd77a6d881dea2116 -r 24a49a4363812fb07acf37c8adbc549c76c6c025 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -123,7 +123,7 @@
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
         name = self.axis_name[dimension]
-        buff = np.zeros(size, dtype="f8")
+        buff = np.zeros((size[1], size[0]), dtype="f8")
         if name == 'theta':
             pixelize_cylinder(buff,
                               data_source['px'],
@@ -138,7 +138,6 @@
                              data_source['py'],
                              data_source['pdy'],
                              data_source[field], bounds)
-            buff = buff.transpose()
         else:
             raise RuntimeError
         return buff

diff -r 0199fc7a45028569d437bcccd77a6d881dea2116 -r 24a49a4363812fb07acf37c8adbc549c76c6c025 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -333,8 +333,8 @@
     rmax = radius[imax] + dradius[imax]
           
     x0, x1, y0, y1 = extents
-    dx = (x1 - x0) / buff.shape[0]
-    dy = (y1 - y0) / buff.shape[1]
+    dx = (x1 - x0) / buff.shape[1]
+    dy = (y1 - y0) / buff.shape[0]
     cdef np.float64_t rbounds[2]
     cdef np.float64_t corners[8]
     # Find our min and max r


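As in the Cartesian case, the cylindrical pixelizer now receives a rows-first
buffer: for a requested (width, height) ``size``, the allocation is
``(size[1], size[0])``.  A minimal sketch (sizes are made up; the argument
order in the comment follows the calls in the diff above):

.. code-block:: python

    import numpy as np

    size = (512, 256)                                # requested (nx, ny)
    buff = np.zeros((size[1], size[0]), dtype="f8")  # rows = ny, columns = nx
    # pixelize_cylinder(buff, radius, dradius, theta, dtheta, field, extents)
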
https://bitbucket.org/yt_analysis/yt/commits/e682c81923f6/
Changeset:   e682c81923f6
Branch:      yt
User:        MatthewTurk
Date:        2016-07-27 04:57:46+00:00
Summary:     Fix missing import
Affected #:  1 file

diff -r 24a49a4363812fb07acf37c8adbc549c76c6c025 -r e682c81923f6e40749b1bbb0134550facebb3453 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -26,6 +26,7 @@
 
 from .fixed_resolution import \
     FixedResolutionBuffer, \
+    ObliqueFixedResolutionBuffer, \
     ParticleImageBuffer
 
 from .image_writer import \


https://bitbucket.org/yt_analysis/yt/commits/4da858e68d31/
Changeset:   4da858e68d31
Branch:      yt
User:        MatthewTurk
Date:        2016-08-03 02:40:45+00:00
Summary:     Merging with upstream
Affected #:  9 files

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command to make sure you are on
+the "yt" branch, and not another one like "stable" (this will be important
+later when you want to submit your pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -484,21 +485,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -510,8 +509,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -528,6 +527,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -557,9 +567,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -1411,7 +1421,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1425,14 +1435,21 @@
         log_cmd conda install --yes ${YT_DEP}
     done
 
+    if [ $INST_PY3 -eq 1 ]
+    then
+        echo "Installing mercurial"
+        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
+    fi
+
     log_cmd pip install python-hglib
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1479,13 +1496,6 @@
         fi
     fi
 
-    if [ $INST_PY3 -eq 1 ]
-    then
-        echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
-        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
-    fi
-
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
@@ -1494,7 +1504,7 @@
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -3,40 +3,46 @@
 Unstructured Mesh Rendering
 ===========================
 
-Installation
-^^^^^^^^^^^^
+Beginning with version 3.3, yt has the ability to volume render unstructured
+mesh data like that created by finite element calculations. No additional 
+dependencies are required in order to use this feature. However, it is 
+possible to speed up the rendering operation by installing yt with 
+`Embree <https://embree.github.io>`_ support. Embree is a fast ray-tracing
+library from Intel that can substantially speed up the mesh rendering operation
+on large datasets. You can read about how to install yt with Embree support 
+below, or you can skip to the examples.
 
-Beginning with version 3.3, yt has the ability to volume render unstructured
-mesh data like that created by finite element calculations. In order to use
-this capability, a few additional dependencies are required. The easiest way
-to install yt with unstructured mesh support is to use conda to install the
+Optional Embree Installation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The easiest way to install yt with Embree support is to use conda to install the
 most recent development version of yt from our channel:
 
 .. code-block:: bash
 
     conda install -c http://use.yt/with_conda/ yt
 
-If you want to install from source, you can use the ``get_yt.sh`` script.
-Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the
-top of the script. The ``get_yt.sh`` script can be downloaded by doing:
+Alternatively, you can install yt from source using the ``install_script.sh`` 
+script. Be sure to set the INST_CONDA, INST_YT_SOURCE, INST_EMBREE, 
+and INST_NETCDF4 flags to 1 at the top of the script. The ``install_script.sh`` 
+script can be downloaded by doing:
 
 .. code-block:: bash
 
-  wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/get_yt.sh
+  wget http://bitbucket.org/yt_analysis/yt/raw/yt/doc/install_script.sh
 
 and then run like so:
 
 .. code-block:: bash
 
-  bash get_yt.sh
+  bash install_script.sh
 
-Alternatively, you can install the additional dependencies by hand.
-First, `embree <https://embree.github.io>`_
-(a fast software ray-tracing library from Intel) must be installed, either
-by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>`_ page.
+Finally, you can install the additional dependencies by hand.
+First, you will need to install Embree, either by compiling from source 
+or by using one of the pre-built binaries available at Embree's 
+`downloads <https://embree.github.io/downloads.html>`_ page.
 
-Second, the python bindings for embree (called
+Second, the python bindings for Embree (called
 `pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
 do so, first obtain a copy by, e.g., cloning the repo:
 
@@ -54,7 +60,7 @@
 
     CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
 
-Once embree and pyembree are installed, you must rebuild yt from source in order to use
+Once Embree and pyembree are installed, you must rebuild yt from source in order to use
 the unstructured mesh rendering capability. Once again, if embree is installed in a
 location that is not part of your default search path, you must tell yt where to find it.
 There are a number of ways to do this. One way is to again manually pass in the flags
@@ -84,20 +90,6 @@
 necessary if you installed embree into a location that is in your default path, such
 as /usr/local.
 
-Once the pre-requisites are installed, unstructured mesh data can be rendered
-much like any other dataset. In particular, a new type of
-:class:`~yt.visualization.volume_rendering.render_source.RenderSource` object
-has been defined, called the
-:class:`~yt.visualization.volume_rendering.render_source.MeshSource`, that
-represents the unstructured mesh data that will be rendered. The user creates
-this object, and also defines a
-:class:`~yt.visualization.volume_rendering.camera.Camera`
-that specifies your viewpoint into the scene. When
-:class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
-a set of rays are cast at the source. Each time a ray strikes the source mesh,
-the data is sampled at the intersection point at the resulting value gets
-saved into an image. See below for examples.
-
 Examples
 ^^^^^^^^
 

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_002:
+  local_pw_003:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -181,6 +181,15 @@
         ("magmom", ("g*cm/s", ["momentum_magnitude"], r"\rho |\mathbf{U}|")),
         ("maggrav", ("cm/s**2", [], r"|\mathbf{g}|")),
         ("phiGrav", ("erg/g", [], r"\Phi")),
+        ("enuc", ("erg/(g*s)", [], r"\dot{e}_{\rm{nuc}}")),
+        ("rho_enuc", ("erg/(cm**3*s)", [], r"\rho \dot{e}_{\rm{nuc}}")),
+        ("angular_momentum_x", ("g/(cm*s)", [], r"\ell_x")),
+        ("angular_momentum_y", ("g/(cm*s)", [], r"\ell_y")),
+        ("angular_momentum_z", ("g/(cm*s)", [], r"\ell_z")),
+        ("phiRot", ("erg/g", [], r"\Phi_{\rm{rot}}")),
+        ("rot_x", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_x")),
+        ("rot_y", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_y")),
+        ("rot_z", ("cm/s**2", [], r"\mathbf{f}_{\rm{rot}} \cdot \mathbf{e}_z")),
     )
 
     def setup_fluid_fields(self):

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -480,6 +480,9 @@
     test_unit = Unit('m_geom/l_geom**3')
     assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}')
 
+    test_unit = Unit('1e9*cm')
+    assert_equal(test_unit.latex_repr, '1.0 \\times 10^{9}\\ \\rm{cm}')
+
 def test_latitude_longitude():
     lat = unit_symbols.lat
     lon = unit_symbols.lon

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -127,10 +127,20 @@
         symbols = invert_symbols[val]
         for i in range(1, len(symbols)):
             expr = expr.subs(symbols[i], symbols[0])
-
+    prefix = None
+    if isinstance(expr, Mul):
+        coeffs = expr.as_coeff_Mul()
+        if coeffs[0] == 1 or not isinstance(coeffs[0], Float):
+            pass
+        else:
+            expr = coeffs[1]
+            prefix = Float(coeffs[0], 2)
     latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot",
                        fold_frac_powers=True, fold_short_frac=True)
 
+    if prefix is not None:
+        latex_repr = latex(prefix, mul_symbol="times") + '\\ ' + latex_repr
+
     if latex_repr == '1':
         return ''
     else:

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -824,7 +824,9 @@
                 h_power = expr.as_coeff_exponent(h_expr)[1]
                 # un is now the original unit, but with h factored out.
                 un = str(expr*h_expr**(-1*h_power))
-                if str(un).endswith('cm') and un != 'cm':
+                un_unit = Unit(un, registry=self.ds.unit_registry)
+                cm = Unit('cm').expr
+                if str(un).endswith('cm') and cm not in un_unit.expr.atoms():
                     comoving = True
                     un = un[:-2]
                 # no length units besides code_length end in h so this is safe
@@ -834,18 +836,22 @@
                     # It doesn't make sense to scale a position by anything
                     # other than h**-1
                     raise RuntimeError
-                if un in formatted_length_unit_names:
-                    un = formatted_length_unit_names[un]
-                pp = un[0]
-                if pp in latex_prefixes:
-                    symbol_wo_prefix = un[1:]
-                    if symbol_wo_prefix in prefixable_units:
-                        un = un.replace(pp, "{"+latex_prefixes[pp]+"}", 1)
                 if un not in ['1', 'u', 'unitary']:
-                    if hinv:
-                        un = un + '\,h^{-1}'
-                    if comoving:
-                        un = un + '\,(1+z)^{-1}'
+                    if un in formatted_length_unit_names:
+                        un = formatted_length_unit_names[un]
+                    else:
+                        un = Unit(un, registry=self.ds.unit_registry)
+                        un = un.latex_representation()
+                        if hinv:
+                            un = un + '\,h^{-1}'
+                        if comoving:
+                            un = un + '\,(1+z)^{-1}'
+                        pp = un[0]
+                        if pp in latex_prefixes:
+                            symbol_wo_prefix = un[1:]
+                            if symbol_wo_prefix in prefixable_units:
+                                un = un.replace(
+                                    pp, "{"+latex_prefixes[pp]+"}", 1)
                     axes_unit_labels[i] = '\ \ ('+un+')'
 
             if self.oblique:
@@ -1488,7 +1494,7 @@
          Defaults to None, which automatically picks an appropriate unit.
          If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
-    north-vector : a sequence of floats
+    north_vector : a sequence of floats
          A vector defining the 'up' direction in the plot.  This
          option sets the orientation of the slicing plane.  If not
          set, an arbitrary grid-aligned north-vector is chosen.
@@ -1624,7 +1630,7 @@
          Defaults to None, which automatically picks an appropriate unit.
          If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
-    north-vector : a sequence of floats
+    north_vector : a sequence of floats
          A vector defining the 'up' direction in the plot.  This
          option sets the orientation of the slicing plane.  If not
          set, an arbitrary grid-aligned north-vector is chosen.
@@ -1687,8 +1693,9 @@
 
 class WindowPlotMPL(ImagePlotMPL):
     """A container for a single PlotWindow matplotlib figure and axes"""
-    def __init__(self, data, cbname, cblinthresh, cmap, extent, zlim, figure_size,
-                 fontsize, aspect, figure, axes, cax):
+    def __init__(self, data, cbname, cblinthresh, cmap, extent, zlim,
+                 figure_size, fontsize, aspect, figure, axes, cax):
+        from matplotlib.ticker import ScalarFormatter
         self._draw_colorbar = True
         self._draw_axes = True
         self._fontsize = fontsize
@@ -1716,7 +1723,14 @@
 
         self._init_image(data, cbname, cblinthresh, cmap, extent, aspect)
 
-        self.image.axes.ticklabel_format(scilimits=(-2, 3))
+        # In matplotlib 2.1 and newer we'll be able to do this using
+        # self.image.axes.ticklabel_format
+        # See https://github.com/matplotlib/matplotlib/pull/6337
+        formatter = ScalarFormatter(useMathText=True)
+        formatter.set_scientific(True)
+        formatter.set_powerlimits((-2, 3))
+        self.image.axes.xaxis.set_major_formatter(formatter)
+        self.image.axes.yaxis.set_major_formatter(formatter)
         if cbname == 'linear':
             self.cb.formatter.set_scientific(True)
             self.cb.formatter.set_powerlimits((-2, 3))
@@ -1820,7 +1834,7 @@
          ('{yloc}', '{space}')                  ('lower', 'window')
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
-    north-vector : a sequence of floats
+    north_vector : a sequence of floats
         A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
         used in `AxisAlignedSlicePlot`.  This option sets the orientation of the
         slicing plane.  If not set, an arbitrary grid-aligned north-vector is

diff -r e682c81923f6e40749b1bbb0134550facebb3453 -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -778,8 +778,9 @@
 
         """
         if "__IPYTHON__" in dir(builtins):
+            from IPython.display import display
             self._sigma_clip = sigma_clip
-            return self
+            display(self)
         else:
             raise YTNotInsideNotebook
 


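Among the merged changes, ``Unit.latex_repr`` now renders a numeric
coefficient as a LaTeX prefix, as exercised by the new unit test above:

.. code-block:: python

    from yt.units.unit_object import Unit

    u = Unit('1e9*cm')
    print(u.latex_repr)   # 1.0 \times 10^{9}\ \rm{cm}
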
https://bitbucket.org/yt_analysis/yt/commits/c195b3e709bc/
Changeset:   c195b3e709bc
Branch:      yt
User:        MatthewTurk
Date:        2016-08-03 02:41:02+00:00
Summary:     Updating test numbers
Affected #:  1 file

diff -r 4da858e68d31efc7bb8dd634e9c01b473cdffa32 -r c195b3e709bc0908c080192d1f4fd318adf23ef8 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_003:
+  local_pw_004:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes


https://bitbucket.org/yt_analysis/yt/commits/36e8d7c14ad7/
Changeset:   36e8d7c14ad7
Branch:      yt
User:        MatthewTurk
Date:        2016-08-03 03:20:32+00:00
Summary:     Removing flipud change in LIC
Affected #:  1 file

diff -r c195b3e709bc0908c080192d1f4fd318adf23ef8 -r 36e8d7c14ad7587166c086b707fad756082e74ee yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -2442,7 +2442,7 @@
         kernel = kernel.astype(np.double)
 
         lic_data = line_integral_convolution_2d(vectors,self.texture,kernel)
-        lic_data = np.flipud(lic_data / lic_data.max())
+        lic_data = lic_data / lic_data.max()
         lic_data_clip = np.clip(lic_data,self.lim[0],self.lim[1])
 
         if self.const_alpha:


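The flip is redundant because the earlier changeset passes ``origin='lower'``
to ``imshow``, which already places row 0 at the bottom.  A standalone
illustration of the two orientations (not code from the changeset):

.. code-block:: python

    import numpy as np
    import matplotlib.pyplot as plt

    data = np.arange(12.0).reshape(3, 4)
    fig, (ax0, ax1) = plt.subplots(1, 2)
    ax0.imshow(np.flipud(data))        # old approach: flip with default origin
    ax1.imshow(data, origin='lower')   # new approach: same result, no flip
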
https://bitbucket.org/yt_analysis/yt/commits/7af4ee124f9f/
Changeset:   7af4ee124f9f
Branch:      yt
User:        MatthewTurk
Date:        2016-08-08 19:10:12+00:00
Summary:     Merging with upstream
Affected #:  28 files

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
                 Adam Ginsburg (keflavich at gmail.com)
+                Austin Gilbert (augilbert4 at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 William Gray (graywilliamj at gmail.com)

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -116,7 +116,10 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -278,17 +278,17 @@
 Overplot Cell Edges
 ~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_cell_edges(line_width=1.0, alpha = 1.0,
-                                  color = (0.0, 0.0, 0.0))
+.. function:: annotate_cell_edges(line_width=0.002, alpha=1.0, color='black')
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.CellEdgesCallback`.)
 
-    Annotate the edges of cells, where the ``line_width`` in pixels is specified.
-    The ``alpha`` of the overlaid image and the ``color`` of the lines are also
-    specifiable.  Note that because the lines are drawn from both sides of a
-    cell, the image sometimes has the effect of doubling the line width.
-    Color here is in RGB float values (0 to 1).
+    Annotate the edges of cells, where ``line_width`` is specified relative to
+    the size of the longest plot axis.  The ``alpha`` of the overlaid image and
+    the ``color`` of the lines are also specifiable.  Note that because the
+    lines are drawn from both sides of a cell, the image sometimes has the
+    effect of doubling the line width.  Color here is a matplotlib color name or
+    a 3-tuple of RGB float values.
 
 .. python-script::
 

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -17,6 +17,24 @@
 plots of slices, projections, 1D profiles, and 2D profiles (phase plots), all of
 which are described below.
 
+.. _viewing-plots:
+
+Viewing Plots
+-------------
+
+yt uses an environment-neutral plotting mechanism that detects the appropriate
+matplotlib configuration for a given environment; however, it defaults to a
+basic renderer. To utilize interactive plots in matplotlib-supported
+environments (Qt, GTK, WX, etc.), simply call the ``toggle_interactivity()``
+function. Below is an example in a Jupyter notebook environment, but the same
+command should work in other environments as well:
+
+.. code-block:: python
+ 
+   %matplotlib notebook
+   import yt
+   yt.toggle_interactivity()
+
 .. _simple-inspection:
 
 Slices & Projections

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -118,7 +118,7 @@
     from yt.visualization.api import Streamlines
 
     ds = yt.load('DD1701') # Load ds
-    streamlines = Streamlines(ds, [0.5]*3)
+    streamlines = Streamlines(ds, ds.domain_center)
     streamlines.integrate_through_volume()
     stream = streamlines.path(0)
     matplotlib.pylab.semilogy(stream['t'], stream['density'], '-x')

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -73,6 +73,9 @@
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
 
+  local_axialpix_001:
+    - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization
+
 other_tests:
   unittests:
      - '-v'

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -93,7 +93,8 @@
     parallel_profile, \
     enable_plugins, \
     memory_checker, \
-    deprecated_class
+    deprecated_class, \
+    toggle_interactivity
 from yt.utilities.logger import ytLogger as mylog
 
 import yt.utilities.physical_constants as physical_constants

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -45,12 +45,12 @@
        lower wavelength bound in angstroms.
     lambda_max : float
        upper wavelength bound in angstroms.
-    n_lambda : float
+    n_lambda : int
        number of wavelength bins.
     """
 
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        self.n_lambda = n_lambda
+        self.n_lambda = int(n_lambda)
         # lambda, flux, and tau are wavelength, flux, and optical depth
         self.lambda_min = lambda_min
         self.lambda_max = lambda_max
@@ -301,7 +301,7 @@
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
+            pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
@@ -550,8 +550,9 @@
         """
         mylog.info("Writing spectrum to fits file: %s.", filename)
         col1 = pyfits.Column(name='wavelength', format='E', array=self.lambda_field)
-        col2 = pyfits.Column(name='flux', format='E', array=self.flux_field)
-        cols = pyfits.ColDefs([col1, col2])
+        col2 = pyfits.Column(name='tau', format='E', array=self.tau_field)
+        col3 = pyfits.Column(name='flux', format='E', array=self.flux_field)
+        cols = pyfits.ColDefs([col1, col2, col3])
         tbhdu = pyfits.BinTableHDU.from_columns(cols)
         tbhdu.writeto(filename, clobber=True)
 

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -85,7 +85,7 @@
         if self.children is None: return
         for child in self.children:
             child.add_validator(validator)
-        
+
     def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -127,4 +127,3 @@
 
     return simulation_time_series_registry[simulation_type](parameter_filename,
                                                             find_outputs=find_outputs)
-

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/frontends/rockstar/definitions.py
--- a/yt/frontends/rockstar/definitions.py
+++ b/yt/frontends/rockstar/definitions.py
@@ -38,7 +38,7 @@
 # Note the final field here, which is a field for min/max format revision in
 # which the field appears.
 
-KNOWN_REVISIONS=[0, 1]
+KNOWN_REVISIONS=[0, 1, 2]
 
 halo_dt = [
     ('particle_identifier', np.int64),
@@ -101,6 +101,12 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
+    ('type', np.int32, (2, 100)),
+    ('sm', np.float32, (2, 100)),
+    ('gas', np.float32, (2, 100)),
+    ('bh', np.float32, (2, 100)),
+    ('peak_density', np.float32, (2, 100)),
+    ('av_density', np.float32, (2, 100)),
 ]
 
 halo_dts = {}

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -668,7 +668,7 @@
         sfh.update({0:data})
         grid_left_edges = domain_left_edge
         grid_right_edges = domain_right_edge
-        grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+        grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32")
 
     if length_unit is None:
         length_unit = 'code_length'

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -16,7 +16,7 @@
 
 import errno
 from yt.extern.six import string_types
-from yt.extern.six.moves import input
+from yt.extern.six.moves import input, builtins
 import time
 import inspect
 import traceback
@@ -986,3 +986,21 @@
     except ImportError:
         pass
     return dummy_context_manager()
+
+interactivity = False
+
+def toggle_interactivity():
+    """Toggle whether interactive plotting backends can be used."""
+    global interactivity
+    interactivity = not interactivity
+    if interactivity is True:
+        if '__IPYTHON__' in dir(builtins):
+            import IPython
+            shell = IPython.get_ipython()
+            shell.magic('matplotlib')
+        else:
+            import matplotlib
+            matplotlib.interactive(True)
+
+def get_interactivity():
+    return interactivity

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -113,7 +113,7 @@
             # re-order the array and squeeze out the dummy dim
             return np.squeeze(np.transpose(img, (yax, xax, ax)))
 
-        elif dimension < 3:
+        elif self.axis_id.get(dimension, dimension) < 3:
             return self._ortho_pixelize(data_source, field, bounds, size,
                                         antialias, dimension, periodic)
         else:

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/geometry/coordinates/tests/test_axial_pixelization.py
--- /dev/null
+++ b/yt/geometry/coordinates/tests/test_axial_pixelization.py
@@ -0,0 +1,9 @@
+from yt.testing import \
+    fake_amr_ds, _geom_transforms
+from yt.utilities.answer_testing.framework import \
+    AxialPixelizationTest
+
+def test_axial_pixelization():
+    for geom in sorted(_geom_transforms):
+        ds = fake_amr_ds(geometry=geom)
+        yield AxialPixelizationTest(ds)

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -926,12 +926,12 @@
     yt_quan2 = YTQuantity.from_pint(p_quan)
 
     yield assert_array_equal, p_arr, yt_arr.to_pint()
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_array_equal, yt_arr, YTArray.from_pint(p_arr)
     yield assert_array_equal, yt_arr, yt_arr2
 
     yield assert_equal, p_quan.magnitude, yt_quan.to_pint().magnitude
-    assert p_quan.units == yt_quan.to_pint().units
+    assert_equal(p_quan, yt_quan.to_pint())
     yield assert_equal, yt_quan, YTQuantity.from_pint(p_quan)
     yield assert_equal, yt_quan, yt_quan2
 

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -713,7 +713,7 @@
         >>> c = yt.YTArray.from_pint(b)
         """
         p_units = []
-        for base, exponent in arr.units.items():
+        for base, exponent in arr._units.items():
             bs = convert_pint_units(base)
             p_units.append("%s**(%s)" % (bs, Rational(exponent)))
         p_units = "*".join(p_units)

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -324,6 +324,8 @@
             self.ds = data_dir_load(ds_fn)
 
     def __call__(self):
+        if AnswerTestingTest.result_storage is None:
+            return
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
@@ -860,6 +862,47 @@
     def compare(self, new_result, old_result):
         compare_image_lists(new_result, old_result, self.decimals)
 
+class AxialPixelizationTest(AnswerTestingTest):
+    # This test is typically used once per geometry or coordinate system.
+    # Feed it a dataset, and it checks that the results of basic pixelization
+    # don't change.
+    _type_name = "AxialPixelization"
+    _attrs = ('geometry',)
+    def __init__(self, ds_fn, decimals=None):
+        super(AxialPixelizationTest, self).__init__(ds_fn)
+        self.decimals = decimals
+        self.geometry = self.ds.coordinates.name
+
+    def run(self):
+        rv = {}
+        ds = self.ds
+        for i, axis in enumerate(ds.coordinates.axis_order):
+            (bounds, center, display_center) = \
+                    pw.get_window_parameters(axis, ds.domain_center, None, ds)
+            slc = ds.slice(axis, center[i])
+            xax = ds.coordinates.axis_name[ds.coordinates.x_axis[axis]]
+            yax = ds.coordinates.axis_name[ds.coordinates.y_axis[axis]]
+            pix_x = ds.coordinates.pixelize(axis, slc, xax, bounds, (512, 512))
+            pix_y = ds.coordinates.pixelize(axis, slc, yax, bounds, (512, 512))
+            # Wipe out all NaNs
+            pix_x[np.isnan(pix_x)] = 0.0
+            pix_y[np.isnan(pix_y)] = 0.0
+            rv['%s_x' % axis] = pix_x
+            rv['%s_y' % axis] = pix_y
+        return rv
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
+        for k in new_result:
+            if self.decimals is None:
+                assert_almost_equal(new_result[k], old_result[k])
+            else:
+                assert_allclose_units(new_result[k], old_result[k],
+                                      10**(-self.decimals))
+
+
 def requires_sim(sim_fn, sim_type, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -20,9 +20,30 @@
     mylog, \
     iterable, \
     get_brewer_cmap, \
-    matplotlib_style_context
+    matplotlib_style_context, \
+    get_interactivity
 import numpy as np
 
+backend_dict = {'GTK': ['backend_gtk', 'FigureCanvasGTK',
+                       'FigureManagerGTK'],
+               'GTKAgg': ['backend_gtkagg', 'FigureCanvasGTKAgg'],
+               'GTKCairo': ['backend_gtkcairo', 'FigureCanvasGTKCairo'],
+               'MacOSX': ['backend_macosx', 'FigureCanvasMac', 'FigureManagerMac'],
+               'Qt4Agg': ['backend_qt4agg', 'FigureCanvasQTAgg'],
+               'Qt5Agg': ['backend_qt5agg', 'FigureCanvasQTAgg'],
+               'TkAgg': ['backend_tkagg', 'FigureCanvasTkAgg'],
+               'WX': ['backend_wx', 'FigureCanvasWx'],
+               'WXAgg': ['backend_wxagg', 'FigureCanvasWxAgg'],
+               'GTK3Cairo': ['backend_gtk3cairo',
+                             'FigureCanvasGTK3Cairo',
+                             'FigureManagerGTK3Cairo'],
+               'GTK3Agg': ['backend_gtk3agg', 'FigureCanvasGTK3Agg',
+                           'FigureManagerGTK3Agg'],
+               'WebAgg': ['backend_webagg', 'FigureCanvasWebAgg'],
+               'nbAgg': ['backend_nbagg', 'FigureCanvasNbAgg',
+                         'FigureManagerNbAgg'],
+               'agg': ['backend_agg', 'FigureCanvasAgg']}
+
 
 class CallbackWrapper(object):
     def __init__(self, viewer, window_plot, frb, field, font_properties, 
@@ -50,14 +71,15 @@
         self.font_color = font_color
         self.field = field
 
+
 class PlotMPL(object):
-    """A base class for all yt plots made using matplotlib.
+    """A backend-independent base class for all yt plots made using matplotlib.
 
     """
+
     def __init__(self, fsize, axrect, figure, axes):
         """Initialize PlotMPL class"""
         import matplotlib.figure
-        from ._mpl_imports import FigureCanvasAgg
         self._plot_valid = True
         if figure is None:
             self.figure = matplotlib.figure.Figure(figsize=fsize, frameon=True)
@@ -70,11 +92,33 @@
             axes.cla()
             axes.set_position(axrect)
             self.axes = axes
-        self.canvas = FigureCanvasAgg(self.figure)
+        canvas_classes = self._set_canvas()
+        self.canvas = canvas_classes[0](self.figure)
+        if len(canvas_classes) > 1:
+            self.manager = canvas_classes[1](self.canvas, 1)
         for which in ['major', 'minor']:
             for axis in 'xy':
                 self.axes.tick_params(which=which, axis=axis, direction='in')
 
+    def _set_canvas(self):
+        self.interactivity = get_interactivity()
+        if self.interactivity:
+            import matplotlib
+            backend = str(matplotlib.get_backend())
+        else:
+            backend = 'agg'
+
+        # Fall back to the non-interactive agg backend if the current
+        # backend has no entry in backend_dict.
+        entry = backend_dict.get(backend, backend_dict['agg'])
+        mod = __import__('matplotlib.backends', globals(), locals(),
+                         [entry[0]], 0)
+        submod = getattr(mod, entry[0])
+        FigureCanvas = getattr(submod, entry[1])
+        if len(entry) > 2:
+            FigureManager = getattr(submod, entry[2])
+            return [FigureCanvas, FigureManager]
+        return [FigureCanvas]
+
     def save(self, name, mpl_kwargs=None, canvas=None):
         """Choose backend and save image to disk"""
         from ._mpl_imports import \
@@ -105,6 +149,12 @@
             canvas.print_figure(name, **mpl_kwargs)
         return name
 
+    def show(self):
+        try:
+            self.manager.show()
+        except AttributeError:
+            self.canvas.show()
+
     def _get_labels(self):
         ax = self.axes
         labels = ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels()
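
A sketch of the dynamic lookup that _set_canvas performs, assuming only the
backend_dict added above (the explicit fallback to 'agg' is illustrative):

    import matplotlib
    from yt.visualization.base_plot_types import backend_dict

    backend = str(matplotlib.get_backend())            # e.g. 'TkAgg'
    entry = backend_dict.get(backend, backend_dict['agg'])
    mod = __import__('matplotlib.backends', globals(), locals(),
                     [entry[0]], 0)
    submod = getattr(mod, entry[0])
    FigureCanvas = getattr(submod, entry[1])           # canvas class for backend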

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -620,15 +620,18 @@
         >>> slc.show()
 
         """
-        if "__IPYTHON__" in dir(builtins):
-            api_version = get_ipython_api_version()
-            if api_version in ('0.10', '0.11'):
-                self._send_zmq()
-            else:
-                from IPython.display import display
-                display(self)
+        interactivity = self.plots[list(self.plots.keys())[0]].interactivity
+        if interactivity:
+            for k, v in sorted(iteritems(self.plots)):
+                v.show()
         else:
-            raise YTNotInsideNotebook
+            if "__IPYTHON__" in dir(builtins):
+                api_version = get_ipython_api_version()
+                if api_version in ('0.10', '0.11'):
+                    self._send_zmq()
+                else:
+                    from IPython.display import display
+                    display(self)
 
     @validate_plot
     def display(self, name=None, mpl_kwargs=None):
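
A hedged end-to-end sketch of the new code path: with interactivity toggled on
before the plot is created, show() opens a native matplotlib window instead of
requiring a notebook ("IsolatedGalaxy" is just an example dataset):

    import yt
    from yt.funcs import toggle_interactivity

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    toggle_interactivity()
    slc = yt.SlicePlot(ds, "z", "density")
    slc.show()   # dispatches to PlotMPL.show() via the backend's FigureManager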

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -2462,7 +2462,7 @@
 
 class CellEdgesCallback(PlotCallback):
     """
-    annotate_cell_edges(line_width=1.0, alpha = 1.0, color = (0.0, 0.0, 0.0))
+    annotate_cell_edges(line_width=0.002, alpha = 1.0, color = 'black')
 
     Annotate cell edges.  This is done through a second call to pixelize, where
     the distance from a pixel to a cell boundary in pixels is compared against
@@ -2472,12 +2472,13 @@
     Parameters
     ----------
     line_width : float
-        Distance, in pixels, from a cell edge that will mark a pixel as being
-        annotated as a cell edge.  Default is 1.0.
+        The width of the cell edge lines, in normalized units relative to
+        the size of the longest image axis.  Default is 0.002, i.e. 0.2%
+        of the longest axis.
     alpha : float
         When the second image is overlaid, it will have this level of alpha
         transparency.  Default is 1.0 (fully-opaque).
-    color : tuple of three floats
+    color : tuple of three floats or matplotlib color name
         This is the color of the cell edge values.  It defaults to black.
 
     Examples
@@ -2491,11 +2492,13 @@
     """
     _type_name = "cell_edges"
     _supported_geometries = ("cartesian", "spectral_cube")
-    def __init__(self, line_width=1.0, alpha = 1.0, color=(0.0, 0.0, 0.0)):
+    def __init__(self, line_width=0.002, alpha = 1.0, color='black'):
+        from matplotlib.colors import ColorConverter
+        conv = ColorConverter()
         PlotCallback.__init__(self)
         self.line_width = line_width
         self.alpha = alpha
-        self.color = (np.array(color) * 255).astype("uint8")
+        self.color = (np.array(conv.to_rgb(color)) * 255).astype("uint8")
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -2505,23 +2508,40 @@
         plot._axes.hold(True)
         nx = plot.image._A.shape[1]
         ny = plot.image._A.shape[0]
-        im = np.zeros((ny, nx), dtype="f8")
-        pixelize_cartesian(im, plot.data['px'],
+        aspect = float((y1 - y0) / (x1 - x0))
+        pixel_aspect = float(ny)/nx
+        relative_aspect = pixel_aspect / aspect
+        if relative_aspect > 1:
+            nx = int(nx/relative_aspect)
+        else:
+            ny = int(ny*relative_aspect)
+        if aspect > 1:
+            if nx < 1600:
+                nx = int(1600./nx*ny)
+                ny = 1600
+            long_axis = ny
+        else:
+            if ny < 1600:
+                nx = int(1600./ny*nx)
+                ny = 1600
+            long_axis = nx
+        line_width = max(self.line_width*long_axis, 1.0)
+        im_buffer = np.zeros((ny, nx, 4), dtype="uint8")
+        im = pixelize_cartesian(im_buffer,
+                                plot.data['px'],
                                 plot.data['py'],
                                 plot.data['pdx'],
                                 plot.data['pdy'],
                                 plot.data['px'], # dummy field
                                 (x0, x1, y0, y1),
-                                line_width=self.line_width)
+                                line_width=line_width)
         # New image:
-        im_buffer = np.zeros((ny, nx, 4), dtype="uint8")
-        im_buffer[im>0,3] = 255
-        im_buffer[im>0,:3] = self.color
+        im_buffer[im > 0, 3] = 255
+        im_buffer[im > 0, :3] = self.color
         plot._axes.imshow(im_buffer, origin='lower',
-                          interpolation='nearest',
-                          extent = [xx0, xx1, yy0, yy1],
-                          alpha = self.alpha)
-        plot._axes.set_xlim(xx0,xx1)
-        plot._axes.set_ylim(yy0,yy1)
+                          interpolation='bilinear',
+                          extent=[xx0, xx1, yy0, yy1],
+                          alpha=self.alpha)
+        plot._axes.set_xlim(xx0, xx1)
+        plot._axes.set_ylim(yy0, yy1)
         plot._axes.hold(False)
-
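
A usage sketch for the updated callback (defaults as in this diff; the dataset
name is illustrative):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = yt.SlicePlot(ds, "z", "density")
    # line_width is now a fraction of the longest image axis, and color
    # accepts any matplotlib color name.
    slc.annotate_cell_edges(line_width=0.002, alpha=0.8, color="white")
    slc.save()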

diff -r 36e8d7c14ad7587166c086b707fad756082e74ee -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -23,7 +23,8 @@
 from distutils.version import LooseVersion
 from numbers import Number
 
-from .base_plot_types import ImagePlotMPL
+from .base_plot_types import \
+    ImagePlotMPL
 from .fixed_resolution import \
     FixedResolutionBuffer, \
     OffAxisProjectionFixedResolutionBuffer


https://bitbucket.org/yt_analysis/yt/commits/85c276905957/
Changeset:   85c276905957
Branch:      yt
User:        MatthewTurk
Date:        2016-08-08 19:16:15+00:00
Summary:     This results in identical results
Affected #:  1 file

diff -r 7af4ee124f9fc0eea4e420f8157b38d22378a043 -r 85c2769059573d14d7f014d4d9e8a7ee0822ea78 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -132,7 +132,8 @@
                               data_source['pdy'],
                               data_source[field], bounds)
         elif name == 'phi':
-            pixelize_cylinder(buff,
+            # Note that we feed in buff.T here
+            pixelize_cylinder(buff.T,
                              data_source['px'],
                              data_source['pdx'],
                              data_source['py'],


https://bitbucket.org/yt_analysis/yt/commits/9bee5a66f96e/
Changeset:   9bee5a66f96e
Branch:      yt
User:        MatthewTurk
Date:        2016-08-08 20:52:40+00:00
Summary:     Fix buffer size and image buffer size for line annotation
Affected #:  1 file

diff -r 85c2769059573d14d7f014d4d9e8a7ee0822ea78 -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -2526,8 +2526,8 @@
                 ny = 1600
             long_axis = nx
         line_width = max(self.line_width*long_axis, 1.0)
-        im_buffer = np.zeros((ny, nx, 4), dtype="uint8")
-        im = pixelize_cartesian(im_buffer,
+        im = np.zeros((ny, nx), dtype="f8")
+        pixelize_cartesian(im,
                                 plot.data['px'],
                                 plot.data['py'],
                                 plot.data['pdx'],
@@ -2536,6 +2536,7 @@
                                 (x0, x1, y0, y1),
                                 line_width=line_width)
         # New image:
+        im_buffer = np.zeros((ny, nx, 4), dtype="uint8")
         im_buffer[im > 0, 3] = 255
         im_buffer[im > 0, :3] = self.color
         plot._axes.imshow(im_buffer, origin='lower',


https://bitbucket.org/yt_analysis/yt/commits/a359b82b46c0/
Changeset:   a359b82b46c0
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 18:01:11+00:00
Summary:     Merging with upstream.
Affected #:  26 files

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/extensions/config_help.py
--- /dev/null
+++ b/doc/extensions/config_help.py
@@ -0,0 +1,34 @@
+import re
+import subprocess
+from docutils import statemachine
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('config_help', GetConfigHelp)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class GetConfigHelp(Directive):
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = True
+
+    def run(self):
+        rst_file = self.state_machine.document.attributes['source']
+        data = subprocess.check_output(
+            self.arguments[0].split(" ") + ['-h']).decode('utf8').split('\n')
+        ind = next((i for i, val in enumerate(data)
+                    if re.match(r'\s{0,3}\{.*\}\s*$', val)))
+        lines = ['.. code-block:: none', ''] + data[ind + 1:]
+        self.state_machine.insert_input(
+            statemachine.string2lines("\n".join(lines)), rst_file)
+        return []

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/_static/apiKey01.jpg
Binary file doc/source/_static/apiKey01.jpg has changed

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/_static/apiKey02.jpg
Binary file doc/source/_static/apiKey02.jpg has changed

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/_static/apiKey03.jpg
Binary file doc/source/_static/apiKey03.jpg has changed

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/_static/apiKey04.jpg
Binary file doc/source/_static/apiKey04.jpg has changed

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,8 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
+              'config_help']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -20,7 +20,7 @@
    "source": [
     "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
     "\n",
-    "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+    "Next you should set the default ``test_data_dir`` in the ``~/.config/yt/ytrc`` file in your home directory.  Note that you may have to create the directory and file if they don't exist already.\n",
     "\n",
     "> [yt]\n",
     "\n",

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -285,15 +285,12 @@
 
 These datasets are available at http://yt-project.org/data/.
 
-Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
-with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to test with.  Here is an example
-config file:
+Next, set the config parameter ``test_data_dir`` to point to the
+directory containing the test data you want to test with, e.g.:
 
 .. code-block:: none
 
-   [yt]
-   test_data_dir = /Users/tomservo/src/yt-data
+   $ yt config set yt test_data_dir /Users/tomservo/src/yt-data
 
 More data will be added over time.  To run the answer tests, you must first
 generate a set of test answers locally on a "known good" revision, then update
@@ -313,7 +310,7 @@
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+``~/.config/yt/ytrc`` file) in a file named ``local-tipsy``. To run the tipsy
 frontend's answer tests using a different yt changeset, update to that
 changeset, recompile if necessary, and run the tests using the following
 command:

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -388,10 +388,10 @@
 To make things easier to load these sample datasets, you can add the parent
 directory to your downloaded sample data to your *yt path*.
 If you set the option ``test_data_dir``, in the section ``[yt]``,
-in ``~/.yt/config``, yt will search this path for them.
+in ``~/.config/yt/ytrc``, yt will search this path for them.
 
 This means you can download these datasets to ``/big_drive/data_for_yt`` , add
-the appropriate item to ``~/.yt/config``, and no matter which directory you are
+the appropriate item to ``~/.config/yt/ytrc``, and no matter which directory you are
 in when running yt, it will also check in *that* directory.
 
 
@@ -437,12 +437,11 @@
 hand, you may want it to output a lot more, since you can't figure out exactly what's going
 wrong, and you want to output some debugging information. The yt log level can be
 changed using the :ref:`configuration-file`, either by setting it in the
-``$HOME/.yt/config`` file:
+``$HOME/.config/yt/ytrc`` file:
 
 .. code-block:: bash
 
-   [yt]
-   loglevel = 10 # This sets the log level to "DEBUG"
+   $ yt config set yt loglevel 10  # This sets the log level to "DEBUG"
 
 which would produce debug (as well as info, warning, and error) messages, or at runtime:
 

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -133,6 +133,16 @@
      <tr valign="top"><td width="25%"><p>
+           <a href="sharing_data.html">Sharing Data</a>
+         </p>
+       </td>
+       <td width="75%">
+         <p class="linkdescr">The yt Hub</p>
+       </td>
+     </tr>
+     <tr valign="top">
+       <td width="25%">
+         <p><a href="reference/index.html">Reference Materials</a></p></td>
@@ -185,6 +195,7 @@
    analyzing/analysis_modules/index
    examining/index
    developing/index
+   sharing_data
    reference/index
    faq/index
    Getting Help <help/index>

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -54,35 +54,7 @@
 
 This will print the list of available subcommands,
 
-.. code-block:: bash
-
-    help                Print help message
-    bootstrap_dev       Bootstrap a yt development environment
-    bugreport           Report a bug in yt
-    hub_register        Register a user on the Hub: http://hub.yt-project.org/
-    hub_submit          Submit a mercurial repository to the yt Hub
-                        (http://hub.yt-project.org/), creating a BitBucket
-                        repo in the process if necessary.
-    instinfo            Get some information about the yt installation
-    version             Get some information about the yt installation (this
-                        is an alias for instinfo).
-    load                Load a single dataset into an IPython instance
-    mapserver           Serve a plot in a GMaps-style interface
-    pastebin            Post a script to an anonymous pastebin
-    pastebin_grab       Print an online pastebin to STDOUT for local use.
-    upload_notebook     Upload an IPython notebook to hub.yt-project.org.
-    plot                Create a set of images
-    rpdb                Connect to a currently running (on localhost) rpd
-                        session. Commands run with --rpdb will trigger an rpdb
-                        session with any uncaught exceptions.
-    notebook            Run the IPython Notebook
-    stats               Print stats and max/min value of a given field (if
-                        requested), for one or more datasets (default field is
-                        Density)
-    update              Update the yt installation to the most recent version
-    delete_image        Delete image from imgur.com.
-    upload_image        Upload an image to imgur.com. Must be PNG.
-
+.. config_help:: yt
 
 To execute any such function, simply run:
 
@@ -217,13 +189,12 @@
 
 This command will accept the filename of a ``.ipynb`` file (generated from an
 IPython notebook session) and upload it to the `yt hub
-<http://hub.yt-project.org/>` where others will be able to view it, and
+<https://hub.yt/>`__ where others will be able to view it, and
 download it.  This is an easy method for recording a sequence of commands,
 their output, narrative information, and then sharing that with others.  These
 notebooks will be viewable online, and the appropriate URLs will be returned on
 the command line.
 
-
 rpdb
 ++++
 
@@ -272,3 +243,95 @@
+The image uploaded using ``upload_image`` is assigned a unique hash that
+can be used to remove it. This subcommand provides an easy way to send a delete
+request directly to `imgur.com <http://imgur.com/>`_.
+
+Hub helper
+~~~~~~~~~~
+
+The :code:`yt hub` command-line tool allows you to interact with the `yt hub
+<https://hub.yt>`__. The following subcommands are currently available:
+
+.. config_help:: yt hub
+
+register
+++++++++
+
+This subcommand starts an interactive process of creating an account on the `yt
+hub <https://hub.yt/>`__. Please note that the yt Hub also supports multiple OAuth
+providers such as Google, Bitbucket and GitHub for authentication. 
+See :ref:`hub-APIkey` for more information.
+
+start
++++++
+
+This subcommand launches the Jupyter Notebook on the `yt Hub <https://hub.yt>`__
+with a chosen Hub folder mounted to the ``/data`` directory inside the notebook.
+If no path is given, all the `example yt datasets
+<https://yt-project.org/data>`_ are mounted by default. The URL for
+accessing the Notebook will be printed on the command line.
+
+Example:
+
+.. code-block:: bash
+
+   $ yt hub start
+   $ yt hub start /user/xarthisius/Public
+
+
+Config helper
+~~~~~~~~~~~~~
+
+The :code:`yt config` command-line tool allows you to modify and access yt's
+configuration without manually locating and opening the config file in an editor.
+To get a quick list of available commands, just type:
+
+.. code-block:: bash
+
+   yt config -h
+
+This will print the list of available subcommands:
+
+.. config_help:: yt config
+
+Since yt version 3.3.2, the previous location of the configuration file
+(``$HOME/.yt/config``) has been deprecated in favor of a location adhering to the
+`XDG Base Directory Specification
+<https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_
+(``$XDG_CONFIG_HOME/yt/ytrc``). To perform an automatic migration of
+the old config, you are encouraged to run:
+
+.. code-block:: bash
+
+   yt config migrate
+
+which will copy your current config file to the new location and store a backup
+copy as ``$HOME/.yt/config.bak``.
+
+Examples
+++++++++
+
+Listing current content of the config file:
+
+.. code-block:: bash
+
+   $ yt config list
+   [yt]
+   loglevel = 50
+
+Obtaining a single config value by name:
+
+.. code-block:: bash
+
+   $ yt config get yt loglevel
+   50
+
+Changing a single config value:
+
+.. code-block:: bash
+
+   $ yt config set yt loglevel 10
+
+Removing a single config entry:
+
+.. code-block:: bash
+
+   $ yt config rm yt loglevel

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -18,9 +18,9 @@
 Configuration File Format
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/config`` as a configuration
+yt will look for and recognize the file ``$HOME/.config/yt/ytrc`` as a configuration
 file, containing several options that can be modified and adjusted to control
-runtime behavior.  For example, a sample ``$HOME/.yt/config`` file could look
+runtime behavior.  For example, a sample ``$HOME/.config/yt/ytrc`` file could look
 like:
 
 .. code-block:: none
@@ -31,7 +31,17 @@
 
 This configuration file would set the logging threshold much lower, enabling
 much more voluminous output from yt.  Additionally, it increases the number of
-datasets tracked between instantiations of yt.
+datasets tracked between instantiations of yt. The configuration file can be
+managed using the ``yt config`` helper. It can list, add, modify and remove
+options from the configuration file, e.g.:
+
+.. code-block:: none
+
+   $ yt config -h
+   $ yt config list
+   $ yt config set yt loglevel 1
+   $ yt config rm yt maximumstoreddatasets
+
 
 Configuration Options At Runtime
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/sharing_data.rst
--- /dev/null
+++ b/doc/source/sharing_data.rst
@@ -0,0 +1,117 @@
+.. _sharing-data:
+
+The yt Hub
+==========
+
+.. contents::
+   :depth: 2
+   :local:
+   :backlinks: none
+
+What is the yt Hub?
+-------------------
+
+The yt data Hub is a mechanism by which images, data objects and projects can be
+shared with other people. For instance, one can upload a dataset and allow other
+people to remotely analyze it with a Jupyter notebook, or upload notebooks and
+view them from any web browser.
+
+.. note:: All items posted on the hub are public!
+
+Over time, more widgets will be added, and more datatypes will be supported
+for upload.  If you are interested in adding more ways of sharing data, please
+email the developers' list.  We would like to add support for 3D widgets such
+as isocontours as well as interactive binning and rebinning of data from yt
+data objects, to be displayed as phase plots and profiles.
+
+.. note:: Working with the Hub requires additional dependencies to be installed.
+          You can obtain them by running: ``pip install yt[hub]``. 
+
+.. _hub-APIkey:
+
+Obtaining an API key
+--------------------
+
+In order to interact with the yt Hub, you need to obtain an API key, which is
+available only for authenticated users. You can `log into
+<https://girder.hub.yt/#?dialog=login>`_ the Hub using your Google, GitHub or
+Bitbucket account. After you log in, an API key can be generated under the *My
+account* page, which can be accessed through the dropdown menu in the upper
+right corner. 
+
+.. image:: _static/apiKey01.jpg
+   :width: 50 %
+
+Select the *API keys* tab and press the *Create new key* button:
+
+.. image:: _static/apiKey02.jpg
+   :width: 50 %
+
+By convention, the *Name* field of an API key can be used to specify, in a
+human-readable way, what application is making use of the key, e.g. ``yt
+command``, although you may name your key however you want.
+
+.. image:: _static/apiKey03.jpg
+   :width: 50 %
+
+After the API key is created, you can obtain it by clicking the *show* link:
+
+.. image:: _static/apiKey04.jpg
+   :width: 50 %
+
+For more information about API keys, please see `this document
+<http://girder.readthedocs.io/en/latest/user-guide.html?highlight=API%20keys#api-keys>`__.
+
+After you have gotten your API key, update your config file:
+
+.. code-block:: none
+
+   $ yt config set yt hub_api_key 3fd1de56c2114c13a2de4dd51g10974b
+
+Replace ``3fd1de56c2114c13a2de4dd51g10974b`` with your API key.
+
+Registering a User
+^^^^^^^^^^^^^^^^^^
+
+If you do not wish to use OAuth authentication, you can create a Hub account
+using the ``yt`` command. To register a user:
+
+.. code-block:: bash
+
+   $ yt hub register
+
+This will walk you through the process of registering. You will need to supply
+a name, a username, a password and an email address. Apart from creating a new
+user account, it will also generate an API key and append it to yt's config
+file.  At this point, you're ready to go!
+
+What Can Be Uploaded
+--------------------
+
+Currently, the yt hub can accept these types of data:
+
+ * Raw data files, scripts.
+ * IPython notebooks: these are stored on the hub and are made available for
+   download and via the IPython `nbviewer <http://nbviewer.ipython.org/>`_
+   service.
+
+How to Upload Data
+------------------
+
+Uploading data can be performed using the ``girder-cli`` command-line tool or
+directly via the web interface. Please refer to the ``girder-cli`` `documentation page
+<http://girder.readthedocs.io/en/latest/python-client.html>`_ for additional
+information.
+
+Uploading Notebooks
+^^^^^^^^^^^^^^^^^^^
+
+Notebooks can be uploaded from the bash command line:
+
+.. code-block:: bash
+
+   yt upload_notebook notebook_file.ipynb
+
+After the notebook is finished uploading, yt will print a link to the raw
+notebook as well as an nbviewer link to the same notebook.  Your notebooks will
+be stored under your hub Public directory.

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -537,6 +537,27 @@
    slc.set_center((0.5, 0.503))
    slc.save()
 
+Flipping the plot view axes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, all :class:`~yt.visualization.plot_window.PlotWindow` objects plot
+with the assumption that the eastern direction on the plot forms a right-handed
+coordinate system with the ``normal`` and ``north_vector`` for the system, whether
+explicitly or implicitly defined. This setting can be toggled or explicitly defined
+by the user at initialization:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   #slicing with non right-handed coordinates
+   slc = yt.SlicePlot(ds, 'x', 'velocity_x', right_handed=False)
+   slc.annotate_title('Not Right Handed')
+   slc.save("NotRightHanded.png")
+
+   # switching to right-handed coordinates
+   slc.toggle_right_handed()
+   slc.annotate_title('Right Handed')
+   slc.save("Standard.png")
 
 .. _hiding-colorbar-and-axes:
 
@@ -704,6 +725,7 @@
    slc.set_cbar_minorticks('all', 'off')
    slc.save()
 
+
 .. _matplotlib-customization:
 
 Further customization via matplotlib

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -105,7 +105,7 @@
 but it requires that you get an API key first.  You can get this API key by
 creating an account and then going to your "dashboard," where it will be listed
 on the right hand side.  Once you've obtained it, put it into your
-``~/.yt/config`` file under the heading ``[yt]`` as the variable
+``~/.config/yt/ytrc`` file under the heading ``[yt]`` as the variable
 ``sketchfab_api_key``.  If you don't want to do this, you can also supply it as
 an argument to the function ``export_sketchfab``.
 

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 setup.py
--- a/setup.py
+++ b/setup.py
@@ -380,6 +380,9 @@
         'IPython',
         'cython',
     ],
+    extras_require = {
+        'hub':  ["girder_client"]
+    },
     cmdclass={'sdist': sdist, 'build_ext': build_ext, 'build_py': build_py},
     author="The yt project",
     author_email="yt-dev@lists.spacepope.org",

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_004:
+  local_pw_006:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,6 +16,7 @@
 #-----------------------------------------------------------------------------
 
 import os
+import warnings
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(
@@ -48,8 +49,9 @@
     test_storage_dir = '/does/not/exist',
     test_data_dir = '/does/not/exist',
     enzo_db = '',
-    hub_url = 'https://hub.yt-project.org/upload',
+    hub_url = 'https://girder.hub.yt/api/v1',
     hub_api_key = '',
+    hub_sandbox = '/collection/yt_sandbox/data',
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
@@ -67,20 +69,28 @@
     default_colormap = 'arbre',
     ray_tracing_engine = 'embree',
     )
+
+CONFIG_DIR = os.path.join(
+    os.environ.get('XDG_CONFIG_HOME',
+                   os.path.join(os.path.expanduser('~'), '.config')), 'yt')
+if not os.path.exists(CONFIG_DIR):
+    os.makedirs(CONFIG_DIR)
+
+CURRENT_CONFIG_FILE = os.path.join(CONFIG_DIR, 'ytrc')
+_OLD_CONFIG_FILE = os.path.join(os.path.expanduser('~'), '.yt', 'config')
+
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
 # without them.
 
-__fn = os.path.expanduser("~/.yt/config")
-if os.path.exists(__fn):
-    f = open(__fn).read()
+if os.path.exists(_OLD_CONFIG_FILE):
+    f = open(_OLD_CONFIG_FILE).read()
     if any(header in f for header in ["[lagos]","[raven]","[fido]","[enki]"]):
         print("***********************************************************")
         print("* Upgrading configuration file to new format; saving old. *")
         print("***********************************************************")
         # This is of the old format
         cp = configparser.ConfigParser()
-        cp.read(__fn)
+        cp.read(_OLD_CONFIG_FILE)
         # NOTE: To avoid having the 'DEFAULT' section here,
         # we are not passing in ytcfg_defaults to the constructor.
         new_cp = configparser.ConfigParser()
@@ -91,16 +101,21 @@
                 if option.lower() in ytcfg_defaults:
                     new_cp.set("yt", option, cp.get(section, option))
                     print("Setting %s to %s" % (option, cp.get(section, option)))
-        open(__fn + ".old", "w").write(f)
-        new_cp.write(open(__fn, "w"))
-# Pathological check for Kraken
-#elif os.path.exists("~/"):
-#    if not os.path.exists("~/.yt"):
-#            print "yt is creating a new directory, ~/.yt ."
-#            os.mkdir(os.path.exists("~/.yt/"))
-#    # Now we can read in and write out ...
-#    new_cp = configparser.ConfigParser(ytcfg_defaults)
-#    new_cp.write(__fn)
+        open(_OLD_CONFIG_FILE + ".old", "w").write(f)
+        new_cp.write(open(_OLD_CONFIG_FILE, "w"))
+
+    msg = (
+        "The configuration file {} is deprecated. "
+        "Please migrate your config to {} by running: "
+        "'yt config migrate'"
+    )
+    warnings.warn(msg.format(_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE))
+
+if not os.path.exists(CURRENT_CONFIG_FILE):
+    cp = configparser.ConfigParser()
+    cp.add_section("yt")
+    with open(CURRENT_CONFIG_FILE, 'w') as new_cfg:
+        cp.write(new_cfg)
 
 class YTConfigParser(configparser.ConfigParser):
     def __setitem__(self, key, val):
@@ -108,12 +123,8 @@
     def __getitem__(self, key):
         self.get(key[0], key[1])
 
-if os.path.exists(os.path.expanduser("~/.yt/config")):
-    ytcfg = YTConfigParser(ytcfg_defaults)
-    ytcfg.read(['yt.cfg', os.path.expanduser('~/.yt/config')])
-else:
-    ytcfg = YTConfigParser(ytcfg_defaults)
-    ytcfg.read(['yt.cfg'])
+ytcfg = YTConfigParser(ytcfg_defaults)
+ytcfg.read([_OLD_CONFIG_FILE, CURRENT_CONFIG_FILE, 'yt.cfg'])
 if not ytcfg.has_section("yt"):
     ytcfg.add_section("yt")
 

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,7 @@
         ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
         ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
         ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
+        ("metallicity", ("Zsun", ["metallicity"], None)),
 
         # We need to have a bunch of species fields here, too
         ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -26,7 +26,7 @@
 import json
 import pprint
 
-from yt.config import ytcfg
+from yt.config import ytcfg, CURRENT_CONFIG_FILE
 ytcfg["yt","__command_line"] = "True"
 from yt.startup_tasks import parser, subparsers
 from yt.funcs import \
@@ -39,11 +39,13 @@
     enable_plugins
 from yt.extern.six import add_metaclass, string_types
 from yt.extern.six.moves import urllib, input
+from yt.extern.six.moves.urllib.parse import urlparse
 from yt.convenience import load
 from yt.visualization.plot_window import \
     SlicePlot, \
     ProjectionPlot
 from yt.utilities.metadata import get_metadata
+from yt.utilities.configure import set_config
 from yt.utilities.exceptions import \
     YTOutputNotIdentified, YTFieldNotParseable
 
@@ -117,16 +119,48 @@
         print("Changeset = %s" % vstring.strip().decode("utf-8"))
     print("---")
     return vstring
+    
 
+def _get_girder_client():
+    try:
+        import girder_client
+    except ImportError:
+        print("This command requires girder_client to be installed.")
+        print("Please install it using your python package manager, e.g.:")
+        print("   pip install girder_client --user")
+        exit()
+
+    hub_url = urlparse(ytcfg.get("yt", "hub_url"))
+    gc = girder_client.GirderClient(apiUrl=hub_url.geturl())
+    gc.authenticate(apiKey=ytcfg.get("yt", "hub_api_key"))
+    return gc
+
+
+_subparsers = {None: subparsers}
+_subparsers_description = {
+    'config': 'Get and set configuration values for yt',
+    'hub': 'Interact with the yt Hub'
+}
 class YTCommandSubtype(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
         if cls.name is not None:
             names = ensure_list(cls.name)
+            if cls.subparser not in _subparsers:
+                try:
+                    description = _subparsers_description[cls.subparser]
+                except KeyError:
+                    description = cls.subparser
+                parent_parser = argparse.ArgumentParser(add_help=False)
+                p = subparsers.add_parser(cls.subparser, help=description,
+                                          description=description,
+                                          parents=[parent_parser])
+                _subparsers[cls.subparser] = p.add_subparsers(
+                    title=cls.subparser, dest=cls.subparser)
+            sp = _subparsers[cls.subparser]
             for name in names:
-                sc = subparsers.add_parser(name,
-                    description = cls.description,
-                    help = cls.description)
+                sc = sp.add_parser(name, description=cls.description, 
+                                   help=cls.description)
                 sc.set_defaults(func=cls.run)
                 for arg in cls.args:
                     _add_arg(sc, arg)
@@ -138,6 +172,7 @@
     description = ""
     aliases = ()
     ndatasets = 1
+    subparser = None
 
     @classmethod
     def run(cls, args):
@@ -557,25 +592,27 @@
 
 
 class YTHubRegisterCmd(YTCommand):
-    name = "hub_register"
+    subparser = "hub"
+    name = "register"
     description = \
         """
-        Register a user on the Hub: http://hub.yt-project.org/
+        Register a user on the yt Hub: http://hub.yt/
         """
     def __call__(self, args):
-        # We need these pieces of information:
-        #   1. Name
-        #   2. Email
-        #   3. Username
-        #   4. Password (and password2)
-        #   5. (optional) URL
-        #   6. "Secret" key to make it epsilon harder for spammers
-        if ytcfg.get("yt","hub_api_key") != "":
+        try:
+            import requests
+        except ImportError:
+            print("yt {} requires requests to be installed".format(self.name))
+            print("Please install it using your python package manager, e.g.:")
+            print("   pip install requests --user")
+            exit()
+        if ytcfg.get("yt", "hub_api_key") != "":
             print("You seem to already have an API key for the hub in")
-            print("~/.yt/config .  Delete this if you want to force a")
+            print("{} . Delete this if you want to force a".format(CURRENT_CONFIG_FILE))
             print("new user registration.")
+            exit()
         print("Awesome!  Let's start by registering a new user for you.")
-        print("Here's the URL, for reference: http://hub.yt-project.org/ ")
+        print("Here's the URL, for reference: http://hub.yt/ ")
         print()
         print("As always, bail out with Ctrl-C at any time.")
         print()
@@ -586,8 +623,11 @@
         print()
         print("To start out, what's your name?")
         print()
-        name = input("Name? ")
-        if len(name) == 0: sys.exit(1)
+        first_name = input("First Name? ")
+        if len(first_name) == 0: sys.exit(1)
+        print()
+        last_name = input("Last Name? ")
+        if len(last_name) == 0: sys.exit(1)
         print()
         print("And your email address?")
         print()
@@ -604,33 +644,32 @@
             print("Sorry, they didn't match!  Let's try again.")
             print()
         print()
-        print("Would you like a URL displayed for your user?")
-        print("Leave blank if no.")
-        print()
-        url = input("URL? ")
-        print()
         print("Okay, press enter to register.  You should receive a welcome")
         print("message at %s when this is complete." % email)
         print()
         input()
-        data = dict(name = name, email = email, username = username,
-                    password = password1, password2 = password2,
-                    url = url, zap = "rowsdower")
-        data = urllib.parse.urlencode(data)
-        hub_url = "https://hub.yt-project.org/create_user"
-        req = urllib.request.Request(hub_url, data)
-        try:
-            urllib.request.urlopen(req).read()
-        except urllib.error.HTTPError as exc:
-            if exc.code == 400:
-                print("Sorry, the Hub couldn't create your user.")
-                print("You can't register duplicate users, which is the most")
-                print("common cause of this error.  All values for username,")
-                print("name, and email must be unique in our system.")
-                sys.exit(1)
-        except urllib.URLError as exc:
-            print("Something has gone wrong.  Here's the error message.")
-            raise exc
+
+        data = dict(firstName=first_name, email=email, login=username,
+                    password=password1, lastName=last_name, admin=False)
+        hub_url = ytcfg.get("yt", "hub_url")
+        req = requests.post(hub_url + "/user", data=data)
+      
+        if req.ok:
+            headers = {'Girder-Token': req.json()['authToken']['token']}
+        else:
+            if req.status_code == 400:
+                print("Registration failed with 'Bad request':")
+                print(req.json()["message"])
+            exit(1)
+        print("User registration successful")
+        print("Obtaining API key...")
+        req = requests.post(hub_url + "/api_key", headers=headers,
+                            data={'name': 'ytcmd', 'active': True})
+        apiKey = req.json()["key"]
+
+        print("Storing API key in configuration file")
+        set_config("yt", "hub_api_key", apiKey)
+        
         print()
         print("SUCCESS!")
         print()
@@ -810,40 +849,60 @@
         import yt.utilities.lodgeit as lo
         lo.main( None, download=args.number )
 
+class YTHubStartNotebook(YTCommand):
+    args = (
+        dict(dest="folderId", default=ytcfg.get("yt", "hub_sandbox"),
+             nargs="?", 
+             help="(Optional) Hub folder to mount inside the Notebook"),
+    )
+    description = \
+        """
+        Start the Jupyter Notebook on the yt Hub.
+        """
+    subparser = "hub"
+    name = "start"
+    def __call__(self, args):
+        gc = _get_girder_client()
+
+        # TODO: should happen server-side
+        _id = gc._checkResourcePath(args.folderId)
+
+        resp = gc.post("/notebook/{}".format(_id))
+        try:
+            print("Launched! Please visit this URL:")
+            print("    https://tmpnb.hub.yt" + resp['url'])
+            print()
+        except (KeyError, TypeError):
+            print("Something went wrong. The yt Hub responded with:")
+            print(resp)
+
 class YTNotebookUploadCmd(YTCommand):
     args = (dict(short="file", type=str),)
     description = \
         """
-        Upload an IPython notebook to hub.yt-project.org.
+        Upload an IPython Notebook to the yt Hub.
         """
 
     name = "upload_notebook"
     def __call__(self, args):
-        filename = args.file
-        if not os.path.isfile(filename):
-            raise IOError(filename)
-        if not filename.endswith(".ipynb"):
-            print("File must be an IPython notebook!")
-            return 1
-        import json
-        try:
-            t = json.loads(open(filename).read())['metadata']['name']
-        except (ValueError, KeyError):
-            print("File does not appear to be an IPython notebook.")
-        if len(t) == 0:
-            t = filename.strip(".ipynb")
-        from yt.utilities.minimal_representation import MinimalNotebook
-        mn = MinimalNotebook(filename, t)
-        rv = mn.upload()
+        gc = _get_girder_client()
+        username = gc.get("/user/me")["login"]
+        gc.upload(args.file, "/user/{}/Public".format(username))
+
+        _id = gc.resourceLookup(
+            "/user/{}/Public/{}".format(username, args.file))["_id"]
+        _fid = next(gc.listFile(_id))["_id"]
+        hub_url = urlparse(ytcfg.get("yt", "hub_url"))
         print("Upload successful!")
         print()
         print("To access your raw notebook go here:")
         print()
-        print("  %s" % (rv['url']))
+        print("  {}://{}/#item/{}".format(hub_url.scheme, hub_url.netloc, _id))
         print()
         print("To view your notebook go here:")
         print()
-        print("  %s" % (rv['url'].replace("/go/", "/nb/")))
+        print("  http://nbviewer.jupyter.org/urls/{}/file/{}/download".format(
+            hub_url.netloc + hub_url.path, _fid))
         print()
 
 class YTPlotCmd(YTCommand):
@@ -947,7 +1006,7 @@
             )
     description = \
         """
-        Run the IPython Notebook
+        Start the Jupyter Notebook locally. 
         """
     def __call__(self, args):
         kwargs = {}
@@ -1141,6 +1200,61 @@
             print()
             pprint.pprint(rv)
 
+
+class YTConfigGetCmd(YTCommand):
+    subparser = 'config'
+    name = 'get'
+    description = 'get a config value'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to retrieve.'))
+    def __call__(self, args):
+        from yt.utilities.configure import get_config
+        print(get_config(args.section, args.option))
+
+
+class YTConfigSetCmd(YTCommand):
+    subparser = 'config'
+    name = 'set'
+    description = 'set a config value'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to set.'),
+            dict(short='value', help='The value to set the option to.'))
+    def __call__(self, args):
+        from yt.utilities.configure import set_config
+        set_config(args.section, args.option, args.value)
+
+
+class YTConfigRemoveCmd(YTCommand):
+    subparser = 'config'
+    name = 'rm'
+    description = 'remove a config option'
+    args = (dict(short='section', help='The section containing the option.'),
+            dict(short='option', help='The option to remove.'))
+    def __call__(self, args):
+        from yt.utilities.configure import rm_config
+        rm_config(args.section, args.option)
+
+
+class YTConfigListCmd(YTCommand):
+    subparser = 'config'
+    name = 'list'
+    description = 'show the config content'
+    args = ()
+    def __call__(self, args):
+        from yt.utilities.configure import write_config
+        write_config(sys.stdout)
+
+
+class YTConfigMigrateCmd(YTCommand):
+    subparser = 'config'
+    name = 'migrate'
+    description = 'migrate old config file'
+    args = ()
+    def __call__(self, args):
+        from yt.utilities.configure import migrate_config
+        migrate_config()
+
+
 class YTSearchCmd(YTCommand):
     args = (dict(short="-o", longname="--output",
                  action="store", type=str,
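
A sketch mirroring _get_girder_client above: authenticate against the Hub with
the API key stored in the yt configuration:

    import girder_client
    from yt.config import ytcfg

    gc = girder_client.GirderClient(apiUrl=ytcfg.get("yt", "hub_url"))
    gc.authenticate(apiKey=ytcfg.get("yt", "hub_api_key"))
    print(gc.get("/user/me")["login"])   # sanity check: the authenticated user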

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/utilities/configure.py
--- /dev/null
+++ b/yt/utilities/configure.py
@@ -0,0 +1,92 @@
+# -*- coding: UTF-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import sys
+import argparse
+from yt.config import CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE
+from yt.extern.six.moves import configparser
+
+CONFIG = configparser.SafeConfigParser()
+CONFIG.read([CURRENT_CONFIG_FILE])
+
+
+def get_config(section, option):
+    return CONFIG.get(section, option)
+
+
+def set_config(section, option, value):
+    if not CONFIG.has_section(section):
+        CONFIG.add_section(section)
+    CONFIG.set(section, option, value)
+    write_config()
+
+
+def write_config(fd=None):
+    if fd is None:
+        with open(CURRENT_CONFIG_FILE, 'w') as fd:
+            CONFIG.write(fd)
+    else:
+        CONFIG.write(fd)
+
+def migrate_config():
+    if not os.path.exists(_OLD_CONFIG_FILE):
+        print("Old config not found.")
+        sys.exit()
+    CONFIG.read(_OLD_CONFIG_FILE)
+    print("Writing a new config file to: {}".format(CURRENT_CONFIG_FILE))
+    write_config()
+    print("Backing up the old config file: {}.bak".format(_OLD_CONFIG_FILE))
+    os.rename(_OLD_CONFIG_FILE, _OLD_CONFIG_FILE + '.bak')
+
+
+def rm_config(section, option):
+    CONFIG.remove_option(section, option)
+    write_config()
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Get and set configuration values for yt')
+    subparsers = parser.add_subparsers(help='sub-command help', dest='cmd')
+
+    get_parser = subparsers.add_parser('get', help='get a config value')
+    set_parser = subparsers.add_parser('set', help='set a config value')
+    rm_parser = subparsers.add_parser('rm', help='remove a config option')
+    subparsers.add_parser('migrate', help='migrate old config file')
+    subparsers.add_parser('list', help='show all config values')
+
+    get_parser.add_argument(
+        'section', help='The section containing the option.')
+    get_parser.add_argument('option', help='The option to retrieve.')
+
+    set_parser.add_argument(
+        'section', help='The section containing the option.')
+    set_parser.add_argument('option', help='The option to set.')
+    set_parser.add_argument('value', help='The value to set the option to.')
+
+    rm_parser.add_argument(
+        'section', help='The section containing the option to remove.')
+    rm_parser.add_argument('option', help='The option to remove.')
+
+    args = parser.parse_args()
+
+    if args.cmd == 'get':
+        print(get_config(args.section, args.option))
+    elif args.cmd == 'set':
+        set_config(args.section, args.option, args.value)
+    elif args.cmd == 'list':
+        write_config(sys.stdout)
+    elif args.cmd == 'migrate':
+        migrate_config()
+    elif args.cmd == 'rm':
+        rm_config(args.section, args.option)
+
+if __name__ == '__main__':
+    main()  # pragma: no cover
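
For orientation, a minimal usage sketch (not part of the changeset) exercising the public helpers added in yt/utilities/configure.py:

.. code-block:: python

   # Sketch only: drives the new module directly against CURRENT_CONFIG_FILE.
   from yt.utilities.configure import get_config, set_config, rm_config

   set_config("yt", "loglevel", "10")   # adds the [yt] section if needed, then writes
   print(get_config("yt", "loglevel"))  # -> '10'
   rm_config("yt", "loglevel")          # removes the option and rewrites the file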

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -110,6 +110,13 @@
             # We are somewhere in the middle of the face
             temp_x = intersect_t * v_dir[i] + v_pos[i] # current position
             temp_y = ((temp_x - vc.left_edge[i])*vc.idds[i])
+            # There are some really tough cases where we are just within a
+            # couple of least significant places of the edge, and this helps
+            # prevent killing the calculation through a segfault in those cases.
+            if -1 < temp_y < 0 and step[i] > 0:
+                temp_y = 0.0
+            elif vc.dims[i] - 1 < temp_y < vc.dims[i] and step[i] < 0:
+                temp_y = vc.dims[i] - 1
             cur_ind[i] =  <int> (floor(temp_y))
         if step[i] > 0:
             temp_y = (cur_ind[i] + 1) * vc.dds[i] + vc.left_edge[i]
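
A standalone illustration of the guard above (a hypothetical pure-Python helper, not from the commit): indices that land a floating-point epsilon outside the valid cell range are snapped back onto it before flooring.

.. code-block:: python

   import math

   def clamp_cell_index(temp_y, step, dim):
       # Stepping forward, but fractionally below cell 0: snap up to 0.
       if -1 < temp_y < 0 and step > 0:
           temp_y = 0.0
       # Stepping backward, but fractionally above the last cell: snap down.
       elif dim - 1 < temp_y < dim and step < 0:
           temp_y = dim - 1
       return int(math.floor(temp_y))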

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/utilities/tests/test_config.py
--- /dev/null
+++ b/yt/utilities/tests/test_config.py
@@ -0,0 +1,142 @@
+# -*- coding: UTF-8 -*-
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import contextlib
+import mock
+import os
+import sys
+import unittest
+import yt.utilities.command_line
+import yt.utilities.configure
+import yt.config
+from yt.config import \
+    CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE
+from yt.extern.six import StringIO
+from yt.extern.six.moves.configparser import NoOptionError, SafeConfigParser
+
+
+_DUMMY_CFG = ['[yt]', 'loglevel = 49']
+
+
+@contextlib.contextmanager
+def captureOutput():
+    oldout, olderr = sys.stdout, sys.stderr
+    try:
+        out = [StringIO(), StringIO()]
+        sys.stdout, sys.stderr = out
+        yield out
+    finally:
+        sys.stdout, sys.stderr = oldout, olderr
+        out[0] = out[0].getvalue()
+        out[1] = out[1].getvalue()
+
+
+class SysExitException(Exception):
+    pass
+
+
+def setUpModule():
+    for cfgfile in (CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE):
+        if os.path.exists(cfgfile):
+            os.rename(cfgfile, cfgfile + '.bak_test')
+
+            if cfgfile == CURRENT_CONFIG_FILE:
+                yt.utilities.configure.CONFIG = SafeConfigParser()
+                if not yt.utilities.configure.CONFIG.has_section('yt'):
+                    yt.utilities.configure.CONFIG.add_section('yt')
+
+
+def tearDownModule():
+    for cfgfile in (CURRENT_CONFIG_FILE, _OLD_CONFIG_FILE): 
+        if os.path.exists(cfgfile + '.bak_test'):
+            os.rename(cfgfile + '.bak_test', cfgfile)
+
+
+class TestYTConfig(unittest.TestCase):
+    def _runYTConfig(self, args):
+        args = ['yt', 'config'] + args
+        retcode = 0
+
+        with mock.patch.object(sys, 'argv', args),\
+                mock.patch('sys.exit', side_effect=SysExitException) as exit,\
+                captureOutput() as output:
+            try:
+                yt.utilities.command_line.run_main()
+            except SysExitException:
+                args = exit.mock_calls[0][1]
+                retcode = args[0] if len(args) else 0
+        return {
+            'rc': retcode,
+            'stdout': output[0],
+            'stderr': output[1]
+        }
+
+class TestYTConfigCommands(TestYTConfig):
+    def testConfigCommands(self):
+        self.assertFalse(os.path.exists(CURRENT_CONFIG_FILE))
+
+        info = self._runYTConfig(['--help'])
+        self.assertEqual(info['rc'], 0)
+        self.assertEqual(info['stderr'], '')
+        self.assertIn('Get and set configuration values for yt',
+                      info['stdout'])
+
+        info = self._runYTConfig(['list'])
+        self.assertEqual(info['rc'], 0)
+        self.assertIn('[yt]', info['stdout'])
+
+        info = self._runYTConfig(['set', 'yt', '__parallel', 'True'])
+        self.assertEqual(info['rc'], 0)
+
+        info = self._runYTConfig(['get', 'yt', '__parallel'])
+        self.assertEqual(info['rc'], 0)
+        self.assertEqual(info['stdout'].strip(), 'True')
+
+        info = self._runYTConfig(['rm', 'yt', '__parallel'])
+        self.assertEqual(info['rc'], 0)
+
+        with self.assertRaises(NoOptionError):
+            self._runYTConfig(['get', 'yt', 'foo'])
+    
+    def tearDown(self):
+        if os.path.exists(CURRENT_CONFIG_FILE):
+            os.remove(CURRENT_CONFIG_FILE)
+
+class TestYTConfigMigration(TestYTConfig):
+
+    def setUp(self):
+        if not os.path.exists(os.path.dirname(_OLD_CONFIG_FILE)):
+            os.makedirs(os.path.dirname(_OLD_CONFIG_FILE))
+
+        with open(_OLD_CONFIG_FILE, 'w') as fh:
+            for line in _DUMMY_CFG:
+                fh.write('{}\n'.format(line))
+        
+        if os.path.exists(CURRENT_CONFIG_FILE):
+            os.remove(CURRENT_CONFIG_FILE)
+
+    def tearDown(self):
+        if os.path.exists(CURRENT_CONFIG_FILE):
+            os.remove(CURRENT_CONFIG_FILE)
+        if os.path.exists(_OLD_CONFIG_FILE + '.bak'):
+            os.remove(_OLD_CONFIG_FILE + '.bak')
+
+    def testConfigMigration(self):
+        self.assertFalse(os.path.exists(CURRENT_CONFIG_FILE))
+        self.assertTrue(os.path.exists(_OLD_CONFIG_FILE))
+        
+        info = self._runYTConfig(['migrate'])
+        self.assertEqual(info['rc'], 0)
+
+        self.assertTrue(os.path.exists(CURRENT_CONFIG_FILE))
+        self.assertFalse(os.path.exists(_OLD_CONFIG_FILE))
+        self.assertTrue(os.path.exists(_OLD_CONFIG_FILE + '.bak'))
+
+        with open(CURRENT_CONFIG_FILE, 'r') as fh:
+            new_cfg = ''.join(fh.readlines())
+        self.assertEqual(new_cfg.strip().split('\n'), _DUMMY_CFG)

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -13,7 +13,6 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-
 import numpy as np
 import matplotlib
 import types
@@ -171,11 +170,15 @@
     window_size : float
         The size of the window on the longest axis (in units of inches),
         including the margins but not the colorbar.
+    right_handed : boolean
+        Whether the implicit east vector for the generated image is set to
+        make a right-handed coordinate system with the north vector and the
+        normal vector (the direction of the 'window' into the data).
 
     """
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
-                 periodic=True, origin='center-window', oblique=False, 
-                 window_size=8.0, fields=None, fontsize=18, aspect=None, 
+                 periodic=True, origin='center-window', oblique=False, right_handed=True,
+                 window_size=8.0, fields=None, fontsize=18, aspect=None,
                  setup=False):
         if not hasattr(self, "ds"):
             self.ds = data_source.ds
@@ -185,8 +188,10 @@
         self.center = None
         self._periodic = periodic
         self.oblique = oblique
+        self._right_handed = right_handed
         self.buff_size = buff_size
         self.antialias = antialias
+
         self.aspect = aspect
         skip = list(FixedResolutionBuffer._exclude_fields) + data_source._key_fields
         if fields is None:
@@ -616,6 +621,11 @@
         self._axes_unit_names = unit_name
         return self
 
+    @invalidate_plot
+    def toggle_right_handed(self):
+        self._right_handed = not self._right_handed
+
+
 class PWViewerMPL(PlotWindow):
     """Viewer using matplotlib as a backend via the WindowPlotMPL.
 
@@ -793,7 +803,6 @@
                 ia = ImageArray(ia)
             else:
                 ia = image
-
             self.plots[f] = WindowPlotMPL(
                 ia, self._field_transform[f].name,
                 self._field_transform[f].func,
@@ -801,6 +810,10 @@
                 self.figure_size, font_size,
                 self.aspect, fig, axes, cax)
 
+            if not self._right_handed:
+                ax = self.plots[f].axes
+                ax.invert_xaxis()
+
             axes_unit_labels = ['', '']
             comoving = False
             hinv = False
@@ -1210,6 +1223,10 @@
          ('{yloc}', '{space}')                  ('lower', 'window')
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
+    right_handed : boolean
+         Whether the implicit east vector for the generated image is set to
+         make a right-handed coordinate system with the normal vector (the
+         direction of the 'window' into the data).
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
@@ -1234,7 +1251,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, ds, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window', fontsize=18, field_parameters=None,
+                 origin='center-window', right_handed=True, fontsize=18, field_parameters=None,
                  window_size=8.0, aspect=None, data_source=None):
         # this will handle time series data and controllers
         ts = self._initialize_dataset(ds)
@@ -1259,7 +1276,7 @@
         validate_mesh_fields(slc, fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin,
                              fontsize=fontsize, fields=fields,
-                             window_size=window_size, aspect=aspect)
+                             window_size=window_size, aspect=aspect, right_handed=right_handed)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, ds)
         self.set_axes_unit(axes_unit)
@@ -1346,7 +1363,10 @@
          ('{yloc}', '{space}')                  ('lower', 'window')
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
-
+    right_handed : boolean
+         Whether the implicit east vector for the generated image is set to
+         make a right-handed coordinate system with the direction of the
+         'window' into the data (the normal vector).
     data_source : YTSelectionContainer Object
          Object to be used for data selection.  Defaults to a region covering
          the entire simulation.
@@ -1399,8 +1419,8 @@
 
     def __init__(self, ds, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window',
-                 fontsize=18, field_parameters=None, data_source=None,
-                 method = "integrate", proj_style = None, window_size=8.0, 
+                 right_handed=True, fontsize=18, field_parameters=None, data_source=None,
+                 method = "integrate", proj_style = None, window_size=8.0,
                  aspect=None):
         ts = self._initialize_dataset(ds)
         self.ts = ts
@@ -1435,7 +1455,7 @@
                            field_parameters=field_parameters, method=method,
                            max_level=max_level)
         PWViewerMPL.__init__(self, proj, bounds, fields=fields, origin=origin,
-                             fontsize=fontsize, window_size=window_size, 
+                             right_handed=right_handed, fontsize=fontsize, window_size=window_size, 
                              aspect=aspect)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, ds)
@@ -1499,6 +1519,10 @@
          A vector defining the 'up' direction in the plot.  This
          option sets the orientation of the slicing plane.  If not
          set, an arbitrary grid-aligned north-vector is chosen.
+    right_handed : boolean
+         Whether the implicit east vector for the generated image is set to
+         make a right-handed coordinate system with the north vector and the
+         normal vector (the direction of the 'window' into the data).
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
@@ -1513,7 +1537,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, ds, normal, fields, center='c', width=None,
-                 axes_unit=None, north_vector=None, fontsize=18,
+                 axes_unit=None, north_vector=None, right_handed=True, fontsize=18,
                  field_parameters=None, data_source=None):
         (bounds, center_rot) = get_oblique_window_parameters(normal,center,width,ds)
         if field_parameters is None:
@@ -1533,7 +1557,7 @@
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self, cutting, bounds, fields=fields,
                              origin='center-window',periodic=False,
-                             oblique=True, fontsize=fontsize)
+                             right_handed=right_handed, oblique=True, fontsize=fontsize)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, ds)
         self.set_axes_unit(axes_unit)
@@ -1635,6 +1659,10 @@
          A vector defining the 'up' direction in the plot.  This
          option sets the orientation of the slicing plane.  If not
          set, an arbitrary grid-aligned north-vector is chosen.
+    right_handed : boolean
+         Whether the implicit east vector for the generated image is set to
+         make a right-handed coordinate system with the north vector and the
+         normal vector (the direction of the 'window' into the data).
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     method : string
@@ -1657,8 +1685,9 @@
 
     def __init__(self, ds, normal, fields, center='c', width=None,
                  depth=(1, '1'), axes_unit=None, weight_field=None,
-                 max_level=None, north_vector=None, volume=None, no_ghost=False,
-                 le=None, re=None, interpolated=False, fontsize=18, method="integrate"):
+                 max_level=None, north_vector=None, right_handed=True,
+                 volume=None, no_ghost=False, le=None, re=None,
+                 interpolated=False, fontsize=18, method="integrate"):
         (bounds, center_rot) = \
           get_oblique_window_parameters(normal,center,width,ds,depth=depth)
         fields = ensure_list(fields)[:]
@@ -1684,7 +1713,8 @@
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(
             self, OffAxisProj, bounds, fields=fields, origin='center-window',
-            periodic=False, oblique=True, fontsize=fontsize)
+            periodic=False, oblique=True, right_handed=right_handed,
+            fontsize=fontsize)
         if axes_unit is None:
             axes_unit = get_axes_unit(width, ds)
         self.set_axes_unit(axes_unit)
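
A short usage sketch for the new keyword and method (the dataset path is the sample dataset used by the tests later in this changeset):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   # Flip the implicit east vector to get a left-handed image orientation.
   slc = yt.SlicePlot(ds, "z", "density", right_handed=False)
   slc.toggle_right_handed()  # flips back; @invalidate_plot regenerates the image
   slc.save()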

diff -r 9bee5a66f96e2202fdc463629a59beb2d127aed0 -r a359b82b46c0e742a810397adaec1d85e88415c2 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -91,7 +91,8 @@
              "set_window_size": [((7.0, ), {})],
              "set_zlim": [(('density', 1e-25, 1e-23), {}),
                           (('density', 1e-25, None), {'dynamic_range': 4})],
-             "zoom": [((10, ), {})]}
+             "zoom": [((10, ), {})],
+             "toggle_right_handed": [((),{})]}
 
 
 CENTER_SPECS = (
@@ -449,3 +450,4 @@
         for field_name_list in field_names:
             assert_raises(
                 YTInvalidFieldType, object, ds, normal, field_name_list)
+


https://bitbucket.org/yt_analysis/yt/commits/4387fbcf5773/
Changeset:   4387fbcf5773
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 18:01:28+00:00
Summary:     Incrementing local_pw_007 test.
Affected #:  1 file

diff -r a359b82b46c0e742a810397adaec1d85e88415c2 -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_006:
+  local_pw_007:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes


https://bitbucket.org/yt_analysis/yt/commits/af39415bb4c4/
Changeset:   af39415bb4c4
Branch:      yt
User:        MatthewTurk
Date:        2016-09-07 18:17:55+00:00
Summary:     Merging from upstream
Affected #:  86 files

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -23,6 +23,7 @@
 yt/geometry/particle_smooth.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
+yt/utilities/lib/autogenerated_element_samplers.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
@@ -33,6 +34,7 @@
 yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
+yt/utilities/lib/distance_queue.c
 yt/utilities/lib/element_mappings.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+include yt/utilities/mesh_types.yaml
 exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1429,25 +1429,24 @@
         YT_DEPS+=('netcdf4')   
     fi
     
-    # Here is our dependency list for yt
-    log_cmd conda update --yes conda
+    log_cmd ${DEST_DIR}/bin/conda update --yes conda
     
     log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
     for YT_DEP in "${YT_DEPS[@]}"; do
         echo "Installing $YT_DEP"
-        log_cmd conda install --yes ${YT_DEP}
+        log_cmd ${DEST_DIR}/bin/conda install --yes ${YT_DEP}
     done
 
     if [ $INST_PY3 -eq 1 ]
     then
         echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial
         log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
     fi
 
-    log_cmd pip install python-hglib
+    log_cmd ${DEST_DIR}/bin/pip install python-hglib
 
-    log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
+    log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
     if [ $INST_EMBREE -eq 1 ]
     then
@@ -1474,17 +1473,17 @@
         ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
         log_cmd unzip ${DEST_DIR}/src/master.zip
         pushd ${DEST_DIR}/src/pyembree-master &> /dev/null
-        log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
+        log_cmd ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
         popd &> /dev/null
     fi
 
     if [ $INST_ROCKSTAR -eq 1 ]
     then
         echo "Building Rockstar"
-        ( hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
-        ROCKSTAR_PACKAGE=$(conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
-        log_cmd conda build ${DEST_DIR}/src/yt_conda/rockstar
-        log_cmd conda install $ROCKSTAR_PACKAGE
+        ( ${DEST_DIR}/bin/hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
+        ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
+        log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
+        log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
         ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
     fi
 
@@ -1493,20 +1492,20 @@
     then
         if [ $INST_PY3 -eq 1 ]
         then
-            log_cmd pip install pyx
+            log_cmd ${DEST_DIR}/bin/pip install pyx
         else
-            log_cmd pip install pyx==0.12.1
+            log_cmd ${DEST_DIR}/bin/pip install pyx==0.12.1
         fi
     fi
 
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install -c conda-forge --yes yt
+        log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
@@ -1517,7 +1516,7 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -116,7 +116,12 @@
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
-Below, we will add H Lyman continuum.
+The wavelength refers to the location at which the continuum begins to be 
+applied to the dataset, and as it moves to lower wavelength values, the 
+optical depth value decreases according to the defined power law.  The 
+normalization value is the column density of the linked field which results
+in an optical depth of 1 at the defined wavelength.  Below, we add the hydrogen 
+Lyman continuum.
 
 .. code-block:: python
 
@@ -131,7 +136,7 @@
 Making the Spectrum
 ^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out
+Once all the lines and continua are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python

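The continuum described above is exercised by a test added later in this changeset; condensed here as a reference sketch (``sp`` is an ``AbsorptionSpectrum`` instance):

.. code-block:: python

   # Values taken from test_absorption_spectrum_with_continuum in this changeset.
   my_label = 'Ly C'
   field = 'H_number_density'
   wavelength = 912.323660  # Angstroms; continuum applied at and below this
   normalization = 1.6e17   # cm**-2 column density that gives tau = 1 at the edge
   index = 3.0              # power-law index of the optical depth falloff

   sp.add_continuum(my_label, field, wavelength, normalization, index)
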
diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- a/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -31,13 +31,13 @@
    print("hubble distance", co.hubble_distance())
 
    # distance from z = 0 to 0.5
-   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpccm/h"))
 
    # transverse distance
-   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpccm/h"))
 
    # comoving volume
-   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpccm**3"))
 
   # angular diameter distance
    print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
@@ -67,7 +67,16 @@
    # convert redshift to time after Big Bang (same as Hubble time)
    print("t from z", co.t_from_z(0.5).in_units("Gyr"))
 
-Note, that all distances returned are comoving distances.  All of the above
+.. warning::
+
+   Cosmological distance calculations return values that are either
+   in the comoving or proper frame, depending on the specific quantity.  For
+   simplicity, the proper and comoving frames are set equal to each other
+   within the cosmology calculator.  This means that for some distance value
+   ``x``, ``x.to("Mpc")`` and ``x.to("Mpccm")`` will be the same.  The user should
+   take care to understand which reference frame is correct for the given calculation.
+
+All of the above
+functions accept scalar values and arrays.  The helper functions `co.quan`
 and `co.arr` exist to create unitful `YTQuantities` and `YTArray` with the
 unit registry of the cosmology calculator.  For more information on the usage
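
To see the caveat in the warning above in action, a brief sketch (assuming the default cosmology):

.. code-block:: python

   from yt.utilities.cosmology import Cosmology

   co = Cosmology()
   d = co.comoving_radial_distance(0, 0.5)
   # The proper and comoving frames are set equal inside the calculator,
   # so both conversions print the same number:
   print(d.in_units("Mpc"))
   print(d.in_units("Mpccm"))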

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -49,13 +49,18 @@
 * ``deltaz_min`` (*float*):  Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with
-  ``use_minimum_datasets`` set to False, this parameter specifies the
-  fraction of the total box size to be traversed before rerandomizing the
-  projection axis and center.  This was invented to allow light rays with
-  thin slices to sample coherent large scale structure, but in practice
-  does not work so well.  Try setting this parameter to 1 and see what
-  happens.  Default: 0.0.
+* ``max_box_fraction`` (*float*):  The maximum length a light ray segment can
+  be, in units of the domain size, in order to span the redshift interval
+  from one dataset to another.  If using a zoom-in simulation, this parameter can
+  be set to the length of the high resolution region so as to limit ray segments
+  to that size.  If the high resolution region is not cubical, the smallest side
+  should be used.  Default: 1.0 (the size of the box).
+
+* ``minimum_coherent_box_fraction`` (*float*): Used to specify the minimum
+  length of a ray, in terms of the size of the domain, before the trajectory
+  is re-randomized.  Set to 0 to have the ray trajectory randomized for every
+  dataset.  Set to np.inf (infinity) to use a single trajectory for the
+  entire ray.  Default: 0.0.
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
@@ -67,7 +72,7 @@
 ---------------------
 
 Once the LightRay object has been instantiated, the
-:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay,make_light_ray`
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`
 function will trace out the rays in each dataset and collect information for all the
 fields requested.  The output file will be an HDF5 file containing all the
 cell field values for all the cells that were intersected by the ray.  A
@@ -85,6 +90,21 @@
 
 * ``seed`` (*int*): Seed for the random number generator.  Default: None.
 
+* ``periodic`` (*bool*): If True, ray trajectories will make use of periodic
+  boundaries.  If False, ray trajectories will not be periodic.  Default: True.
+
+* ``left_edge`` (iterable of *floats* or *YTArray*): The left corner of the
+  region in which rays are to be generated.  If None, the left edge will be
+  that of the domain.  Default: None.
+
+* ``right_edge`` (iterable of *floats* or *YTArray*): The right corner of
+  the region in which rays are to be generated.  If None, the right edge
+  will be that of the domain.  Default: None.
+
+* ``min_level`` (*int*): The minimum refinement level of the spatial region in
+  which the ray passes.  This can be used with zoom-in simulations where the
+  high resolution region does not keep a constant geometry.  Default: None.
+
 * ``start_position`` (*list* of floats): Used only if creating a light ray
   from a single dataset.  The coordinates of the starting position of the
   ray.  Default: None.
@@ -122,7 +142,82 @@
   slice and 1 to have all processors work together on each projection.
   Default: 1
 
-.. note:: As of :code:`yt-3.0`, the functionality for recording properties of the nearest halo to each element of the ray no longer exists.  This is still available in :code:`yt-2.x`.  If you would like to use this feature in :code:`yt-3.x`, help is needed to port it over.  Contact the yt-users mailing list if you are interested in doing this.
+Useful Tips for Making LightRays
+--------------------------------
+
+Below are some tips that may come in handy for creating proper LightRays.
+
+How many snapshots do I need?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The number of snapshots required to traverse some redshift interval depends
+on the simulation box size and cosmological parameters.  Before running an
+expensive simulation only to find out that you don't have enough outputs
+to span the redshift interval you want, have a look at
+:ref:`planning-cosmology-simulations`.  The functionality described there
+will allow you to calculate the precise number of snapshots and specific
+redshifts at which they should be written.
+
+My snapshots are too far apart!
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``max_box_fraction`` keyword, provided when creating the `LightRay`,
+allows the user to control how long a ray segment can be for an
+individual dataset.  By default, the `LightRay` generator will try to
+make segments no longer than the size of the box to avoid sampling the
+same structures more than once.  However, this can be increased in the
+case that the redshift interval between datasets is longer than the
+box size.  Increasing this value should be done with caution as longer
+ray segments run a greater risk of coming back to somewhere near their
+original position.
+
+What if I have a zoom-in simulation?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A zoom-in simulation has a high resolution region embedded within a
+larger, low resolution volume.  In this type of simulation, it is likely
+that you will want the ray segments to stay within the high resolution
+region.  To do this, you must first specify the size of the high
+resolution region when creating the `LightRay` using the
+``max_box_fraction`` keyword.  This will make sure that
+the calculation of the spacing of the segment datasets only takes into
+account the high resolution region and not the full box size.  If your
+high resolution region is not a perfect cube, specify the smallest side.
+Then, in the call to
+:func:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`,
+use the ``left_edge`` and ``right_edge`` keyword arguments to specify the
+precise location of the high resolution region.
+
+Technically speaking, the ray segments should no longer be periodic
+since the high resolution region is only a sub-volume within the
+larger domain.  To make the ray segments non-periodic, set the
+``periodic`` keyword to False.  The LightRay generator will continue
+to generate randomly oriented segments until it finds one that fits
+entirely within the high resolution region.  If you have a high
+resolution region that can move and change shape slightly as structure
+forms, use the `min_level` keyword to mandate that the ray segment only
+pass through cells that are refined to at least some minimum level.
+
+If the size of the high resolution region is not large enough to
+span the required redshift interval, the `LightRay` generator can
+be configured to treat the high resolution region as if it were
+periodic simply by setting the ``periodic`` keyword to True.  This
+option should be used with caution as it will lead to the creation
+of disconnected ray segments within a single dataset.
+
+I want a continuous trajectory over the entire ray.
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Set the ``minimum_coherent_box_fraction`` keyword argument to a very
+large number, like infinity (`numpy.inf`).
+
+.. note::
+
+   As of :code:`yt-3.0`, the functionality for recording properties of
+   the nearest halo to each element of the ray no longer exists.  This
+   is still available in :code:`yt-2.x`.  If you would like to use this
+   feature in :code:`yt-3.x`, help is needed to port it over.  Contact
+   the yt-users mailing list if you are interested in doing this.
 
 What Can I do with this?
 ------------------------

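Pulling the zoom-in advice above together, a hedged sketch; the parameter file name, redshift interval, edge coordinates, and refinement level below are placeholders, not values from the changeset:

.. code-block:: python

   from yt.analysis_modules.cosmological_observation.light_ray.light_ray import \
       LightRay

   # max_box_fraction: the high resolution region spans (say) a quarter of
   # the domain, so limit each ray segment to that length.
   lr = LightRay("my_simulation.par", simulation_type="Enzo",
                 near_redshift=0.0, far_redshift=0.1,
                 max_box_fraction=0.25)
   lr.make_light_ray(seed=1,
                     periodic=False,   # stay inside the sub-volume
                     left_edge=[0.375, 0.375, 0.375],
                     right_edge=[0.625, 0.625, 0.625],
                     min_level=2,      # require cells refined to at least level 2
                     fields=["temperature", "density"],
                     data_filename="lightray.h5")
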
diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
--- a/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
+++ b/doc/source/analyzing/analysis_modules/planning_cosmology_simulations.rst
@@ -4,7 +4,7 @@
 ===================================================
 
 If you want to run a cosmological simulation that will have just enough data
-outputs to create a cosmology splice, the
+outputs to create a light cone or light ray, the
 :meth:`~yt.analysis_modules.cosmological_observation.cosmology_splice.CosmologySplice.plan_cosmology_splice`
 function will calculate a list of redshift outputs that will minimally
 connect a redshift interval.

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -250,6 +250,7 @@
 
 * ``InteractingJets/jet_000002``
 * ``WaveDarkMatter/psiDM_000020``
+* ``Plummer/plummer_000000``
 
 Halo Catalog
 ~~~~~~~~~~~~

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1069,6 +1069,8 @@
 This means that the yt fields, e.g., ``("gas","density")``, will be in cgs units, but the GAMER fields,
 e.g., ``("gamer","Dens")``, will be in code units.
 
+Particle data are supported and are always stored in the same file as the grid data.
+
 .. rubric:: Caveats
 
 * GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not supported.

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/quickstart/index.rst
--- a/doc/source/quickstart/index.rst
+++ b/doc/source/quickstart/index.rst
@@ -12,15 +12,27 @@
 on time, you can non-interactively go through the linked pages below and view the
 worked examples.
 
-To execute the quickstart interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
+To execute the quickstart interactively, you have a couple of options: 1) run
+the notebook from your own system or 2) run it from the URL
+https://demo.use.yt. Option 1 requires an existing installation of yt (see
+:ref:`getting-and-installing-yt`), a copy of the yt source (which you may
+already have depending on your installation choice), and a download of the
+tutorial data-sets (total about 3 GB). If you know you are going to be a yt user
+and have the time to download the data-sets, option 1 is a good choice. However,
+if you're only interested in getting a feel for yt and its capabilities, or you
+already have yt but don't want to spend time downloading the data, go ahead to
+https://demo.use.yt.
+
+If you're running the tutorial from your own system and you do not already have
+the yt repository, the easiest way to get the repository is to clone it using
+mercurial:
 
 .. code-block:: bash
 
    hg clone https://bitbucket.org/yt_analysis/yt
 
-Now start the IPython notebook from within the repository:
+Now start the IPython notebook from within the repository (we presume you have
+yt installed):
 
 .. code-block:: bash
 

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -34,7 +34,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gadget                |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
-| GAMER                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| GAMER                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b setup.py
--- a/setup.py
+++ b/setup.py
@@ -114,6 +114,9 @@
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.autogenerated_element_samplers",
+              ["yt/utilities/lib/autogenerated_element_samplers.pyx"],
+              include_dirs=["yt/utilities/lib/"]),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs),
@@ -193,7 +196,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses",
+    "amr_kdtools", "lenses", "distance_queue"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -20,7 +20,7 @@
   local_gadget_000:
     - yt/frontends/gadget/tests/test_outputs.py
 
-  local_gamer_000:
+  local_gamer_001:
     - yt/frontends/gamer/tests/test_outputs.py
 
   local_gdf_000:
@@ -67,11 +67,13 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_001:
+  local_absorption_spectrum_005:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum
 
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -203,6 +203,13 @@
             input_ds = input_file
         field_data = input_ds.all_data()
 
+        # temperature field required to calculate voigt profile widths
+        if ('temperature' not in input_ds.derived_field_list) and \
+           (('gas', 'temperature') not in input_ds.derived_field_list):
+            raise RuntimeError(
+                "('gas', 'temperature') field required to be present in %s "
+                "for AbsorptionSpectrum to function." % input_file)
+
         self.tau_field = np.zeros(self.lambda_field.size)
         self.absorbers_list = []
 
@@ -210,6 +217,7 @@
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
 
+        mylog.info("Creating spectrum")
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
@@ -268,47 +276,96 @@
                 redshift_eff = ((1 + redshift) * \
                                 (1 + field_data['redshift_dopp'])) - 1.
 
+        if not use_peculiar_velocity:
+            redshift_eff = redshift
+
         return redshift, redshift_eff
 
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                   observing_redshift=0.):
         """
-        Add continuum features to the spectrum.
+        Add continuum features to the spectrum.  Continua are recorded as
+        a name, associated field, wavelength, normalization value, and index.
+        Continua are applied at and below the denoted wavelength, where the
+        optical depth decreases as a power law of desired index.  For positive 
+        index values, this means optical depth is highest at the denoted 
+        wavelength, and it drops with shorter and shorter wavelengths.  
+        Consequently, transmitted flux undergoes a discontinuous cutoff at the 
+        denoted wavelength, and then slowly increases with decreasing wavelength 
+        according to the power law.
         """
         # Change the redshifts of continuum sources to account for the
         # redshift at which the observer sits
         redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                  use_peculiar_velocity, observing_redshift)
 
-        # Only add continuum features down to tau of 1.e-4.
-        min_tau = 1.e-3
+        # min_tau is the minimum optical depth value that warrants 
+        # accounting for an absorber.  for a single absorber, noticeable 
+        # continuum effects begin for tau = 1e-3 (leading to transmitted 
+        # flux of e^-tau ~ 0.999).  but we apply a cutoff to remove
+        # absorbers with insufficient column_density to contribute 
+        # significantly to a continuum (see below).  because lots of 
+        # low column density absorbers can add up to a significant
+        # continuum effect, we normalize min_tau by the n_absorbers.
+        n_absorbers = field_data['dl'].size
+        min_tau = 1.e-3/n_absorbers
 
         for continuum in self.continuum_list:
-            column_density = field_data[continuum['field_name']] * field_data['dl']
+
+            # Normalization is in cm**-2, so column density must be as well
+            column_density = (field_data[continuum['field_name']] * 
+                              field_data['dl']).in_units('cm**-2')
+            if (column_density == 0).all():
+                mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
                 delta_lambda = continuum['wavelength'] * redshift_eff
             else:
                 delta_lambda = continuum['wavelength'] * redshift
+
+            # right index of continuum affected area is the wavelength itself
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
+            right_index = np.digitize(this_wavelength, 
+                                      self.lambda_field).clip(0, self.n_lambda)
+            # left index of continuum affected area is the wavelength at
+            # which the optical depth reaches min_tau
             left_index = np.digitize((this_wavelength *
-                                     np.power((min_tau * continuum['normalization'] /
-                                               column_density), (1. / continuum['index']))),
-                                    self.lambda_field).clip(0, self.n_lambda)
+                              np.power((min_tau * continuum['normalization'] /
+                                        column_density),
+                                       (1. / continuum['index']))),
+                              self.lambda_field).clip(0, self.n_lambda)
 
+            # Only calculate the effects of continua where the normalized
+            # column_density is greater than min_tau
+            # because lower column will not have significant contribution
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
+            if valid_continuua.size == 0:
+                mylog.info("Not adding continuum %s: insufficient column density or out of range" %
+                    continuum['label'])
+                continue
+
             pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
+
+            # Tau value is (wavelength / continuum_wavelength)**index *
+            #              (column_dens / norm),
+            # i.e. a power law decreasing as wavelength decreases
+
+            # Step through the absorber list and add continuum tau for each to
+            # the total optical depth for all wavelengths
             for i, lixel in enumerate(valid_continuua):
-                line_tau = np.power((self.lambda_field[left_index[lixel]:right_index[lixel]] /
-                                     this_wavelength[lixel]), continuum['index']) * \
-                                     column_density[lixel] / continuum['normalization']
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                cont_tau = \
+                    np.power((self.lambda_field[left_index[lixel] :
+                                                right_index[lixel]] /
+                                   this_wavelength[lixel]), \
+                              continuum['index']) * \
+                    (column_density[lixel] / continuum['normalization'])
+                self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau
                 pbar.update(i)
             pbar.finish()
 
@@ -333,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density == 0).all():
+                mylog.info("Not adding line %s: insufficient column density" % line['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
@@ -376,7 +436,10 @@
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            if use_peculiar_velocity:
+                vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            else:
+                vlos = np.zeros(field_data['temperature'].size)
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
@@ -413,6 +476,12 @@
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
 
+                # if there is a ray element with temperature = 0 or column
+                # density = 0, skip it
+                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
+                    pbar.update(i)
+                    continue
+
                 # the virtual window into which the line is deposited initially
                 # spans a region of 2 coarse spectral bins
                 # (one on each side of the center_index) but the window

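To make the continuum power law above concrete, a small standalone computation (values are illustrative, mirroring the test parameters later in this changeset):

.. code-block:: python

   import numpy as np

   lambda_field = np.linspace(800.0, 1300.0, 5001)  # spectral bins, Angstroms
   this_wavelength = 912.32   # redshifted continuum edge, Angstroms
   column_density = 1.0e18    # cm**-2 (assumed absorber column)
   normalization = 1.6e17     # cm**-2
   index = 3.0

   # tau = (lambda / lambda_edge)**index * (N / N_0), applied at and below the edge
   mask = lambda_field <= this_wavelength
   cont_tau = np.zeros_like(lambda_field)
   cont_tau[mask] = (lambda_field[mask] / this_wavelength)**index \
                    * (column_density / normalization)
   flux = np.exp(-cont_tau)   # discontinuous cutoff at the edge, rising blueward
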
diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -33,7 +33,8 @@
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
 GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
-
+ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
+FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
@@ -145,6 +146,58 @@
     shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
+@requires_answer_testing()
+def test_absorption_spectrum_non_cosmo_novpec():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5', use_peculiar_velocity=False)
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=False)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo_novpec".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
 def test_equivalent_width_conserved():
     """
     This tests that the equivalent width of the optical depth is conserved 
@@ -360,3 +413,146 @@
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+@requires_file(ISO_GALAXY)
+@requires_answer_testing()
+def test_absorption_spectrum_with_continuum():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset and adds Lyman alpha and Lyman continuum to it
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(ISO_GALAXY)
+    lr = LightRay(ds)
+
+    ray_start = ds.domain_left_edge
+    ray_end = ds.domain_right_edge
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    my_label = 'Ly C'
+    field = 'H_number_density'
+    wavelength = 912.323660  # Angstroms
+    normalization = 1.6e17
+    index = 3.0
+
+    sp.add_continuum(my_label, field, wavelength, normalization, index)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_continuum".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_with_continuum.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(FIRE)
+def test_absorption_spectrum_with_zero_field():
+    """
+    This test generates an absorption spectrum from a particle
+    dataset containing zero-valued fields
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(FIRE)
+    lr = LightRay(ds)
+
+    # Define species and associated parameters to add to continuum
+    # Parameters used for both adding the transition to the spectrum
+    # and for fitting
+    # Note that for single species that produce multiple lines
+    # (as in the OVI doublet), 'numLines' will be equal to the number
+    # of lines, and f, gamma, and wavelength will have multiple values.
+
+    HI_parameters = {
+        'name': 'HI',
+        'field': 'H_number_density',
+        'f': [.4164],
+        'Gamma': [6.265E8],
+        'wavelength': [1215.67],
+        'mass': 1.00794,
+        'numLines': 1,
+        'maxN': 1E22, 'minN': 1E11,
+        'maxb': 300, 'minb': 1,
+        'maxz': 6, 'minz': 0,
+        'init_b': 30,
+        'init_N': 1E14
+    }
+
+    species_dicts = {'HI': HI_parameters}
+
+
+    # Get all fields that need to be added to the light ray
+    fields = [('gas','temperature')]
+    for s, params in species_dicts.items():
+        fields.append(params['field'])
+
+    # With a single dataset, a start_position and
+    # end_position or trajectory must be given.
+    # Trajectory should be given as (r, theta, phi)
+    lr.make_light_ray(
+        start_position=ds.arr([0., 0., 0.], 'unitary'),
+        end_position=ds.arr([1., 1., 1.], 'unitary'),
+        solution_filename='test_lightraysolution.txt',
+        data_filename='test_lightray.h5',
+        fields=fields)
+    
+    # Create an AbsorptionSpectrum object extending from
+    # lambda = 900 to lambda = 1400, with 50000 pixels
+    sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
+    
+    # Iterate over species
+    for s, params in species_dicts.items():
+        # Iterate over transitions for a single species
+        for i in range(params['numLines']):
+            # Add the lines to the spectrum
+            sp.add_line(
+                s, params['field'],
+                params['wavelength'][i], params['f'][i],
+                params['Gamma'][i], params['mass'],
+                label_threshold=1.e10)
+    
+    
+    # Make and save spectrum
+    wavelength, flux = sp.make_spectrum(
+        'test_lightray.h5',
+        output_file='test_spectrum.h5',
+        line_list_file='test_lines.txt',
+        use_peculiar_velocity=True)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -21,6 +21,8 @@
 from yt.funcs import mylog
 from yt.utilities.cosmology import \
     Cosmology
+from yt.utilities.physical_constants import \
+    c
 
 class CosmologySplice(object):
     """
@@ -67,7 +69,11 @@
         max_box_fraction : float
             In terms of the size of the domain, the maximum length a light
             ray segment can be in order to span the redshift interval from
-            one dataset to another.
+            one dataset to another.  If using a zoom-in simulation, this
+            parameter can be set to the length of the high resolution
+            region so as to limit ray segments to that size.  If the
+            high resolution region is not cubical, the smallest side
+            should be used.
             Default: 1.0 (the size of the box)
         deltaz_min : float
             Specifies the minimum delta z between consecutive datasets
@@ -115,6 +121,7 @@
                 output['next'] = self.splice_outputs[i + 1]
 
         # Calculate maximum delta z for each data dump.
+        self.max_box_fraction = max_box_fraction
         self._calculate_deltaz_max()
 
         # Calculate minimum delta z for each data dump.
@@ -144,7 +151,7 @@
             self.splice_outputs.sort(key=lambda obj:np.fabs(z - obj['redshift']))
             cosmology_splice.append(self.splice_outputs[0])
             z = cosmology_splice[-1]["redshift"]
-            z_target = z - max_box_fraction * cosmology_splice[-1]["dz_max"]
+            z_target = z - cosmology_splice[-1]["dz_max"]
 
             # fill redshift space with datasets
             while ((z_target > near_redshift) and
@@ -172,7 +179,7 @@
 
                 cosmology_splice.append(current_slice)
                 z = current_slice["redshift"]
-                z_target = z - max_box_fraction * current_slice["dz_max"]
+                z_target = z - current_slice["dz_max"]
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
@@ -199,8 +206,8 @@
         mylog.info("create_cosmology_splice: Used %d data dumps to get from z = %f to %f." %
                    (len(cosmology_splice), far_redshift, near_redshift))
         
-        # change the 'next' and 'previous' pointers to point to the correct outputs for the created
-        # splice
+        # change the 'next' and 'previous' pointers to point to the correct outputs
+        # for the created splice
         for i, output in enumerate(cosmology_splice):
             if len(cosmology_splice) == 1:
                 output['previous'] = None
@@ -264,7 +271,8 @@
                 rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
-            deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
+            deltaz_max = self._deltaz_forward(z, self.simulation.box_size *
+                                              self.max_box_fraction)
             outputs.append({'redshift': z, 'dz_max': deltaz_max})
             z -= deltaz_max
 
@@ -282,72 +290,23 @@
         from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
+        target_distance = self.simulation.box_size * \
+          self.max_box_fraction
+        for output in self.splice_outputs:
+            output['dz_max'] = self._deltaz_forward(output['redshift'],
+                                                    target_distance)
 
-        target_distance = self.simulation.box_size
-
-        for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of the box
-            # at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.1 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2-target_distance)/distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            output['dz_max'] = np.abs(z2 - z)
-            
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
         going from z to (z - delta z).
         """
 
-        d_Tolerance = 1e-4
-        max_Iterations = 100
-
         target_distance = self.simulation.box_size / \
           self.simulation.domain_dimensions[0]
-
         for output in self.splice_outputs:
-            z = output['redshift']
-
-            # Calculate delta z that corresponds to the length of a
-            # top grid pixel at a given redshift using Newton's method.
-            z1 = z
-            z2 = z1 - 0.01 # just an initial guess
-            distance1 = self.simulation.quan(0.0, "Mpccm / h")
-            distance2 = self.cosmology.comoving_radial_distance(z2, z)
-            iteration = 1
-
-            while ((np.abs(distance2 - target_distance) / distance2) > d_Tolerance):
-                m = (distance2 - distance1) / (z2 - z1)
-                z1 = z2
-                distance1 = distance2
-                z2 = ((target_distance - distance2) / m.in_units("Mpccm / h")) + z2
-                distance2 = self.cosmology.comoving_radial_distance(z2, z)
-                iteration += 1
-                if (iteration > max_Iterations):
-                    mylog.error("calculate_deltaz_max: Warning - max iterations " +
-                                "exceeded for z = %f (delta z = %f)." %
-                                (z, np.abs(z2 - z)))
-                    break
-            # Use this calculation or the absolute minimum specified by the user.
-            output['dz_min'] = max(np.abs(z2 - z), deltaz_min)
+            zf = self._deltaz_forward(output['redshift'],
+                                      target_distance)
+            output['dz_min'] = max(zf, deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -357,10 +316,13 @@
         d_Tolerance = 1e-4
         max_Iterations = 100
 
-        # Calculate delta z that corresponds to the length of the
-        # box at a given redshift.
         z1 = z
-        z2 = z1 - 0.1 # just an initial guess
+        # Use Hubble's law for initial guess
+        target_distance = self.cosmology.quan(target_distance.to("Mpccm / h"))
+        v = self.cosmology.hubble_parameter(z) * target_distance
+        v = min(v, 0.9 * c)
+        dz = np.sqrt((1. + v/c) / (1. - v/c)) - 1.
+        z2 = z1 - dz
         distance1 = self.cosmology.quan(0.0, "Mpccm / h")
         distance2 = self.cosmology.comoving_radial_distance(z2, z)
         iteration = 1

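The rewritten _deltaz_forward seeds Newton's method with a physically motivated guess instead of a fixed 0.1. A minimal sketch of that guess, assuming a callable H_of_z returning the Hubble parameter in 1/s and a target comoving distance in cm (both stand-ins for the yt Cosmology machinery):

    import numpy as np

    C_CGS = 2.99792458e10  # speed of light, cm/s

    def initial_dz_guess(z, H_of_z, target_distance):
        # Hubble's law: recession velocity at the target comoving distance
        v = H_of_z(z) * target_distance
        # cap the guess below c so the Doppler formula stays finite
        v = min(v, 0.9 * C_CGS)
        # relativistic Doppler shift converts velocity to a starting delta-z
        return np.sqrt((1.0 + v / C_CGS) / (1.0 - v / C_CGS)) - 1.0
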
diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -21,8 +21,6 @@
     load
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
-from yt.units.unit_object import \
-    Unit
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
@@ -81,21 +79,23 @@
     max_box_fraction : optional, float
         In terms of the size of the domain, the maximum length a light
         ray segment can be in order to span the redshift interval from
-        one dataset to another.
+        one dataset to another.  If using a zoom-in simulation, this
+        parameter can be set to the length of the high resolution
+        region so as to limit ray segments to that size.  If the
+        high resolution region is not cubical, the smallest side
+        should be used.
         Default: 1.0 (the size of the box)
     deltaz_min : optional, float
         Specifies the minimum :math:`\Delta z` between consecutive
         datasets in the returned list.  Do not use for simple rays.
         Default: 0.0.
     minimum_coherent_box_fraction : optional, float
-        Used with use_minimum_datasets set to False, this parameter
-        specifies the fraction of the total box size to be traversed
-        before rerandomizing the projection axis and center.  This
-        was invented to allow light rays with thin slices to sample
-        coherent large scale structure, but in practice does not work
-        so well.  Try setting this parameter to 1 and see what happens.  
-        Do not use for simple rays.
-        Default: 0.0.
+        Use to specify the minimum length of a ray, in terms of the
+        size of the domain, before the trajectory is re-randomized.
+        Set to 0 to have the ray trajectory randomized for every dataset.
+        Set to np.inf (infinity) to use a single trajectory for the
+        entire ray.
+        Default: 0.
     time_data : optional, bool
         Whether or not to include time outputs when gathering
         datasets for time series.  Do not use for simple rays.
@@ -125,6 +125,11 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, load_kwargs=None):
 
+        if near_redshift is not None and far_redshift is not None and \
+          near_redshift >= far_redshift:
+            raise RuntimeError(
+                "near_redshift must be less than far_redshift.")
+
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
         self.use_minimum_datasets = use_minimum_datasets
@@ -156,8 +161,7 @@
                 self.cosmology = Cosmology(
                     hubble_constant=self.ds.hubble_constant,
                     omega_matter=self.ds.omega_matter,
-                    omega_lambda=self.ds.omega_lambda,
-                    unit_registry=self.ds.unit_registry)
+                    omega_lambda=self.ds.omega_lambda)
             else:
                 redshift = 0.
             self.light_ray_solution.append({"filename": self.parameter_filename,
@@ -171,20 +175,23 @@
             CosmologySplice.__init__(self, self.parameter_filename, simulation_type,
                                      find_outputs=find_outputs)
             self.light_ray_solution = \
-              self.create_cosmology_splice(self.near_redshift, self.far_redshift,
-                                           minimal=self.use_minimum_datasets,
-                                           max_box_fraction=max_box_fraction,
-                                           deltaz_min=self.deltaz_min,
-                                           time_data=time_data,
-                                           redshift_data=redshift_data)
+              self.create_cosmology_splice(
+                  self.near_redshift, self.far_redshift,
+                  minimal=self.use_minimum_datasets,
+                  max_box_fraction=max_box_fraction,
+                  deltaz_min=self.deltaz_min,
+                  time_data=time_data,
+                  redshift_data=redshift_data)
 
     def _calculate_light_ray_solution(self, seed=None,
+                                      left_edge=None, right_edge=None,
+                                      min_level=None, periodic=True,
                                       start_position=None, end_position=None,
                                       trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
-        np.random.seed(seed)
+        my_random = np.random.RandomState(seed)
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
@@ -194,9 +201,9 @@
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")
-            self.light_ray_solution[0]['start'] = np.asarray(start_position)
+            self.light_ray_solution[0]['start'] = start_position
             if end_position is not None:
-                self.light_ray_solution[0]['end'] = np.asarray(end_position)
+                self.light_ray_solution[0]['end'] = end_position
             else:
                 # assume trajectory given as r, theta, phi
                 if len(trajectory) != 3:
@@ -230,29 +237,40 @@
 
                 # Get dataset axis and center.
                 # If using box coherence, only get start point and vector if
-                # enough of the box has been used,
-                # or if box_fraction_used will be greater than 1 after this slice.
-                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                        (box_fraction_used >
-                         self.minimum_coherent_box_fraction) or \
-                        (box_fraction_used +
-                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                    # Random start point
-                    self.light_ray_solution[q]['start'] = np.random.random(3)
-                    theta = np.pi * np.random.random()
-                    phi = 2 * np.pi * np.random.random()
-                    box_fraction_used = 0.0
+                # enough of the box has been used.
+                if (q == 0) or (box_fraction_used >=
+                                self.minimum_coherent_box_fraction):
+                    if periodic:
+                        self.light_ray_solution[q]['start'] = left_edge + \
+                          (right_edge - left_edge) * my_random.random_sample(3)
+                        theta = np.pi * my_random.random_sample()
+                        phi = 2 * np.pi * my_random.random_sample()
+                        box_fraction_used = 0.0
+                    else:
+                        ds = load(self.light_ray_solution[q]["filename"])
+                        ray_length = \
+                          ds.quan(self.light_ray_solution[q]['traversal_box_fraction'],
+                                  "unitary")
+                        self.light_ray_solution[q]['start'], \
+                          self.light_ray_solution[q]['end'] = \
+                          non_periodic_ray(ds, left_edge, right_edge, ray_length,
+                                           my_random=my_random, min_level=min_level)
+                        del ds
                 else:
-                    # Use end point of previous segment and same theta and phi.
+                    # Use end point of previous segment, adjusted for periodicity,
+                    # and the same trajectory.
                     self.light_ray_solution[q]['start'] = \
-                      self.light_ray_solution[q-1]['end'][:]
+                      periodic_adjust(self.light_ray_solution[q-1]['end'][:],
+                                      left=left_edge, right=right_edge)
 
-                self.light_ray_solution[q]['end'] = \
-                  self.light_ray_solution[q]['start'] + \
-                    self.light_ray_solution[q]['traversal_box_fraction'] * \
-                    np.array([np.cos(phi) * np.sin(theta),
-                              np.sin(phi) * np.sin(theta),
-                              np.cos(theta)])
+                if "end" not in self.light_ray_solution[q]:
+                    self.light_ray_solution[q]['end'] = \
+                      self.light_ray_solution[q]['start'] + \
+                        self.light_ray_solution[q]['traversal_box_fraction'] * \
+                        self.simulation.box_size * \
+                        np.array([np.cos(phi) * np.sin(theta),
+                                  np.sin(phi) * np.sin(theta),
+                                  np.cos(theta)])
                 box_fraction_used += \
                   self.light_ray_solution[q]['traversal_box_fraction']
 
@@ -263,15 +281,18 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None,
+    def make_light_ray(self, seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
                        start_position=None, end_position=None,
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=None, use_peculiar_velocity=True, 
-                       redshift=None, njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True,
+                       redshift=None, field_parameters=None, njobs=-1):
         """
-        make_light_ray(seed=None, start_position=None, end_position=None,
+        make_light_ray(seed=None, periodic=True,
+                       left_edge=None, right_edge=None, min_level=None,
+                       start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
                        use_peculiar_velocity=True, redshift=None,
@@ -287,6 +308,29 @@
         seed : optional, int
             Seed for the random number generator.
             Default: None.
+        periodic : optional, bool
+            If True, ray trajectories will make use of periodic
+            boundaries.  If False, ray trajectories will not be
+            periodic.
+            Default : True.
+        left_edge : optional, iterable of floats or YTArray
+            The left corner of the region in which rays are to be
+            generated.  If None, the left edge will be that of the
+            domain.  If specified without units, it is assumed to
+            be in code units.
+            Default: None.
+        right_edge : optional, iterable of floats or YTArray
+            The right corner of the region in which rays are to be
+            generated.  If None, the right edge will be that of the
+            domain.  If specified without units, it is assumed to
+            be in code units.
+            Default: None.
+        min_level : optional, int
+            The minimum refinement level of the spatial region in which
+            the ray passes.  This can be used with zoom-in simulations
+            where the high resolution region does not keep a constant
+            geometry.
+            Default: None.
         start_position : optional, iterable of floats or YTArray.
             Used only if creating a light ray from a single dataset.
             The coordinates of the starting position of the ray.
@@ -365,33 +409,63 @@
         ...                       use_peculiar_velocity=True)
 
         """
+        if self.simulation_type is None:
+            domain = self.ds
+        else:
+            domain = self.simulation
 
-        if start_position is not None and hasattr(start_position, 'units'):
-            start_position = start_position.to('unitary')
-        elif start_position is not None :
-            start_position = self.ds.arr(
-                start_position, 'code_length').to('unitary')
+        assumed_units = "code_length"
+        if left_edge is None:
+            left_edge = domain.domain_left_edge
+        elif not hasattr(left_edge, 'units'):
+            left_edge = domain.arr(left_edge, assumed_units)
+        left_edge.convert_to_units('unitary')
 
-        if end_position is not None and hasattr(end_position, 'units'):
-            end_position = end_position.to('unitary')
-        elif end_position is not None :
-            end_position = self.ds.arr(
-                end_position, 'code_length').to('unitary')
+        if right_edge is None:
+            right_edge = domain.domain_right_edge
+        elif not hasattr(right_edge, 'units'):
+            right_edge = domain.arr(right_edge, assumed_units)
+        right_edge.convert_to_units('unitary')
+
+        if start_position is not None:
+            if hasattr(start_position, 'units'):
+                start_position = start_position
+            else:
+                start_position = self.ds.arr(start_position, assumed_units)
+            start_position.convert_to_units('unitary')
+
+        if end_position is not None:
+            if hasattr(end_position, 'units'):
+                end_position = end_position
+            else:
+                end_position = self.ds.arr(end_position, assumed_units)
+            end_position.convert_to_units('unitary')
 
         if get_los_velocity is not None:
             use_peculiar_velocity = get_los_velocity
-            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
+            mylog.warn("'get_los_velocity' kwarg is deprecated. " + \
+                       "Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
+                                           left_edge=left_edge,
+                                           right_edge=right_edge,
+                                           min_level=min_level, periodic=periodic,
                                            start_position=start_position,
                                            end_position=end_position,
                                            trajectory=trajectory,
                                            filename=solution_filename)
 
+        if field_parameters is None:
+            field_parameters = {}
+
         # Initialize data structures.
         self._data = {}
+        # temperature field is automatically added to fields
         if fields is None: fields = []
+        if (('gas', 'temperature') not in fields) and \
+           ('temperature' not in fields):
+           fields.append(('gas', 'temperature'))
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
@@ -425,19 +499,11 @@
             if setup_function is not None:
                 setup_function(ds)
 
-            if start_position is not None:
-                my_segment["start"] = ds.arr(my_segment["start"], "unitary")
-                my_segment["end"] = ds.arr(my_segment["end"], "unitary")
-            else:
-                my_segment["start"] = ds.domain_width * my_segment["start"] + \
-                  ds.domain_left_edge
-                my_segment["end"] = ds.domain_width * my_segment["end"] + \
-                  ds.domain_left_edge
-
             if not ds.cosmological_simulation:
                 next_redshift = my_segment["redshift"]
             elif self.near_redshift == self.far_redshift:
-                if isinstance(my_segment["traversal_box_fraction"], YTArray):
+                if isinstance(my_segment["traversal_box_fraction"], YTArray) and \
+                  not my_segment["traversal_box_fraction"].units.is_dimensionless:
                     segment_length = \
                       my_segment["traversal_box_fraction"].in_units("Mpccm / h")
                 else:
@@ -451,18 +517,18 @@
             else:
                 next_redshift = my_segment['next']['redshift']
 
+            # Make sure start, end, left, right
+            # are using the dataset's unit system.
+            my_start = ds.arr(my_segment['start'])
+            my_end   = ds.arr(my_segment['end'])
+            my_left  = ds.arr(left_edge)
+            my_right = ds.arr(right_edge)
             mylog.info("Getting segment at z = %s: %s to %s." %
-                       (my_segment['redshift'], my_segment['start'],
-                        my_segment['end']))
-
-            # Convert segment units from unitary to code length for sub_ray
-            my_segment['start'] = my_segment['start'].to('code_length')
-            my_segment['end'] = my_segment['end'].to('code_length')
+                       (my_segment['redshift'], my_start, my_end))
 
             # Break periodic ray into non-periodic segments.
-            sub_segments = periodic_ray(my_segment['start'], my_segment['end'],
-                                        left=ds.domain_left_edge,
-                                        right=ds.domain_right_edge)
+            sub_segments = periodic_ray(my_start, my_end,
+                                        left=my_left, right=my_right)
 
             # Prepare data structure for subsegment.
             sub_data = {}
@@ -475,6 +541,8 @@
                 mylog.info("Getting subsegment: %s to %s." %
                            (list(sub_segment[0]), list(sub_segment[1])))
                 sub_ray = ds.ray(sub_segment[0], sub_segment[1])
+                for key, val in field_parameters.items():
+                    sub_ray.set_field_parameter(key, val)
                 asort = np.argsort(sub_ray["t"])
                 sub_data['dl'].extend(sub_ray['dts'][asort] *
                                       vector_length(sub_ray.start_point,
@@ -513,7 +581,7 @@
                     # sight) and the velocity vectors: a dot b = ab cos(theta)
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
-                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
                     # Protect against situations where velocity mag is exactly
                     # zero, in which case zero / zero = NaN.
                     cos_theta = np.nan_to_num(cos_theta)
@@ -533,8 +601,7 @@
             # Get redshift for each lixel.  Assume linear relation between l 
             # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
-                (sub_data['dl'] / vector_length(my_segment['start'],
-                                                my_segment['end']).in_cgs())
+                (sub_data['dl'] / vector_length(my_start, my_end).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
@@ -604,19 +671,18 @@
               self.cosmology.t_from_z(ds["current_redshift"])
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
+
         # Only return LightRay elements with non-zero density
-        mask_field_units = ['K', 'cm**-3', 'g/cm**3']
-        mask_field_units = [Unit(u) for u in mask_field_units]
-        for f in data:
-            for u in mask_field_units:
-                if data[f].units.same_dimensions_as(u):
-                    mask = data[f] > 0
-                    if not np.any(mask):
-                        raise RuntimeError(
-                            "No zones along light ray with nonzero %s. "
-                            "Please modify your light ray trajectory." % (f,))
-                    for key in data.keys():
-                        data[key] = data[key][mask]
+        if 'temperature' in data: f = 'temperature'
+        if ('gas', 'temperature') in data: f = ('gas', 'temperature')
+        if 'temperature' in data or ('gas', 'temperature') in data:
+            mask = data[f] > 0
+            if not np.any(mask):
+                raise RuntimeError(
+                    "No zones along light ray with nonzero %s. "
+                    "Please modify your light ray trajectory." % (f,))
+            for key in data.keys():
+                data[key] = data[key][mask]
         save_as_dataset(ds, filename, data, field_types=field_types,
                         extra_attrs=extra_attrs)
 
@@ -671,6 +737,22 @@
 
     return np.sqrt(np.power((end - start), 2).sum())
 
+def periodic_adjust(p, left=None, right=None):
+    """
+    Return the point p adjusted for periodic boundaries.
+
+    """
+    if isinstance(p, YTArray):
+        p.convert_to_units("unitary")
+    if left is None:
+        left = np.zeros_like(p)
+    if right is None:
+        right = np.ones_like(p)
+
+    w = right - left
+    p -= left
+    return np.mod(p, w)
+
 def periodic_distance(coord1, coord2):
     """
     periodic_distance(coord1, coord2)
@@ -712,7 +794,7 @@
     dim = right - left
 
     vector = end - start
-    wall = np.zeros(start.shape)
+    wall = np.zeros_like(start)
     close = np.zeros(start.shape, dtype=object)
 
     left_bound = vector < 0
@@ -732,7 +814,6 @@
     this_end = end.copy()
     t = 0.0
     tolerance = 1e-6
-
     while t < 1.0 - tolerance:
         hit_left = (this_start <= left) & (vector < 0)
         if (hit_left).any():
@@ -750,8 +831,44 @@
         now = this_start + vector * dt
         close_enough = np.abs(now - nearest) / np.abs(vector.max()) < 1e-10
         now[close_enough] = nearest[close_enough]
-        segments.append([np.copy(this_start), np.copy(now)])
+        segments.append([this_start.copy(), now.copy()])
         this_start = now.copy()
         t += dt
 
     return segments
+
+def non_periodic_ray(ds, left_edge, right_edge, ray_length, max_iter=5000,
+                     min_level=None, my_random=None):
+
+    max_length = vector_length(left_edge, right_edge)
+    if ray_length > max_length:
+        raise RuntimeError(
+            ("The maximum segment length in the region %s to %s is %s, " +
+             "but the ray length requested is %s.  Decrease ray length.") %
+             (left_edge, right_edge, max_length, ray_length))
+
+    if my_random is None:
+        my_random = np.random.RandomState()
+    i = 0
+    while True:
+        start = my_random.random_sample(3) * \
+          (right_edge - left_edge) + left_edge
+        theta = np.pi * my_random.random_sample()
+        phi = 2 * np.pi * my_random.random_sample()
+        end = start + ray_length * \
+          np.array([np.cos(phi) * np.sin(theta),
+                    np.sin(phi) * np.sin(theta),
+                    np.cos(theta)])
+        i += 1
+        test_ray = ds.ray(start, end)
+        if (end >= left_edge).all() and (end <= right_edge).all() and \
+          (min_level is None or min_level <= 0 or
+           (test_ray["grid_level"] >= min_level).all()):
+            mylog.info("Found ray after %d attempts." % i)
+            del test_ray
+            return start, end
+        del test_ray
+        if i > max_iter:
+            raise RuntimeError(
+                ("Failed to create segment in %d attempts.  " +
+                 "Decreasing ray length is recommended") % i)

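For reference, the wrapping that periodic_adjust performs reduces to a single modulo over the box width; a toy example with a unit box (values invented):

    import numpy as np

    left = np.zeros(3)
    right = np.ones(3)
    p = np.array([1.2, -0.3, 0.5])   # end point of the previous segment
    w = right - left
    wrapped = np.mod(p - left, w)    # -> [0.2, 0.7, 0.5], back inside the box
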
diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/tests/test_light_ray.py
@@ -10,6 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.testing import \
     requires_file
 from yt.analysis_modules.cosmological_observation.api import LightRay
@@ -41,6 +43,48 @@
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
+@requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nested():
+    """
+    This test generates a cosmological light ray confining the ray to a subvolume
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    left = np.ones(3) * 0.25
+    right = np.ones(3) * 0.75
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, left_edge=left, right_edge=right,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS)
+def test_light_ray_cosmo_nonperiodic():
+    """
+    This test generates a cosmological light ray using non-periodic segments
+    """
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
+
+    lr.make_light_ray(seed=1234567, periodic=False,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
 def test_light_ray_non_cosmo():

diff -r 4387fbcf5773b5a52ada2940d446b9f1ab31defe -r af39415bb4c4f8eedd995b58dbf984e503d1967b yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -11,7 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.field_data import YTFieldData
 from yt.data_objects.time_series import DatasetSeries
 from yt.utilities.lib.particle_mesh_operations import CICSample_3
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -81,30 +81,38 @@
         self.times = []
         self.suppress_logging = suppress_logging
 
-        # Default fields 
-        
         if fields is None: fields = []
-        fields.append("particle_position_x")
-        fields.append("particle_position_y")
-        fields.append("particle_position_z")
         fields = list(OrderedDict.fromkeys(fields))
 
         if self.suppress_logging:
             old_level = int(ytcfg.get("yt","loglevel"))
             mylog.setLevel(40)
+        
+        fds = {}
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+        idx_field = dd_first._determine_fields("particle_index")[0]
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            fds[field] = dd_first._determine_fields(field)[0]
+
         my_storage = {}
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
             dd = ds.all_data()
-            idx_field = dd._determine_fields("particle_index")[0]
             newtags = dd[idx_field].ndarray_view().astype("int64")
             mask = np.in1d(newtags, indices, assume_unique=True)
-            sorts = np.argsort(newtags[mask])
-            self.array_indices.append(np.where(np.in1d(indices, newtags, assume_unique=True))[0])
+            sort = np.argsort(newtags[mask])
+            array_indices = np.where(np.in1d(indices, newtags, assume_unique=True))[0]
+            self.array_indices.append(array_indices)
             self.masks.append(mask)
-            self.sorts.append(sorts)
+            self.sorts.append(sort)
+
+            pfields = {}
+            for field in ("particle_position_%s" % ax for ax in "xyz"):
+                pfields[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
             sto.result_id = ds.parameter_filename
-            sto.result = ds.current_time
+            sto.result = (ds.current_time, array_indices, pfields)
             pbar.update(i)
         pbar.finish()
 
@@ -112,17 +120,22 @@
             mylog.setLevel(old_level)
 
         times = []
-        for fn, time in sorted(my_storage.items()):
+        for fn, (time, indices, pfields) in sorted(my_storage.items()):
             times.append(time)
-
         self.times = self.data_series[0].arr([time for time in times], times[0].units)
 
         self.particle_fields = []
+        output_field = np.empty((self.num_indices, self.num_steps))
+        output_field.fill(np.nan)
+        for field in ("particle_position_%s" % ax for ax in "xyz"):
+            for i, (fn, (time, indices, pfields)) in enumerate(sorted(my_storage.items())):
+                output_field[indices, i] = pfields[field]
+            self.field_data[field] = array_like_field(
+                dd_first, output_field.copy(), fds[field])
+            self.particle_fields.append(field)
 
         # Instantiate fields the caller requested
-
-        for field in fields:
-            self._get_data(field)
+        self._get_data(fields)
 
     def has_key(self, key):
         return (key in self.field_data)
@@ -137,7 +150,7 @@
         if key == "particle_time":
             return self.times
         if key not in self.field_data:
-            self._get_data(key)
+            self._get_data([key])
         return self.field_data[key]
     
     def __setitem__(self, key, val):
@@ -188,65 +201,89 @@
         >>> trajs = ParticleTrajectories(my_fns, indices)
         >>> trajs.add_fields(["particle_mass", "particle_gpot"])
         """
-        for field in fields:
-            if field not in self.field_data:
-                self._get_data(field)
+        self._get_data(fields)
                 
-    def _get_data(self, field):
+    def _get_data(self, fields):
         """
-        Get a field to include in the trajectory collection.
+        Get a list of fields to include in the trajectory collection.
         The trajectory collection itself is a dict of 2D numpy arrays,
         with shape (num_indices, num_steps)
         """
-        if field not in self.field_data:
-            if self.suppress_logging:
-                old_level = int(ytcfg.get("yt","loglevel"))
-                mylog.setLevel(40)
-            ds_first = self.data_series[0]
-            dd_first = ds_first.all_data()
-            fd = dd_first._determine_fields(field)[0]
+
+        missing_fields = [field for field in fields
+                          if field not in self.field_data]
+        if not missing_fields:
+            return
+
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+        ds_first = self.data_series[0]
+        dd_first = ds_first.all_data()
+
+        fds = {}
+        new_particle_fields = []
+        for field in missing_fields:
+            fds[field] = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
-                if self.data_series[0]._get_field_info(*fd).particle_type:
+                if self.data_series[0]._get_field_info(*fds[field]).particle_type:
                     self.particle_fields.append(field)
-            particles = np.empty((self.num_indices,self.num_steps))
-            particles[:] = np.nan
-            step = int(0)
-            pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
-            my_storage={}
-            for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
-                mask = self.masks[i]
-                sort = self.sorts[i]
-                if field in self.particle_fields:
+                    new_particle_fields.append(field)
+                    
+
+        grid_fields = [field for field in missing_fields
+                       if field not in self.particle_fields]
+        step = int(0)
+        pbar = get_pbar("Generating [%s] fields in trajectories." %
+                        ", ".join(missing_fields), self.num_steps)
+        my_storage = {}
+        
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            mask = self.masks[i]
+            sort = self.sorts[i]
+            pfield = {}
+
+            if new_particle_fields:  # there's at least one particle field
+                dd = ds.all_data()
+                for field in new_particle_fields:
                     # This is easy... just get the particle fields
-                    dd = ds.all_data()
-                    pfield = dd[fd].ndarray_view()[mask][sort]
-                else:
-                    # This is hard... must loop over grids
-                    pfield = np.zeros((self.num_indices))
-                    x = self["particle_position_x"][:,step].ndarray_view()
-                    y = self["particle_position_y"][:,step].ndarray_view()
-                    z = self["particle_position_z"][:,step].ndarray_view()
-                    # This will fail for non-grid index objects
-                    particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
-                    for grid in particle_grids:
-                        cube = grid.retrieve_ghost_zones(1, [fd])
-                        CICSample_3(x,y,z,pfield,
+                    pfield[field] = dd[fds[field]].ndarray_view()[mask][sort]
+
+            if grid_fields:
+                # This is hard... must loop over grids
+                for field in grid_fields:
+                    pfield[field] = np.zeros((self.num_indices))
+                x = self["particle_position_x"][:,step].ndarray_view()
+                y = self["particle_position_y"][:,step].ndarray_view()
+                z = self["particle_position_z"][:,step].ndarray_view()
+                particle_grids, particle_grid_inds = ds.index._find_points(x,y,z)
+
+                # This will fail for non-grid index objects
+                for grid in particle_grids:
+                    cube = grid.retrieve_ghost_zones(1, grid_fields)
+                    for field in grid_fields:
+                        CICSample_3(x, y, z, pfield[field],
                                     self.num_indices,
-                                    cube[fd],
+                                    cube[fds[field]],
                                     np.array(grid.LeftEdge).astype(np.float64),
                                     np.array(grid.ActiveDimensions).astype(np.int32),
                                     grid.dds[0])
-                sto.result_id = ds.parameter_filename
-                sto.result = (self.array_indices[i], pfield)
-                pbar.update(step)
-                step += 1
-            pbar.finish()
+            sto.result_id = ds.parameter_filename
+            sto.result = (self.array_indices[i], pfield)
+            pbar.update(step)
+            step += 1
+        pbar.finish()
+
+        output_field = np.empty((self.num_indices,self.num_steps))
+        output_field.fill(np.nan)
+        for field in missing_fields:
+            fd = fds[field]
             for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
-                particles[indices,i] = pfield
-            self.field_data[field] = array_like_field(dd_first, particles, fd)
-            if self.suppress_logging:
-                mylog.setLevel(old_level)
-        return self.field_data[field]
+                output_field[indices, i] = pfield[field]
+            self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd)
+
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
 
     def trajectory_from_index(self, index):
         """

This diff is so big that we needed to truncate the remainder.

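The bookkeeping in the particle_trajectories diff above hinges on matching tracked particle IDs against each snapshot and putting them in a stable order so fields line up across outputs. A minimal sketch with invented arrays:

    import numpy as np

    indices = np.array([10, 42, 7])             # particle IDs to track
    newtags = np.array([7, 99, 42, 10, 3])      # IDs present in this snapshot
    mask = np.in1d(newtags, indices, assume_unique=True)
    sort = np.argsort(newtags[mask])            # stable ascending-ID order
    x = np.array([0.1, 0.9, 0.4, 0.7, 0.2])     # e.g. particle_position_x
    aligned = x[mask][sort]                     # -> [0.1, 0.7, 0.4] for IDs 7, 10, 42
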
https://bitbucket.org/yt_analysis/yt/commits/64011277dfc0/
Changeset:   64011277dfc0
Branch:      yt
User:        MatthewTurk
Date:        2016-09-30 12:41:56+00:00
Summary:     Merging with upstream
Affected #:  119 files

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -63,6 +63,7 @@
 yt/utilities/lib/quad_tree.c
 yt/utilities/lib/ray_integrators.c
 yt/utilities/lib/ragged_arrays.c
+yt/utilities/lib/cosmology_time.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -13,8 +13,14 @@
 the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
+Setting up the Clump Finder
+---------------------------
+
 The clump finder requires a data object (see :ref:`data-objects`) and a field
-over which the contouring is to be performed.
+over which the contouring is to be performed.  The data object is then used
+to create the initial
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object that
+acts as the base for clump finding.
 
 .. code:: python
 
@@ -28,11 +34,15 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
+Clump Validators
+----------------
+
 At this point, every isolated contour will be considered a clump,
 whether this is physical or not.  Validator functions can be added to
 determine if an individual contour should be considered a real clump.
-These functions are specified with the ``Clump.add_validator`` function.
-Current, two validators exist: a minimum number of cells and gravitational
+These functions are specified with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator`
+function.  Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -52,7 +62,8 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can
+The :func:`~yt.analysis_modules.level_sets.clump_validators.add_validator`
+function adds the validator to a registry that can
 be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
@@ -60,9 +71,15 @@
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum
-and maximum of the contouring field, and the step size.  The lower value of the
-contour finder will be continually multiplied by the step size.
+Running the Clump Finder
+------------------------
+
+Clump finding then proceeds by calling the
+:func:`~yt.analysis_modules.level_sets.clump_handling.find_clumps` function.
+This function accepts the
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object, the initial
+minimum and maximum of the contouring field, and the step size.  The lower value
+of the contour finder will be continually multiplied by the step size.
 
 .. code:: python
 
@@ -71,41 +88,27 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
-with its own ``children`` attribute, and so on.
+Calculating Clump Quantities
+----------------------------
 
-A number of helper routines exist for examining the clump hierarchy.
-
-.. code:: python
-
-   # Write a text file of the full hierarchy.
-   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-   # Write a text file of only the leaf nodes.
-   write_clumps(master_clump,0, "%s_clumps.txt" % ds)
-
-   # Get a list of just the leaf nodes.
-   leaf_clumps = get_lowest_clumps(master_clump)
-
-``Clump`` objects can be used like all other data containers.
-
-.. code:: python
-
-   print(leaf_clumps[0]["gas", "density"])
-   print(leaf_clumps[0].quantities.total_mass())
-
-The writing functions will write out a series or properties about each
-clump by default.  Additional properties can be appended with the
-``Clump.add_info_item`` function.
+By default, a number of quantities will be calculated for each clump when the
+clump finding process has finished.  The default quantities are: ``total_cells``,
+``cell_mass``, ``mass_weighted_jeans_mass``, ``volume_weighted_jeans_mass``,
+``max_grid_level``, ``min_number_density``, and ``max_number_density``.
+Additional items can be added with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item`
+function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
 Just like the validators, custom info items can be added by defining functions
-that minimally accept a ``Clump`` object and return a string to be printed.
+that minimally accept a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object and return
+a format string and the value to be printed.  These are then added to the list
+of available info items by calling
+:func:`~yt.analysis_modules.level_sets.clump_info_items.add_clump_info`:
 
 .. code:: python
 
@@ -121,10 +124,47 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**,
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
-**max_grid_level**, **min_number_density**, **max_number_density**, and
-**distance_to_main_clump**.
+Besides the quantities calculated by default, the following are available:
+``center_of_mass`` and ``distance_to_main_clump``.
+
+Working with Clumps
+-------------------
+
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+contains a list of all sub-clumps.  Each sub-clump is also a
+:class:`~yt.analysis_modules.level_sets.clump_handling.Clump` object
+with its own ``children`` attribute, and so on.
+
+.. code:: python
+
+   print(master_clump["gas", "density"])
+   print(master_clump.children)
+   print(master_clump.children[0]["gas", "density"])
+
+The entire clump tree can be traversed with a loop syntax:
+
+.. code:: python
+
+   for clump in master_clump:
+       print(clump.clump_id)
+
+The :func:`~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps`
+function will return a list of the individual clumps that have no children
+of their own (the leaf clumps).
+
+.. code:: python
+
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
+
+   print(leaf_clumps[0]["gas", "density"])
+   print(leaf_clumps[0]["all", "particle_mass"])
+   print(leaf_clumps[0].quantities.total_mass())
+
+Visualizing Clumps
+------------------
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
@@ -134,3 +174,44 @@
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')
+
+Saving and Reloading Clump Data
+-------------------------------
+
+The clump tree can be saved as a reloadable dataset with the
+:func:`~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset`
+function.  This will save all info items that have been calculated as well as
+any field values specified with the *fields* keyword.  This function
+can be called for any clump in the tree, saving that clump and all those
+below it.
+
+.. code:: python
+
+   fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+
+The clump tree can then be reloaded as a regular dataset.  The ``tree`` attribute
+associated with the dataset provides access to the clump tree.  The tree can be
+iterated over in the same fashion as the original tree.
+
+.. code:: python
+
+   ds_clumps = yt.load(fn)
+   for clump in ds_clumps.tree:
+       print(clump.clump_id)
+
+The ``leaves`` attribute returns a list of all leaf clumps.
+
+.. code:: python
+
+   print(ds_clumps.leaves)
+
+Info items for each clump can be accessed with the ``clump`` field type.  Gas
+or grid fields should be accessed using the ``grid`` field type, and particle
+fields should be accessed using the specific particle type.
+
+.. code:: python
+
+   my_clump = ds_clumps.leaves[0]
+   print(my_clump["clump", "cell_mass"])
+   print(my_clump["grid", "density"])
+   print(my_clump["all", "particle_mass"])

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -110,11 +110,10 @@
    import yt
    ds = yt.load("galaxy0030/galaxy0030")
    source = ds.sphere( "c", (10, "kpc"))
-   profile = yt.create_profile(source,
-                               [("gas", "density")],          # the bin field
-                               [("gas", "temperature"),       # profile field
-                                ("gas", "radial_velocity")],  # profile field
-                               weight_field=("gas", "cell_mass"))
+   profile = source.profile([("gas", "density")],          # the bin field
+                            [("gas", "temperature"),       # profile field
+                             ("gas", "radial_velocity")],  # profile field
+                            weight_field=("gas", "cell_mass"))
 
 The binning, weight, and profile data can now be accessed as:
 
@@ -145,11 +144,10 @@
 
 .. code-block:: python
 
-   profile2d = yt.create_profile(source,
-                                 [("gas", "density"),      # the x bin field
-                                  ("gas", "temperature")], # the y bin field
-                                 [("gas", "cell_mass")],   # the profile field
-                                 weight_field=None)
+   profile2d = source.profile([("gas", "density"),      # the x bin field
+                               ("gas", "temperature")], # the y bin field
+                              [("gas", "cell_mass")],   # the profile field
+                              weight_field=None)
 
 Accessing the x, y, and profile fields works just as with one-dimensional profiles:
 
@@ -164,7 +162,10 @@
 phase plot that shows the distribution of mass in the density-temperature
 plane, with the average temperature overplotted.  The
 :func:`~matplotlib.pyplot.pcolormesh` function can be used to manually plot
-the 2D profile.
+the 2D profile.  If you want to generate a default profile plot, you can simply
+call::
+
+  profile.plot()
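+
+As a rough sketch of the manual route (assuming the ``profile2d`` object from
+above, that the profile exposes ``x_bins``/``y_bins`` bin-edge arrays, and a
+working matplotlib install):
+
+.. code-block:: python
+
+   import matplotlib.pyplot as plt
+   import numpy as np
+
+   # pcolormesh expects the data transposed relative to the (x, y) bin order
+   plt.pcolormesh(np.array(profile2d.x_bins), np.array(profile2d.y_bins),
+                  np.array(profile2d["gas", "cell_mass"]).T)
+   plt.xscale("log")
+   plt.yscale("log")
+   plt.savefig("phase.png")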
 
 Three-dimensional profiles can be generated and accessed following
 the same procedures.  Additional keyword arguments are available to control

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -21,7 +21,7 @@
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
 im, sc = yt.volume_render(ds, 'density', fname='v0.png')
 sc.camera.set_width(ds.arr(100, 'kpc'))
-render_source = sc.get_source(0)
+render_source = sc.get_source()
 kd=render_source.volume
 
 # Print out specifics of KD Tree

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -10,7 +10,7 @@
 # Modify the transfer function
 
 # First get the render source, in this case the entire domain, with field ('gas','density')
-render_source = sc.get_source(0)
+render_source = sc.get_source()
 
 # Clear the transfer function
 render_source.transfer_function.clear()

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -27,14 +27,14 @@
 # As many validators can be added as you want.
 master_clump.add_validator("min_cells", 20)
 
+# Calculate center of mass for all clumps.
+master_clump.add_info_item("center_of_mass")
+
 # Begin clump finding.
 find_clumps(master_clump, c_min, c_max, step)
 
-# Write out the full clump hierarchy.
-write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
-
-# Write out only the leaf nodes of the hierarchy.
-write_clumps(master_clump,0, "%s_clumps.txt" % ds)
+# Save the clump tree as a reloadable dataset
+fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
 
 # We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
@@ -46,5 +46,17 @@
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
-# Lastly, we write the plot to disk.
+# Save the plot to disk.
 prj.save('clumps')
+
+# Reload the clump dataset.
+cds = yt.load(fn)
+
+# Query fields for clumps in the tree.
+print (cds.tree["clump", "center_of_mass"])
+print (cds.tree.children[0]["grid", "density"])
+print (cds.tree.children[1]["all", "particle_mass"])
+
+# Get all of the leaf clumps.
+print (cds.leaves)
+print (cds.leaves[0]["clump", "cell_mass"])

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -8,7 +8,7 @@
 im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", sigma_clip=6.0)
 
 sc.camera.set_width(ds.arr(0.1,'code_length'))
-tf = sc.get_source(0).transfer_function
+tf = sc.get_source().transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
@@ -19,7 +19,7 @@
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
-tf = sc.get_source(0).transfer_function
+tf = sc.get_source().transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -834,8 +834,8 @@
    ds = yt.load("snapshot_061.hdf5")
 
 Gadget data in raw binary format can also be loaded with the ``load`` command.
-This is only supported for snapshots created with the ``SnapFormat`` parameter
-set to 1 (the standard for Gadget-2).
+This is supported for snapshots created with the ``SnapFormat`` parameter
+set to 1 (the standard for Gadget-2) or 2.
 
 .. code-block:: python
 
@@ -1264,23 +1264,37 @@
 
 .. code-block:: python
 
-   import yt
-   import numpy
-   from yt.utilities.exodusII_reader import get_data
+    import yt
+    import numpy as np
 
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+    coords = np.array([[0.0, 0.0],
+                       [1.0, 0.0],
+                       [1.0, 1.0],
+                       [0.0, 1.0]], dtype=np.float64)
 
-This uses a publically available `MOOSE <http://mooseframework.org/>`
-dataset along with the get_data function to parse the coords, connectivity,
-and data. Then, these can be loaded as an in-memory dataset as follows:
+    connect = np.array([[0, 1, 3],
+                        [1, 2, 3]], dtype=np.int64)
+
+    data = {}
+    data['connect1', 'test'] = np.array([[0.0, 1.0, 3.0],
+                                         [1.0, 2.0, 3.0]], dtype=np.float64)
+
+Here, we have made up a simple, 2D unstructured mesh dataset consisting of two
+triangles and one node-centered data field. This data can be loaded as an in-memory
+dataset as follows:
 
 .. code-block:: python
 
-    mesh_id = 0
-    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+    ds = yt.load_unstructured_mesh(connect, coords, data)
 
-Note that load_unstructured_mesh can take either a single or a list of meshes.
-Here, we have selected only the first mesh to load.
+Note that ``load_unstructured_mesh`` can take either a single mesh or a list
+of meshes. Here, we only have one mesh. The in-memory dataset can then be
+visualized as usual, e.g.:
+
+.. code-block:: python
+
+    sl = yt.SlicePlot(ds, 'z', 'test')
+    sl.annotate_mesh_lines()
 
 .. rubric:: Caveats
 
@@ -1519,6 +1533,57 @@
    # The halo mass
    print(ad["FOF", "particle_mass"])
 
+.. _loading-openpmd-data:
+
+openPMD Data
+------------
+
+`openPMD <http://www.openpmd.org>`_ is an open source meta-standard and naming
+scheme for mesh-based data and particle data. It does not actually define a
+file format.
+
+HDF5 containers respecting the minimal set of meta information from
+versions 1.0.0 and 1.0.1 of the standard are compatible.
+Support for the ED-PIC extension is not available. This frontend can read
+mesh data in Cartesian coordinates as well as particle data.
+
+To load the first in-file iteration of an openPMD dataset using the standard HDF5
+output format:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load('example-3d/hdf5/data00000100.h5')
+
+If you operate on large files, you may want to modify the virtual chunking
+behaviour through ``open_pmd_virtual_gridsize``. The supplied value is an
+estimate of the size of a single read request for each particle
+attribute/mesh (in bytes).
+
+.. code-block:: python
+
+  import yt
+  ds = yt.load('example-3d/hdf5/data00000100.h5', open_pmd_virtual_gridsize=10e4)
+  sp = yt.SlicePlot(ds, 'x', 'rho')
+  sp.show()
+
+Particle data is fully supported:
+
+.. code-block:: python
+
+  import yt
+  ds = yt.load('example-3d/hdf5/data00000100.h5')
+  ad = ds.all_data()
+  ppp = yt.ParticlePhasePlot(ad, 'particle_position_y', 'particle_momentum_y', 'particle_weighting')
+  ppp.show()
+
+.. rubric:: Caveats
+
+* 1D, 2D and 3D data is compatible, but lower-dimensional data might yield
+  strange results since it gets padded and treated as 3D. Extraneous dimensions
+  are set to be of length 1.0m and have a width of one cell.
+* The frontend has hardcoded logic for renaming the openPMD ``position``
+  of particles to ``positionCoarse``.
+
 .. _loading-pyne-data:
 
 PyNE Data

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -322,6 +322,21 @@
    ~yt.frontends.moab.io.IOHandlerMoabH5MHex8
    ~yt.frontends.moab.io.IOHandlerMoabPyneHex8
 
+OpenPMD
+^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.frontends.open_pmd.data_structures.OpenPMDGrid
+   ~yt.frontends.open_pmd.data_structures.OpenPMDHierarchy
+   ~yt.frontends.open_pmd.data_structures.OpenPMDDataset
+   ~yt.frontends.open_pmd.fields.OpenPMDFieldInfo
+   ~yt.frontends.open_pmd.io.IOHandlerOpenPMDHDF5
+   ~yt.frontends.open_pmd.misc.parse_unit_dimension
+   ~yt.frontends.open_pmd.misc.is_const_component
+   ~yt.frontends.open_pmd.misc.get_component
+
 RAMSES
 ^^^^^^
 
@@ -398,6 +413,8 @@
    ~yt.frontends.ytdata.data_structures.YTNonspatialHierarchy
    ~yt.frontends.ytdata.data_structures.YTNonspatialGrid
    ~yt.frontends.ytdata.data_structures.YTProfileDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpTreeDataset
+   ~yt.frontends.ytdata.data_structures.YTClumpContainer
    ~yt.frontends.ytdata.fields.YTDataContainerFieldInfo
    ~yt.frontends.ytdata.fields.YTGridFieldInfo
    ~yt.frontends.ytdata.io.IOHandlerYTDataContainerHDF5
@@ -441,6 +458,26 @@
    ~yt.data_objects.profiles.ParticleProfile
    ~yt.data_objects.profiles.create_profile
 
+.. _clump_finding:
+
+Clump Finding
+^^^^^^^^^^^^^
+
+The ``Clump`` object and associated functions can be used for identification
+of topologically disconnected structures, i.e., clump finding.
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.analysis_modules.level_sets.clump_handling.Clump
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_info_item
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.add_validator
+   ~yt.analysis_modules.level_sets.clump_handling.Clump.save_as_dataset
+   ~yt.analysis_modules.level_sets.clump_handling.find_clumps
+   ~yt.analysis_modules.level_sets.clump_handling.get_lowest_clumps
+   ~yt.analysis_modules.level_sets.clump_info_items.add_clump_info
+   ~yt.analysis_modules.level_sets.clump_validators.add_validator
+
 .. _halo_analysis_ref:
 
 Halo Analysis

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -48,6 +48,8 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
+| openPMD               |     Y      |     Y     |      N     |   Y   |    Y     |    Y     |     N      | Partial  |
++-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | Orion                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+
 | OWLS/EAGLE            |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -5,7 +5,7 @@
 how much output it displays, loading custom fields, loading custom colormaps,
 accessing test datasets regardless of where you are in the file system, etc.
 This customization is done through :ref:`configuration-file` and
-:ref:`plugin-file` both of which exist in your ``$HOME/.yt`` directory.
+:ref:`plugin-file`, both of which exist in your ``$HOME/.config/yt`` directory.
 
 .. _configuration-file:
 
@@ -149,9 +149,10 @@
 Plugin File Format
 ^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/my_plugins.py`` as a plugin
-file, which should contain python code.  If accessing yt functions and classes
-they will not require the ``yt.`` prefix, because of how they are loaded.
+yt will look for and recognize the file ``$HOME/.config/yt/my_plugins.py`` as a
+plugin file, which should contain python code.  When accessing yt functions and
+classes in it, the ``yt.`` prefix is not required, because of how the file is
+loaded.
 
 For example, if I created a plugin file containing:
 
@@ -159,7 +160,8 @@
 
    def _myfunc(field, data):
        return np.random.random(data["density"].shape)
-   add_field("random", function=_myfunc, units='auto')
+   add_field('random', function=_myfunc,
+             dimensions='dimensionless', units='auto')
 
 then all of my data objects would have access to the field ``random``.
 

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -155,7 +155,7 @@
    "source": [
     "im, sc = yt.volume_render(ds, ['temperature'])\n",
     "\n",
-    "source = sc.get_source(0)\n",
+    "source = sc.get_source()\n",
     "source.set_transfer_function(tfh.tf)\n",
     "im2 = sc.render()\n",
     "\n",

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
--- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
+++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
@@ -57,7 +57,7 @@
    },
    "outputs": [],
    "source": [
-    "print (sc.get_source(0))"
+    "print (sc.get_source())"
    ]
   },
   {
@@ -177,7 +177,7 @@
     "tfh.tf.add_layers(10, colormap='gist_rainbow')\n",
     "\n",
     "# Grab the first render source and set it to use the new transfer function\n",
-    "render_source = sc.get_source(0)\n",
+    "render_source = sc.get_source()\n",
     "render_source.transfer_function = tfh.tf\n",
     "\n",
     "sc.render()\n",

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -105,7 +105,7 @@
     sc = yt.create_scene(ds)
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -133,7 +133,7 @@
     sc = yt.create_scene(ds)
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -165,7 +165,7 @@
     sc = yt.create_scene(ds, ('connect2', 'diffused'))
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -196,7 +196,7 @@
     sc = yt.create_scene(ds, ("connect1", "u"))
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation
@@ -224,7 +224,7 @@
    sc = yt.create_scene(ds, ('connect2', 'diffused'))
 
    # override the default colormap
-   ms = sc.get_source(0)
+   ms = sc.get_source()
    ms.cmap = 'Eos A'
 
    # adjust the camera position and orientation
@@ -250,7 +250,7 @@
 
     # override the default colormap. This time we also override
     # the default color bounds
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'hot'
     ms.color_bounds = (500.0, 1700.0)
 
@@ -287,7 +287,7 @@
 
     # override the default colormap. This time we also override
     # the default color bounds
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'hot'
     ms.color_bounds = (500.0, 1700.0)
 
@@ -320,7 +320,7 @@
     sc = yt.create_scene(ds, ("connect2", "diffused"))
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # Create a perspective Camera
@@ -392,7 +392,7 @@
     sc = yt.create_scene(ds)
 
     # override the default colormap
-    ms = sc.get_source(0)
+    ms = sc.get_source()
     ms.cmap = 'Eos A'
 
     # adjust the camera position and orientation

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb setup.py
--- a/setup.py
+++ b/setup.py
@@ -155,6 +155,8 @@
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.cosmology_time",
+              ["yt/utilities/lib/cosmology_time.pyx"]),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -35,35 +35,36 @@
     - yt/frontends/owls_subfind/tests/test_outputs.py
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
     - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
-  
+
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
-  
-  local_pw_007:
+
+  local_pw_008:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
     - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
     - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
-  
+
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
-  
-  local_varia_003:
+
+  local_varia_005:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
     - yt/visualization/volume_rendering/tests/test_mesh_render.py
+    - yt/visualization/tests/test_mesh_slices.py:test_tri2
 
   local_orion_000:
     - yt/frontends/boxlib/tests/test_orion.py
-  
+
   local_ramses_000:
     - yt/frontends/ramses/tests/test_outputs.py
-  
+
   local_ytdata_000:
     - yt/frontends/ytdata
 
@@ -81,6 +82,7 @@
 other_tests:
   unittests:
      - '-v'
+     - '--exclude=test_mesh_slices'  # disable randomly failing test
   cookbook:
      - '-v'
      - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -13,10 +13,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.on_demand_imports import _h5py as h5py
 import numpy as np
 import os
 
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
 from yt.funcs import \
     ensure_dir, \
     mylog
@@ -431,13 +432,16 @@
                     key, quantity = action
                     if quantity in self.halos_ds.field_info:
                         new_halo.quantities[key] = \
-                          self.data_source[quantity][int(i)].in_cgs()
+                          self.data_source[quantity][int(i)]
                     elif callable(quantity):
                         new_halo.quantities[key] = quantity(new_halo)
                 else:
-                    raise RuntimeError("Action must be a callback, filter, or quantity.")
+                    raise RuntimeError(
+                        "Action must be a callback, filter, or quantity.")
 
             if halo_filter:
+                for quantity in new_halo.quantities.values():
+                    quantity.convert_to_base()
                 self.catalog.append(new_halo.quantities)
 
             if save_halos and halo_filter:
@@ -458,27 +462,20 @@
         mylog.info("Saving halo catalog (%d halos) to %s." %
                    (n_halos, os.path.join(self.output_dir,
                                          self.output_prefix)))
-        out_file = h5py.File(filename, 'w')
-        for attr in ["current_redshift", "current_time",
-                     "domain_dimensions",
-                     "cosmological_simulation", "omega_lambda",
-                     "omega_matter", "hubble_constant"]:
-            out_file.attrs[attr] = getattr(self.halos_ds, attr)
-        for attr in ["domain_left_edge", "domain_right_edge"]:
-            out_file.attrs[attr] = getattr(self.halos_ds, attr).in_cgs()
-        out_file.attrs["data_type"] = "halo_catalog"
-        out_file.attrs["num_halos"] = n_halos
+        extra_attrs = {"data_type": "halo_catalog",
+                       "num_halos": n_halos}
+        data = {}
+        ftypes = {}
         if n_halos > 0:
-            field_data = np.empty(n_halos)
             for key in self.quantities:
-                units = ""
-                if hasattr(self.catalog[0][key], "units"):
-                    units = str(self.catalog[0][key].units)
-                for i in range(n_halos):
-                    field_data[i] = self.catalog[i][key]
-                dataset = out_file.create_dataset(str(key), data=field_data)
-                dataset.attrs["units"] = units
-        out_file.close()
+                # This sets each field to be saved in the root hdf5 group,
+                # as per the HaloCatalog format.
+                ftypes[key] = "."
+                data[key] = self.halos_ds.arr(
+                    [halo[key] for halo in self.catalog])
+
+        save_as_dataset(self.halos_ds, filename, data,
+                        field_types=ftypes, extra_attrs=extra_attrs)
 
     def add_default_quantities(self, field_type='halos'):
         self.add_quantity("particle_identifier", field_type=field_type,prepend=True)

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
--- a/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+++ b/yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
@@ -2,6 +2,8 @@
 import sys
 
 from yt.convenience import load
+from yt.frontends.halo_catalog.data_structures import \
+    HaloCatalogDataset
 from yt.utilities.answer_testing.framework import \
     FieldValuesTest, \
     requires_ds
@@ -30,6 +32,7 @@
                           "halo_catalogs", method,
                           "%s.0.h5" % method)
         ds = load(fn)
+        assert isinstance(ds, HaloCatalogDataset)
         for field in _fields:
             yield FieldValuesTest(ds, field, particle_type=True,
                                   decimals=decimals[method])

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,14 +13,22 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import copy
 import numpy as np
 import uuid
 
 from yt.fields.derived_field import \
     ValidateSpatial
-from yt.funcs import mylog, iterable
-from yt.extern.six import string_types
+from yt.frontends.ytdata.utilities import \
+    save_as_dataset
+from yt.funcs import \
+    deprecate, \
+    get_output_filename, \
+    iterable, \
+    mylog
+from yt.extern.six import \
+    string_types
+from yt.utilities.tree_container import \
+    TreeContainer
 
 from .clump_info_items import \
     clump_info_registry
@@ -46,28 +54,40 @@
                  display_field=False,
                  units='')
 
-class Clump(object):
+class Clump(TreeContainer):
     children = None
     def __init__(self, data, field, parent=None,
-                 clump_info=None, validators=None):
+                 clump_info=None, validators=None,
+                 base=None, contour_key=None,
+                 contour_id=None):
         self.data = data
         self.field = field
         self.parent = parent
         self.quantities = data.quantities
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
+        self.info = {}
+
+        # is this the parent clump?
+        if base is None:
+            base = self
+            self.total_clumps = 0
+            if clump_info is None:
+                self.set_default_clump_info()
+            else:
+                self.clump_info = clump_info
+
+        self.base = base
+        self.clump_id = self.base.total_clumps
+        self.base.total_clumps += 1
+        self.contour_key = contour_key
+        self.contour_id = contour_id
 
         if parent is not None:
             self.data.parent = self.parent.data
 
-        # List containing characteristics about clumps that are to be written 
-        # out by the write routines.
-        if clump_info is None:
-            self.set_default_clump_info()
-        else:
-            # Clump info will act the same if add_info_item is called 
-            # before or after clump finding.
-            self.clump_info = copy.deepcopy(clump_info)
 
         if validators is None:
             validators = []
@@ -125,10 +145,11 @@
         for child in self.children:
             child.clear_clump_info()
 
+    @deprecate("Clump.save_as_dataset")
     def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
-        for item in self.clump_info:
+        for item in self.base.clump_info:
             value = item(self)
             f_ptr.write("%s%s\n" % ('\t'*level, value))
 
@@ -159,8 +180,190 @@
                 # Using "ones" here will speed things up.
                 continue
             self.children.append(Clump(new_clump, self.field, parent=self,
-                                       clump_info=self.clump_info,
-                                       validators=self.validators))
+                                       validators=self.validators,
+                                       base=self.base,
+                                       contour_key=contour_key,
+                                       contour_id=cid))
+
+    def __iter__(self):
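+        # Depth-first traversal: yield this clump, then each descendant.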
+        yield self
+        if self.children is None:
+            return
+        for child in self.children:
+            for a_node in child:
+                yield a_node
+
+    def save_as_dataset(self, filename=None, fields=None):
+        r"""Export clump tree to a reloadable yt dataset.
+
+        This function will take a clump object and output a dataset
+        containing the fields given in the ``fields`` list and all info
+        items.  The resulting dataset can be reloaded as a yt dataset.
+
+        Parameters
+        ----------
+        filename : str, optional
+            The name of the file to be written.  If None, the name
+            will be a combination of the original dataset and the clump
+            index.
+        fields : list of strings or tuples, optional
+            If this is supplied, it is the list of fields to be saved to
+            disk.
+
+        Returns
+        -------
+        filename : str
+            The name of the file that has been created.
+
+        Examples
+        --------
+
+        >>> import numpy as np
+        >>> import yt
+        >>> from yt.analysis_modules.level_sets.api import \
+        ...         Clump, find_clumps
+        >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+        >>> data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+        ...                       (8, 'kpc'), (1, 'kpc'))
+        >>> field = ("gas", "density")
+        >>> step = 2.0
+        >>> c_min = 10**np.floor(np.log10(data_source[field]).min())
+        >>> c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
+        >>> master_clump = Clump(data_source, field)
+        >>> master_clump.add_info_item("center_of_mass")
+        >>> master_clump.add_validator("min_cells", 20)
+        >>> find_clumps(master_clump, c_min, c_max, step)
+        >>> fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+        >>> new_ds = yt.load(fn)
+        >>> print(new_ds.tree["clump", "cell_mass"])
+        1296926163.91 Msun
+        >>> print(new_ds.tree["grid", "density"])
+        [  2.54398434e-26   2.46620353e-26   2.25120154e-26 ...,   1.12879234e-25
+           1.59561490e-25   1.09824903e-24] g/cm**3
+        >>> print(new_ds.tree["all", "particle_mass"])
+        [  4.25472446e+38   4.25472446e+38   4.25472446e+38 ...,   2.04238266e+38
+           2.04523901e+38   2.04770938e+38] g
+        >>> print(new_ds.tree.children[0]["clump", "cell_mass"])
+        909636495.312 Msun
+        >>> print(new_ds.leaves[0]["clump", "cell_mass"])
+        3756566.99809 Msun
+        >>> print(new_ds.leaves[0]["grid", "density"])
+        [  6.97820274e-24   6.58117370e-24   7.32046082e-24   6.76202430e-24
+           7.41184837e-24   6.76981480e-24   6.94287213e-24   6.56149658e-24
+           6.76584569e-24   6.94073710e-24   7.06713082e-24   7.22556526e-24
+           7.08338898e-24   6.78684331e-24   7.40647040e-24   7.03050456e-24
+           7.12438678e-24   6.56310217e-24   7.23201662e-24   7.17314333e-24] g/cm**3
+
+        """
+
+        ds = self.data.ds
+        keyword = "%s_clump_%d" % (str(ds), self.clump_id)
+        filename = get_output_filename(filename, keyword, ".h5")
+
+        # collect clump info fields
+        clump_info = dict([(ci.name, []) for ci in self.base.clump_info])
+        clump_info.update(
+            dict([(field, []) for field in ["clump_id", "parent_id",
+                                            "contour_key", "contour_id"]]))
+        for clump in self:
+            clump_info["clump_id"].append(clump.clump_id)
+            if clump.parent is None:
+                parent_id = -1
+            else:
+                parent_id = clump.parent.clump_id
+            clump_info["parent_id"].append(parent_id)
+
+            contour_key = clump.contour_key
+            if contour_key is None: contour_key = -1
+            clump_info["contour_key"].append(contour_key)
+            contour_id = clump.contour_id
+            if contour_id is None: contour_id = -1
+            clump_info["contour_id"].append(contour_id)
+
+            for ci in self.base.clump_info:
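+                # calling the info item caches (format string, value) in clump.info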
+                ci(clump)
+                clump_info[ci.name].append(clump.info[ci.name][1])
+        for ci in clump_info:
+            if hasattr(clump_info[ci][0], "units"):
+                clump_info[ci] = ds.arr(clump_info[ci])
+            else:
+                clump_info[ci] = np.array(clump_info[ci])
+
+        ftypes = dict([(ci, "clump") for ci in clump_info])
+
+        # collect data fields
+        if fields is not None:
+            contour_fields = \
+              [("index", "contours_%s" % ckey)
+               for ckey in np.unique(clump_info["contour_key"]) \
+               if str(ckey) != "-1"]
+
+            ptypes = []
+            field_data = {}
+            need_grid_positions = False
+            for f in self.base.data._determine_fields(fields) + contour_fields:
+                field_data[f] = self.base[f]
+                if ds.field_info[f].particle_type:
+                    if f[0] not in ptypes:
+                        ptypes.append(f[0])
+                    ftypes[f] = f[0]
+                else:
+                    need_grid_positions = True
+                    ftypes[f] = "grid"
+
+            if len(ptypes) > 0:
+                for ax in "xyz":
+                    for ptype in ptypes:
+                        p_field = (ptype, "particle_position_%s" % ax)
+                        if p_field in ds.field_info and \
+                          p_field not in field_data:
+                            ftypes[p_field] = p_field[0]
+                            field_data[p_field] = self.base[p_field]
+
+                for clump in self:
+                    if clump.contour_key is None:
+                        continue
+                    for ptype in ptypes:
+                        cfield = (ptype, "contours_%s" % clump.contour_key)
+                        if cfield not in field_data:
+                            field_data[cfield] = \
+                              clump.data._part_ind(ptype).astype(np.int64)
+                            ftypes[cfield] = ptype
+                        field_data[cfield][clump.data._part_ind(ptype)] = \
+                          clump.contour_id
+
+            if need_grid_positions:
+                for ax in "xyz":
+                    g_field = ("index", ax)
+                    if g_field in ds.field_info and \
+                      g_field not in field_data:
+                        field_data[g_field] = self.base[g_field]
+                        ftypes[g_field] = "grid"
+                    g_field = ("index", "d" + ax)
+                    if g_field in ds.field_info and \
+                      g_field not in field_data:
+                        ftypes[g_field] = "grid"
+                        field_data[g_field] = self.base[g_field]
+
+            if self.contour_key is not None:
+                cfilters = {}
+                for field in field_data:
+                    if ftypes[field] == "grid":
+                        ftype = "index"
+                    else:
+                        ftype = field[0]
+                    cfield = (ftype, "contours_%s" % self.contour_key)
+                    if cfield not in cfilters:
+                        cfilters[cfield] = field_data[cfield] == self.contour_id
+                    field_data[field] = field_data[field][cfilters[cfield]]
+
+        clump_info.update(field_data)
+        extra_attrs = {"data_type": "yt_clump_tree",
+                       "container_type": "yt_clump_tree"}
+        save_as_dataset(ds, filename, clump_info,
+                        field_types=ftypes,
+                        extra_attrs=extra_attrs)
+
+        return filename
 
     def pass_down(self,operation):
         """
@@ -270,6 +473,7 @@
 
     return clump_list
 
+ at deprecate("Clump.save_as_dataset")
 def write_clump_index(clump, level, fh):
     top = False
     if isinstance(fh, string_types):
@@ -287,6 +491,7 @@
     if top:
         fh.close()
 
+ at deprecate("Clump.save_as_dataset")
 def write_clumps(clump, level, fh):
     top = False
     if isinstance(fh, string_types):

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -21,14 +21,15 @@
 clump_info_registry = OperatorRegistry()
 
 def add_clump_info(name, function):
-    clump_info_registry[name] = ClumpInfoCallback(function)
+    clump_info_registry[name] = ClumpInfoCallback(name, function)
 
 class ClumpInfoCallback(object):
     r"""
     A ClumpInfoCallback is a function that takes a clump, computes a 
     quantity, and returns a string to be printed out for writing clump info.
     """
-    def __init__(self, function, args=None, kwargs=None):
+    def __init__(self, name, function, args=None, kwargs=None):
+        self.name = name
         self.function = function
         self.args = args
         if self.args is None: self.args = []
@@ -36,43 +37,51 @@
         if self.kwargs is None: self.kwargs = {}
 
     def __call__(self, clump):
-        return self.function(clump, *self.args, **self.kwargs)
-    
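+        # Compute once and cache the (format string, value) tuple on the clump.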
+        if self.name not in clump.info:
+            clump.info[self.name] = self.function(clump, *self.args, **self.kwargs)
+        rv = clump.info[self.name]
+        return rv[0] % rv[1]
+
+def _center_of_mass(clump, units="code_length", **kwargs):
+    p = clump.quantities.center_of_mass(**kwargs)
+    return "Center of mass: %s.", p.to(units)
+add_clump_info("center_of_mass", _center_of_mass)
+
 def _total_cells(clump):
     n_cells = clump.data["index", "ones"].size
-    return "Cells: %d." % n_cells
+    return "Cells: %d.", n_cells
 add_clump_info("total_cells", _total_cells)
 
 def _cell_mass(clump):
     cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
-    return "Mass: %e Msun." % cell_mass
+    return "Mass: %e Msun.", cell_mass
 add_clump_info("cell_mass", _cell_mass)
 
 def _mass_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
-    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (mass-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
 
 def _volume_weighted_jeans_mass(clump):
     jeans_mass = clump.data.quantities.weighted_average_quantity(
         "jeans_mass", ("index", "cell_volume")).in_units("Msun")
-    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+    return "Jeans Mass (volume-weighted): %.6e Msolar.", jeans_mass
 add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
 
 def _max_grid_level(clump):
     max_level = clump.data["index", "grid_level"].max()
-    return "Max grid level: %d." % max_level
+    return "Max grid level: %d.", max_level
 add_clump_info("max_grid_level", _max_grid_level)
 
 def _min_number_density(clump):
     min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
-    return "Min number density: %.6e cm^-3." % min_n
+    return "Min number density: %.6e cm^-3.", min_n
 add_clump_info("min_number_density", _min_number_density)
 
 def _max_number_density(clump):
     max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
-    return "Max number density: %.6e cm^-3." % max_n
+    return "Max number density: %.6e cm^-3.", max_n
 add_clump_info("max_number_density", _max_number_density)
 
 def _distance_to_main_clump(clump, units="pc"):
@@ -82,6 +91,7 @@
     master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
     my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
-    return "Distance from master center of mass: %.6e %s." % \
-      (distance.in_units(units), units)
+    distance.convert_to_units("pc")
+    return "Distance from master center of mass: %%.6e %s." % units, \
+      distance.in_units(units)
 add_clump_info("distance_to_main_clump", _distance_to_main_clump)

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -13,6 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.utilities.lib.misc_utilities import \
     gravitational_binding_energy
 from yt.utilities.operator_registry import \
@@ -64,28 +66,30 @@
              (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
              (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
 
+    if use_particles:
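+    # Combine gas cells and particles into single arrays so the binding
+    # energy is computed over both components jointly.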
+        m = np.concatenate([clump["gas", "cell_mass"].in_cgs(),
+                            clump["all", "particle_mass"].in_cgs()])
+        px = np.concatenate([clump["index", "x"].in_cgs(),
+                             clump["all", "particle_position_x"].in_cgs()])
+        py = np.concatenate([clump["index", "y"].in_cgs(),
+                             clump["all", "particle_position_y"].in_cgs()])
+        pz = np.concatenate([clump["index", "z"].in_cgs(),
+                             clump["all", "particle_position_z"].in_cgs()])
+    else:
+        m = clump["gas", "cell_mass"].in_cgs()
+        px = clump["index", "x"].in_cgs()
+        py = clump["index", "y"].in_cgs()
+        pz = clump["index", "z"].in_cgs()
+
     potential = clump.data.ds.quan(G *
         gravitational_binding_energy(
-            clump["gas", "cell_mass"].in_cgs(),
-            clump["index", "x"].in_cgs(),
-            clump["index", "y"].in_cgs(),
-            clump["index", "z"].in_cgs(),
+            m, px, py, pz,
             truncate, (kinetic / G).in_cgs()),
-        kinetic.in_cgs().units)
-    
+            kinetic.in_cgs().units)
+
     if truncate and potential >= kinetic:
         return True
 
-    if use_particles:
-        potential += clump.data.ds.quan(G *
-            gravitational_binding_energy(
-                clump["all", "particle_mass"].in_cgs(),
-                clump["all", "particle_position_x"].in_cgs(),
-                clump["all", "particle_position_y"].in_cgs(),
-                clump["all", "particle_position_z"].in_cgs(),
-                truncate, ((kinetic - potential) / G).in_cgs()),
-        kinetic.in_cgs().units)
-
     return potential >= kinetic
 add_validator("gravitationally_bound", _gravitationally_bound)
 

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/analysis_modules/level_sets/tests/test_clump_finding.py
--- a/yt/analysis_modules/level_sets/tests/test_clump_finding.py
+++ b/yt/analysis_modules/level_sets/tests/test_clump_finding.py
@@ -15,16 +15,25 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import os
+import shutil
+import tempfile
+
 
 from yt.analysis_modules.level_sets.api import \
     Clump, \
     find_clumps, \
     get_lowest_clumps
+from yt.convenience import \
+    load
 from yt.frontends.stream.api import \
     load_uniform_grid
 from yt.testing import \
     assert_array_equal, \
-    assert_equal
+    assert_equal, \
+    requires_file
+from yt.utilities.answer_testing.framework import \
+    data_dir_load
 
 def test_clump_finding():
     n_c = 8
@@ -63,7 +72,6 @@
     # two leaf clumps
     assert_equal(len(leaf_clumps), 2)
 
-
     # check some clump fields
     assert_equal(master_clump.children[0]["density"][0].size, 1)
     assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
@@ -72,3 +80,58 @@
     assert_equal(master_clump.children[1]["density"][0].size, 1)
     assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
     assert_equal(master_clump.children[1]["particle_mass"].size, 0)
+
+i30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+ at requires_file(i30)
+def test_clump_tree_save():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = data_dir_load(i30)
+    data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                          (8, 'kpc'), (1, 'kpc'))
+
+    field = ("gas", "density")
+    step = 2.0
+    c_min = 10**np.floor(np.log10(data_source[field]).min())
+    c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
+
+    master_clump = Clump(data_source, field)
+    master_clump.add_info_item("center_of_mass")
+    master_clump.add_validator("min_cells", 20)
+
+    find_clumps(master_clump, c_min, c_max, step)
+    leaf_clumps = get_lowest_clumps(master_clump)
+
+    fn = master_clump.save_as_dataset(fields=["density", "particle_mass"])
+    ds2 = load(fn)
+
+    # compare clumps in the tree
+    t1 = [c for c in master_clump]
+    t2 = [c for c in ds2.tree]
+    mt1 = ds.arr([c.info["cell_mass"][1] for c in t1])
+    mt2 = ds2.arr([c["clump", "cell_mass"] for c in t2])
+    it1 = np.argsort(mt1).d.astype(int)
+    it2 = np.argsort(mt2).d.astype(int)
+    assert_array_equal(mt1[it1], mt2[it2])
+
+    for i1, i2 in zip(it1, it2):
+        ct1 = t1[i1]
+        ct2 = t2[i2]
+        assert_array_equal(ct1["gas", "density"],
+                           ct2["grid", "density"])
+        assert_array_equal(ct1["all", "particle_mass"],
+                           ct2["all", "particle_mass"])
+
+    # compare leaf clumps
+    c1 = [c for c in leaf_clumps]
+    c2 = [c for c in ds2.leaves]
+    mc1 = ds.arr([c.info["cell_mass"][1] for c in c1])
+    mc2 = ds2.arr([c["clump", "cell_mass"] for c in c2])
+    ic1 = np.argsort(mc1).d.astype(int)
+    ic2 = np.argsort(mc2).d.astype(int)
+    assert_array_equal(mc1[ic1], mc2[ic2])
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -1132,12 +1132,15 @@
                                        mask, sample_values = None,
                                        sample_type = "face",
                                        no_ghost = False):
-        vals = grid.get_vertex_centered_data(field, no_ghost = no_ghost)
+        # TODO: check if multiple fields can be passed here
+        vals = grid.get_vertex_centered_data([field], no_ghost=no_ghost)[field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
+            # TODO: is no_ghost=False correct here?
+            svals = grid.get_vertex_centered_data([sample_values])[sample_values]
         else:
             svals = None
-        sample_type = {"face":1, "vertex":2}[sample_type]
+
+        sample_type = {"face": 1, "vertex": 2}[sample_type]
         my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
                                     grid.dds, svals, sample_type)
         return my_verts
@@ -1209,15 +1212,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask,
             field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(self.surface_field)
+
+        vc_fields = [self.surface_field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[self.surface_field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f)
-                      for f in [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(
+            self.field_value, vc_data[self.surface_field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     @property
     def triangles(self):

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -310,7 +310,7 @@
         with self._field_type_state(ftype, finfo):
             if fname in self._container_fields:
                 tr = self._generate_container_field(field)
-            if finfo.particle_type:
+            if finfo.particle_type: # This is a property now
                 tr = self._generate_particle_field(field)
             else:
                 tr = self._generate_fluid_field(field)
@@ -1696,13 +1696,18 @@
 
     def _extract_isocontours_from_grid(self, grid, mask, field, value,
                                        sample_values=None):
-        vals = grid.get_vertex_centered_data(field, no_ghost=False)
+        vc_fields = [field]
         if sample_values is not None:
-            svals = grid.get_vertex_centered_data(sample_values)
-        else:
+            vc_fields.append(sample_values)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False)
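+        # sample_values may be None, in which case it is absent from vc_data.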
+        try:
+            svals = vc_data[sample_values]
+        except KeyError:
             svals = None
-        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
-                                    grid.dds, svals)
+
+        my_verts = march_cubes_grid(value, vc_data[field], mask,
+            grid.LeftEdge, grid.dds, svals)
         return my_verts
 
     def calculate_isocontour_flux(self, field, value,
@@ -1774,15 +1779,21 @@
 
     def _calculate_flux_in_grid(self, grid, mask, field, value,
                     field_x, field_y, field_z, fluxing_field = None):
-        vals = grid.get_vertex_centered_data(field)
+
+        vc_fields = [field, field_x, field_y, field_z]
+        if fluxing_field is not None:
+            vc_fields.append(fluxing_field)
+
+        vc_data = grid.get_vertex_centered_data(vc_fields)
+
         if fluxing_field is None:
-            ff = np.ones(vals.shape, dtype="float64")
+            ff = np.ones_like(vc_data[field], dtype="float64")
         else:
-            ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
-                     [field_x, field_y, field_z]]
-        return march_cubes_grid_flux(value, vals, xv, yv, zv,
-                    ff, mask, grid.LeftEdge, grid.dds)
+            ff = vc_data[fluxing_field]
+
+        return march_cubes_grid_flux(value, vc_data[field], vc_data[field_x],
+            vc_data[field_y], vc_data[field_z], ff, mask, grid.LeftEdge,
+            grid.dds)
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
                                log_space=True, cumulative=True):

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,8 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import warnings
 import weakref
 import numpy as np
+from six import string_types
 
 from yt.data_objects.data_containers import \
     YTSelectionContainer
@@ -252,33 +254,50 @@
         cube._base_grid = self
         return cube
 
-    def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
-        new_field = np.zeros(self.ActiveDimensions + 1, dtype='float64')
+    def get_vertex_centered_data(self, fields, smoothed=True, no_ghost=False):
+        _old_api = isinstance(fields, (string_types, tuple))
+        if _old_api:
+            message = (
+                'get_vertex_centered_data() requires list of fields, rather than '
+                'a single field as an argument.'
+            )
+            warnings.warn(message, DeprecationWarning, stacklevel=2)
+            fields = [fields]
+
+        # Make sure the field list has only unique entries
+        fields = list(set(fields))
+        new_fields = {}
+        for field in fields:
+            new_fields[field] = np.zeros(self.ActiveDimensions + 1, dtype='float64')
 
         if no_ghost:
-            # Ensure we have the native endianness in this array.  Avoid making
-            # a copy if possible.
-            old_field = np.asarray(self[field], dtype="=f8")
-            # We'll use the ghost zone routine, which will naturally
-            # extrapolate here.
-            input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
-            output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
-            # rf = 1 here
-            ghost_zone_interpolate(1, old_field, input_left,
-                                   new_field, output_left)
+            for field in fields:
+                # Ensure we have the native endianness in this array.  Avoid making
+                # a copy if possible.
+                old_field = np.asarray(self[field], dtype="=f8")
+                # We'll use the ghost zone routine, which will naturally
+                # extrapolate here.
+                input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
+                output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
+                # rf = 1 here
+                ghost_zone_interpolate(1, old_field, input_left,
+                                       new_fields[field], output_left)
         else:
-            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            np.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            np.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            np.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            np.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            np.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            np.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            np.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            np.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            np.multiply(new_field, 0.125, new_field)
+            cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
+            for field in fields:
+                np.add(new_fields[field], cg[field][1: ,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,1: ,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][1: ,:-1,:-1], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,1: ], new_fields[field])
+                np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field])
+                np.multiply(new_fields[field], 0.125, new_fields[field])
 
-        return new_field
+        if _old_api:
+            return new_fields[fields[0]]
+        return new_fields
 
     def select_icoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)

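The new get_vertex_centered_data() convention takes a list of fields and returns a
dictionary keyed by field; the old single-field form still works but now warns. A
minimal sketch, assuming ds is a loaded patch-based dataset (field names are
illustrative):

    # New API: request several fields at once; ghost zones are fetched in
    # a single pass and each value is a vertex-centered array.
    grid = ds.index.grids[0]
    vc_data = grid.get_vertex_centered_data(["density", "temperature"])
    rho_verts = vc_data["density"]    # shape == grid.ActiveDimensions + 1

    # Old API: a bare field name returns the array directly, but now
    # emits a DeprecationWarning.
    rho_verts = grid.get_vertex_centered_data("density")
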
diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -372,8 +372,9 @@
         for ax in "xyz"[:ds.dimensionality]:
             setattr(self, ax, ds.data[ax])
             setattr(self, "%s_bins" % ax, ds.data["%s_bins" % ax])
-            setattr(self, "%s_field" % ax,
-                    tuple(ds.parameters["%s_field" % ax]))
+            field_name = tuple(ds.parameters["%s_field" % ax])
+            setattr(self, "%s_field" % ax, field_name)
+            self.field_info[field_name] = ds.field_info[field_name]
             setattr(self, "%s_log" % ax, ds.parameters["%s_log" % ax])
             exclude_fields.extend([ax, "%s_bins" % ax,
                                    ds.parameters["%s_field" % ax][1]])
@@ -384,6 +385,7 @@
         for field in profile_fields:
             self.field_map[field[1]] = field
             self.field_data[field] = ds.data[field]
+            self.field_info[field] = ds.field_info[field]
             self.field_units[field] = ds.data[field].units
 
 class Profile1D(ProfileND):

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -783,6 +783,7 @@
         self.conditionals = ensure_list(conditionals)
         self.base_object = data_source
         self._selector = None
+        self._particle_mask = {}
         # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
         # ires and get_data
 
@@ -805,7 +806,8 @@
             f = self.base_object[field]
             if f.shape != ind.shape:
                 parent = getattr(self, "parent", self.base_object)
-                self.field_data[field] = parent[field][self._part_ind]
+                self.field_data[field] = \
+                  parent[field][self._part_ind(field[0])]
             else:
                 self.field_data[field] = self.base_object[field][ind]
 
@@ -835,21 +837,22 @@
                 np.logical_and(res, ind, ind)
         return ind
 
-    _particle_mask = None
-    @property
-    def _part_ind(self):
-        if self._particle_mask is None:
+    def _part_ind(self, ptype):
+        if self._particle_mask.get(ptype) is None:
             parent = getattr(self, "parent", self.base_object)
             units = "code_length"
             mask = points_in_cells(
-                self["x"].to(units), self["y"].to(units),
-                self["z"].to(units), self["dx"].to(units),
-                self["dy"].to(units), self["dz"].to(units),
-                parent["particle_position_x"].to(units),
-                parent["particle_position_y"].to(units),
-                parent["particle_position_z"].to(units))
-            self._particle_mask = mask
-        return self._particle_mask
+                self[("index", "x")].to(units),
+                self[("index", "y")].to(units),
+                self[("index", "z")].to(units),
+                self[("index", "dx")].to(units),
+                self[("index", "dy")].to(units),
+                self[("index", "dz")].to(units),
+                parent[(ptype, "particle_position_x")].to(units),
+                parent[(ptype, "particle_position_y")].to(units),
+                parent[(ptype, "particle_position_z")].to(units))
+            self._particle_mask[ptype] = mask
+        return self._particle_mask[ptype]
 
     @property
     def icoords(self):

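With this change a cut region computes and caches one particle mask per particle
type instead of sharing a single cached mask across all types. A minimal sketch,
assuming ds is a loaded dataset with an "io" particle type (the condition and
field names are illustrative):

    ad = ds.all_data()
    hot = ad.cut_region(['obj["temperature"] > 1e6'])
    # Accessing a particle field runs points_in_cells() for that ptype only;
    # the result is cached in hot._particle_mask["io"].
    masses = hot["io", "particle_mass"]
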
diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -20,6 +20,7 @@
 import os
 import time
 import weakref
+import warnings
 
 from collections import defaultdict
 from yt.extern.six import add_metaclass, string_types
@@ -214,7 +215,7 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None, 
+    def __init__(self, filename, dataset_type=None, file_style=None,
                  units_override=None, unit_system="cgs"):
         """
         Base class for generating new output types.  Principally consists of
@@ -338,7 +339,7 @@
         in that directory, and a list of subdirectories.  It should return a
         list of filenames (defined relative to the supplied directory) and a
         boolean as to whether or not further directories should be recursed.
-        
+
         This function doesn't need to catch all possibilities, nor does it need
         to filter possibilities.
         """
@@ -938,7 +939,7 @@
             "dangerous option that may yield inconsistent results, and must be "
             "used very carefully, and only if you know what you want from it.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
-                          ("velocity","cm/s"), ("magnetic","gauss"), 
+                          ("velocity","cm/s"), ("magnetic","gauss"),
                           ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)
             if val is not None:
@@ -1043,7 +1044,7 @@
         self._quan = functools.partial(YTQuantity, registry=self.unit_registry)
         return self._quan
 
-    def add_field(self, name, function=None, **kwargs):
+    def add_field(self, name, function=None, sampling_type=None, **kwargs):
         """
         Dataset-specific call to add_field
 
@@ -1081,7 +1082,18 @@
         if not override and name in self.field_info:
             mylog.warning("Field %s already exists. To override use " +
                           "force_override=True.", name)
-        self.field_info.add_field(name, function=function, **kwargs)
+        if kwargs.setdefault('particle_type', False):
+            if sampling_type is not None and sampling_type != "particle":
+                raise RuntimeError("Clashing definition of 'sampling_type' and "
+                                   "'particle_type'. Note that 'particle_type' is "
+                                   "deprecated. Please just use 'sampling_type'.")
+            else:
+                sampling_type = "particle"
+        if sampling_type is None:
+            warnings.warn("Because 'sampling_type' not specified, yt will "
+                          "assume a cell 'sampling_type'")
+            sampling_type = "cell"
+        self.field_info.add_field(name, sampling_type, function=function, **kwargs)
         self.field_info._show_field_errors.append(name)
         deps, _ = self.field_info.check_derived_fields([name])
         self.field_dependencies.update(deps)

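Dataset.add_field() (and the registry call it forwards to) now takes an explicit
sampling_type. A minimal sketch of the new convention (the field name and function
are illustrative):

    def _double_density(field, data):
        return 2.0 * data["gas", "density"]

    # Preferred: state the sampling type explicitly.
    ds.add_field(("gas", "double_density"), function=_double_density,
                 sampling_type="cell", units="g/cm**3")

    # Omitting sampling_type warns and falls back to "cell"; combining
    # particle_type=True with sampling_type="cell" raises a RuntimeError.
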
diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -59,15 +59,15 @@
         rv = data.ds.arr(rv, input_units = data["index", "x"].units)
         return xv * rv[...,1] - yv * rv[...,0]
 
-    registry.add_field((ftype, "specific_angular_momentum_x"),
+    registry.add_field((ftype, "specific_angular_momentum_x"), sampling_type="cell", 
                         function=_specific_angular_momentum_x,
                         units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
-    registry.add_field((ftype, "specific_angular_momentum_y"),
+    registry.add_field((ftype, "specific_angular_momentum_y"), sampling_type="cell", 
                         function=_specific_angular_momentum_y,
                         units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
-    registry.add_field((ftype, "specific_angular_momentum_z"),
+    registry.add_field((ftype, "specific_angular_momentum_z"), sampling_type="cell", 
                         function=_specific_angular_momentum_z,
                         units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
@@ -78,7 +78,7 @@
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
-    registry.add_field((ftype, "angular_momentum_x"),
+    registry.add_field((ftype, "angular_momentum_x"), sampling_type="cell", 
                        function=_angular_momentum_x,
                        units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
@@ -86,7 +86,7 @@
     def _angular_momentum_y(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_y"]
-    registry.add_field((ftype, "angular_momentum_y"),
+    registry.add_field((ftype, "angular_momentum_y"), sampling_type="cell", 
                        function=_angular_momentum_y,
                        units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
@@ -94,7 +94,7 @@
     def _angular_momentum_z(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_z"]
-    registry.add_field((ftype, "angular_momentum_z"),
+    registry.add_field((ftype, "angular_momentum_z"), sampling_type="cell", 
                        function=_angular_momentum_z,
                        units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -52,7 +52,7 @@
         """
         return np.sqrt(3.0 * np.pi / (16.0 * G * data[ftype, "density"]))
 
-    registry.add_field((ftype, "dynamical_time"),
+    registry.add_field((ftype, "dynamical_time"), sampling_type="cell", 
                        function=_dynamical_time,
                        units=unit_system["time"])
 
@@ -65,7 +65,7 @@
              (data[ftype, "density"]**(-0.5)))
         return u
 
-    registry.add_field((ftype, "jeans_mass"),
+    registry.add_field((ftype, "jeans_mass"), sampling_type="cell", 
                        function=_jeans_mass,
                        units=unit_system["mass"])
 
@@ -88,7 +88,7 @@
                                     - 1.6667 * logT0**1  - 0.2193 * logT0)),
                            "") # add correct units here
 
-    registry.add_field((ftype, "chandra_emissivity"),
+    registry.add_field((ftype, "chandra_emissivity"), sampling_type="cell", 
                        function=_chandra_emissivity,
                        units="") # add correct units here
 
@@ -102,7 +102,7 @@
         nenh *= 0.5*(1.+X_H)*X_H*data["cell_volume"]
         return nenh
     
-    registry.add_field((ftype, "emission_measure"),
+    registry.add_field((ftype, "emission_measure"), sampling_type="cell", 
                        function=_emission_measure,
                        units=unit_system["number_density"])
 
@@ -112,7 +112,7 @@
                            * data[ftype, "temperature"].to_ndarray()**0.5,
                            "") # add correct units here
 
-    registry.add_field((ftype, "xray_emissivity"),
+    registry.add_field((ftype, "xray_emissivity"), sampling_type="cell", 
                        function=_xray_emissivity,
                        units="") # add correct units here
 
@@ -121,7 +121,7 @@
         # Only useful as a weight_field for temperature, metallicity, velocity
         return data["density"]*data["density"]*data["kT"]**-0.25/mh/mh
 
-    registry.add_field((ftype,"mazzotta_weighting"),
+    registry.add_field((ftype,"mazzotta_weighting"), sampling_type="cell", 
                        function=_mazzotta_weighting,
                        units="keV**-0.25*cm**-6")
 
@@ -135,7 +135,7 @@
         # See issue #1225
         return -scale * vel * data[ftype, "density"]
 
-    registry.add_field((ftype, "sz_kinetic"),
+    registry.add_field((ftype, "sz_kinetic"), sampling_type="cell", 
                        function=_sz_kinetic,
                        units=unit_system["length"]**-1,
                        validators=[
@@ -145,6 +145,6 @@
         scale = 0.88 / mh * kboltz / (me * clight*clight) * sigma_thompson
         return scale * data[ftype, "density"] * data[ftype, "temperature"]
 
-    registry.add_field((ftype, "szy"),
+    registry.add_field((ftype, "szy"), sampling_type="cell", 
                        function=_szy,
                        units=unit_system["length"]**-1)

diff -r af39415bb4c4f8eedd995b58dbf984e503d1967b -r 64011277dfc00865ba800cef5e00505a5ec7fefb yt/fields/cosmology_fields.py
--- a/yt/fields/cosmology_fields.py
+++ b/yt/fields/cosmology_fields.py
@@ -45,14 +45,14 @@
         return data[ftype, "density"] + \
           data[ftype, "dark_matter_density"]
 
-    registry.add_field((ftype, "matter_density"),
+    registry.add_field((ftype, "matter_density"), sampling_type="cell", 
                        function=_matter_density,
                        units=unit_system["density"])
 
     def _matter_mass(field, data):
         return data[ftype, "matter_density"] * data["index", "cell_volume"]
 
-    registry.add_field((ftype, "matter_mass"),
+    registry.add_field((ftype, "matter_mass"), sampling_type="cell", 
                        function=_matter_mass,
                        units=unit_system["mass"])
 
@@ -65,7 +65,7 @@
         return data[ftype, "matter_density"] / \
           co.critical_density(data.ds.current_redshift)
 
-    registry.add_field((ftype, "overdensity"),
+    registry.add_field((ftype, "overdensity"), sampling_type="cell", 
                        function=_overdensity,
                        units="")
 
@@ -83,7 +83,7 @@
         return data[ftype, "density"] / omega_baryon / co.critical_density(0.0) / \
           (1.0 + data.ds.current_redshift)**3
 
-    registry.add_field((ftype, "baryon_overdensity"),
+    registry.add_field((ftype, "baryon_overdensity"), sampling_type="cell", 
                        function=_baryon_overdensity,
                        units="",
                        validators=[ValidateParameter("omega_baryon")])
@@ -100,7 +100,7 @@
           co.critical_density(0.0) / \
           (1.0 + data.ds.current_redshift)**3
 
-    registry.add_field((ftype, "matter_overdensity"),
+    registry.add_field((ftype, "matter_overdensity"), sampling_type="cell", 
                        function=_matter_overdensity,
                        units="")
 
@@ -109,7 +109,7 @@
         virial_radius = data.get_field_parameter("virial_radius")
         return data["radius"] / virial_radius
 
-    registry.add_field(("index", "virial_radius_fraction"),
+    registry.add_field(("index", "virial_radius_fraction"), sampling_type="cell", 
                        function=_virial_radius_fraction,
                        validators=[ValidateParameter("virial_radius")],
                        units="")
@@ -137,7 +137,7 @@
         return (1.5 * (co.hubble_constant / speed_of_light_cgs)**2 * (dl * dls / ds) * \
           data[ftype, "matter_overdensity"]).in_units("1/cm")
 
-    registry.add_field((ftype, "weak_lensing_convergence"),
+    registry.add_field((ftype, "weak_lensing_convergence"), sampling_type="cell", 
                        function=_weak_lensing_convergence,
                        units=unit_system["length"]**-1,
         validators=[ValidateParameter("observer_redshift"),

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/1d70bf412499/
Changeset:   1d70bf412499
Branch:      yt
User:        atmyers
Date:        2016-09-30 17:54:01+00:00
Summary:     Merged in MatthewTurk/yt (pull request #2245)

Change pixelizers to composite
Affected #:  15 files

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -26,13 +26,16 @@
 sizes into a fixed-size array that appears like an image.  This process is that
 of pixelization, which yt handles transparently internally.  You can access
 this functionality by constructing a
-:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` (or
-:class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`) and
-supplying to it your :class:`~yt.data_objects.data_containers.YTSelectionContainer2D`
+:class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` and supplying
+to it your :class:`~yt.data_objects.data_containers.YTSelectionContainer2D`
 object, as well as some information about how you want the final image to look.
 You can specify both the bounds of the image (in the appropriate x-y plane) and
-the resolution of the output image.  You can then have yt pixelize any
-field you like.
+the resolution of the output image.  You can then have yt pixelize any field
+you like.
+
+.. note:: In previous versions of yt, there was a special class of
+          FixedResolutionBuffer for off-axis slices.  This is no longer
+          necessary.
 
 To create :class:`~yt.data_objects.data_containers.YTSelectionContainer2D` objects, you can
 access them as described in :ref:`data-objects`, specifically the section

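A minimal sketch of the unified interface described above, assuming the
IsolatedGalaxy sample dataset (width and resolution are illustrative):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # Axis-aligned slice:
    frb = ds.slice("z", 0.5).to_frb((1.0, "Mpc"), 512)

    # Cutting plane: to_frb() now returns the same FixedResolutionBuffer
    # class rather than a dedicated oblique variant.
    cut = ds.cutting([0.2, 0.3, 0.5], "c")
    frb_oblique = cut.to_frb((1.0, "Mpc"), 512)

    print(frb["density"].shape, frb_oblique["density"].shape)
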
diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -49,7 +49,6 @@
    ~yt.visualization.fixed_resolution.FixedResolutionBuffer
    ~yt.visualization.fixed_resolution.ParticleImageBuffer
    ~yt.visualization.fixed_resolution.CylindricalFixedResolutionBuffer
-   ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
    ~yt.visualization.fixed_resolution.OffAxisProjectionFixedResolutionBuffer
 
 Data Sources

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -39,7 +39,7 @@
   local_owls_000:
     - yt/frontends/owls/tests/test_outputs.py
 
-  local_pw_006:
+  local_pw_008:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -50,7 +50,7 @@
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
 
-  local_varia_004:
+  local_varia_005:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -463,12 +463,12 @@
         self.fields = ensure_list(fields) + [k for k in self.field_data.keys()
                                              if k not in self._key_fields]
         from yt.visualization.plot_window import get_oblique_window_parameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
         (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, self.ds)
         pw = PWViewerMPL(
             self, bounds, fields=self.fields, origin='center-window', 
             periodic=False, oblique=True,
-            frb_generator=ObliqueFixedResolutionBuffer, 
+            frb_generator=FixedResolutionBuffer, 
             plot_type='OffAxisSlice')
         if axes_unit is not None:
             pw.set_axes_unit(axes_unit)
@@ -476,8 +476,8 @@
         return pw
 
     def to_frb(self, width, resolution, height=None, periodic=False):
-        r"""This function returns an ObliqueFixedResolutionBuffer generated
-        from this object.
+        r"""This function returns a FixedResolutionBuffer generated from this
+        object.
 
         A FixedResolutionBuffer is an object that accepts a
         variable-resolution 2D object and transforms it into an NxM bitmap that
@@ -526,9 +526,9 @@
             height = self.ds.quan(height[0], height[1])
         if not iterable(resolution):
             resolution = (resolution, resolution)
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
+        from yt.visualization.fixed_resolution import FixedResolutionBuffer
         bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution,
+        frb = FixedResolutionBuffer(self, bounds, resolution,
                                            periodic=periodic)
         return frb
 

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -128,22 +128,24 @@
         period[1] = self.period[self.y_axis[dim]]
         if hasattr(period, 'in_units'):
             period = period.in_units("code_length").d
-        buff = pixelize_cartesian(data_source['px'], data_source['py'],
+        buff = np.zeros((size[1], size[0]), dtype="f8")
+        pixelize_cartesian(buff, data_source['px'], data_source['py'],
                              data_source['pdx'], data_source['pdy'],
-                             data_source[field], size[0], size[1],
+                             data_source[field],
                              bounds, int(antialias),
-                             period, int(periodic)).transpose()
+                             period, int(periodic))
         return buff
 
     def _oblique_pixelize(self, data_source, field, bounds, size, antialias):
-        indices = np.argsort(data_source['dx'])[::-1]
-        buff = pixelize_off_axis_cartesian(
+        indices = np.argsort(data_source['pdx'])[::-1]
+        buff = np.zeros((size[1], size[0]), dtype="f8")
+        pixelize_off_axis_cartesian(buff,
                               data_source['x'], data_source['y'],
                               data_source['z'], data_source['px'],
                               data_source['py'], data_source['pdx'],
                               data_source['pdy'], data_source['pdz'],
                               data_source.center, data_source._inv_mat, indices,
-                              data_source[field], size[0], size[1], bounds).transpose()
+                              data_source[field], bounds)
         return buff
 
     def convert_from_cartesian(self, coord):

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -113,19 +113,22 @@
         period[1] = self.period[self.y_axis[dim]]
         if hasattr(period, 'in_units'):
             period = period.in_units("code_length").d
-        buff = pixelize_cartesian(data_source['px'], data_source['py'],
+        buff = np.zeros(size, dtype="f8")
+        pixelize_cartesian(buff, data_source['px'], data_source['py'],
                                   data_source['pdx'], data_source['pdy'],
-                                  data_source[field], size[0], size[1],
+                                  data_source[field],
                                   bounds, int(antialias),
-                                  period, int(periodic)).transpose()
+                                  period, int(periodic))
         return buff
 
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
-        buff = pixelize_cylinder(data_source['px'],
-                                 data_source['pdx'],
-                                 data_source['py'],
-                                 data_source['pdy'],
-                                 size, data_source[field], bounds)
+        buff = np.zeros((size[1], size[0]), dtype="f8")
+        pixelize_cylinder(buff,
+                          data_source['px'],
+                          data_source['pdx'],
+                          data_source['py'],
+                          data_source['pdy'],
+                          data_source[field], bounds)
         return buff
 
     _x_pairs = (('r', 'theta'), ('z', 'r'), ('theta', 'r'))

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -203,9 +203,9 @@
         else:
             # We should never get here!
             raise NotImplementedError
-        buff = pixelize_cylinder(r, data_source['pdy'],
-                                 px, pdx,
-                                 size, data_source[field], bounds)
+        buff = np.zeros((size[1], size[0]), dtype="f8")
+        pixelize_cylinder(buff, r, data_source['pdy'],
+                          px, pdx, data_source[field], bounds)
         if do_transpose:
             buff = buff.transpose()
         return buff

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -123,19 +123,22 @@
     def _cyl_pixelize(self, data_source, field, bounds, size, antialias,
                       dimension):
         name = self.axis_name[dimension]
+        buff = np.zeros((size[1], size[0]), dtype="f8")
         if name == 'theta':
-            buff = pixelize_cylinder(data_source['px'],
-                                     data_source['pdx'],
-                                     data_source['py'],
-                                     data_source['pdy'],
-                                     size, data_source[field], bounds)
+            pixelize_cylinder(buff,
+                              data_source['px'],
+                              data_source['pdx'],
+                              data_source['py'],
+                              data_source['pdy'],
+                              data_source[field], bounds)
         elif name == 'phi':
-            buff = pixelize_cylinder(data_source['px'],
-                                     data_source['pdx'],
-                                     data_source['py'],
-                                     data_source['pdy'],
-                                     size, data_source[field], bounds)
-            buff = buff.transpose()
+            # Note that we feed in buff.T here
+            pixelize_cylinder(buff.T,
+                             data_source['px'],
+                             data_source['pdx'],
+                             data_source['py'],
+                             data_source['pdy'],
+                             data_source[field], bounds)
         else:
             raise RuntimeError
         return buff

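The 'phi' branch above exploits a NumPy property: buff.T is a view that shares
memory with buff, so filling the transposed view writes the transposed image into
the original buffer without a copy or a post-hoc transpose. A minimal standalone
illustration:

    import numpy as np

    buff = np.zeros((2, 3))
    view = buff.T             # a view; no data are copied
    view[0, 1] = 5.0          # writes land in buff[1, 0]
    assert buff[1, 0] == 5.0
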
diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -54,12 +54,13 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_cartesian(np.ndarray[np.float64_t, ndim=1] px,
-                       np.ndarray[np.float64_t, ndim=1] py,
-                       np.ndarray[np.float64_t, ndim=1] pdx,
-                       np.ndarray[np.float64_t, ndim=1] pdy,
-                       np.ndarray[np.float64_t, ndim=1] data,
-                       int cols, int rows, bounds,
+def pixelize_cartesian(np.float64_t[:,:] buff,
+                       np.float64_t[:] px,
+                       np.float64_t[:] py,
+                       np.float64_t[:] pdx,
+                       np.float64_t[:] pdy,
+                       np.float64_t[:] data,
+                       bounds,
                        int antialias = 1,
                        period = None,
                        int check_period = 1,
@@ -78,7 +79,6 @@
     cdef int yiter[2]
     cdef np.float64_t xiterv[2]
     cdef np.float64_t yiterv[2]
-    cdef np.ndarray[np.float64_t, ndim=2] my_array
     if period is not None:
         period_x = period[0]
         period_y = period[1]
@@ -88,18 +88,15 @@
     y_max = bounds[3]
     width = x_max - x_min
     height = y_max - y_min
-    px_dx = width / (<np.float64_t> rows)
-    px_dy = height / (<np.float64_t> cols)
+    px_dx = width / (<np.float64_t> buff.shape[1])
+    px_dy = height / (<np.float64_t> buff.shape[0])
     ipx_dx = 1.0 / px_dx
     ipx_dy = 1.0 / px_dy
-    if rows == 0 or cols == 0:
-        raise YTPixelizeError("Cannot scale to zero size")
     if px.shape[0] != py.shape[0] or \
        px.shape[0] != pdx.shape[0] or \
        px.shape[0] != pdy.shape[0] or \
        px.shape[0] != data.shape[0]:
         raise YTPixelizeError("Arrays are not of correct shape.")
-    my_array = np.zeros((rows, cols), "float64")
     xiter[0] = yiter[0] = 0
     xiterv[0] = yiterv[0] = 0.0
     # Here's a basic outline of what we're going to do here.  The xiter and
@@ -120,6 +117,31 @@
     # (lr) and then iterate up to "right column" (rc) and "upper row" (rr),
     # depositing into them the data value.  Overlap computes the relative
     # overlap of a data value with a pixel.
+    # 
+    # NOTE ON ROWS AND COLUMNS:
+    #
+    #   The way that images are plotted in matplotlib is somewhat different
+    #   from what most might expect.  The first axis of the plotted array is
+    #   what varies along the y axis.  So for instance, if you supply
+    #   origin='lower' and plot the results of an mgrid operation, at a fixed
+    #   'y' value you will see the results of that array held constant in the
+    #   first dimension.  Here is some example code:
+    #
+    #   import matplotlib.pyplot as plt
+    #   import numpy as np
+    #   x, y = np.mgrid[0:1:100j,0:1:100j]
+    #   plt.imshow(x, interpolation='nearest', origin='lower')
+    #   plt.imshow(y, interpolation='nearest', origin='lower')
+    #
+    #   The values in the image:
+    #       lower left:  arr[0,0]
+    #       lower right: arr[0,-1]
+    #       upper left:  arr[-1,0]
+    #       upper right: arr[-1,-1]
+    #
+    #   So what we want here is to fill an array such that we fill:
+    #       first axis : y_min .. y_max
+    #       second axis: x_min .. x_max
     with nogil:
         for p in range(px.shape[0]):
             xiter[1] = yiter[1] = 999
@@ -161,8 +183,10 @@
                     # truncated, but no similar truncation was done in the
                     # comparison of j to rc (double).  So give ourselves a
                     # bonus row and bonus column here.
-                    rc = <int> fmin(((xsp+dxsp-x_min)*ipx_dx + 1), rows)
-                    rr = <int> fmin(((ysp+dysp-y_min)*ipx_dy + 1), cols)
+                    rc = <int> fmin(((xsp+dxsp-x_min)*ipx_dx + 1), buff.shape[1])
+                    rr = <int> fmin(((ysp+dysp-y_min)*ipx_dy + 1), buff.shape[0])
+                    # Note that we're iterating here over *y* in the i
+                    # direction.  See the note above about this.
                     for i in range(lr, rr):
                         lypx = px_dy * i + y_min
                         rypx = px_dy * (i+1) + y_min
@@ -187,7 +211,7 @@
                                             fabs(cy - (ysp-dysp)))
                                 ld_y *= ipx_dy
                                 if ld_x <= line_width or ld_y <= line_width:
-                                    my_array[j,i] = 1.0
+                                    buff[i,j] = 1.0
                             elif antialias == 1:
                                 overlap1 = ((fmin(rxpx, xsp+dxsp)
                                            - fmax(lxpx, (xsp-dxsp)))*ipx_dx)
@@ -200,16 +224,15 @@
                                 # This will reduce artifacts if we ever move to
                                 # compositing instead of replacing bitmaps.
                                 if overlap1 * overlap2 == 0.0: continue
-                                my_array[j,i] += (dsp * overlap1) * overlap2
+                                buff[i,j] += (dsp * overlap1) * overlap2
                             else:
-                                my_array[j,i] = dsp
-
-    return my_array
+                                buff[i,j] = dsp
 
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def pixelize_off_axis_cartesian(
+                       np.float64_t[:,:] buff,
                        np.float64_t[:] x,
                        np.float64_t[:] y,
                        np.float64_t[:] z,
@@ -222,7 +245,7 @@
                        np.float64_t[:,:] inv_mat,
                        np.int64_t[:] indices,
                        np.float64_t[:] data,
-                       int cols, int rows, bounds):
+                       bounds):
     cdef np.float64_t x_min, x_max, y_min, y_max
     cdef np.float64_t width, height, px_dx, px_dy, ipx_dx, ipx_dy, md
     cdef int i, j, p, ip
@@ -231,7 +254,6 @@
     cdef np.float64_t xsp, ysp, zsp, dxsp, dysp, dzsp, dsp
     cdef np.float64_t pxsp, pysp, cxpx, cypx, cx, cy, cz
     # Some periodicity helpers
-    cdef np.ndarray[np.float64_t, ndim=2] my_array
     cdef np.ndarray[np.int64_t, ndim=2] mask
     x_min = bounds[0]
     x_max = bounds[1]
@@ -239,12 +261,10 @@
     y_max = bounds[3]
     width = x_max - x_min
     height = y_max - y_min
-    px_dx = width / (<np.float64_t> rows)
-    px_dy = height / (<np.float64_t> cols)
+    px_dx = width / (<np.float64_t> buff.shape[1])
+    px_dy = height / (<np.float64_t> buff.shape[0])
     ipx_dx = 1.0 / px_dx
     ipx_dy = 1.0 / px_dy
-    if rows == 0 or cols == 0:
-        raise YTPixelizeError("Cannot scale to zero size")
     if px.shape[0] != py.shape[0] or \
        px.shape[0] != pdx.shape[0] or \
        px.shape[0] != pdy.shape[0] or \
@@ -252,8 +272,7 @@
        px.shape[0] != indices.shape[0] or \
        px.shape[0] != data.shape[0]:
         raise YTPixelizeError("Arrays are not of correct shape.")
-    my_array = np.zeros((rows, cols), "float64")
-    mask = np.zeros((rows, cols), "int64")
+    mask = np.zeros((buff.shape[0], buff.shape[1]), "int64")
     with nogil:
         for ip in range(indices.shape[0]):
             p = indices[ip]
@@ -275,8 +294,8 @@
                 continue
             lc = <int> fmax(((pxsp - md - x_min)*ipx_dx),0)
             lr = <int> fmax(((pysp - md - y_min)*ipx_dy),0)
-            rc = <int> fmin(((pxsp + md - x_min)*ipx_dx + 1), rows)
-            rr = <int> fmin(((pysp + md - y_min)*ipx_dy + 1), cols)
+            rc = <int> fmin(((pxsp + md - x_min)*ipx_dx + 1), buff.shape[1])
+            rr = <int> fmin(((pysp + md - y_min)*ipx_dy + 1), buff.shape[0])
             for i in range(lr, rr):
                 cypx = px_dy * (i + 0.5) + y_min
                 for j in range(lc, rc):
@@ -289,40 +308,35 @@
                        fabs(zsp - cz) * 0.99 > dzsp:
                         continue
                     mask[i, j] += 1
-                    my_array[i, j] += dsp
-    my_array /= mask
-    return my_array.T
-
+                    buff[i, j] += dsp
+    for i in range(buff.shape[0]):
+        for j in range(buff.shape[1]):
+            if mask[i,j] == 0: continue
+            buff[i,j] /= mask[i,j]
 
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_cylinder(np.ndarray[np.float64_t, ndim=1] radius,
-                      np.ndarray[np.float64_t, ndim=1] dradius,
-                      np.ndarray[np.float64_t, ndim=1] theta,
-                      np.ndarray[np.float64_t, ndim=1] dtheta,
-                      buff_size,
-                      np.ndarray[np.float64_t, ndim=1] field,
-                      extents, input_img = None):
+def pixelize_cylinder(np.float64_t[:,:] buff,
+                      np.float64_t[:] radius,
+                      np.float64_t[:] dradius,
+                      np.float64_t[:] theta,
+                      np.float64_t[:] dtheta,
+                      np.float64_t[:] field,
+                      extents):
 
-    cdef np.ndarray[np.float64_t, ndim=2] img
     cdef np.float64_t x, y, dx, dy, r0, theta0
     cdef np.float64_t rmax, x0, y0, x1, y1
     cdef np.float64_t r_i, theta_i, dr_i, dtheta_i, dthetamin
     cdef np.float64_t costheta, sintheta
     cdef int i, pi, pj
-
-    imax = radius.argmax()
+    
+    cdef int imax = np.asarray(radius).argmax()
     rmax = radius[imax] + dradius[imax]
 
-    if input_img is None:
-        img = np.zeros((buff_size[0], buff_size[1]))
-        img[:] = np.nan
-    else:
-        img = input_img
     x0, x1, y0, y1 = extents
-    dx = (x1 - x0) / img.shape[0]
-    dy = (y1 - y0) / img.shape[1]
+    dx = (x1 - x0) / buff.shape[1]
+    dy = (y1 - y0) / buff.shape[0]
     cdef np.float64_t rbounds[2]
     cdef np.float64_t corners[8]
     # Find our min and max r
@@ -371,16 +385,12 @@
                 x = r_i * sintheta
                 pi = <int>((x - x0)/dx)
                 pj = <int>((y - y0)/dy)
-                if pi >= 0 and pi < img.shape[0] and \
-                   pj >= 0 and pj < img.shape[1]:
-                    if img[pi, pj] != img[pi, pj]:
-                        img[pi, pj] = 0.0
-                    img[pi, pj] = field[i]
+                if pi >= 0 and pi < buff.shape[0] and \
+                   pj >= 0 and pj < buff.shape[1]:
+                    buff[pi, pj] = field[i]
                 r_i += 0.5*dx
             theta_i += dthetamin
 
-    return img
-
 cdef void aitoff_thetaphi_to_xy(np.float64_t theta, np.float64_t phi,
                                 np.float64_t *x, np.float64_t *y):
     cdef np.float64_t z = math.sqrt(1 + math.cos(phi) * math.cos(theta / 2.0))
@@ -390,12 +400,12 @@
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def pixelize_aitoff(np.ndarray[np.float64_t, ndim=1] theta,
-                    np.ndarray[np.float64_t, ndim=1] dtheta,
-                    np.ndarray[np.float64_t, ndim=1] phi,
-                    np.ndarray[np.float64_t, ndim=1] dphi,
+def pixelize_aitoff(np.float64_t[:] theta,
+                    np.float64_t[:] dtheta,
+                    np.float64_t[:] phi,
+                    np.float64_t[:] dphi,
                     buff_size,
-                    np.ndarray[np.float64_t, ndim=1] field,
+                    np.float64_t[:] field,
                     extents, input_img = None,
                     np.float64_t theta_offset = 0.0,
                     np.float64_t phi_offset = 0.0):

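Every pixelizer in this file now follows the same composite convention: the caller
allocates a (ny, nx) buffer, the routine fills it in place, and nothing is
returned, which is why the .transpose() calls disappear from the callers below. A
minimal sketch against the new pixelize_cartesian signature (the single-patch
input data are illustrative):

    import numpy as np
    from yt.utilities.lib.pixelization_routines import pixelize_cartesian

    # One square patch of value 1.0 centered at (0.5, 0.5):
    px  = np.array([0.5]);  py  = np.array([0.5])
    pdx = np.array([0.25]); pdy = np.array([0.25])
    data = np.array([1.0])

    ny, nx = 400, 800
    buff = np.zeros((ny, nx), dtype="f8")  # first axis is y, second is x
    pixelize_cartesian(buff, px, py, pdx, pdy, data,
                       (0.0, 1.0, 0.0, 1.0),  # bounds
                       1,                     # antialias
                       (1.0, 1.0), 1)         # period, check_period
    # buff now holds the image, ready for imshow(origin='lower').
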
diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -18,12 +18,13 @@
 from yt.funcs import \
     get_output_filename, \
     mylog, \
-    ensure_list
+    ensure_list, \
+    deprecate
 from .volume_rendering.api import off_axis_projection
 from .fixed_resolution_filters import apply_filter, filter_registry
 from yt.data_objects.image_array import ImageArray
 from yt.utilities.lib.pixelization_routines import \
-    pixelize_cylinder, pixelize_off_axis_cartesian
+    pixelize_cylinder
 from yt.utilities.lib.api import add_points_to_greyscale_image
 from yt.frontends.stream.api import load_uniform_grid
 
@@ -51,9 +52,8 @@
     Parameters
     ----------
     data_source : :class:`yt.data_objects.construction_data_containers.YTQuadTreeProj` or :class:`yt.data_objects.selection_data_containers.YTSlice`
-        This is the source to be pixelized, which can be a projection or a
-        slice.  (For cutting planes, see
-        `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
+        This is the source to be pixelized, which can be a projection, slice or
+        cutting plane.
     bounds : sequence of floats
         Bounds are the min and max in the image plane that we want our
         image to cover.  It's in the order of (xmin, xmax, ymin, ymax),
@@ -67,12 +67,6 @@
         This can be true or false, and governs whether the pixelization
         will span the domain boundaries.
 
-    See Also
-    --------
-    :class:`yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer` : A similar object,
-                                                     used for cutting
-                                                     planes.
-
     Examples
     --------
     To make a projection and then several images, you can generate a
@@ -494,6 +488,11 @@
             self.__dict__['apply_' + filtername] = \
                 types.MethodType(filt, self)
 
+class ObliqueFixedResolutionBuffer(FixedResolutionBuffer):
+    @deprecate("FixedResolutionBuffer")
+    def __init__(self, *args, **kwargs):
+        super(ObliqueFixedResolutionBuffer, self).__init__(*args, **kwargs)
+
 class CylindricalFixedResolutionBuffer(FixedResolutionBuffer):
     """
     This object is a subclass of
@@ -515,41 +514,13 @@
 
     def __getitem__(self, item) :
         if item in self.data: return self.data[item]
-        buff = pixelize_cylinder(self.data_source["r"], self.data_source["dr"],
-                                 self.data_source["theta"], self.data_source["dtheta"],
-                                 self.buff_size, self.data_source[item].astype("float64"),
-                                 self.radius)
+        buff = np.zeros(self.buff_size, dtype="f8")
+        pixelize_cylinder(buff, self.data_source["r"], self.data_source["dr"],
+                          self.data_source["theta"], self.data_source["dtheta"],
+                          self.data_source[item].astype("float64"), self.radius)
         self[item] = buff
         return buff
 
-class ObliqueFixedResolutionBuffer(FixedResolutionBuffer):
-    """
-    This object is a subclass of
-    :class:`yt.visualization.fixed_resolution.FixedResolutionBuffer`
-    that supports non-aligned input data objects, primarily cutting planes.
-    """
-    def __getitem__(self, item):
-        if item in self.data: return self.data[item]
-        indices = np.argsort(self.data_source['dx'])[::-1]
-        bounds = []
-        for b in self.bounds:
-            if hasattr(b, "in_units"):
-                b = float(b.in_units("code_length"))
-            bounds.append(b)
-        buff = pixelize_off_axis_cartesian(
-                               self.data_source['x'],   self.data_source['y'],   self.data_source['z'],
-                               self.data_source['px'],  self.data_source['py'],
-                               self.data_source['pdx'], self.data_source['pdy'], self.data_source['pdz'],
-                               self.data_source.center, self.data_source._inv_mat, indices,
-                               self.data_source[item],
-                               self.buff_size[0], self.buff_size[1],
-                               bounds).transpose()
-        ia = ImageArray(buff, input_units=self.data_source[item].units,
-                        info=self._get_info(item))
-        self[item] = ia
-        return ia
-
-
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
     """
     This object is a subclass of

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -392,8 +392,10 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0] / self.factor
-        ny = plot.image._A.shape[1] / self.factor
+        # See the note about rows/columns in the pixelizer for more information
+        # on why we choose the bounds we do
+        nx = plot.image._A.shape[1] / self.factor
+        ny = plot.image._A.shape[0] / self.factor
         # periodicity
         ax = plot.data.axis
         ds = plot.data.ds
@@ -410,18 +412,18 @@
         if self.bv_y != 0.0:
             # Workaround for 0.0 without units
             fv_y -= self.bv_y
-        pixX = pixelize_cartesian(plot.data['px'], plot.data['py'],
+        pixX = np.zeros((ny, nx), dtype="f8")
+        pixY = np.zeros((ny, nx), dtype="f8")
+        pixelize_cartesian(pixX, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
-                                  fv_x, int(nx), int(ny),
+                                  fv_x,
                                   (x0, x1, y0, y1), 0, # bounds, antialias
-                                  (period_x, period_y), periodic,
-                                  ).transpose()
-        pixY = pixelize_cartesian(plot.data['px'], plot.data['py'],
+                                  (period_x, period_y), periodic)
+        pixelize_cartesian(pixY, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
-                                  fv_y, int(nx), int(ny),
+                                  fv_y,
                                   (x0, x1, y0, y1), 0, # bounds, antialias
-                                  (period_x, period_y), periodic,
-                                  ).transpose()
+                                  (period_x, period_y), periodic)
         X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
                           np.linspace(yy0,yy1,ny,endpoint=True))
         if self.normalize:
@@ -481,8 +483,10 @@
 
         plot._axes.hold(True)
 
-        numPoints_x = plot.image._A.shape[0]
-        numPoints_y = plot.image._A.shape[1]
+        # See the note about rows/columns in the pixelizer for more information
+        # on why we choose the bounds we do
+        numPoints_x = plot.image._A.shape[1]
+        numPoints_y = plot.image._A.shape[0]
 
         # Multiply by dx and dy to go from data->plot
         dx = (xx1 - xx0) / (x1-x0)
@@ -606,7 +610,7 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         (dx, dy) = self.pixel_scale(plot)
-        (xpix, ypix) = plot.image._A.shape
+        (ypix, xpix) = plot.image._A.shape
         ax = plot.data.axis
         px_index = plot.data.ds.coordinates.x_axis[ax]
         py_index = plot.data.ds.coordinates.y_axis[ax]
@@ -716,29 +720,33 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0] / self.factor
-        ny = plot.image._A.shape[1] / self.factor
-        pixX = pixelize_cartesian(plot.data['px'], plot.data['py'],
+        # See the note about rows/columns in the pixelizer for more information
+        # on why we choose the bounds we do
+        nx = plot.image._A.shape[1] / self.factor
+        ny = plot.image._A.shape[0] / self.factor
+        pixX = np.zeros((ny, nx), dtype="f8")
+        pixY = np.zeros((ny, nx), dtype="f8")
+        pixelize_cartesian(pixX, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
                                   plot.data[self.field_x],
-                                  int(nx), int(ny),
-                                  (x0, x1, y0, y1),).transpose()
-        pixY = pixelize_cartesian(plot.data['px'], plot.data['py'],
+                                  (x0, x1, y0, y1))
+        pixelize_cartesian(pixY, plot.data['px'], plot.data['py'],
                                   plot.data['pdx'], plot.data['pdy'],
                                   plot.data[self.field_y],
-                                  int(nx), int(ny),
-                                  (x0, x1, y0, y1),).transpose()
+                                  (x0, x1, y0, y1))
         if self.field_color:
-            self.field_color = pixelize_cartesian(
+            field_colors = np.zeros((ny, nx), dtype="f8")
+            pixelize_cartesian(field_colors,
                         plot.data['px'], plot.data['py'],
                         plot.data['pdx'], plot.data['pdy'],
-                        plot.data[self.field_color], int(nx), int(ny),
-                        (x0, x1, y0, y1),).transpose()
-
+                        plot.data[self.field_color],
+                        (x0, x1, y0, y1))
+        else:
+            field_colors = None
         X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
                np.linspace(yy0,yy1,ny,endpoint=True))
         streamplot_args = {'x': X, 'y': Y, 'u':pixX, 'v': pixY,
-                           'density': self.dens, 'color':self.field_color}
+                           'density': self.dens, 'color':field_colors}
         streamplot_args.update(self.plot_args)
         plot._axes.streamplot(**streamplot_args)
         plot._axes.set_xlim(xx0,xx1)
@@ -877,26 +885,26 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0] / self.factor
-        ny = plot.image._A.shape[1] / self.factor
+        nx = plot.image._A.shape[1] / self.factor
+        ny = plot.image._A.shape[0] / self.factor
         indices = np.argsort(plot.data['dx'])[::-1]
 
-        pixX = pixelize_off_axis_cartesian(
+        pixX = np.zeros((ny, nx), dtype="f8")
+        pixY = np.zeros((ny, nx), dtype="f8")
+        pixelize_off_axis_cartesian(pixX,
                                plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
                                plot.data.center, plot.data._inv_mat, indices,
                                plot.data[self.field_x],
-                               int(nx), int(ny),
-                               (x0, x1, y0, y1)).transpose()
-        pixY = pixelize_off_axis_cartesian(
+                               (x0, x1, y0, y1))
+        pixelize_off_axis_cartesian(pixY,
                                plot.data['x'], plot.data['y'], plot.data['z'],
                                plot.data['px'], plot.data['py'],
                                plot.data['pdx'], plot.data['pdy'], plot.data['pdz'],
                                plot.data.center, plot.data._inv_mat, indices,
                                plot.data[self.field_y],
-                               int(nx), int(ny),
-                               (x0, x1, y0, y1)).transpose()
+                               (x0, x1, y0, y1))
         X,Y = np.meshgrid(np.linspace(xx0,xx1,nx,endpoint=True),
                           np.linspace(yy0,yy1,ny,endpoint=True))
 
@@ -942,7 +950,7 @@
         dxf = "d%s" % xf
         dyf = "d%s" % yf
 
-        nx, ny = plot.image._A.shape
-        buff = np.zeros((nx,ny),dtype='float64')
+        ny, nx = plot.image._A.shape
+        buff = np.zeros((ny,nx),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.info("Pixelizing contour %s", i)
@@ -950,12 +958,12 @@
             xf_copy = clump[xf].copy().in_units("code_length")
             yf_copy = clump[yf].copy().in_units("code_length")
 
-            temp = pixelize_cartesian(xf_copy, yf_copy,
+            temp = np.zeros((ny, nx), dtype="f8")
+            pixelize_cartesian(temp, xf_copy, yf_copy,
                                  clump[dxf].in_units("code_length")/2.0,
                                  clump[dyf].in_units("code_length")/2.0,
                                  clump[dxf].d*0.0+i+1, # inits inside Pixelize
-                                 int(nx), int(ny),
-                             (x0, x1, y0, y1), 0).transpose()
+                             (x0, x1, y0, y1), 0)
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, np.unique(buff),
                                      extent=extent, **self.plot_args)
@@ -2410,8 +2418,10 @@
         extent = [xx0,xx1,yy0,yy1]
 
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        # We are feeding this size into the pixelizer, where it will properly
+        # set it in reverse order
+        nx = plot.image._A.shape[1]
+        ny = plot.image._A.shape[0]
         pixX = plot.data.ds.coordinates.pixelize(plot.data.axis,
                                                  plot.data,
                                                  self.field_x,
@@ -2436,19 +2446,20 @@
         kernel = kernel.astype(np.double)
 
         lic_data = line_integral_convolution_2d(vectors,self.texture,kernel)
-        lic_data = np.flipud(lic_data / lic_data.max())
+        lic_data = lic_data / lic_data.max()
         lic_data_clip = np.clip(lic_data,self.lim[0],self.lim[1])
 
         if self.const_alpha:
             plot._axes.imshow(lic_data_clip, extent=extent, cmap=self.cmap,
-                              alpha=self.alpha)
+                              alpha=self.alpha, origin='lower')
         else:
             lic_data_rgba = cm.ScalarMappable(norm=None, cmap=self.cmap).\
                             to_rgba(lic_data_clip)
             lic_data_clip_rescale = (lic_data_clip - self.lim[0]) \
                                     / (self.lim[1] - self.lim[0])
             lic_data_rgba[...,3] = lic_data_clip_rescale * self.alpha
-            plot._axes.imshow(lic_data_rgba, extent=extent, cmap=self.cmap)
+            plot._axes.imshow(lic_data_rgba, extent=extent, cmap=self.cmap,
+                              origin='lower')
         plot._axes.hold(False)
 
         return plot
@@ -2499,8 +2510,8 @@
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
         plot._axes.hold(True)
-        nx = plot.image._A.shape[0]
-        ny = plot.image._A.shape[1]
+        nx = plot.image._A.shape[1]
+        ny = plot.image._A.shape[0]
         aspect = float((y1 - y0) / (x1 - x0))
         pixel_aspect = float(ny)/nx
         relative_aspect = pixel_aspect / aspect
@@ -2519,16 +2530,17 @@
                 ny = 1600
             long_axis = nx
         line_width = max(self.line_width*long_axis, 1.0)
-        im = pixelize_cartesian(plot.data['px'],
+        im = np.zeros((ny, nx), dtype="f8")
+        pixelize_cartesian(im,
+                                plot.data['px'],
                                 plot.data['py'],
                                 plot.data['pdx'],
                                 plot.data['pdy'],
                                 plot.data['px'], # dummy field
-                                int(nx), int(ny),
                                 (x0, x1, y0, y1),
-                                line_width=line_width).transpose()
+                                line_width=line_width)
         # New image:
-        im_buffer = np.zeros((nx, ny, 4), dtype="uint8")
+        im_buffer = np.zeros((ny, nx, 4), dtype="uint8")
         im_buffer[im > 0, 3] = 255
         im_buffer[im > 0, :3] = self.color
         plot._axes.imshow(im_buffer, origin='lower',

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -26,7 +26,6 @@
     ImagePlotMPL
 from .fixed_resolution import \
     FixedResolutionBuffer, \
-    ObliqueFixedResolutionBuffer, \
     OffAxisProjectionFixedResolutionBuffer
 from .plot_modifications import callback_registry
 from .plot_container import \
@@ -156,11 +155,9 @@
     Parameters
     ----------
 
     data_source : :class:`yt.data_objects.construction_data_containers.YTQuadTreeProj`
     or :class:`yt.data_objects.selection_data_containers.YTSlice`
         This is the source to be pixelized, which can be a projection or a
-        slice.  (For cutting planes, see
-        `yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`.)
+        slice or a cutting plane.
     bounds : sequence of floats
         Bounds are the min and max in the image plane that we want our
         image to cover.  It's in the order of (xmin, xmax, ymin, ymax),
@@ -272,8 +269,6 @@
             bounds = self.xlim+self.ylim+self.zlim
         else:
             bounds = self.xlim+self.ylim
-        if self._frb_generator is ObliqueFixedResolutionBuffer:
-            bounds = np.array([b.in_units('code_length') for b in bounds])
 
         # Generate the FRB
         self.frb = self._frb_generator(self.data_source, bounds,
@@ -1577,7 +1572,7 @@
     """
 
     _plot_type = 'OffAxisSlice'
-    _frb_generator = ObliqueFixedResolutionBuffer
+    _frb_generator = FixedResolutionBuffer
 
     def __init__(self, ds, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, right_handed=True, fontsize=18,

diff -r 743f56472b10f18505a7e1c1061b496050e8aaf1 -r 1d70bf4124996d584483c173e37f29670a73db99 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -423,6 +423,13 @@
 
     assert_array_almost_equal(sl_on.frb['density'], sl_off.frb['density'])
 
+    sl_on.set_buff_size((800, 400))
+    sl_on._recreate_frb()
+    sl_off.set_buff_size((800, 400))
+    sl_off._recreate_frb()
+
+    assert_array_almost_equal(sl_on.frb['density'], sl_off.frb['density'])
+
 def test_plot_particle_field_error():
     ds = fake_random_ds(32, particles=100)

Repository URL: https://bitbucket.org/yt_analysis/yt/
