[yt-svn] commit/yt: 17 new changesets

commits-noreply at bitbucket.org
Wed Feb 3 09:06:36 PST 2016


17 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3b6ac9a9b9d8/
Changeset:   3b6ac9a9b9d8
Branch:      yt
User:        MatthewTurk
Date:        2015-11-27 21:27:14+00:00
Summary:     Starting refactoring oct visitors into classes
Affected #:  4 files
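
For orientation, the refactor below replaces the OctVisitorData struct plus
free visitor functions with OctVisitor subclasses that carry their own
traversal state and expose a visit() method.  A minimal pure-Python sketch of
that pattern (the names here are illustrative, not the yt API):

    class Visitor:
        """Base class: traversal state lives on the instance, not in a struct."""
        def __init__(self, domain_id=-1):
            self.index = 0
            self.last = -1
            self.domain = domain_id

        def visit(self, oct, selected):
            raise NotImplementedError

    class CountCells(Visitor):
        """Replaces a free visitor function plus a void* payload."""
        def visit(self, oct, selected):
            # Count only cells the selector marked as selected.
            self.index += selected

    def visit_all(octs, selector, visitor):
        # The container drives the traversal; the visitor accumulates results.
        for oct in octs:
            visitor.visit(oct, selector(oct))

    counter = CountCells()
    visit_all(range(10), lambda o: 1 if o % 2 else 0, counter)
    print(counter.index)  # -> 5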

diff -r 03a54b627189e63eaee9f2bc1d4a36c3ab6b9637 -r 3b6ac9a9b9d889a28071eaec31992859df0e129c yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -20,7 +20,7 @@
 cimport oct_visitors
 cimport selection_routines
 from .oct_visitors cimport \
-    OctVisitorData, oct_visitor_function, Oct, cind
+    OctVisitor, oct_visitor_function, Oct, cind
 from libc.stdlib cimport bsearch, qsort, realloc, malloc, free
 from libc.math cimport floor
 
@@ -80,7 +80,7 @@
     cdef void visit_all_octs(self,
                         selection_routines.SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)

diff -r 03a54b627189e63eaee9f2bc1d4a36c3ab6b9637 -r 3b6ac9a9b9d889a28071eaec31992859df0e129c yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -187,21 +187,6 @@
                 obj.partial_coverage)
         return obj
 
-    cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
-        cdef int i
-        data.index = 0
-        data.last = -1
-        data.global_index = -1
-        for i in range(3):
-            data.pos[i] = -1
-            data.ind[i] = -1
-        data.array = NULL
-        data.dims = 0
-        data.domain = domain_id
-        data.level = -1
-        data.oref = self.oref
-        data.nz = (1 << (data.oref*3))
-
     def __dealloc__(self):
         free_octs(self.cont)
         if self.root_mesh == NULL: return

diff -r 03a54b627189e63eaee9f2bc1d4a36c3ab6b9637 -r 3b6ac9a9b9d889a28071eaec31992859df0e129c yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -16,6 +16,8 @@
 
 cimport numpy as np
 
+cdef class OctreeContainer
+
 cdef struct Oct
 cdef struct Oct:
     np.int64_t file_ind     # index with respect to the order in which it was
@@ -30,50 +32,99 @@
     np.int64_t domain
     np.int64_t padding
 
-cdef struct OctVisitorData:
+cdef class OctVisitor:
     np.uint64_t index
     np.uint64_t last
     np.int64_t global_index
     np.int64_t pos[3]       # position in ints
     np.uint8_t ind[3]              # cell position
-    void *array
     int dims
     np.int32_t domain
     np.int8_t level
     np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
                    # To calculate nzones, 1 << (oref * 3)
     np.int32_t nz
+
+    # There will also be overrides for the memoryviews associated with the
+    # specific instance.
+
                             
-ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
-                                   np.uint8_t selected)
+    cdef void visit(self, Oct*, np.uint8_t selected)
 
-cdef oct_visitor_function count_total_octs
-cdef oct_visitor_function count_total_cells
-cdef oct_visitor_function mark_octs
-cdef oct_visitor_function mask_octs
-cdef oct_visitor_function index_octs
-cdef oct_visitor_function icoords_octs
-cdef oct_visitor_function ires_octs
-cdef oct_visitor_function fcoords_octs
-cdef oct_visitor_function fwidth_octs
-cdef oct_visitor_function copy_array_f64
-cdef oct_visitor_function copy_array_i64
-cdef oct_visitor_function identify_octs
-cdef oct_visitor_function assign_domain_ind
-cdef oct_visitor_function fill_file_indices_oind
-cdef oct_visitor_function fill_file_indices_rind
-cdef oct_visitor_function count_by_domain
-cdef oct_visitor_function store_octree
-cdef oct_visitor_function load_octree
+    cdef inline int oind(self):
+        cdef int d = (1 << self.oref)
+        return (((self.ind[0]*d)+self.ind[1])*d+self.ind[2])
+
+    cdef inline int rind(self):
+        cdef int d = (1 << self.oref)
+        return (((self.ind[2]*d)+self.ind[1])*d+self.ind[0])
+
+cdef class CountTotalOcts(OctVisitor)
+
+cdef class CountTotalCells(OctVisitor)
+
+cdef class MarkOcts(OctVisitor):
+    # Unused
+    np.uint8_t[:,:,:,:] mark
+
+cdef class MaskOcts(OctVisitor):
+    np.uint8_t[:,:,:,:] mask
+
+cdef class IndexOcts(OctVisitor):
+    np.int64_t[:] oct_index
+
+cdef class ICoordsOcts(OctVisitor):
+    np.int64_t[:,3] icoords
+
+cdef class IResOcts(OctVisitor):
+    np.int64_t[:,3] ires
+
+cdef class FCoordsOcts(OctVisitor):
+    np.float64_t[:,3] fcoords
+
+cdef class FWidthOcts(OctVisitor):
+    np.float64_t[:,3] fwidth
+
+cdef fused numpy_dt:
+    np.float32_t
+    np.float64_t
+    np.int32_t
+    np.int64_t
+
+cdef class CopyArray[numpy_dt](OctVisitor):
+    numpy_dt[:,:] source
+    numpy_dt[:,:] dest
+
+cdef class IdentifyOcts(OctVisitor):
+    np.uint64_t[:] domain_mask
+
+cdef class AssignDomainInd(OctVisitor):
+    pass
+
+cdef class FillFileIndicesO(OctVisitor):
+    np.uint8_t[:] levels
+    np.uint8_t[:] file_inds
+    np.uint8_t[:] cell_inds
+
+cdef class FillFileIndicesR(OctVisitor):
+    np.uint8_t[:] levels
+    np.int64_t[:] file_inds
+    np.uint8_t[:] cell_inds
+
+cdef class CountByDomain(OctVisitor):
+    np.int64_t[:] domain_counts
+
+cdef class StoreOctree(OctVisitor):
+    np.uint8_t[:] ref_mask
+
+cdef class LoadOctree(OctVisitor):
+    np.uint8_t[:] ref_mask
+    Oct[:] octs
+    np.int64_t nocts
+    np.int64_t nfinest
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
     return (((i*2)+j)*2+k)
 
-cdef inline int oind(OctVisitorData *data):
-    cdef int d = (1 << data.oref)
-    return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
-
-cdef inline int rind(OctVisitorData *data):
-    cdef int d = (1 << data.oref)
-    return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
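
The oind()/rind() helpers above flatten the per-cell index ind[0..2] into a
single offset inside an oct of side d = 1 << oref; oind() varies the first
axis slowest, rind() the last.  A small Python check of the same arithmetic
(illustrative only, not the Cython code):

    def oind(ind, oref=1):
        # Flatten (i, j, k) with i varying slowest, as in OctVisitor.oind().
        d = 1 << oref
        return ((ind[0] * d) + ind[1]) * d + ind[2]

    def rind(ind, oref=1):
        # Same stride arithmetic with the axes reversed (k slowest).
        d = 1 << oref
        return ((ind[2] * d) + ind[1]) * d + ind[0]

    # With oref=1 (a 2x2x2 oct) every cell maps to a unique offset in [0, 8).
    cells = [(i, j, k) for i in range(2) for j in range(2) for k in range(2)]
    assert sorted(oind(c) for c in cells) == list(range(8))
    assert sorted(rind(c) for c in cells) == list(range(8))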

diff -r 03a54b627189e63eaee9f2bc1d4a36c3ab6b9637 -r 3b6ac9a9b9d889a28071eaec31992859df0e129c yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -22,206 +22,197 @@
 
 # Now some visitor functions
 
-cdef void copy_array_f64(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We should always have global_index less than our source.
-    # "last" here tells us the dimensionality of the array.
-    if selected == 0: return
-    cdef int i
-    # There are this many records between "octs"
-    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
-    cdef np.float64_t **p = <np.float64_t**> data.array
-    index += oind(data)*data.dims
-    for i in range(data.dims):
-        p[1][data.index + i] = p[0][index + i]
-    data.index += data.dims
+cdef class OctVisitor:
+    def __init__(self, OctreeContainer octree, int domain_id = -1):
+        cdef int i
+        self.index = 0
+        self.last = -1
+        self.global_index = -1
+        for i in range(3):
+            self.pos[i] = -1
+            self.ind[i] = -1
+        self.dims = 0
+        self.domain = domain_id
+        self.level = -1
+        self.oref = octree.oref
+        self.nz = (1 << (self.oref*3))
 
-cdef void copy_array_i64(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We should always have global_index less than our source.
-    # "last" here tells us the dimensionality of the array.
-    if selected == 0: return
-    cdef int i
-    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
-    cdef np.int64_t **p = <np.int64_t**> data.array
-    index += oind(data)*data.dims
-    for i in range(data.dims):
-        p[1][data.index + i] = p[0][index + i]
-    data.index += data.dims
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        raise NotImplementedError
 
-cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Count even if not selected.
-    # Number of *octs* visited.
-    if data.last != o.domain_ind:
-        data.index += 1
-        data.last = o.domain_ind
+cdef class CopyArray[numpy_dt](OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We should always have global_index less than our source.
+        # "last" here tells us the dimensionality of the array.
+        if selected == 0: return
+        cdef int i
+        # There are this many records between "octs"
+        cdef np.int64_t index = (self.global_index * self.nz)*self.dims
+        # We may want to change the way this is structured to be N,2,2,2,dim
+        index += self.oind()*self.dims
+        for i in range(self.dims):
+            self.dest[self.index, i] = self.source[index, i]
+        self.index += self.dims
 
-cdef void count_total_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Number of *cells* visited and selected.
-    data.index += selected
+cdef class CountTotalOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Count even if not selected.
+        # Number of *octs* visited.
+        if self.last != o.domain_ind:
+            self.index += 1
+            self.last = o.domain_ind
 
-cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We mark them even if they are not selected
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        data.index += 1
-    cdef np.int64_t index = data.index * data.nz
-    index += oind(data)
-    arr[index] = 1
+cdef class CountTotalCells(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Number of *cells* visited and selected.
+        self.index += selected
 
-cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * data.nz
-    index += oind(data)
-    arr[index] = 1
+cdef class MarkOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We mark them even if they are not selected
+        if self.last != o.domain_ind:
+            self.last = o.domain_ind
+            self.index += 1
+        self.mark[self.index, self.ind[0], self.ind[1], self.ind[2]] = 1
 
-cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that we provide an index even if the cell is not selected.
-    cdef int i
-    cdef np.int64_t *arr
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        arr = <np.int64_t *> data.array
-        arr[o.domain_ind] = data.index
-        data.index += 1
+cdef class MaskOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        self.mask[self.global_index, self.ind[0], self.ind[1], self.ind[2]] = 1
 
-cdef void icoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef np.int64_t *coords = <np.int64_t*> data.array
-    cdef int i
-    for i in range(3):
-        coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
-    data.index += 1
+cdef class IndexOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Note that we provide an index even if the cell is not selected.
+        cdef np.int64_t *arr
+        if self.last != o.domain_ind:
+            self.last = o.domain_ind
+            self.oct_index[o.domain_ind] = self.index
+            self.index += 1
 
-cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef np.int64_t *ires = <np.int64_t*> data.array
-    ires[data.index] = data.level
-    data.index += 1
+cdef class ICoordsOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        cdef int i
+        for i in range(3):
+            self.icoords[self.index,i] = (self.pos[i] << self.oref) + self.ind[i]
+        self.index += 1
 
-@cython.cdivision(True)
-cdef void fcoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that this does not actually give the correct floating point
-    # coordinates.  It gives them in some unit system where the domain is 1.0
-    # in all directions, and assumes that they will be scaled later.
-    if selected == 0: return
-    cdef np.float64_t *fcoords = <np.float64_t*> data.array
-    cdef int i
-    cdef np.float64_t c, dx
-    dx = 1.0 / ((1 << data.oref) << data.level)
-    for i in range(3):
-        c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i])
-        fcoords[data.index * 3 + i] = (c + 0.5) * dx
-    data.index += 1
+cdef class IResOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        self.ires[self.index] = self.level
+        self.index += 1
 
-@cython.cdivision(True)
-cdef void fwidth_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that this does not actually give the correct floating point
-    # coordinates.  It gives them in some unit system where the domain is 1.0
-    # in all directions, and assumes that they will be scaled later.
-    if selected == 0: return
-    cdef np.float64_t *fwidth = <np.float64_t*> data.array
-    cdef int i
-    cdef np.float64_t dx
-    dx = 1.0 / ((1 << data.oref) << data.level)
-    for i in range(3):
-        fwidth[data.index * 3 + i] = dx
-    data.index += 1
+cdef class FCoordsOcts(OctVisitor):
+    @cython.cdivision(True)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Note that this does not actually give the correct floating point
+        # coordinates.  It gives them in some unit system where the domain is 1.0
+        # in all directions, and assumes that they will be scaled later.
+        if selected == 0: return
+        cdef int i
+        cdef np.float64_t c, dx
+        dx = 1.0 / ((1 << self.oref) << self.level)
+        for i in range(3):
+            c = <np.float64_t> ((self.pos[i] << self.oref ) + self.ind[i])
+            self.fcoords[self.index,i] = (c + 0.5) * dx
+        self.index += 1
 
-cdef void identify_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We assume that our domain has *already* been selected by, which means
-    # we'll get all cells within the domain for a by-domain selector and all
-    # cells within the domain *and* selector for the selector itself.
-    if selected == 0: return
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    arr[o.domain - 1] = 1
+cdef class FWidthOcts(OctVisitor):
+    @cython.cdivision(True)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Note that this does not actually give the correct floating point
+        # coordinates.  It gives them in some unit system where the domain is 1.0
+        # in all directions, and assumes that they will be scaled later.
+        if selected == 0: return
+        cdef int i
+        cdef np.float64_t dx
+        dx = 1.0 / ((1 << self.oref) << self.level)
+        for i in range(3):
+            self.fwidth[self.index,i] = dx
+        self.index += 1
 
-cdef void assign_domain_ind(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    o.domain_ind = data.global_index
-    data.index += 1
+cdef class IdentifyOcts(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We assume that our domain has *already* been selected, which means
+        # we'll get all cells within the domain for a by-domain selector and all
+        # cells within the domain *and* selector for the selector itself.
+        if selected == 0: return
+        self.domain_mask[o.domain - 1] = 1
 
-cdef void fill_file_indices_oind(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We fill these arrays, then inside the level filler we use these as
-    # indices as we fill a second array from the data.
-    if selected == 0: return
-    cdef void **p = <void **> data.array
-    cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
-    cdef np.int64_t *find_arr = <np.int64_t *> p[1]
-    cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
-    level_arr[data.index] = data.level
-    find_arr[data.index] = o.file_ind
-    cell_arr[data.index] = oind(data)
-    data.index +=1
+cdef class AssignDomainInd(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        o.domain_ind = self.global_index
+        self.index += 1
 
-cdef void fill_file_indices_rind(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We fill these arrays, then inside the level filler we use these as
-    # indices as we fill a second array from the data.
-    if selected == 0: return
-    cdef void **p = <void **> data.array
-    cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
-    cdef np.int64_t *find_arr = <np.int64_t *> p[1]
-    cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
-    level_arr[data.index] = data.level
-    find_arr[data.index] = o.file_ind
-    cell_arr[data.index] = rind(data)
-    data.index +=1
+cdef class FillFileIndicesO(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We fill these arrays, then inside the level filler we use these as
+        # indices as we fill a second array from the data.
+        if selected == 0: return
+        self.levels[self.index] = self.level
+        self.file_inds[self.index] = o.file_ind
+        self.cell_inds[self.index] = self.oind()
+        self.index +=1
 
-cdef void count_by_domain(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef np.int64_t *arr
-    if selected == 0: return
-    # NOTE: We do this for every *cell*.
-    arr = <np.int64_t *> data.array
-    arr[o.domain - 1] += 1
+cdef class FillFileIndicesR(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We fill these arrays, then inside the level filler we use these as
+        # indices as we fill a second array from the data.
+        if selected == 0: return
+        self.levels[self.index] = self.level
+        self.file_inds[self.index] = o.file_ind
+        self.cell_inds[self.index] = self.rind()
+        self.index +=1
 
-cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef np.uint8_t res, ii
-    cdef np.uint8_t *arr
-    cdef np.uint8_t *always_descend
-    ii = cind(data.ind[0], data.ind[1], data.ind[2])
-    cdef void **p = <void **> data.array
-    arr = <np.uint8_t *> p[0]
-    if o.children == NULL:
-        # Not refined.
-        res = 0
-    else:
-        res = 1
-    arr[data.index] = res
-    data.index += 1
+cdef class CountByDomain(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        # NOTE: We do this for every *cell*.
+        self.domain_counts[o.domain - 1] += 1
 
-cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef void **p = <void **> data.array
-    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
-    cdef Oct* octs = <Oct*> p[1]
-    cdef np.int64_t *nocts = <np.int64_t*> p[2]
-    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
-    cdef int i, ii
-    ii = cind(data.ind[0], data.ind[1], data.ind[2])
-    if arr[data.index] == 0:
-        # We only want to do this once.  Otherwise we end up with way too many
-        # nfinest for our tastes.
-        if o.file_ind == -1:
-            o.children = NULL
-            o.file_ind = nfinest[0]
-            o.domain = 1
-            nfinest[0] += 1
-    elif arr[data.index] > 0:
-        if arr[data.index] != 1 and arr[data.index] != 8:
-            print "ARRAY CLUE: ", arr[data.index], "UNKNOWN"
+cdef class StoreOctree(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        cdef np.uint8_t res, ii
+        ii = cind(self.ind[0], self.ind[1], self.ind[2])
+        if o.children == NULL:
+            # Not refined.
+            res = 0
+        else:
+            res = 1
+        self.ref_mask[self.index] = res
+        self.index += 1
+
+cdef class LoadOctree(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        cdef int i, ii
+        ii = cind(self.ind[0], self.ind[1], self.ind[2])
+        if self.ref_mask[self.index] == 0:
+            # We only want to do this once.  Otherwise we end up with way too many
+            # nfinest for our tastes.
+            if o.file_ind == -1:
+                o.children = NULL
+                o.file_ind = self.nfinest
+                o.domain = 1
+                self.nfinest += 1
+        elif self.ref_mask[self.index] > 0:
+            if self.ref_mask[self.index] != 1 and self.ref_mask[self.index] != 8:
+                print "ARRAY CLUE: ", self.ref_mask[self.index], "UNKNOWN"
+                raise RuntimeError
+            if o.children == NULL:
+                o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+                for i in range(8):
+                    o.children[i] = NULL
+            for i in range(8):
+                o.children[ii + i] = &self.octs[self.nocts]
+                o.children[ii + i].domain_ind = self.nocts
+                o.children[ii + i].file_ind = -1
+                o.children[ii + i].domain = -1
+                o.children[ii + i].children = NULL
+                self.nocts += 1
+        else:
+            print "SOMETHING IS AMISS", self.index
             raise RuntimeError
-        if o.children == NULL:
-            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
-            for i in range(8):
-                o.children[i] = NULL
-        for i in range(8):
-            o.children[ii + i] = &octs[nocts[0]]
-            o.children[ii + i].domain_ind = nocts[0]
-            o.children[ii + i].file_ind = -1
-            o.children[ii + i].domain = -1
-            o.children[ii + i].children = NULL
-            nocts[0] += 1
-    else:
-        print "SOMETHING IS AMISS", data.index
-        raise RuntimeError
-    data.index += 1
+        self.index += 1
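
StoreOctree and LoadOctree above communicate through a flat ref_mask: each
visited oct records 0 if it is a leaf and a nonzero value if it is refined,
and loading consumes the mask in the same traversal order to rebuild the tree
and count nocts/nfinest.  A much-simplified, purely illustrative round trip
of that encoding (plain Python, not the yt data structures):

    def store(node, mask):
        # Depth-first: record 0 for a leaf, 1 for a refined oct, then recurse.
        if not node["children"]:
            mask.append(0)
            return mask
        mask.append(1)
        for child in node["children"]:
            store(child, mask)
        return mask

    def load(mask, pos=0):
        # Rebuild by consuming the mask in the same order it was written.
        if mask[pos] == 0:
            return {"children": []}, pos + 1
        node = {"children": []}
        pos += 1
        for _ in range(8):
            child, pos = load(mask, pos)
            node["children"].append(child)
        return node, pos

    root = {"children": [{"children": []} for _ in range(8)]}
    mask = store(root, [])          # [1, 0, 0, 0, 0, 0, 0, 0, 0]
    rebuilt, _ = load(mask)
    assert len(rebuilt["children"]) == 8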


https://bitbucket.org/yt_analysis/yt/commits/da665a962b9c/
Changeset:   da665a962b9c
Branch:      yt
User:        MatthewTurk
Date:        2015-12-02 17:10:50+00:00
Summary:     Merging from upstream
Affected #:  18 files

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -11,8 +11,8 @@
 with the path length of the ray through the cell.  Line profiles are 
 generated using a voigt profile based on the temperature field.  The lines 
 are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the line of sight peculiar velocity.  Inclusion of the 
-peculiar velocity requires setting ``get_los_velocity`` to True in the call to 
+and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
+peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
 The spectrum generator will output a file containing the wavelength and 

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -79,7 +79,7 @@
 
   lr.make_light_ray(seed=8675309,
                     fields=['temperature', 'density'],
-                    get_los_velocity=True)
+                    use_peculiar_velocity=True)
 
 The keyword arguments are:
 
@@ -107,8 +107,10 @@
 * ``data_filename`` (*string*): Path to output file for ray data.  
   Default: None.
 
-* ``get_los_velocity`` (*bool*): If True, the line of sight velocity is 
-  calculated for each point in the ray.  Default: True.
+* ``use_peculiar_velocity`` (*bool*): If True, the doppler redshift from
+  the peculiar velocity of gas along the ray is calculated and added to the
+  cosmological redshift as the "effective" redshift.
+  Default: True.
 
 * ``redshift`` (*float*): Used with light rays made from single datasets to 
   specify a starting redshift for the ray.  If not used, the starting 

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -71,7 +71,6 @@
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=fields, setup_function=setup_ds,
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Create an AbsorptionSpectrum object extending from

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 doc/source/cookbook/light_ray.py
--- a/doc/source/cookbook/light_ray.py
+++ b/doc/source/cookbook/light_ray.py
@@ -20,7 +20,6 @@
                   solution_filename='LR/lightraysolution.txt',
                   data_filename='LR/lightray.h5',
                   fields=['temperature', 'density'],
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Optionally, we can now overplot the part of this ray that intersects 

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -13,8 +13,7 @@
                   end_position=[1., 1., 1.],
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_los_velocity=True)
+                  fields=['temperature', 'density'])
 
 # Optionally, we can now overplot this ray on a projection of the source 
 # dataset

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -139,7 +139,9 @@
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
-           if True, include line of sight velocity for shifting lines.
+           if True, include peculiar velocity for calculating doppler redshift
+           to shift lines.  Requires similar flag to be set in LightRay 
+           generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -50,7 +50,6 @@
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -258,13 +258,13 @@
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
-                       njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True, 
+                       redshift=None, njobs=-1):
         """
         make_light_ray(seed=None, start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
+                       use_peculiar_velocity=True, redshift=None,
                        njobs=-1)
 
         Create a light ray and get field values for each lixel.  A light
@@ -305,9 +305,10 @@
         data_filename : optional, string
             Path to output file for ray data.
             Default: None.
-        get_los_velocity : optional, bool
-            If True, the line of sight velocity is calculated for
-            each point in the ray.
+        use_peculiar_velocity : optional, bool
+            If True, the peculiar velocity along the ray will be sampled for
+            calculating the effective redshift combining the cosmological
+            redshift and the doppler redshift.
             Default: True.
         redshift : optional, float
             Used with light rays made from single datasets to specify a
@@ -335,7 +336,7 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         Make a light ray from a single dataset:
 
@@ -349,9 +350,12 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         """
+        if get_los_velocity is not None:
+            use_peculiar_velocity = get_los_velocity
+            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
@@ -368,9 +372,10 @@
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
-        if get_los_velocity:
-            all_fields.extend(['velocity_x', 'velocity_y',
-                               'velocity_z', 'velocity_los', 'redshift_eff'])
+        if use_peculiar_velocity:
+            all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
+                               'velocity_los', 'redshift_eff', 
+                               'redshift_dopp'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -444,16 +449,43 @@
                 for field in data_fields:
                     sub_data[field].extend(sub_ray[field][asort])
 
-                if get_los_velocity:
-                    line_of_sight = sub_segment[1] - sub_segment[0]
+                if use_peculiar_velocity:
+                    line_of_sight = sub_segment[0] - sub_segment[1]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
-                    # line of sight velocity is reversed relative to ray
-                    sub_data['velocity_los'].extend(-1*(np.rollaxis(sub_vel, 1) *
-                                                     line_of_sight).sum(axis=1)[asort])
-                    del sub_vel
+                    # Line of sight velocity = vel_los
+                    sub_vel_los = (np.rollaxis(sub_vel, 1) * \
+                                   line_of_sight).sum(axis=1)
+                    sub_data['velocity_los'].extend(sub_vel_los[asort])
+
+                    # doppler redshift:
+                    # See https://en.wikipedia.org/wiki/Redshift and 
+                    # Peebles eqns: 5.48, 5.49
+
+                    # 1 + redshift_dopp = (1 + v*cos(theta)/c) / 
+                    # sqrt(1 - v**2/c**2)
+
+                    # where v is the peculiar velocity (ie physical velocity
+                    # without the hubble flow, but no hubble flow in sim, so
+                    # just the physical velocity).
+
+                    # the bulk of the doppler redshift is from line of sight 
+                    # motion, but there is a small amount from time dilation 
+                    # of transverse motion, hence the inclusion of theta (the 
+                    # angle between line of sight and the velocity). 
+                    # theta is the angle between the ray vector (i.e. line of 
+                    # sight) and the velocity vectors: a dot b = ab cos(theta)
+
+                    sub_vel_mag = sub_ray['velocity_magnitude']
+                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    redshift_dopp = \
+                        (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
+                         np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1
+                    sub_data['redshift_dopp'].extend(redshift_dopp[asort])
+                    del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \
+                        redshift_dopp
 
                 sub_ray.clear_data()
                 del sub_ray, asort
@@ -461,34 +493,25 @@
             for key in sub_data:
                 sub_data[key] = ds.arr(sub_data[key]).in_cgs()
 
-            # Get redshift for each lixel.  Assume linear relation between l and z.
+            # Get redshift for each lixel.  Assume linear relation between l 
+            # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                 (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
-            # When velocity_los is present, add effective redshift 
-            # (redshift_eff) field by combining cosmological redshift and 
+            # When using the peculiar velocity, create effective redshift 
+            # (redshift_eff) field combining cosmological redshift and 
             # doppler redshift.
             
-            # first convert los velocities to comoving frame (ie mult. by (1+z)), 
-            # then calculate doppler redshift:
-            # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+            # To combine the cosmological and doppler redshifts, follow
+            # eqn 3.75 in Peacock's Cosmological Physics:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
 
-            # then to add cosmological redshift and doppler redshift, follow
-            # eqn 3.75 in Peacock's Cosmological Physics:
-            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
-            # Alternatively, see eqn 5.49 in Peebles for a similar result.
-            if get_los_velocity:
-
-                velocity_los_cm = (1 + sub_data['redshift']) * \
-                                  sub_data['velocity_los']
-                redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
-                                (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
-                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
-                                           (1 + sub_data['redshift'])) - 1
-                del velocity_los_cm, redshift_dopp
+            if use_peculiar_velocity:
+               sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \
+                                            (1 + sub_data['redshift'])) - 1
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()
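
The new redshift_dopp field follows the comments in the hunk above:
1 + z_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2) from the peculiar
velocity, and 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp) for the effective
redshift (Peacock eqn 3.75).  A quick numeric sanity check with made-up
values (not yt code):

    import numpy as np

    c = 2.99792458e10  # speed of light in cm/s

    def doppler_redshift(v_mag, cos_theta):
        # 1 + z_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2)
        return (1 + v_mag * cos_theta / c) / np.sqrt(1 - v_mag**2 / c**2) - 1

    def effective_redshift(z_cosmo, z_dopp):
        # Peacock eqn 3.75: 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp)
        return (1 + z_cosmo) * (1 + z_dopp) - 1

    v = 3.0e7                                     # 300 km/s, in cm/s
    z_dopp = doppler_redshift(v, cos_theta=1.0)   # purely line-of-sight motion
    print(z_dopp)                                 # ~1.0e-3
    print(effective_redshift(0.1, z_dopp))        # ~0.101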

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -127,7 +127,7 @@
     def onaxis_image_func(filename_prefix):
         szprj.write_png(filename_prefix)
     for test in [GenericArrayTest(ds, onaxis_array_func),
-                 GenericImageTest(ds, onaxis_image_func, 3)]:
+                 GenericImageTest(ds, onaxis_image_func, 12)]:
         test_M7_onaxis.__name__ = test.description
         yield test
 
@@ -142,6 +142,6 @@
     def offaxis_image_func(filename_prefix):
         szprj.write_png(filename_prefix)
     for test in [GenericArrayTest(ds, offaxis_array_func),
-                 GenericImageTest(ds, offaxis_image_func, 3)]:
+                 GenericImageTest(ds, offaxis_image_func, 12)]:
         test_M7_offaxis.__name__ = test.description
         yield test

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -712,7 +712,27 @@
         raise NotImplementedError
 
     def ptp(self, field):
-        raise NotImplementedError
+        r"""Compute the range of values (maximum - minimum) of a field.
+
+        This will, in a parallel-aware fashion, compute the "peak-to-peak" of
+        the given field.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to average.
+
+        Returns
+        -------
+        Scalar
+
+        Examples
+        --------
+
+        >>> rho_range = reg.ptp("density")
+        """
+        ex = self._compute_extrema(field)
+        return ex[1] - ex[0]
 
     def hist(self, field, weight = None, bins = None):
         raise NotImplementedError
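
The ptp() implementation above reuses the existing extrema machinery, so the
peak-to-peak reduces per-chunk minima and maxima before taking the
difference, which is what keeps it parallel-aware.  Roughly (plain Python
sketch, not the yt internals):

    import numpy as np

    def ptp(chunks):
        # Reduce per-chunk extrema first, then take max - min.
        minima = [c.min() for c in chunks]
        maxima = [c.max() for c in chunks]
        return max(maxima) - min(minima)

    chunks = [np.array([1.0, 5.0]), np.array([0.5, 3.0])]
    print(ptp(chunks))  # -> 4.5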

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/data_objects/tests/test_numpy_ops.py
--- a/yt/data_objects/tests/test_numpy_ops.py
+++ b/yt/data_objects/tests/test_numpy_ops.py
@@ -73,6 +73,9 @@
         q = ad.max("density").v
         yield assert_equal, q, ad["density"].max()
 
+        ptp = ad.ptp("density").v
+        yield assert_equal, ptp, ad["density"].max() - ad["density"].min()
+
         p = ad.max("density", axis=1)
         p1 = ds.proj("density", 1, data_source=ad, method="mip")
         yield assert_equal, p["density"], p1["density"]

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -88,8 +88,6 @@
         self.display_field = display_field
         self.particle_type = particle_type
         self.vector_field = vector_field
-        if output_units is None: output_units = units
-        self.output_units = output_units
 
         self._function = function
 
@@ -112,6 +110,9 @@
             raise FieldUnitsError("Cannot handle units '%s' (type %s)." \
                                   "Please provide a string or Unit " \
                                   "object." % (units, type(units)) )
+        if output_units is None:
+            output_units = self.units
+        self.output_units = output_units
 
     def _copy_def(self):
         dd = {}

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -125,17 +125,19 @@
         if None in (self.domain_left_edge, self.domain_right_edge):
             R0 = self.parameters['R0']
             if 'offset_center' in self.parameters and self.parameters['offset_center']:
-                self.domain_left_edge = np.array([0, 0, 0])
+                self.domain_left_edge = np.array([0, 0, 0], dtype=np.float64)
                 self.domain_right_edge = np.array([
-                 2.0 * self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                    2.0 * self.parameters.get("R%s" % ax, R0) for ax in 'xyz'],
+                    dtype=np.float64)
             else:
                 self.domain_left_edge = np.array([
-                    -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                    -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'],
+                    dtype=np.float64)
                 self.domain_right_edge = np.array([
-                    +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                    +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'],
+                    dtype=np.float64)
             self.domain_left_edge *= self.parameters.get("a", 1.0)
             self.domain_right_edge *= self.parameters.get("a", 1.0)
-
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
         if "do_periodic" in self.parameters and self.parameters["do_periodic"]:

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -51,6 +51,8 @@
 
 import matplotlib.image as mpimg
 import yt.visualization.plot_window as pw
+import yt.visualization.particle_plots as particle_plots
+import yt.visualization.profile_plotter as profile_plotter
 
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
@@ -349,11 +351,12 @@
 
     def create_plot(self, ds, plot_type, plot_field, plot_axis, plot_kwargs = None):
         # plot_type should be a string
-        # plot_args should be a tuple
         # plot_kwargs should be a dict
         if plot_type is None:
             raise RuntimeError('Must explicitly request a plot type')
-        cls = getattr(pw, plot_type)
+        cls = getattr(pw, plot_type, None)
+        if cls is None:
+            cls = getattr(particle_plots, plot_type)
         plot = cls(*(ds, plot_axis, plot_field), **plot_kwargs)
         return plot
 
@@ -740,6 +743,50 @@
     def compare(self, new_result, old_result):
         compare_image_lists(new_result, old_result, self.decimals)
 
+class PhasePlotAttributeTest(AnswerTestingTest):
+    _type_name = "PhasePlotAttribute"
+    _attrs = ('plot_type', 'x_field', 'y_field', 'z_field',
+              'attr_name', 'attr_args')
+    def __init__(self, ds_fn, x_field, y_field, z_field, 
+                 attr_name, attr_args, decimals, plot_type='PhasePlot'):
+        super(PhasePlotAttributeTest, self).__init__(ds_fn)
+        self.data_source = self.ds.all_data()
+        self.plot_type = plot_type
+        self.x_field = x_field
+        self.y_field = y_field
+        self.z_field = z_field
+        self.plot_kwargs = {}
+        self.attr_name = attr_name
+        self.attr_args = attr_args
+        self.decimals = decimals
+
+    def create_plot(self, data_source, x_field, y_field, z_field, 
+                    plot_type, plot_kwargs=None):
+        # plot_type should be a string
+        # plot_kwargs should be a dict
+        if plot_type is None:
+            raise RuntimeError('Must explicitly request a plot type')
+        cls = getattr(profile_plotter, plot_type, None)
+        if cls is None:
+            cls = getattr(particle_plots, plot_type)
+        plot = cls(*(data_source, x_field, y_field, z_field), **plot_kwargs)
+        return plot
+
+    def run(self):
+        plot = self.create_plot(self.data_source, self.x_field, self.y_field,
+                                self.z_field, self.plot_type, self.plot_kwargs)
+        attr = getattr(plot, self.attr_name)
+        attr(*self.attr_args[0], **self.attr_args[1])
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        plot.save(name=tmpname)
+        image = mpimg.imread(tmpname)
+        os.remove(tmpname)
+        return [zlib.compress(image.dumps())]
+
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+
 class GenericArrayTest(AnswerTestingTest):
     _type_name = "GenericArray"
     _attrs = ('array_func_name','args','kwargs')
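
PhasePlotAttributeTest above, like PlotWindowAttributeTest, drives plot
modification methods through a mapping of attribute names to (args, kwargs)
pairs and then compares the saved image against the gold standard.  The
dispatch pattern reduced to plain Python (the class and values here are
illustrative):

    class FakePlot:
        # Stand-in for a PhasePlot; records which modification calls it got.
        def __init__(self):
            self.calls = []

        def set_log(self, field, value):
            self.calls.append(("set_log", field, value))

        def annotate_text(self, pos, text, color="k"):
            self.calls.append(("annotate_text", pos, text, color))

    ATTR_ARGS = {
        "set_log": [(("cell_mass", False), {})],
        "annotate_text": [(((5e-29, 5e7), "Hello YT"), {"color": "b"})],
    }

    plot = FakePlot()
    for attr_name, arg_sets in ATTR_ARGS.items():
        for args, kwargs in arg_sets:
            # Same dispatch as attr(*self.attr_args[0], **self.attr_args[1]).
            getattr(plot, attr_name)(*args, **kwargs)

    print(plot.calls)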

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -20,9 +20,14 @@
 from yt.data_objects.profiles import create_profile
 from yt.extern.parameterized import parameterized, param
 from yt.visualization.tests.test_plotwindow import \
-    assert_fname, WIDTH_SPECS
+    assert_fname, WIDTH_SPECS, ATTR_ARGS
 from yt.testing import \
     fake_particle_ds, assert_array_almost_equal
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    data_dir_load, \
+    PlotWindowAttributeTest, \
+    PhasePlotAttributeTest
 from yt.visualization.api import \
     ParticleProjectionPlot, ParticlePhasePlot
 from yt.units.yt_array import YTArray
@@ -33,6 +38,22 @@
     from yt.config import ytcfg
     ytcfg["yt", "__withintesting"] = "True"
 
+#  override some of the plotwindow ATTR_ARGS
+PROJ_ATTR_ARGS = ATTR_ARGS.copy() 
+PROJ_ATTR_ARGS["set_cmap"] = [(('particle_mass', 'RdBu'), {}), 
+                                  (('particle_mass', 'kamae'), {})]
+PROJ_ATTR_ARGS["set_log"] = [(('particle_mass', False), {})]
+PROJ_ATTR_ARGS["set_zlim"] = [(('particle_mass', 1e-25, 1e-23), {}),
+                                  (('particle_mass', 1e-25, None), 
+                                   {'dynamic_range': 4})]
+
+PHASE_ATTR_ARGS = {"annotate_text": [(((5e-29, 5e7), "Hello YT"), {}), 
+                               (((5e-29, 5e7), "Hello YT"), {'color':'b'})],
+                   "set_title": [(('particle_mass', 'A phase plot.'), {})],
+                   "set_log": [(('particle_mass', False), {})],
+                   "set_unit": [(('particle_mass', 'Msun'), {})],
+                   "set_xlim": [((-4e7, 4e7), {})],
+                   "set_ylim": [((-4e7, 4e7), {})]}
 
 TEST_FLNMS = [None, 'test', 'test.png', 'test.eps',
               'test.ps', 'test.pdf']
@@ -59,6 +80,58 @@
                  ['particle_mass', 'particle_ones'])]
 
 
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+
+@requires_ds(g30, big_data=True)
+def test_particle_projection_answers():
+    '''
+
+    This iterates over all the plot modification functions in 
+    PROJ_ATTR_ARGS. Each time, it compares the images produced by 
+    ParticleProjectionPlot to the gold standard.
+    
+
+    '''
+
+    plot_field = 'particle_mass'
+    decimals = 12
+    ds = data_dir_load(g30)
+    for ax in 'xyz':
+        for attr_name in PROJ_ATTR_ARGS.keys():
+            for args in PROJ_ATTR_ARGS[attr_name]:
+                test = PlotWindowAttributeTest(ds, plot_field, ax, 
+                                               attr_name,
+                                               args, decimals, 
+                                               'ParticleProjectionPlot')
+                test_particle_projection_answers.__name__ = test.description
+                yield test
+
+
+@requires_ds(g30, big_data=True)
+def test_particle_phase_answers():
+    '''
+
+    This iterates over all the plot modification functions in 
+    PHASE_ATTR_ARGS. Each time, it compares the images produced by 
+    ParticlePhasePlot to the gold standard.
+
+    '''
+
+    decimals = 12
+    ds = data_dir_load(g30)
+
+    x_field = 'particle_velocity_x'
+    y_field = 'particle_velocity_y'
+    z_field = 'particle_mass'
+    for attr_name in PHASE_ATTR_ARGS.keys():
+        for args in PHASE_ATTR_ARGS[attr_name]:
+            test = PhasePlotAttributeTest(ds, x_field, y_field, z_field,
+                                          attr_name, args, decimals,
+                                          'ParticlePhasePlot')
+                
+            test_particle_phase_answers.__name__ = test.description
+            yield test
+
 class TestParticlePhasePlotSave(unittest.TestCase):
 
     @classmethod

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -182,7 +182,7 @@
 def test_attributes():
     """Test plot member functions that aren't callbacks"""
     plot_field = 'density'
-    decimals = 3
+    decimals = 12
 
     ds = data_dir_load(M7)
     for ax in 'xyz':
@@ -200,7 +200,7 @@
 @requires_ds(WT)
 def test_attributes_wt():
     plot_field = 'density'
-    decimals = 3
+    decimals = 12
 
     ds = data_dir_load(WT)
     ax = 'z'

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/visualization/tests/test_profile_plots.py
--- a/yt/visualization/tests/test_profile_plots.py
+++ b/yt/visualization/tests/test_profile_plots.py
@@ -26,6 +26,46 @@
     ProfilePlot, PhasePlot
 from yt.visualization.tests.test_plotwindow import \
     assert_fname, TEST_FLNMS
+from yt.utilities.answer_testing.framework import \
+    PhasePlotAttributeTest, \
+    requires_ds, \
+    data_dir_load
+
+ATTR_ARGS = {"annotate_text": [(((5e-29, 5e7), "Hello YT"), {}), 
+                               (((5e-29, 5e7), "Hello YT"), {'color':'b'})],
+             
+             "set_title": [(('cell_mass', 'A phase plot.'), {})],
+             "set_log": [(('cell_mass', False), {})],
+             "set_unit": [(('cell_mass', 'Msun'), {})],
+             "set_xlim": [((1e-27, 1e-24), {})],
+             "set_ylim": [((1e2, 1e6), {})]}
+
+
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+
+@requires_ds(g30, big_data=True)
+def test_phase_plot_attributes():
+    '''
+
+    This iterates over all the plot modification functions in 
+    ATTR_ARGS. Each time, it compares the images produced by 
+    PhasePlot to the gold standard.
+    
+
+    '''
+
+    x_field = 'density'
+    y_field = 'temperature'
+    z_field = 'cell_mass'
+    decimals = 12
+    ds = data_dir_load(g30)
+    for ax in 'xyz':
+        for attr_name in ATTR_ARGS.keys():
+            for args in ATTR_ARGS[attr_name]:
+                test = PhasePlotAttributeTest(ds, x_field, y_field, z_field, 
+                                               attr_name, args, decimals)
+                test_phase_plot_attributes.__name__ = test.description
+                yield test
 
 class TestProfilePlotSave(unittest.TestCase):
 

diff -r 3b6ac9a9b9d889a28071eaec31992859df0e129c -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -107,7 +107,7 @@
 
     n_frames = 5
     theta = np.pi / n_frames
-    decimals = 3
+    decimals = 12
 
     for lens_type in ['plane-parallel', 'perspective']:
         frame = 0


https://bitbucket.org/yt_analysis/yt/commits/6ee902fd7f04/
Changeset:   6ee902fd7f04
Branch:      yt
User:        MatthewTurk
Date:        2015-12-09 19:00:20+00:00
Summary:     Partially converted to oct visitor functions
Affected #:  8 files

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -8,9 +8,7 @@
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
-from yt.geometry.oct_visitors cimport \
-    OctVisitorData, oct_visitor_function, Oct, \
-    fill_file_indices_oind, fill_file_indices_rind
+from yt.geometry.oct_visitors cimport Oct
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
 from libc.stdint cimport int32_t, int64_t

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -19,8 +19,7 @@
 from fp_utils cimport *
 cimport oct_visitors
 cimport selection_routines
-from .oct_visitors cimport \
-    OctVisitor, oct_visitor_function, Oct, cind
+from .oct_visitors cimport OctVisitor, Oct, cind
 from libc.stdlib cimport bsearch, qsort, realloc, malloc, free
 from libc.math cimport floor
 
@@ -59,7 +58,6 @@
     cdef OctAllocationContainer *cont
     cdef OctAllocationContainer **domains
     cdef Oct ****root_mesh
-    cdef oct_visitor_function *fill_func
     cdef int partial_coverage
     cdef int level_offset
     cdef int nn[3]
@@ -79,13 +77,12 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self,
                         selection_routines.SelectorObject selector,
-                        oct_visitor_function *func,
                         OctVisitor visitor,
                         int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
-    cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
     cdef void append_domain(self, np.int64_t domain_count)
+    cdef public object fill_style
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -99,7 +99,7 @@
             self.DLE[i] = domain_left_edge[i] #0
             self.DRE[i] = domain_right_edge[i] #num_grid
         self._initialize_root_mesh()
-        self.fill_func = oct_visitors.fill_file_indices_oind
+        self.fill_style = "o"
 
     def _initialize_root_mesh(self):
         self.root_mesh = <Oct****> malloc(sizeof(void*) * self.nn[0])
@@ -132,16 +132,16 @@
                 partial_coverage = header['partial_coverage'])
         # NOTE: We do not allow domain/file indices to be specified.
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
-        cdef OctVisitorData data
-        obj.setup_data(&data, -1)
+        cdef oct_visitors.LoadOctree visitor
+        visitor = oct_visitors.LoadOctree(obj, -1)
         cdef int i, j, k, n
-        data.global_index = -1
-        data.level = 0
-        data.oref = 0
-        data.nz = 1
-        assert(ref_mask.shape[0] / float(data.nz) ==
-            <int>(ref_mask.shape[0]/float(data.nz)))
-        obj.allocate_domains([ref_mask.shape[0] / data.nz])
+        visitor.global_index = -1
+        visitor.level = 0
+        visitor.oref = 0
+        visitor.nz = 1
+        assert(ref_mask.shape[0] / float(visitor.nz) ==
+            <int>(ref_mask.shape[0]/float(visitor.nz)))
+        obj.allocate_domains([ref_mask.shape[0] / visitor.nz])
         cdef np.float64_t pos[3]
         cdef np.float64_t dds[3]
         # This dds is the oct-width
@@ -150,13 +150,11 @@
         # Pos is the center of the octs
         cdef OctAllocationContainer *cur = obj.domains[0]
         cdef Oct *o
-        cdef void *p[4]
         cdef np.int64_t nfinest = 0
-        p[0] = ref_mask.data
-        p[1] = <void *> cur.my_octs
-        p[2] = <void *> &cur.n_assigned
-        p[3] = <void *> &nfinest
-        data.array = p
+        visitor.ref_mask = ref_mask
+        visitor.octs = cur.my_octs
+        visitor.nocts = &cur.n_assigned
+        visitor.nfinest = &nfinest
         pos[0] = obj.DLE[0] + dds[0]/2.0
         for i in range(obj.nn[0]):
             pos[1] = obj.DLE[1] + dds[1]/2.0
@@ -170,19 +168,18 @@
                     o.domain = 1
                     obj.root_mesh[i][j][k] = o
                     cur.n_assigned += 1
-                    data.pos[0] = i
-                    data.pos[1] = j
-                    data.pos[2] = k
+                    visitor.pos[0] = i
+                    visitor.pos[1] = j
+                    visitor.pos[2] = k
                     # Always visit covered
                     selector.recursively_visit_octs(
                         obj.root_mesh[i][j][k],
-                        pos, dds, 0, oct_visitors.load_octree,
-                        &data, 1)
+                        pos, dds, 0, oct_visitors.load_octree, 1)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
         obj.nocts = cur.n_assigned
-        if obj.nocts * data.nz != ref_mask.size:
+        if obj.nocts * visitor.nz != ref_mask.size:
             raise KeyError(ref_mask.size, obj.nocts, obj.oref,
                 obj.partial_coverage)
         return obj
@@ -214,14 +211,12 @@
 
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
-                        int vc = -1):
+                        OctVisitor visitor, int vc = -1):
         cdef int i, j, k, n
         if vc == -1:
             vc = self.partial_coverage
-        data.global_index = -1
-        data.level = 0
+        visitor.global_index = -1
+        visitor.level = 0
         cdef np.float64_t pos[3]
         cdef np.float64_t dds[3]
         # This dds is the oct-width
@@ -236,12 +231,12 @@
                 for k in range(self.nn[2]):
                     if self.root_mesh[i][j][k] == NULL:
                         raise RuntimeError
-                    data.pos[0] = i
-                    data.pos[1] = j
-                    data.pos[2] = k
+                    visitor.pos[0] = i
+                    visitor.pos[1] = j
+                    visitor.pos[2] = k
                     selector.recursively_visit_octs(
                         self.root_mesh[i][j][k],
-                        pos, dds, 0, func, data, vc)
+                        pos, dds, 0, visitor, vc)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
@@ -325,10 +320,10 @@
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.num_domains, dtype="uint8")
-        cdef OctVisitorData data
-        self.setup_data(&data)
-        data.array = domain_mask.data
-        self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
+        cdef oct_visitors.IdentifyOcts visitor
+        visitor = oct_visitors.IdentifyOcts(self)
+        visitor.domain_mask = domain_mask
+        self.visit_all_octs(selector, visitor)
         cdef int i
         domain_ids = []
         for i in range(self.num_domains):
@@ -426,13 +421,13 @@
              int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_octs(self, domain_id)
-        cdef np.ndarray[np.uint8_t, ndim=1] coords
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells*data.nz), dtype="uint8")
-        data.array = <void *> coords.data
-        self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
-        return coords.astype("bool")
+        cdef np.ndarray[np.uint8_t, ndim=1] mask
+        cdef oct_visitors.MaskOcts visitor
+        visitor = oct_visitors.MaskOcts(self, domain_id)
+        mask = np.zeros((num_cells*visitor.nz), dtype="uint8")
+        visitor.mask = mask
+        self.visit_all_octs(selector, visitor)
+        return mask.astype("bool")
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -441,12 +436,12 @@
                 int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.ICoordsOcts visitor
+        visitor = oct_visitors.ICoordsOcts(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
-        data.array = <void *> coords.data
-        self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
+        visitor.icoords = coords
+        self.visit_all_octs(selector, visitor)
         return coords
 
     @cython.boundscheck(False)
@@ -457,13 +452,13 @@
         cdef int i
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.IResOcts visitor
+        visitor = oct_visitors.IResOcts(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(num_cells, dtype="int64")
-        data.array = <void *> res.data
-        self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
+        visitor.ires = res
+        self.visit_all_octs(selector, visitor)
         if self.level_offset > 0:
             for i in range(num_cells):
                 res[i] += self.level_offset
@@ -476,12 +471,12 @@
                 int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.FWidthOcts visitor
+        visitor = oct_visitors.FWidthOcts(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
         fwidth = np.empty((num_cells, 3), dtype="float64")
-        data.array = <void *> fwidth.data
-        self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
+        visitor.fwidth = fwidth
+        self.visit_all_octs(selector, visitor)
         cdef np.float64_t base_dx
         for i in range(3):
             base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
@@ -495,13 +490,13 @@
                 int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.FCoordsOcts visitor
+        visitor = oct_visitors.FCoordsOcts(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
-        data.array = <void *> coords.data
-        self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
+        visitor.fcoords = coords
+        self.visit_all_octs(selector, visitor)
         cdef int i
         cdef np.float64_t base_dx
         for i in range(3):
@@ -519,17 +514,15 @@
                       partial_coverage = self.partial_coverage)
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         # domain_id = -1 here, because we want *every* oct
-        cdef OctVisitorData data
-        self.setup_data(&data, -1)
-        data.oref = 0
-        data.nz = 1
+        cdef oct_visitors.StoreOctree visitor
+        visitor = oct_visitors.StoreOctree(self, -1)
+        visitor.oref = 0
+        visitor.nz = 1
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
-        ref_mask = np.zeros(self.nocts * data.nz, dtype="uint8") - 1
-        cdef void *p[1]
-        p[0] = ref_mask.data
-        data.array = p
+        ref_mask = np.zeros(self.nocts * visitor.nz, dtype="uint8") - 1
+        visitor.ref_mask = ref_mask
         # Enforce partial_coverage here
-        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        self.visit_all_octs(selector, visitor, 1)
         header['octree'] = ref_mask
         return header
 
@@ -552,48 +545,50 @@
                     order='C')
             else:
                 dest = np.zeros(num_cells, dtype=source.dtype, order='C')
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        data.index = offset
-        # We only need this so we can continue calculating the offset
-        data.dims = dims
-        cdef void *p[2]
-        p[0] = source.data
-        p[1] = dest.data
-        data.array = &p
-        cdef oct_visitor_function *func
+        cdef OctVisitor visitor
+        cdef oct_visitors.CopyArrayI64 visitor_i64
+        cdef oct_visitors.CopyArrayF64 visitor_f64
         if source.dtype != dest.dtype:
             raise RuntimeError
         if source.dtype == np.int64:
-            func = oct_visitors.copy_array_i64
+            visitor_i64 = oct_visitors.CopyArrayI64(self, domain_id)
+            visitor_i64.source = source
+            visitor_i64.dest = dest
+            visitor = visitor_i64
         elif source.dtype == np.float64:
-            func = oct_visitors.copy_array_f64
+            visitor_f64 = oct_visitors.CopyArrayF64(self, domain_id)
+            visitor_f64.source = source
+            visitor_f64.dest = dest
+            visitor = visitor_f64
         else:
             raise NotImplementedError
-        self.visit_all_octs(selector, func, &data)
-        if (data.global_index + 1) * data.nz * data.dims > source.size:
+        visitor.index = offset
+        # We only need this so we can continue calculating the offset
+        visitor.dims = dims
+        self.visit_all_octs(selector, visitor)
+        if (visitor.global_index + 1) * visitor.nz * visitor.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
-            print (data.global_index + 1) * data.nz * data.dims - source.size
+            print (visitor.global_index + 1) * visitor.nz * visitor.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
-        if data.index > dest.size:
+        if visitor.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
-            print data.index - dest.size
-            print (data.global_index + 1) * data.nz * data.dims, source.size
+            print visitor.index - dest.size
+            print (visitor.global_index + 1) * visitor.nz * visitor.dims, source.size
             print num_cells
             raise RuntimeError
         if num_cells >= 0:
             return dest
-        return data.index - offset
+        return visitor.index - offset
 
     def domain_ind(self, selector, int domain_id = -1):
         cdef np.ndarray[np.int64_t, ndim=1] ind
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        data.array = ind.data
-        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
+        cdef oct_visitors.IndexOcts visitor
+        visitor = oct_visitors.IndexOcts(self, domain_id)
+        visitor.oct_index = ind
+        self.visit_all_octs(selector, visitor)
         return ind
 
     @cython.boundscheck(False)
@@ -725,14 +720,23 @@
             levels[i] = 100
             file_inds[i] = -1
             cell_inds[i] = 9
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        cdef void *p[3]
-        p[0] = levels.data
-        p[1] = file_inds.data
-        p[2] = cell_inds.data
-        data.array = p
-        self.visit_all_octs(selector, self.fill_func, &data)
+        cdef oct_visitors.FillFileIndicesO visitor_o
+        cdef oct_visitors.FillFileIndicesR visitor_r
+        if self.fill_style == "r":
+            visitor_r = oct_visitors.FillFileIndicesR(self, domain_id)
+            visitor_r.levels = levels
+            visitor_r.file_inds = file_inds
+            visitor_r.cell_inds = cell_inds
+            visitor = visitor_r
+        elif self.fill_style == "o":
+            visitor_o = oct_visitors.FillFileIndicesO(self, domain_id)
+            visitor_o.levels = levels
+            visitor_o.file_inds = file_inds
+            visitor_o.cell_inds = cell_inds
+            visitor = visitor_o
+        else:
+            raise RuntimeError
+        self.visit_all_octs(selector, visitor)
         return levels, cell_inds, file_inds
 
     def domain_count(self, SelectorObject selector):
@@ -740,10 +744,10 @@
         cdef np.int64_t i, num_octs
         cdef np.ndarray[np.int64_t, ndim=1] domain_counts
         domain_counts = np.zeros(self.num_domains, dtype="int64")
-        cdef OctVisitorData data
-        self.setup_data(&data, -1)
-        data.array = <void*> domain_counts.data
-        self.visit_all_octs(selector, oct_visitors.count_by_domain, &data)
+        cdef oct_visitors.CountByDomain visitor
+        visitor = oct_visitors.CountByDomain(self, -1)
+        visitor.domain_counts = domain_counts
+        self.visit_all_octs(selector, visitor)
         return domain_counts
 
     @cython.boundscheck(False)
@@ -771,10 +775,10 @@
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
-        cdef OctVisitorData data
-        self.setup_data(&data, 1)
-        self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        assert ((data.global_index+1)*data.nz == data.index)
+        cdef oct_visitors.AssignDomainInd visitor
+        visitor = oct_visitors.AssignDomainInd(self, 1)
+        self.visit_all_octs(selector, visitor)
+        assert ((visitor.global_index+1)*visitor.nz == visitor.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao
@@ -809,7 +813,7 @@
         for i in range(3):
             self.DLE[i] = domain_left_edge[i] #0
             self.DRE[i] = domain_right_edge[i] #num_grid
-        self.fill_func = oct_visitors.fill_file_indices_rind
+        self.fill_style = "r"
 
     @classmethod
     def load_octree(self, header):
@@ -854,13 +858,12 @@
 
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int vc = -1):
         cdef int i, j, k, n
         cdef np.int64_t key, ukey
-        data.global_index = -1
-        data.level = 0
+        visitor.global_index = -1
+        visitor.level = 0
         if vc == -1:
             vc = self.partial_coverage
         cdef np.float64_t pos[3]
@@ -873,11 +876,11 @@
         for i in range(self.num_root):
             o = self.root_nodes[i].node
             key = self.root_nodes[i].key
-            self.key_to_ipos(key, data.pos)
+            self.key_to_ipos(key, visitor.pos)
             for j in range(3):
-                pos[j] = self.DLE[j] + (data.pos[j] + 0.5) * dds[j]
+                pos[j] = self.DLE[j] + (visitor.pos[j] + 0.5) * dds[j]
             selector.recursively_visit_octs(
-                o, pos, dds, 0, func, data, vc)
+                o, pos, dds, 0, visitor, vc)
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return 0 # We no longer have a domain offset.
@@ -931,7 +934,7 @@
         OctreeContainer.__init__(self, oct_domain_dimensions,
                 domain_left_edge, domain_right_edge, partial_coverage,
                  over_refine)
-        self.fill_func = oct_visitors.fill_file_indices_rind
+        self.fill_style = "r"
 
 cdef OctList *OctList_subneighbor_find(OctList *olist, Oct *top,
                                        int i, int j, int k):
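
With setup_data and the void* array slot gone, the callers in oct_container.pyx now build a concrete visitor, attach the output buffer as a typed attribute, and pass the object itself to visit_all_octs. A rough pure-Python analogue of that calling convention (the names below are illustrative sketches, not the actual yt API):

    import numpy as np

    class OctVisitorSketch(object):
        # Shared bookkeeping that the real cdef class carries through the traversal.
        def __init__(self, domain_id=-1):
            self.index = 0
            self.domain = domain_id

        def visit(self, o, selected):
            raise NotImplementedError

    class ICoordsSketch(OctVisitorSketch):
        # The output buffer lives on the visitor instead of a shared void* slot.
        def __init__(self, icoords, domain_id=-1):
            OctVisitorSketch.__init__(self, domain_id)
            self.icoords = icoords

        def visit(self, o, selected):
            if not selected:
                return
            self.icoords[self.index] = o["pos"]
            self.index += 1

    def visit_all_octs_sketch(octs, visitor):
        # Stand-in for the real traversal: hand every oct to visitor.visit().
        for o in octs:
            visitor.visit(o, 1)

    octs = [{"pos": (0, 0, 0)}, {"pos": (1, 0, 0)}]
    coords = np.empty((len(octs), 3), dtype="int64")
    visitor = ICoordsSketch(coords)
    visit_all_octs_sketch(octs, visitor)
    # coords now holds one integer position per visited oct.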

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -16,8 +16,6 @@
 
 cimport numpy as np
 
-cdef class OctreeContainer
-
 cdef struct Oct
 cdef struct Oct:
     np.int64_t file_ind     # index with respect to the order in which it was
@@ -33,23 +31,21 @@
     np.int64_t padding
 
 cdef class OctVisitor:
-    np.uint64_t index
-    np.uint64_t last
-    np.int64_t global_index
-    np.int64_t pos[3]       # position in ints
-    np.uint8_t ind[3]              # cell position
-    int dims
-    np.int32_t domain
-    np.int8_t level
-    np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
-                   # To calculate nzones, 1 << (oref * 3)
-    np.int32_t nz
+    cdef np.uint64_t index
+    cdef np.uint64_t last
+    cdef np.int64_t global_index
+    cdef np.int64_t pos[3]       # position in ints
+    cdef np.uint8_t ind[3]              # cell position
+    cdef int dims
+    cdef np.int32_t domain
+    cdef np.int8_t level
+    cdef np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
+                        # To calculate nzones, 1 << (oref * 3)
+    cdef np.int32_t nz
 
     # There will also be overrides for the memoryviews associated with the
     # specific instance.
 
-    cdef __init__(self, OctreeContainer octree)
-                            
     cdef void visit(self, Oct*, np.uint8_t selected)
 
     cdef inline int oind(self):
@@ -60,69 +56,69 @@
         cdef int d = (1 << self.oref)
         return (((self.ind[2]*d)+self.ind[1])*d+self.ind[0])
 
-cdef class CountTotalOcts(OctVisitor)
+cdef class CountTotalOcts(OctVisitor):
+    pass
 
-cdef class CountTotalCells(OctVisitor)
+cdef class CountTotalCells(OctVisitor):
+    pass
 
 cdef class MarkOcts(OctVisitor):
     # Unused
-    np.uint8_t[:,:,:,:] mark
+    cdef np.uint8_t[:,:,:,:] mark
 
 cdef class MaskOcts(OctVisitor):
-    np.uint8_t[:,:,:,:] mask
+    cdef np.uint8_t[:,:,:,:] mask
 
 cdef class IndexOcts(OctVisitor):
-    np.int64_t[:] oct_index
+    cdef np.int64_t[:] oct_index
 
 cdef class ICoordsOcts(OctVisitor):
-    np.int64_t[:,3] icoords
+    cdef np.int64_t[:,:] icoords
 
 cdef class IResOcts(OctVisitor):
-    np.int64_t[:,3] ires
+    cdef np.int64_t[:,:] ires
 
 cdef class FCoordsOcts(OctVisitor):
-    np.float64_t[:,3] fcoords
+    cdef np.float64_t[:,:] fcoords
 
 cdef class FWidthOcts(OctVisitor):
-    np.float64_t[:,3] fwidth
+    cdef np.float64_t[:,:] fwidth
 
-cdef fused numpy_dt:
-    np.float32_t
-    np.float64_t
-    np.int32_t
-    np.int64_t
+cdef class CopyArrayI64(OctVisitor):
+    cdef np.int64_t[:,:] source
+    cdef np.int64_t[:,:] dest
 
-cdef class CopyArray[numpy_dt](OctVisitor):
-    numpy_dt[:,:] source
-    numpy_dt[:,:] dest
+cdef class CopyArrayF64(OctVisitor):
+    cdef np.int64_t[:,:] source
+    cdef np.int64_t[:,:] dest
 
 cdef class IdentifyOcts(OctVisitor):
-    np.uint64_t[:] domain_mask
+    cdef np.uint64_t[:] domain_mask
 
 cdef class AssignDomainInd(OctVisitor):
     pass
 
 cdef class FillFileIndicesO(OctVisitor):
-    np.uint8_t[:] levels
-    np.uint8_t[:] file_inds
-    np.uint8_t[:] cell_inds
+    cdef np.uint8_t[:] levels
+    cdef np.uint8_t[:] file_inds
+    cdef np.uint8_t[:] cell_inds
 
 cdef class FillFileIndicesR(OctVisitor):
-    np.uint8_t[:] levels
-    np.int64_t[:] file_inds
-    np.uint8_t[:] cell_inds
+    cdef np.uint8_t[:] levels
+    cdef np.int64_t[:] file_inds
+    cdef np.uint8_t[:] cell_inds
 
 cdef class CountByDomain(OctVisitor):
-    np.int64_t[:] domain_counts
+    cdef np.int64_t[:] domain_counts
 
 cdef class StoreOctree(OctVisitor):
-    np.uint8_t[:] ref_mask
+    cdef np.uint8_t[:] ref_mask
 
 cdef class LoadOctree(OctVisitor):
-    np.uint8_t[:] ref_mask
-    Oct[:] octs
-    np.int64_t nocts
-    np.int64_t nfinest
+    cdef np.uint8_t[:] ref_mask
+    cdef Oct* octs
+    cdef np.int64_t *nocts
+    cdef np.int64_t *nfinest
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
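
The inline helpers at the bottom of the .pxd flatten the per-axis cell index ind[0..2] into a single offset; with d = 1 << oref the ordering shown above is ((ind[2]*d + ind[1])*d + ind[0]). A small sanity check of that arithmetic in plain Python (the function name here is made up):

    def rind_sketch(ind, oref=1):
        # Flatten an (i, j, k) cell index inside one oct, following the
        # ((ind[2]*d + ind[1])*d + ind[0]) ordering shown in the .pxd above.
        d = 1 << oref
        return ((ind[2] * d) + ind[1]) * d + ind[0]

    # oref = 1 gives d = 2, so one oct holds 2*2*2 = 8 cells and the flat
    # indices cover 0..7 exactly once:
    cells = [(i, j, k) for i in range(2) for j in range(2) for k in range(2)]
    assert sorted(rind_sketch(c) for c in cells) == list(range(8))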

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -19,11 +19,12 @@
 import numpy
 from fp_utils cimport *
 from libc.stdlib cimport malloc, free
+from yt.geometry.oct_container cimport OctreeContainer
 
 # Now some visitor functions
 
-cdef class OctVisitor
-    cdef __init__(self, OctreeContainer octree, int domain_id = -1):
+cdef class OctVisitor:
+    def __init__(self, OctreeContainer octree, int domain_id = -1):
         cdef int i
         self.index = 0
         self.last = -1
@@ -31,17 +32,16 @@
         for i in range(3):
             self.pos[i] = -1
             self.ind[i] = -1
-        self.array = NULL
         self.dims = 0
         self.domain = domain_id
         self.level = -1
         self.oref = self.oref
         self.nz = (1 << (self.oref*3))
 
-    cdef void visit(self, Oct* o, np.uint8_t, selected):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
         raise NotImplementedError
 
-cdef class CopyArray[numpy_dt](OctVisitor):
+cdef class CopyArrayI64(OctVisitor):
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.
@@ -50,7 +50,21 @@
         # There are this many records between "octs"
         cdef np.int64_t index = (self.global_index * self.nz)*self.dims
         # We may want to change the way this is structured to be N,2,2,2,dim
-        index += oind(data)*self.dims
+        index += self.oind()*self.dims
+        for i in range(self.dims):
+            self.dest[self.index, i] = self.source[index, i]
+        self.index += self.dims
+
+cdef class CopyArrayF64(OctVisitor):
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We should always have global_index less than our source.
+        # "last" here tells us the dimensionality of the array.
+        if selected == 0: return
+        cdef int i
+        # There are this many records between "octs"
+        cdef np.int64_t index = (self.global_index * self.nz)*self.dims
+        # We may want to change the way this is structured to be N,2,2,2,dim
+        index += self.oind()*self.dims
         for i in range(self.dims):
             self.dest[self.index, i] = self.source[index, i]
         self.index += self.dims
@@ -119,7 +133,7 @@
             self.fcoords[self.index,i] = (c + 0.5) * dx
         self.index += 1
 
-cdef class FCoordsOcts(OctVisitor):
+cdef class FWidthOcts(OctVisitor):
     @cython.cdivision(True)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Note that this does not actually give the correct floating point
@@ -153,7 +167,7 @@
         if selected == 0: return
         self.levels[self.index] = self.level
         self.file_inds[self.index] = o.file_ind
-        self.cell_inds[self.index] = oind(data)
+        self.cell_inds[self.index] = self.oind()
         self.index +=1
 
 cdef class FillFileIndicesR(OctVisitor):
@@ -163,7 +177,7 @@
         if selected == 0: return
         self.levels[self.index] = self.level
         self.file_inds[self.index] = o.file_ind
-        self.cell_inds[self.index] = rind(data)
+        self.cell_inds[self.index] = self.rind()
         self.index +=1
 
 cdef class CountByDomain(OctVisitor):
@@ -186,7 +200,6 @@
 
 cdef class LoadOctree(OctVisitor):
     cdef void visit(self, Oct* o, np.uint8_t selected):
-        cdef np.int64_t *nfinest = <np.int64_t*> p[3]
         cdef int i, ii
         ii = cind(self.ind[0], self.ind[1], self.ind[2])
         if self.ref_mask[self.index] == 0:
@@ -194,9 +207,9 @@
             # nfinest for our tastes.
             if o.file_ind == -1:
                 o.children = NULL
-                o.file_ind = self.nfinest
+                o.file_ind = self.nfinest[0]
                 o.domain = 1
-                self.nfinest += 1
+                self.nfinest[0] += 1
         elif self.ref_mask[self.index] > 0:
             if self.ref_mask[self.index] != 1 and self.ref_mask[self.index] != 8:
                 print "ARRAY CLUE: ", self.ref_mask[self.index], "UNKNOWN"
@@ -206,12 +219,12 @@
                 for i in range(8):
                     o.children[i] = NULL
             for i in range(8):
-                o.children[ii + i] = &self.octs[self.nocts]
-                o.children[ii + i].domain_ind = self.nocts
+                o.children[ii + i] = &self.octs[self.nocts[0]]
+                o.children[ii + i].domain_ind = self.nocts[0]
                 o.children[ii + i].file_ind = -1
                 o.children[ii + i].domain = -1
                 o.children[ii + i].children = NULL
-                self.nocts += 1
+                self.nocts[0] += 1
         else:
             print "SOMETHING IS AMISS", self.index
             raise RuntimeError
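
In the rewritten LoadOctree, ref_mask becomes a memoryview but nocts and nfinest stay raw np.int64_t pointers, which is why the body dereferences them with [0]: those counters belong to the calling container and must be updated in place across the whole recursion. A pure-Python analogue of that sharing, using one-element lists in place of pointers (sketch only, not the yt code):

    class LoadOctreeSketch(object):
        # Illustrative only: one-element lists stand in for the np.int64_t*
        # counters that the Cython class dereferences with [0].
        def __init__(self, nocts, nfinest):
            self.nocts = nocts        # shared count of allocated octs
            self.nfinest = nfinest    # shared count of finest (leaf) octs

        def visit_leaf(self):
            # ref_mask[index] == 0 branch: hand out the next file index.
            file_ind = self.nfinest[0]
            self.nfinest[0] += 1
            return file_ind

        def visit_refined(self):
            # ref_mask[index] > 0 branch: allocate 8 children one by one.
            for _ in range(8):
                self.nocts[0] += 1

    nocts, nfinest = [0], [0]
    v = LoadOctreeSketch(nocts, nfinest)
    v.visit_refined()
    v.visit_leaf()
    assert (nocts[0], nfinest[0]) == (8, 1)   # the caller sees the updates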

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -15,15 +15,13 @@
 #-----------------------------------------------------------------------------
 
 from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX
-cimport oct_visitors
 from oct_visitors cimport cind
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
 cimport numpy as np
 import numpy as np
-from selection_routines cimport SelectorObject, \
-    OctVisitorData, oct_visitor_function
+from selection_routines cimport SelectorObject
 cimport cython
 
 cdef class ParticleOctreeContainer(OctreeContainer):

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -15,8 +15,7 @@
 #-----------------------------------------------------------------------------
 
 cimport numpy as np
-from oct_visitors cimport Oct, OctVisitorData, \
-    oct_visitor_function
+from oct_visitors cimport Oct, OctVisitor
 from grid_visitors cimport GridTreeNode, GridVisitorData, \
     grid_visitor_function, check_child_masked
 
@@ -41,12 +40,11 @@
     cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int visit_covered = ?)
-    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+    cdef void visit_oct_cells(self, Oct *root, Oct *ch,
                               np.float64_t spos[3], np.float64_t sdds[3],
-                              oct_visitor_function *func, int i, int j, int k)
+                              OctVisitor visitor, int i, int j, int k)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r da665a962b9c5b5d844c6619a8ce752ed32c02d5 -r 6ee902fd7f0428c63251c386f32395265145d762 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -163,16 +163,16 @@
         return gridi.astype("bool")
 
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
-        cdef OctVisitorData data
-        octree.setup_data(&data, domain_id)
-        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
-        return data.index
+        cdef oct_visitors.CountTotalOcts visitor
+        visitor = oct_visitors.CountTotalOcts(octree, domain_id)
+        octree.visit_all_octs(self, visitor)
+        return visitor.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
-        cdef OctVisitorData data
-        octree.setup_data(&data, domain_id)
-        octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
-        return data.index
+        cdef oct_visitors.CountTotalCells visitor
+        visitor = oct_visitors.CountTotalCells(octree, domain_id)
+        octree.visit_all_octs(self, visitor)
+        return visitor.index
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -180,8 +180,7 @@
     cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int visit_covered = 0):
         # visit_covered tells us whether this octree supports partial
         # refinement.  If it does, we need to handle this specially -- first
@@ -203,7 +202,7 @@
             RE[i] = pos[i] + dds[i]/2.0
         #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]
         res = self.select_grid(LE, RE, level, root)
-        if res == 1 and data.domain > 0 and root.domain != data.domain:
+        if res == 1 and visitor.domain > 0 and root.domain != visitor.domain:
             res = -1
         cdef int increment = 1
         cdef int next_level, this_level
@@ -243,52 +242,52 @@
                         if root.children != NULL:
                             ch = root.children[cind(i, j, k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
-                            # Note that data.pos is always going to be the
+                            # Note that visitor.pos is always going to be the
                             # position of the Oct -- it is *not* always going
                             # to be the same as the position of the cell under
                             # investigation.
-                            data.pos[0] = (data.pos[0] << 1) + i
-                            data.pos[1] = (data.pos[1] << 1) + j
-                            data.pos[2] = (data.pos[2] << 1) + k
-                            data.level += 1
+                            visitor.pos[0] = (visitor.pos[0] << 1) + i
+                            visitor.pos[1] = (visitor.pos[1] << 1) + j
+                            visitor.pos[2] = (visitor.pos[2] << 1) + k
+                            visitor.level += 1
                             self.recursively_visit_octs(
-                                ch, spos, sdds, level + 1, func, data,
+                                ch, spos, sdds, level + 1, visitor,
                                 visit_covered)
-                            data.pos[0] = (data.pos[0] >> 1)
-                            data.pos[1] = (data.pos[1] >> 1)
-                            data.pos[2] = (data.pos[2] >> 1)
-                            data.level -= 1
-                        elif this_level == 1 and data.oref > 0:
-                            data.global_index += increment
+                            visitor.pos[0] = (visitor.pos[0] >> 1)
+                            visitor.pos[1] = (visitor.pos[1] >> 1)
+                            visitor.pos[2] = (visitor.pos[2] >> 1)
+                            visitor.level -= 1
+                        elif this_level == 1 and visitor.oref > 0:
+                            visitor.global_index += increment
                             increment = 0
-                            self.visit_oct_cells(data, root, ch, spos, sdds,
-                                                 func, i, j, k)
+                            self.visit_oct_cells(root, ch, spos, sdds,
+                                                 visitor, i, j, k)
                         elif this_level == 1 and increment == 1:
-                            data.global_index += increment
+                            visitor.global_index += increment
                             increment = 0
-                            data.ind[0] = data.ind[1] = data.ind[2] = 0
-                            func(root, data, 1)
+                            visitor.ind[0] = visitor.ind[1] = visitor.ind[2] = 0
+                            visitor.visit(root, 1)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
             this_level = 0 # We turn this off for the second pass.
             iter += 1
 
-    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+    cdef void visit_oct_cells(self, Oct *root, Oct *ch,
                               np.float64_t spos[3], np.float64_t sdds[3],
-                              oct_visitor_function *func, int i, int j, int k):
+                              OctVisitor visitor, int i, int j, int k):
         # We can short-circuit the whole process if data.oref == 1.
         # This saves us some funny-business.
         cdef int selected
-        if data.oref == 1:
+        if visitor.oref == 1:
             selected = self.select_cell(spos, sdds)
             if ch != NULL:
                 selected *= self.overlap_cells
-            # data.ind refers to the cell, not to the oct.
-            data.ind[0] = i
-            data.ind[1] = j
-            data.ind[2] = k
-            func(root, data, selected)
+            # visitor.ind refers to the cell, not to the oct.
+            visitor.ind[0] = i
+            visitor.ind[1] = j
+            visitor.ind[2] = k
+            visitor.visit(root, selected)
             return
         # Okay, now that we've got that out of the way, we have to do some
         # other checks here.  In this case, spos[] is the position of the
@@ -298,7 +297,7 @@
         cdef np.float64_t dds[3]
         cdef np.float64_t pos[3]
         cdef int ci, cj, ck
-        cdef int nr = (1 << (data.oref - 1))
+        cdef int nr = (1 << (visitor.oref - 1))
         for ci in range(3):
             dds[ci] = sdds[ci] / nr
         # Boot strap at the first index.
@@ -311,10 +310,10 @@
                     selected = self.select_cell(pos, dds)
                     if ch != NULL:
                         selected *= self.overlap_cells
-                    data.ind[0] = ci + i * nr
-                    data.ind[1] = cj + j * nr
-                    data.ind[2] = ck + k * nr
-                    func(root, data, selected)
+                    visitor.ind[0] = ci + i * nr
+                    visitor.ind[1] = cj + j * nr
+                    visitor.ind[2] = ck + k * nr
+                    visitor.visit(root, selected)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
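
Throughout the traversal, visitor.pos holds the oct's integer coordinates at the current level: descending into child (i, j, k) appends one bit per axis with (pos << 1) + i, and ascending strips it again with pos >> 1. A short worked example of that bookkeeping:

    # Descending one level into child (i, j, k) = (1, 0, 1):
    pos = [3, 2, 5]                     # oct position at the current level
    child = (1, 0, 1)
    down = [(p << 1) + c for p, c in zip(pos, child)]
    assert down == [7, 4, 11]           # position at the finer level
    # Ascending undoes it:
    up = [p >> 1 for p in down]
    assert up == pos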


https://bitbucket.org/yt_analysis/yt/commits/ef02c23e423e/
Changeset:   ef02c23e423e
Branch:      yt
User:        MatthewTurk
Date:        2015-12-09 22:02:07+00:00
Summary:     Optimize
Affected #:  2 files

diff -r 6ee902fd7f0428c63251c386f32395265145d762 -r ef02c23e423ec70131de8b6e30d26e591520b35e yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -76,7 +76,7 @@
     cdef np.int64_t[:,:] icoords
 
 cdef class IResOcts(OctVisitor):
-    cdef np.int64_t[:,:] ires
+    cdef np.int64_t[:] ires
 
 cdef class FCoordsOcts(OctVisitor):
     cdef np.float64_t[:,:] fcoords

diff -r 6ee902fd7f0428c63251c386f32395265145d762 -r ef02c23e423ec70131de8b6e30d26e591520b35e yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -42,6 +42,8 @@
         raise NotImplementedError
 
 cdef class CopyArrayI64(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.
@@ -56,6 +58,8 @@
         self.index += self.dims
 
 cdef class CopyArrayF64(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.
@@ -70,6 +74,8 @@
         self.index += self.dims
 
 cdef class CountTotalOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Count even if not selected.
         # Number of *octs* visited.
@@ -78,11 +84,15 @@
             self.last = o.domain_ind
 
 cdef class CountTotalCells(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Number of *cells* visited and selected.
         self.index += selected
 
 cdef class MarkOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We mark them even if they are not selected
         if self.last != o.domain_ind:
@@ -91,11 +101,15 @@
         self.mark[self.index, self.ind[0], self.ind[1], self.ind[2]] = 1
 
 cdef class MaskOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         if selected == 0: return
         self.mask[self.global_index, self.ind[0], self.ind[1], self.ind[2]] = 1
 
 cdef class IndexOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Note that we provide an index even if the cell is not selected.
         cdef np.int64_t *arr
@@ -105,14 +119,18 @@
             self.index += 1
 
 cdef class ICoordsOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         if selected == 0: return
         cdef int i
         for i in range(3):
-            self.coords[self.index,i] = (self.pos[i] << self.oref) + self.ind[i]
+            self.icoords[self.index,i] = (self.pos[i] << self.oref) + self.ind[i]
         self.index += 1
 
 cdef class IResOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         if selected == 0: return
         self.ires[self.index] = self.level
@@ -120,6 +138,8 @@
 
 cdef class FCoordsOcts(OctVisitor):
     @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Note that this does not actually give the correct floating point
         # coordinates.  It gives them in some unit system where the domain is 1.0
@@ -135,6 +155,8 @@
 
 cdef class FWidthOcts(OctVisitor):
     @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Note that this does not actually give the correct floating point
         # coordinates.  It gives them in some unit system where the domain is 1.0
@@ -148,6 +170,8 @@
         self.index += 1
 
 cdef class IdentifyOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We assume that our domain has *already* been selected by, which means
         # we'll get all cells within the domain for a by-domain selector and all
@@ -156,11 +180,15 @@
         self.domain_mask[o.domain - 1] = 1
 
 cdef class AssignDomainInd(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         o.domain_ind = self.global_index
         self.index += 1
 
 cdef class FillFileIndicesO(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We fill these arrays, then inside the level filler we use these as
         # indices as we fill a second array from the self.
@@ -171,6 +199,8 @@
         self.index +=1
 
 cdef class FillFileIndicesR(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We fill these arrays, then inside the level filler we use these as
         # indices as we fill a second array from the self.
@@ -181,12 +211,16 @@
         self.index +=1
 
 cdef class CountByDomain(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         if selected == 0: return
         # NOTE: We do this for every *cell*.
         self.domain_counts[o.domain - 1] += 1
 
 cdef class StoreOctree(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         cdef np.uint8_t res, ii
         ii = cind(self.ind[0], self.ind[1], self.ind[2])
@@ -199,6 +233,8 @@
         self.index += 1
 
 cdef class LoadOctree(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         cdef int i, ii
         ii = cind(self.ind[0], self.ind[1], self.ind[2])


https://bitbucket.org/yt_analysis/yt/commits/81c7c33a1d65/
Changeset:   81c7c33a1d65
Branch:      yt
User:        MatthewTurk
Date:        2015-12-09 22:48:30+00:00
Summary:     Passes geometry tests.
Affected #:  2 files

diff -r ef02c23e423ec70131de8b6e30d26e591520b35e -r 81c7c33a1d650a36b9622c1f05a66b0d4e1ea739 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -174,7 +174,7 @@
                     # Always visit covered
                     selector.recursively_visit_octs(
                         obj.root_mesh[i][j][k],
-                        pos, dds, 0, oct_visitors.load_octree, 1)
+                        pos, dds, 0, visitor, 1)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]

diff -r ef02c23e423ec70131de8b6e30d26e591520b35e -r 81c7c33a1d650a36b9622c1f05a66b0d4e1ea739 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -35,7 +35,7 @@
         self.dims = 0
         self.domain = domain_id
         self.level = -1
-        self.oref = self.oref
+        self.oref = octree.oref
         self.nz = (1 << (self.oref*3))
 
     cdef void visit(self, Oct* o, np.uint8_t selected):


https://bitbucket.org/yt_analysis/yt/commits/9cecd46bfaf6/
Changeset:   9cecd46bfaf6
Branch:      yt
User:        MatthewTurk
Date:        2015-12-09 22:50:54+00:00
Summary:     Masking should have different array shape.
Affected #:  1 file

diff -r 81c7c33a1d650a36b9622c1f05a66b0d4e1ea739 -r 9cecd46bfaf6691fb5904c483913d276ad9a5cc7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -424,7 +424,8 @@
         cdef np.ndarray[np.uint8_t, ndim=1] mask
         cdef oct_visitors.MaskOcts visitor
         visitor = oct_visitors.MaskOcts(self, domain_id)
-        mask = np.zeros((num_cells*visitor.nz), dtype="uint8")
+        cdef int ns = 1 << self.oref
+        mask = np.zeros((num_cells, ns, ns, ns), dtype="uint8")
         visitor.mask = mask
         self.visit_all_octs(selector, visitor)
         return mask.astype("bool")
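
With ns = 1 << self.oref, the mask is now one ns x ns x ns block per counted oct instead of a flat vector; for the default over-refinement of 1 that is a (num_cells, 2, 2, 2) uint8 array of the same total size as before. A quick check of the arithmetic (num_cells below is a made-up value standing in for count_octs):

    import numpy as np
    oref = 1                      # default over-refinement
    ns = 1 << oref                # 2 cells per oct edge
    num_cells = 5                 # hypothetical count from count_octs()
    mask = np.zeros((num_cells, ns, ns, ns), dtype="uint8")
    assert mask.size == num_cells * ns ** 3   # same total size as the old flat array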


https://bitbucket.org/yt_analysis/yt/commits/298252ca425b/
Changeset:   298252ca425b
Branch:      yt
User:        MatthewTurk
Date:        2015-12-10 01:06:18+00:00
Summary:     This should fix all the non-CopyArray[IF]64 errors.
Affected #:  2 files

diff -r 9cecd46bfaf6691fb5904c483913d276ad9a5cc7 -r 298252ca425bc54c1983a18d4ab5843131ef7971 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -421,7 +421,7 @@
              int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_octs(self, domain_id)
-        cdef np.ndarray[np.uint8_t, ndim=1] mask
+        cdef np.ndarray[np.uint8_t, ndim=4] mask
         cdef oct_visitors.MaskOcts visitor
         visitor = oct_visitors.MaskOcts(self, domain_id)
         cdef int ns = 1 << self.oref

diff -r 9cecd46bfaf6691fb5904c483913d276ad9a5cc7 -r 298252ca425bc54c1983a18d4ab5843131ef7971 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -93,7 +93,7 @@
     cdef np.int64_t[:,:] dest
 
 cdef class IdentifyOcts(OctVisitor):
-    cdef np.uint64_t[:] domain_mask
+    cdef np.uint8_t[:] domain_mask
 
 cdef class AssignDomainInd(OctVisitor):
     pass


https://bitbucket.org/yt_analysis/yt/commits/fd58e6084669/
Changeset:   fd58e6084669
Branch:      yt
User:        MatthewTurk
Date:        2015-12-10 20:00:34+00:00
Summary:     Failed attempt to fix CopyArray.
Affected #:  3 files

diff -r 298252ca425bc54c1983a18d4ab5843131ef7971 -r fd58e6084669f758ef82e61ff9131e393a359905 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -541,11 +541,11 @@
             # means we actually do want the number of Octs, not the number of
             # cells.
             num_cells = selector.count_oct_cells(self, domain_id)
-            if dims > 1:
-                dest = np.zeros((num_cells, dims), dtype=source.dtype,
-                    order='C')
-            else:
-                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
+            dest = np.zeros((num_cells, dims), dtype=source.dtype,
+                            order='C')
+        dest = np.atleast_2d(dest)
+        if dims != 1:
+            raise RuntimeError
         cdef OctVisitor visitor
         cdef oct_visitors.CopyArrayI64 visitor_i64
         cdef oct_visitors.CopyArrayF64 visitor_f64

diff -r 298252ca425bc54c1983a18d4ab5843131ef7971 -r fd58e6084669f758ef82e61ff9131e393a359905 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -85,12 +85,12 @@
     cdef np.float64_t[:,:] fwidth
 
 cdef class CopyArrayI64(OctVisitor):
-    cdef np.int64_t[:,:] source
+    cdef np.int64_t[:,:,:,:,:] source
     cdef np.int64_t[:,:] dest
 
 cdef class CopyArrayF64(OctVisitor):
-    cdef np.int64_t[:,:] source
-    cdef np.int64_t[:,:] dest
+    cdef np.float64_t[:,:,:,:] source
+    cdef np.float64_t[:,:] dest
 
 cdef class IdentifyOcts(OctVisitor):
     cdef np.uint8_t[:] domain_mask

diff -r 298252ca425bc54c1983a18d4ab5843131ef7971 -r fd58e6084669f758ef82e61ff9131e393a359905 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -50,27 +50,23 @@
         if selected == 0: return
         cdef int i
         # There are this many records between "octs"
-        cdef np.int64_t index = (self.global_index * self.nz)*self.dims
-        # We may want to change the way this is structured to be N,2,2,2,dim
-        index += self.oind()*self.dims
-        for i in range(self.dims):
-            self.dest[self.index, i] = self.source[index, i]
+        self.dest[self.index] = self.source[
+                self.ind[0], self.ind[1], self.ind[2],
+                self.global_index]
         self.index += self.dims
 
 cdef class CopyArrayF64(OctVisitor):
-    @cython.boundscheck(False)
-    @cython.initializedcheck(False)
+    #@cython.boundscheck(False)
+    #@cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.
         if selected == 0: return
         cdef int i
         # There are this many records between "octs"
-        cdef np.int64_t index = (self.global_index * self.nz)*self.dims
-        # We may want to change the way this is structured to be N,2,2,2,dim
-        index += self.oind()*self.dims
-        for i in range(self.dims):
-            self.dest[self.index, i] = self.source[index, i]
+        self.dest[self.index] = self.source[
+                self.ind[0], self.ind[1], self.ind[2],
+                self.global_index]
         self.index += self.dims
 
 cdef class CountTotalOcts(OctVisitor):


https://bitbucket.org/yt_analysis/yt/commits/57f8ed655563/
Changeset:   57f8ed655563
Branch:      yt
User:        MatthewTurk
Date:        2015-12-26 01:56:32+00:00
Summary:     Fix the copyarray routines for real this time.  Cross your fingers.
Affected #:  3 files

diff -r fd58e6084669f758ef82e61ff9131e393a359905 -r 57f8ed655563542e3ff3964f69b85e97479f2571 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -543,9 +543,14 @@
             num_cells = selector.count_oct_cells(self, domain_id)
             dest = np.zeros((num_cells, dims), dtype=source.dtype,
                             order='C')
-        dest = np.atleast_2d(dest)
         if dims != 1:
             raise RuntimeError
+        # Just make sure that we're in the right shape.  Ideally this will not
+        # duplicate memory.  Since we're in Cython, we want to avoid modifying
+        # the .shape attributes directly.
+        dest = dest.reshape((num_cells, 1))
+        source = source.reshape((source.shape[0], source.shape[1],
+                    source.shape[2], source.shape[3], dims))
         cdef OctVisitor visitor
         cdef oct_visitors.CopyArrayI64 visitor_i64
         cdef oct_visitors.CopyArrayF64 visitor_f64

diff -r fd58e6084669f758ef82e61ff9131e393a359905 -r 57f8ed655563542e3ff3964f69b85e97479f2571 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -85,11 +85,11 @@
     cdef np.float64_t[:,:] fwidth
 
 cdef class CopyArrayI64(OctVisitor):
-    cdef np.int64_t[:,:,:,:,:] source
+    cdef np.int64_t[:,:,:,:,:,:] source
     cdef np.int64_t[:,:] dest
 
 cdef class CopyArrayF64(OctVisitor):
-    cdef np.float64_t[:,:,:,:] source
+    cdef np.float64_t[:,:,:,:,:] source
     cdef np.float64_t[:,:] dest
 
 cdef class IdentifyOcts(OctVisitor):

diff -r fd58e6084669f758ef82e61ff9131e393a359905 -r 57f8ed655563542e3ff3964f69b85e97479f2571 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -48,12 +48,11 @@
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.
         if selected == 0: return
-        cdef int i
         # There are this many records between "octs"
-        self.dest[self.index] = self.source[
+        self.dest[self.index, :] = self.source[
                 self.ind[0], self.ind[1], self.ind[2],
-                self.global_index]
-        self.index += self.dims
+                self.global_index, :]
+        self.index += 1
 
 cdef class CopyArrayF64(OctVisitor):
     #@cython.boundscheck(False)
@@ -62,12 +61,11 @@
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.
         if selected == 0: return
-        cdef int i
         # There are this many records between "octs"
-        self.dest[self.index] = self.source[
+        self.dest[self.index, :] = self.source[
                 self.ind[0], self.ind[1], self.ind[2],
-                self.global_index]
-        self.index += self.dims
+                self.global_index, :]
+        self.index += 1
 
 cdef class CountTotalOcts(OctVisitor):
     @cython.boundscheck(False)
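
After these reshapes, dest is always (num_cells, dims) and source keeps its 4-D oct layout with a trailing dims axis, so each selected cell is copied with a single slice assignment keyed by the cell index and global_index. A small NumPy sketch of that layout (the sizes and visit order below are arbitrary, chosen only to illustrate the indexing):

    import numpy as np
    nz1, n_oct, dims = 2, 3, 1           # hypothetical sizes (oref = 1, scalar field)
    source = np.arange(nz1 * nz1 * nz1 * n_oct, dtype="float64")
    source = source.reshape((nz1, nz1, nz1, n_oct, dims))
    num_cells = nz1 ** 3 * n_oct
    dest = np.zeros((num_cells, dims), dtype="float64")

    index = 0
    for global_index in range(n_oct):    # an arbitrary visit order, for illustration
        for k in range(nz1):
            for j in range(nz1):
                for i in range(nz1):
                    # Mirrors dest[index, :] = source[ind[0], ind[1], ind[2], global_index, :]
                    dest[index, :] = source[i, j, k, global_index, :]
                    index += 1
    assert dest.size == source.size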


https://bitbucket.org/yt_analysis/yt/commits/7647e5c29854/
Changeset:   7647e5c29854
Branch:      yt
User:        MatthewTurk
Date:        2015-12-27 01:37:07+00:00
Summary:     Attempt to undo some of the pointer casting mess.
Affected #:  2 files

diff -r 57f8ed655563542e3ff3964f69b85e97479f2571 -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -90,10 +90,11 @@
         return self._num_zones + 2*self._num_ghost_zones
 
     def _reshape_vals(self, arr):
-        if len(arr.shape) == 4 and arr.flags["F_CONTIGUOUS"]:
-            return arr
         nz = self.nz
-        n_oct = arr.shape[0] / (nz**3.0)
+        if len(arr.shape) <= 2:
+            n_oct = arr.shape[0] / (nz**3.0)
+        else:
+            n_oct = max(arr.shape)
         if arr.size == nz*nz*nz*n_oct:
             new_shape = (nz, nz, nz, n_oct)
         elif arr.size == nz*nz*nz*n_oct * 3:
@@ -115,10 +116,9 @@
 
     def select_blocks(self, selector):
         mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
-        mask = self._reshape_vals(mask)
         slicer = OctreeSubsetBlockSlice(self)
         for i, sl in slicer:
-            yield sl, mask[:,:,:,i]
+            yield sl, mask[i,...]
 
     def select_tcoords(self, dobj):
         # These will not be pre-allocated, which can be a problem for speed and

diff -r 57f8ed655563542e3ff3964f69b85e97479f2571 -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -106,7 +106,6 @@
     @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # Note that we provide an index even if the cell is not selected.
-        cdef np.int64_t *arr
         if self.last != o.domain_ind:
             self.last = o.domain_ind
             self.oct_index[o.domain_ind] = self.index
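
The updated _reshape_vals infers n_oct from the incoming array itself: flat arrays divide out nz**3, while arrays that already carry extra dimensions take their largest axis as the oct count, and the result is reshaped to (nz, nz, nz, n_oct). A pure-Python paraphrase of that shape logic for a scalar field (sketch only, not the yt method itself; the 3-component branch is not reproduced):

    import numpy as np

    def reshape_vals_sketch(arr, nz=2):
        # Pure-Python paraphrase of the shape bookkeeping above (scalar
        # fields only).
        if len(arr.shape) <= 2:
            n_oct = arr.shape[0] // nz ** 3
        else:
            n_oct = max(arr.shape)
        if arr.size != nz * nz * nz * n_oct:
            raise RuntimeError
        return arr.reshape((nz, nz, nz, n_oct))

    flat = np.zeros(8 * 4)                     # 4 octs of 2**3 cells each
    assert reshape_vals_sketch(flat).shape == (2, 2, 2, 4)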


https://bitbucket.org/yt_analysis/yt/commits/95e93d609ddd/
Changeset:   95e93d609ddd
Branch:      yt
User:        MatthewTurk
Date:        2015-12-27 15:24:02+00:00
Summary:     Merging with upstream, including smoothing memoryviews
Affected #:  132 files

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -22,4 +22,21 @@
 ngoldbau at ucsc.edu = goldbaum at ucolick.org
 biondo at wisc.edu = Biondo at wisc.edu
 samgeen at googlemail.com = samgeen at gmail.com
-fbogert = fbogert at ucsc.edu
\ No newline at end of file
+fbogert = fbogert at ucsc.edu
+bwoshea = oshea at msu.edu
+mornkr at slac.stanford.edu = me at jihoonkim.org
+kbarrow = kssbarrow at gatech.edu
+kssbarrow at gmail.com = kssbarrow at gatech.edu
+kassbarrow at gmail.com = kssbarrow at gatech.edu
+antoine.strugarek at cea.fr = strugarek at astro.umontreal.ca
+rosen at ucolick.org = alrosen at ucsc.edu
+jzuhone = jzuhone at gmail.com
+karraki at nmsu.edu = karraki at gmail.com
+hckr at eml.cc = astrohckr at gmail.com
+julian3 at illinois.edu = astrohckr at gmail.com
+cosmosquark = bthompson2090 at gmail.com
+chris.m.malone at lanl.gov = chris.m.malone at gmail.com
+jnaiman at ucolick.org = jnaiman
+migueld.deval = miguel at archlinux.net
+slevy at ncsa.illinois.edu = salevy at illinois.edu
+malzraa at gmail.com = kellerbw at mcmaster.ca
\ No newline at end of file

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -57,6 +57,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/element_mappings.c
+yt/utilities/lib/mesh_construction.cpp
+yt/utilities/lib/mesh_samplers.cpp
+yt/utilities/lib/mesh_traversal.cpp
+yt/utilities/lib/mesh_intersection.cpp
 syntax: glob
 *.pyc
 .*.swp

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -4,20 +4,30 @@
                 Tom Abel (tabel at stanford.edu)
                 Gabriel Altay (gabriel.altay at gmail.com)
                 Kenza Arraki (karraki at gmail.com)
+                Kirk Barrow (kssbarrow at gatech.edu)
+                Ricarda Beckmann (Ricarda.Beckmann at astro.ox.ac.uk)
                 Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                André-Patrick Bubel (code at andre-bubel.de)
                 Pengfei Chen (madcpf at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
                 Miguel de Val-Borro (miguel.deval at gmail.com)
+                Bili Dong (qobilidop at gmail.com)
+                Nicholas Earl (nchlsearl at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
+                Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
+                Adam Ginsburg (keflavich at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
+                William Gray (graywilliamj at gmail.com)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Eric Hallman (hallman13 at gmail.com)
                 Cameron Hummels (chummels at gmail.com)
+                Anni Järvenpää (anni.jarvenpaa at gmail.com)
+                Allyson Julian (astrohckr at gmail.com)
                 Christian Karch (chiffre at posteo.de)
                 Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
@@ -25,11 +35,15 @@
                 Kacper Kowalik (xarthisius.kk at gmail.com)
                 Mark Krumholz (mkrumhol at ucsc.edu)
                 Michael Kuhlen (mqk at astro.berkeley.edu)
+                Meagan Lang (langmm.astro at gmail.com)
+                Doris Lee (dorislee at berkeley.edu)
                 Eve Lee (elee at cita.utoronto.ca)
                 Sam Leitner (sam.leitner at gmail.com)
+                Stuart Levy (salevy at illinois.edu)
                 Yuan Li (yuan at astro.columbia.edu)
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
+                Jonah Miller (jonah.maxwell.miller at gmail.com)
                 Chris Moody (cemoody at ucsc.edu)
                 Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
@@ -44,6 +58,7 @@
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
+                Chuck Rozhon (rozhon2 at illinois.edu)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
@@ -59,6 +74,7 @@
                 Ji Suoqing (jisuoqing at gmail.com)
                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
                 Benjamin Thompson (bthompson2090 at gmail.com)
+                Robert Thompson (rthompsonj at gmail.com)
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -468,6 +468,19 @@
 
 All of these projections supply the data object as their base input.
 
+Often, it can be useful to sample a field at the minimum and maximum of a
+different field.  You can use the ``argmax`` and ``argmin`` operations to do
+this::
+
+  reg.argmin("density", axis="temperature")
+
+This will return the temperature at the minimum density.
+
+If you don't specify an ``axis``, it will return the spatial position of
+the extremum of the queried field.  Here is an example::
+
+  x, y, z = reg.argmin("density")
+
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -494,11 +507,15 @@
     | Usage: ``extrema(fields, non_zero=False)``
     | The extrema of a field or list of fields.
 
-**Maximum Location**
-    | Class :class:`~yt.data_objects.derived_quantities.MaxLocation`
-    | Usage: ``max_location(fields)``
-    | The maximum of a field or list of fields as well
-      as the x,y,z location of that maximum.
+**Maximum Location Sampling**
+    | Class :class:`~yt.data_objects.derived_quantities.SampleAtMaxFieldValues`
+    | Usage: ``sample_at_max_field_values(fields, sample_fields)``
+    | The value of sample_fields at the maximum value in fields.
+
+**Minimum Location Sampling**
+    | Class :class:`~yt.data_objects.derived_quantities.SampleAtMinFieldValues`
+    | Usage: ``sample_at_min_field_values(fields, sample_fields)``
+    | The value of sample_fields at the minimum value in fields.
 
 **Minimum Location**
     | Class :class:`~yt.data_objects.derived_quantities.MinLocation`
@@ -506,6 +523,12 @@
     | The minimum of a field or list of fields as well
       as the x,y,z location of that minimum.
 
+**Maximum Location**
+    | Class :class:`~yt.data_objects.derived_quantities.MaxLocation`
+    | Usage: ``max_location(fields)``
+    | The maximum of a field or list of fields as well
+      as the x,y,z location of that maximum.
+
 **Spin Parameter**
     | Class :class:`~yt.data_objects.derived_quantities.SpinParameter`
     | Usage: ``spin_parameter(use_gas=True, use_particles=True)``
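
A minimal usage sketch of the ``argmax``/``argmin`` operations documented in ``objects.rst`` above; the IsolatedGalaxy path is only a placeholder for whichever sample dataset you have locally::

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   ad = ds.all_data()

   # Spatial position of the density minimum (no axis supplied).
   x, y, z = ad.argmin("density")

   # Sample the temperature at the location of the density maximum.
   temp_at_max_rho = ad.argmax("density", axis="temperature")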

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
+  "signature": "sha256:6d823c3543f4183db8d28ad5003183515a69ce533fcfff00d92db0372afc3930"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -529,8 +529,21 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
-      "\n",
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "HDF5"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write to HDF5, use `write_hdf5`:"
      ]
     },
@@ -591,6 +604,38 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
+      "If you want to read/write a dataset from/to a specific group within the HDF5 file, use the `group_name` keyword:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels.write_hdf5(\"data_in_group.h5\", dataset_name=\"velocity\", info=info, group_name=\"/data/fields\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we have used the standard HDF5 slash notation for writing a group hierarchy (e.g., a group within a group)."
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 4,
+     "metadata": {},
+     "source": [
+      "ASCII"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
       "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
      ]
     },
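
A minimal sketch of the ``group_name`` keyword described in the notebook above, assuming ``from_hdf5`` mirrors the same keyword as ``write_hdf5`` (the file name is arbitrary)::

   from yt.units.yt_array import YTArray

   my_vels = YTArray([1.0, 2.0, 3.0], "km/s")

   # Write the array into a nested HDF5 group using slash notation.
   my_vels.write_hdf5("data_in_group.h5", dataset_name="velocity",
                      group_name="/data/fields")

   # Read it back from the same group.
   vels = YTArray.from_hdf5("data_in_group.h5", dataset_name="velocity",
                            group_name="/data/fields")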

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -339,11 +339,11 @@
 .. code-block:: python
 
    #!python
-   class MaximumValue(AnswerTestingTest):
-       _type_name = "ParentageRelationships"
+   class MaximumValueTest(AnswerTestingTest):
+       _type_name = "MaximumValue"
        _attrs = ("field",)
        def __init__(self, ds_fn, field):
-           super(MaximumValue, self).__init__(ds_fn)
+           super(MaximumValueTest, self).__init__(ds_fn)
            self.field = field
 
        def run(self):
@@ -381,10 +381,10 @@
 * Typically for derived values, we compare to 10 or 12 decimal places.
   For exact values, we compare exactly.
 
-How to Add Data to the Testing Suite
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+How To Write Answer Tests for a Frontend
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To add data to the testing suite, first write a new set of tests for the data.
+To add a new frontend answer test, first write a new set of tests for the data.
 The Enzo example in ``yt/frontends/enzo/tests/test_outputs.py`` is
 considered canonical.  Do these things:
 
@@ -399,8 +399,13 @@
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
-    argument ``big_data=True`` if the test is expensive.
+    ``@requires_ds(test_dataset_name)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive. The 
+    ``test_dataset_name`` should be a string containing the path you would pass
+    to the ``yt.load`` function. It does not need to be the full path to the
+    dataset, since the location of the test data directory will automatically
+    be prepended.  See :ref:`configuration-file` for more information
+    about the ``test_data_dir`` configuration option.
 
   * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
     yield from to execute a bunch of standard tests. In addition we have created
@@ -408,7 +413,59 @@
     you should start, and then yield additional tests that stress the outputs in
     whatever ways are necessary to ensure functionality.
 
-  * **All tests should be yielded!**
-
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
+
+How to Write Image Comparison Tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We have a number of tests designed to compare images as part of yt. We make use
+of some functionality from matplotlib to automatically compare images and detect
+differences, if any. Image comparison tests are used in the plotting and volume
+rendering machinery.
+
+The easiest way to use the image comparison tests is to make use of the 
+``GenericImageTest`` class. This class takes three arguments:
+
+* A dataset instance (e.g. something you load with ``yt.load`` or 
+  ``data_dir_load``) 
+* A function the test machinery can call which will save an image to disk. The 
+  test class will then find any images that get created and compare them with the
+  stored "correct" answer.
+* An integer specifying the number of decimal places to use when comparing
+  images. A smaller number of decimal places will produce a less stringent test.
+  Matplotlib uses an L2 norm on the full image to do the comparison tests, so
+  this is not a pixel-by-pixel measure, and surprisingly large variations will
+  still pass the test if the strictness of the comparison is not high enough.
+
+You *must* decorate your test function with ``requires_ds``; otherwise, the
+answer testing machinery will not be properly set up.
+
+Here is an example test function:
+
+.. code-block:: python
+
+   from yt.utilities.answer_testing.framework import \
+       GenericImageTest, requires_ds, data_dir_load
+
+   from matplotlib import pyplot as plt
+
+   @requires_ds(my_ds)
+   def test_my_ds():
+       ds = data_dir_load(my_ds)
+       def create_image(filename_prefix):
+           plt.plot([1, 2], [1, 2])
+           plt.savefig(filename_prefix)
+       test = GenericImageTest(ds, create_image, 12)
+       # this ensures a nice test name in nose's output
+       test_my_ds.__description__ = test.description
+       yield test
+
+Another good example of an image comparison test is the
+``PlotWindowAttributeTest`` defined in the answer testing framework and used in
+``yt/visualization/tests/test_plotwindow.py``. This test shows how a new answer
+test subclass can be used to programmatically test a variety of different methods
+of a complicated class using the same test class. This sort of image comparison
+test is more useful if you are finding yourself writing a ton of boilerplate
+code to get your image comparison test working.  The ``GenericImageTest`` is
+more useful if you only need to do a one-off image comparison test.
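
A rough sketch of the frontend answer test pattern described in "How To Write Answer Tests for a Frontend" above, modeled on the canonical Enzo example; the dataset path and field names here are placeholders::

   from yt.utilities.answer_testing.framework import \
       requires_ds, data_dir_load, small_patch_amr

   my_output = "MyCode_sample_data/output_0010"  # relative to test_data_dir
   _fields = ("density", "temperature")

   @requires_ds(my_output)
   def test_my_output():
       ds = data_dir_load(my_output)
       for test in small_patch_amr(my_output, _fields):
           test_my_output.__name__ = test.description
           yield test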

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -327,6 +327,100 @@
 
 .. _loading-fits-data:
 
+Exodus II Data
+--------------
+
+Exodus II is a file format for Finite Element datasets that is used by the MOOSE
+framework for file IO. Support for this format (and for unstructured mesh data in 
+general) is a new feature as of yt 3.3, so while we aim to fully support it, we also expect 
+there to be some buggy features at present. Currently, yt can visualize first-order
+mesh types only (4-node quads, 8-node hexes, 3-node triangles, and 4-node tetrahedra).
+Development of higher-order visualization capability is a work in progress.
+
+To load an Exodus II dataset, you can use the ``yt.load`` command on the Exodus II
+file:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+
+Because Exodus II datasets can have multiple steps (which can correspond to time steps,
+Picard iterations, non-linear solve iterations, etc.), you can also specify a ``step``
+argument when you load an Exodus II dataset; it defines the index at which to look when
+you read data from the file.
+
+You can access the connectivity information directly by doing:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   print(ds.index.meshes[0].connectivity_coords)
+   print(ds.index.meshes[0].connectivity_indices)
+   print(ds.index.meshes[1].connectivity_coords)
+   print(ds.index.meshes[1].connectivity_indices)
+
+This particular dataset has two meshes in it, both of which are made of 8-node hexes.
+yt uses a field name convention to access these different meshes in plots and data
+objects. To see all the fields found in a particular dataset, you can do:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   print(ds.field_list)
+
+This will give you a list of field names like ``('connect1', 'diffused')`` and 
+``('connect2', 'convected')``. Here, fields labelled with ``'connect1'`` correspond to the
+first mesh, and those with ``'connect2'`` to the second, and so on. To grab the value
+of the ``'convected'`` variable at all the nodes in the first mesh, for example, you
+would do:
+
+.. code-block:: python
+    
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()  # geometric selection, this just grabs everything
+   print(ad['connect1', 'convected'])
+
+In this dataset, ``('connect1', 'convected')`` is a nodal field, meaning that the field values
+are defined at the vertices of the elements. If we examine the shape of the returned array:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'convected'].shape)
+
+we see that this mesh has 12480 8-node hexahedral elements, and that we get 8 field values
+for each element. To get the vertex positions at which these field values are defined, we
+can do, for instance:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'vertex_x'])
+
+If we instead look at an element-centered field, like ``('connect1', 'conv_indicator')``,
+we get:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ad = ds.all_data()
+   print(ad['connect1', 'conv_indicator'].shape)
+
+we instead get only one field value per element.
+
+For information about visualizing unstructured mesh data, including Exodus II datasets, 
+please see :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`. 
+
+
 FITS Data
 ---------
 
@@ -1035,8 +1129,8 @@
 
 In addition to the above grid types, you can also load data stored on
 unstructured meshes. This type of mesh is used, for example, in many
-finite element calculations. Currently, hexahedral, tetrahedral, and
-wedge-shaped mesh element are supported.
+finite element calculations. Currently, hexahedral and tetrahedral
+mesh elements are supported.
 
 To load an unstructured mesh, you need to specify the following. First,
 you need to have a coordinates array, which should be an (L, 3) array

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -22,7 +22,7 @@
 * Late-stage beta support for Python 3 - unit tests and answer tests pass for 
   all the major frontends under python 3.4, and yt should now be mostly if not 
   fully usable.  Because many of the yt developers are still on Python 2 at 
-  this point, this should be considered a “late stage beta” as there may be 
+  this point, this should be considered a "late stage beta" as there may be 
   remaining issues yet to be identified or worked out.
 * Now supporting Gadget Friend-of-Friends/Subfind catalogs - see here to learn 
   how to load halo catalogs as regular yt datasets.

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -355,6 +355,78 @@
 keyword arguments, as described in
 :class:`~yt.visualization.plot_window.OffAxisProjectionPlot`
 
+.. _unstructured-mesh-slices:
+
+Unstructured Mesh Slices
+------------------------
+
+Unstructured Mesh datasets can be sliced using the same syntax as above.
+Here is an example script using a publicly available MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+   sl = yt.SlicePlot(ds, 'x', ('connect1', 'diffused'))
+   sl.zoom(0.75)
+   sl.save()
+
+Here, we plot the ``'diffused'`` variable, using a slice normal to the ``'x'`` direction, 
+through the mesh labelled by ``'connect1'``. By default, the slice goes through the
+center of the domain. We have also zoomed out a bit to get a better view of the 
+resulting structure. To instead plot the ``'convected'`` variable, using a slice normal
+to the ``'z'`` direction through the mesh labelled by ``'connect2'``, we do:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+   sl = yt.SlicePlot(ds, 'z', ('connect2', 'convected'))
+   sl.zoom(0.75)
+   sl.save()
+
+These slices are made by sampling the finite element solution at the points corresponding 
+to each pixel of the image. The ``'convected'`` and ``'diffused'`` variables are node-centered,
+so this interpolation is performed by converting the sample point to the reference coordinate
+system of the element and evaluating the appropriate shape functions. You can also
+plot element-centered fields:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'y', ('connect1', 'conv_indicator'))
+   sl.zoom(0.75)
+   sl.save()
+
+We can also annotate the mesh lines, as follows:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e-s010')
+   sl = yt.SlicePlot(ds, 'z', ('connect1', 'diffused'))
+   sl.annotate_mesh_lines(thresh=0.1)
+   sl.zoom(0.75)
+   sl.save()
+
+This annotation is performed by marking the pixels where the mapped coordinate is close
+to the element boundary. What counts as 'close' (in the mapped coordinate system) is 
+determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
+thinner.
+
+Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
+slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
+an example using another MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('MOOSE_sample_data/out.e')
+   sl = yt.SlicePlot(ds, 2, ('connect1', 'nodal_aux'))
+   sl.save()
+
+
 Plot Customization: Recentering, Resizing, Colormaps, and More
 --------------------------------------------------------------
 

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -3,19 +3,16 @@
 Unstructured Mesh Rendering
 ===========================
 
+Installation
+^^^^^^^^^^^^
+
 Beginning with version 3.3, yt has the ability to volume render unstructured
-meshes from, for example, finite element calculations. In order to use this
-capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, `embree <https://embree.github.io>`_
+mesh data - like that created by finite element calculations, for example. 
+In order to use this capability, a few additional dependencies are required 
+beyond those you get when you run the install script. First, `embree <https://embree.github.io>`_
 (a fast software ray-tracing library from Intel) must be installed, either
 by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
-Embree is installed, you must also create a symlink next to the library. For
-example, if the libraries were installed at /usr/local/lib/, you must do
-
-.. code-block:: bash
-
-    sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. 
 
 Second, the python bindings for embree (called 
 `pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
@@ -25,23 +22,39 @@
 
     git clone https://github.com/scopatz/pyembree
 
-To install, navigate to the root directory and run the setup script:
+To install, navigate to the root directory and run the setup script.
+If Embree was installed to some location that is not in your path by default,
+you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
+the Mac OS X package installer puts the installation at /opt/local/ instead of 
+/usr/local. To account for this, you would do:
 
 .. code-block:: bash
 
-    python setup.py develop
+    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
 
-If Embree was installed to some location that is not in your path by default,
-you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
-the Mac OS package installer puts the installation at /opt/local/ instead of 
-usr/local. To account for this, you would do:
+Once embree and pyembree are installed, you must rebuild yt from source in order to use
+the unstructured mesh rendering capability. Once again, if embree is installed in a 
+location that is not part of your default search path, you must tell yt where to find it.
+There are a number of ways to do this. One way is to again manually pass in the flags
+when running the setup script in the yt-hg directory:
 
 .. code-block:: bash
 
     CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py develop
 
-You must also use these flags when building any part of yt that links against
-pyembree.
+You can also set the EMBREE_DIR environment variable to '/opt/local', in which case
+you could just run 
+
+.. code-block:: bash
+   
+   python setup.py develop
+
+as usual. Finally, if you create a file called embree.cfg in the yt-hg directory with
+the location of the embree installation, the setup script will find this and use it, 
+provided EMBREE_DIR is not set. We recommend one of the later two methods, especially
+if you plan on re-compiling the cython extensions regularly. Note that none of this is
+neccessary if you installed embree into a location that is in your default path, such
+as /usr/local.
 
 Once the pre-requisites are installed, unstructured mesh data can be rendered
 much like any other dataset. In particular, a new type of 
@@ -55,120 +68,293 @@
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
 a set of rays are cast at the source. Each time a ray strikes the source mesh,
 the data is sampled at the intersection point and the resulting value gets 
-saved into an image.
+saved into an image. See below for examples.
 
-See below for examples. First, here is an example of rendering a hexahedral mesh.
+Examples
+^^^^^^^^
+
+First, here is an example of rendering an 8-node, hexahedral MOOSE dataset.
 
 .. python-script::
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load the data
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
-   mesh_id = 0
-   field_name = ('gas', 'diffused')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'diffused'))
 
-   # set up camera
+   # setup the camera
    cam = Camera(ds)
-   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.resolution = (800, 800)
-   cam.set_position(camera_position, north_vector)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
 
-   # make the image
-   im = ms.render(cam)
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
 
-   # plot and save
-   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0, vmax=2.0)
-   plt.gca().axes.get_xaxis().set_visible(False)
-   plt.gca().axes.get_yaxis().set_visible(False)
-   cb = plt.colorbar()
-   cb.set_label(field_name[1])
-   plt.savefig('hex_mesh_render.png')
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   pw.write_png(im, 'hex_mesh_render.png')
 
-Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+You can also overplot the mesh boundaries:
 
 .. python-script::
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
 
-   # load the data
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect1', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+   cam.resolution = (800, 800)
+
+   ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex_render_with_mesh.png')
+
+As with slices, you can visualize different meshes and different fields. For example,
+here is a script similar to the above that plots the "diffused" variable 
+using the mesh labelled by "connect2":
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect2', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   pw.write_png(im, 'hex_mesh_render.png')
+
+Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+Note that in this dataset, there are multiple "steps" per file, so we specify
+that we want to look at the last one.
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
    filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
-   coords, connectivity, data = get_data(filename)
-   mesh_id = 0
-   field_name = ('gas', 'u')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load(filename, step=-1)  # we look at the last time frame
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'u'))
 
-   # set up camera
+   # setup the camera 
    cam = Camera(ds)
    camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
    cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
    north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.set_position(camera_position, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 1.0))
+   pw.write_png(im, 'tetra_render.png')
+
+Another example, this time plotting the temperature field from a 20-node hex 
+MOOSE dataset:
+
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)  # we load the last time frame
+
+   ms = MeshSource(ds, ('connect2', 'temp'))
+
+   # set up the camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+   cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
    cam.resolution = (800, 800)
    cam.set_position(camera_position, north_vector)
 
-   # make the image
-   im = ms.render(cam)
+   im = ms.render(cam, cmap='hot', color_bounds=(500.0, 1700.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex20_render.png')
 
-   # plot and save
-   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0.0, vmax=1.0)
-   plt.gca().axes.get_xaxis().set_visible(False)
-   plt.gca().axes.get_yaxis().set_visible(False)
-   cb = plt.colorbar()
-   cb.set_label(field_name[1])
-   plt.savefig('tet_mesh_render.png')
+As with other volume renderings in yt, you can swap out different lenses. Here is 
+an example that uses a "perspective" lens, for which the rays diverge from the 
+camera position according to some opening angle:
 
-Finally, here is a script that creates frames of a movie. It calls the rotate()
-method 300 times, saving a new image to the disk each time.
+.. python-script::
+
+   import yt
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+   ms = MeshSource(ds, ('connect2', 'diffused'))
+
+   # setup the camera
+   cam = Camera(ds, lens_type='perspective')
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+
+   im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+   im = ms.annotate_mesh_lines()
+   pw.write_png(im, 'hex_mesh_render_perspective.png')
+
+You can also create scenes that have multiple meshes. The ray-tracing infrastructure
+will keep track of the depth information for each source separately, and composite
+the final image accordingly. In the next example, we show how to render a scene 
+with two meshes on it:
+
+.. python-script::
+
+    import yt
+    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    import yt.utilities.png_writer as pw
+
+    ds = yt.load("MOOSE_sample_data/out.e-s010")
+
+    # this time we create an empty scene and add sources to it one-by-one
+    sc = Scene()
+
+    cam = Camera(ds)
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
+                     ds.arr([0.0, 1.0, 0.0], 'dimensionless'))
+    cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
+    cam.resolution = (800, 800)
+
+    sc.camera = cam
+
+    # create two distinct MeshSources from 'connect1' and 'connect2'
+    ms1 = MeshSource(ds, ('connect1', 'diffused'))
+    ms2 = MeshSource(ds, ('connect2', 'diffused'))
+
+    sc.add_source(ms1)
+    sc.add_source(ms2)
+
+    im = sc.render()
+
+    pw.write_png(im, 'composite_render.png')
+
+
+Making Movies
+^^^^^^^^^^^^^
+
+Here are a couple of example scripts that show how to create image frames that
+can later be stitched together into a movie. In the first example, we look at a
+single dataset at a fixed time, but we move the camera around to get a different
+vantage point. We call the rotate() method once per frame, saving a new image to
+disk each time.
 
 .. code-block:: python
 
    import yt
-   import pylab as plt
-   from yt.visualization.volume_rendering.render_source import MeshSource
-   from yt.visualization.volume_rendering.camera import Camera
-   from yt.utilities.exodusII_reader import get_data
+   from yt.visualization.volume_rendering.api import MeshSource, Camera
+   import yt.utilities.png_writer as pw
+   import numpy as np
 
-   # load dataset
-   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
-   mesh_id = 0
-   field_name = ('gas', 'diffused')
-   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
 
-   # create the RenderSource
-   ms = MeshSource(ds, field_name)
+   ms = MeshSource(ds, ('connect1', 'diffused'))
 
-   # set up camera
+   # setup the camera
    cam = Camera(ds)
-   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
-   cam.set_position(camera_position, north_vector)
+   cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')  # point we're looking at
+
+   cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')  # the camera location
+   north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')  # down is the new up
+   cam.set_position(cam_pos, north_vector)
+   cam.resolution = (800, 800)
    cam.steady_north = True
 
    # make movie frames
    num_frames = 301
    for i in range(num_frames):
        cam.rotate(2.0*np.pi/num_frames)
-       im = ms.render(cam)
-       plt.imshow(im, cmap='Eos A', origin='lower',vmin=0.0, vmax=2.0)
-       plt.gca().axes.get_xaxis().set_visible(False)
-       plt.gca().axes.get_yaxis().set_visible(False)
-       cb = plt.colorbar()
-       cb.set_label('diffused')
-       plt.savefig('movie_frames/surface_render_%.4d.png' % i)
-       plt.clf()
+       im = ms.render(cam, cmap='Eos A', color_bounds=(0.0, 2.0))
+       pw.write_png(im, 'movie_frames/surface_render_%.4d.png' % i)
+
+Finally, this example demonstrates how to loop over the time steps in a single
+file with a fixed camera position:
+
+.. code-block:: python
+
+    import yt
+    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    import pylab as plt
+
+    NUM_STEPS = 127
+    CMAP = 'hot'
+    VMIN = 300.0
+    VMAX = 2000.0
+
+    for step in range(NUM_STEPS):
+
+        ds = yt.load("MOOSE_sample_data/mps_out.e", step=step)
+
+        time = ds._get_current_time()
+
+        # the field name is a tuple of strings. The first string
+        # specifies which mesh will be plotted, the second string
+        # specifies the name of the field.
+        field_name = ('connect2', 'temp')
+
+        # this initializes the render source
+        ms = MeshSource(ds, field_name)
+
+        # set up the camera here. these values were arrived at by
+        # calling pitch, yaw, and roll in the notebook until I
+        # got the angle I wanted.
+        cam = Camera(ds)
+        camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
+        cam.focus = ds.domain_center
+        north_vector = ds.arr([0.3032476, 0.71782557, -0.62671153], 'dimensionless')
+        cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
+        cam.resolution = (800, 800)
+        cam.set_position(camera_position, north_vector)
+
+        # actually make the image here
+        im = ms.render(cam, cmap=CMAP, color_bounds=(VMIN, VMAX))
+
+        # Plot the result using matplotlib and save.
+        # Note that we are setting the upper and lower
+        # bounds of the colorbar to be the same for all
+        # frames of the image.
+
+        # must clear the image between frames
+        plt.clf()
+        fig = plt.gcf()
+        ax = plt.gca()
+        ax.imshow(im, interpolation='nearest', origin='lower')
+
+        # Add the colorbar using a fake (not shown) image.
+        p = ax.imshow(ms.data, visible=False, cmap=CMAP, vmin=VMIN, vmax=VMAX)
+        cb = fig.colorbar(p)
+        cb.set_label(field_name[1])
+
+        ax.text(25, 750, 'time = %.2e' % time, color='k')
+        ax.axes.get_xaxis().set_visible(False)
+        ax.axes.get_yaxis().set_visible(False)
+
+        plt.savefig('movie_frames/test_%.3d' % step)
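
To make the search order described in the installation section concrete (EMBREE_DIR takes precedence, then an embree.cfg file, then a standard prefix), here is a purely illustrative sketch of that lookup logic; it is not yt's actual setup.py code::

   import os

   def locate_embree(default="/usr/local"):
       # Prefer the EMBREE_DIR environment variable if it is set.
       embree_dir = os.environ.get("EMBREE_DIR")
       if embree_dir:
           return embree_dir
       # Otherwise fall back to an embree.cfg file in the build directory.
       if os.path.exists("embree.cfg"):
           with open("embree.cfg") as f:
               return f.read().strip()
       # Finally, assume a standard installation prefix.
       return default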

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,3 +4,4 @@
 h5py==2.5.0 
 nose==1.3.6 
 sympy==0.7.6 
+

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 tests/nose_runner.py
--- /dev/null
+++ b/tests/nose_runner.py
@@ -0,0 +1,54 @@
+import sys
+import os
+import yaml
+import multiprocessing as mp
+import nose
+import glob
+from contextlib import closing
+from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import AnswerTesting
+
+
+def run_job(argv):
+    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
+        cur_stderr = sys.stderr
+        sys.stderr = fstderr
+        answer = argv[0]
+        test_dir = ytcfg.get("yt", "test_data_dir")
+        answers_dir = os.path.join(test_dir, "answers")
+        if not os.path.isdir(os.path.join(answers_dir, answer)):
+            nose.run(argv=argv + ['--answer-store'],
+                     addplugins=[AnswerTesting()], exit=False)
+        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
+    sys.stderr = cur_stderr
+
+if __name__ == "__main__":
+    test_dir = ytcfg.get("yt", "test_data_dir")
+    answers_dir = os.path.join(test_dir, "answers")
+    with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
+        tests = yaml.load(obj)
+
+    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+                 '--with-answer-testing', '--answer-big-data', '--local']
+    args = [['unittests', '-v', '-s', '--nologcapture']]
+    for answer in list(tests.keys()):
+        argv = [answer]
+        argv += base_argv
+        argv.append('--xunit-file=%s.xml' % answer)
+        argv.append('--answer-name=%s' % answer)
+        argv += tests[answer]
+        args.append(argv)
+    
+    processes = [mp.Process(target=run_job, args=(args[i],))
+                 for i in range(len(args))]
+    for p in processes:
+        p.start()
+    for p in processes:
+        p.join(timeout=7200)
+        if p.is_alive():
+            p.terminate()
+            p.join(timeout=30)
+    for fname in glob.glob("*.out"):
+        with open(fname, 'r') as fin:
+            print(fin.read())
+        os.remove(fname)

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 tests/tests_2.7.yaml
--- /dev/null
+++ b/tests/tests_2.7.yaml
@@ -0,0 +1,51 @@
+local_artio_270:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_270:
+  - yt/frontends/athena
+
+local_chombo_270:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_270:
+  - yt/frontends/enzo
+
+local_fits_270:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_270:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_270:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_270:
+  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_270:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_270:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_270:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_270:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_270:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_270:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_270:
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 tests/tests_3.4.yaml
--- /dev/null
+++ b/tests/tests_3.4.yaml
@@ -0,0 +1,49 @@
+local_artio_340:
+  - yt/frontends/artio/tests/test_outputs.py
+
+local_athena_340:
+  - yt/frontends/athena
+
+local_chombo_340:
+  - yt/frontends/chombo/tests/test_outputs.py
+
+local_enzo_340:
+  - yt/frontends/enzo
+
+local_fits_340:
+  - yt/frontends/fits/tests/test_outputs.py
+
+local_flash_340:
+  - yt/frontends/flash/tests/test_outputs.py
+
+local_gadget_340:
+  - yt/frontends/gadget/tests/test_outputs.py
+
+local_halos_340:
+  - yt/frontends/owls_subfind/tests/test_outputs.py
+
+local_owls_340:
+  - yt/frontends/owls/tests/test_outputs.py
+
+local_pw_340:
+  - yt/visualization/tests/test_plotwindow.py:test_attributes
+  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+
+local_tipsy_340:
+  - yt/frontends/tipsy/tests/test_outputs.py
+
+local_varia_340:
+  - yt/analysis_modules/radmc3d_export
+  - yt/frontends/moab/tests/test_c5.py
+  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+
+local_orion_340:
+  - yt/frontends/boxlib/tests/test_orion.py
+
+local_ramses_340:
+  - yt/frontends/ramses/tests/test_outputs.py
+
+local_ytdata_340:
+  - yt/frontends/ytdata
\ No newline at end of file

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -123,7 +123,7 @@
     s_ds = halo.data_object.ds
     old_sphere = halo.data_object
     max_vals = old_sphere.quantities.max_location(field)
-    new_center = s_ds.arr(max_vals[2:])
+    new_center = s_ds.arr(max_vals[1:])
     new_sphere = s_ds.sphere(new_center.in_units("code_length"),
                                old_sphere.radius.in_units("code_length"))
     mylog.info("Moving sphere center from %s to %s." % (old_sphere.center,

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1344,7 +1344,8 @@
 
     def _retrieve_halos(self):
         # First get the halo particulars.
-        lines = file("%s.out" % self.basename)
+        with open("%s.out" % self.basename, 'r') as fh:
+            lines = fh.readlines()
         # The location of particle data for each halo.
         locations = self._collect_halo_data_locations()
         halo = 0
@@ -1395,7 +1396,8 @@
 
     def _collect_halo_data_locations(self):
         # The halos are listed in order in the file.
-        lines = file("%s.txt" % self.basename)
+        with open("%s.txt" % self.basename, 'r') as fh:
+            lines = fh.readlines()
         locations = []
         realpath = path.realpath("%s.txt" % self.basename)
         for line in lines:
@@ -1408,7 +1410,6 @@
                 item = item.split("/")
                 temp.append(path.join(path.dirname(realpath), item[-1]))
             locations.append(temp)
-        lines.close()
         return locations
 
 class TextHaloList(HaloList):
@@ -1422,7 +1423,8 @@
 
     def _retrieve_halos(self, fname, columns, comment):
         # First get the halo particulars.
-        lines = file(fname)
+        with open(fname, 'r') as fh:
+            lines = fh.readlines()
         halo = 0
         base_set = ['x', 'y', 'z', 'r']
         keys = columns.keys()

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -47,6 +47,12 @@
                "y":("z","x"),
                "z":("x","y")}
 
+def force_unicode(value):
+    if hasattr(value, 'decode'):
+        return value.decode('utf8')
+    else:
+        return value
+
 def parse_value(value, default_units):
     if isinstance(value, YTQuantity):
         return value.in_units(default_units)
@@ -919,27 +925,28 @@
 
         p = f["/parameters"]
         parameters["ExposureTime"] = YTQuantity(p["exp_time"].value, "s")
-        if isinstance(p["area"].value, (string_types, bytes)):
-            parameters["Area"] = p["area"].value.decode("utf8")
+        area = force_unicode(p['area'].value)
+        if isinstance(area, string_types):
+            parameters["Area"] = area
         else:
-            parameters["Area"] = YTQuantity(p["area"].value, "cm**2")
+            parameters["Area"] = YTQuantity(area, "cm**2")
         parameters["Redshift"] = p["redshift"].value
         parameters["AngularDiameterDistance"] = YTQuantity(p["d_a"].value, "Mpc")
         parameters["sky_center"] = YTArray(p["sky_center"][:], "deg")
         parameters["dtheta"] = YTQuantity(p["dtheta"].value, "deg")
         parameters["pix_center"] = p["pix_center"][:]
         if "rmf" in p:
-            parameters["RMF"] = p["rmf"].value.decode("utf8")
+            parameters["RMF"] = force_unicode(p["rmf"].value)
         if "arf" in p:
-            parameters["ARF"] = p["arf"].value.decode("utf8")
+            parameters["ARF"] = force_unicode(p["arf"].value)
         if "channel_type" in p:
-            parameters["ChannelType"] = p["channel_type"].value.decode("utf8")
+            parameters["ChannelType"] = force_unicode(p["channel_type"].value)
         if "mission" in p:
-            parameters["Mission"] = p["mission"].value.decode("utf8")
+            parameters["Mission"] = force_unicode(p["mission"].value)
         if "telescope" in p:
-            parameters["Telescope"] = p["telescope"].value.decode("utf8")
+            parameters["Telescope"] = force_unicode(p["telescope"].value)
         if "instrument" in p:
-            parameters["Instrument"] = p["instrument"].value.decode("utf8")
+            parameters["Instrument"] = force_unicode(p["instrument"].value)
 
         d = f["/data"]
         events["xpix"] = d["xpix"][:]
@@ -1552,4 +1559,4 @@
             d.create_dataset(key, data=f_in[key].value)
 
     f_in.close()
-    f_out.close()
\ No newline at end of file
+    f_out.close()

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -87,7 +87,7 @@
         events1 = photons1.project_photons([1.0,-0.5,0.2], responses=[arf,rmf],
                                           absorb_model=tbabs_model, 
                                           convolve_energies=True, prng=prng)
-
+        events1['xsky']
         return_events = return_data(events1.events)
 
         tests.append(GenericArrayTest(ds, return_events, args=[a]))

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -17,9 +17,11 @@
 import numpy as np
 from functools import wraps
 import fileinput
+import io
 from re import finditer
 from tempfile import TemporaryFile
 import os
+import sys
 import zipfile
 
 from yt.config import ytcfg
@@ -592,20 +594,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        LE = self.left_edge - self.base_dds
-        RE = self.right_edge + self.base_dds
-        if not all(self.ds.periodicity):
-            for i in range(3):
-                if self.ds.periodicity[i]: continue
-                LE[i] = max(LE[i], self.ds.domain_left_edge[i])
-                RE[i] = min(RE[i], self.ds.domain_right_edge[i])
-        self._data_source = self.ds.region(self.center, LE, RE)
+        self._data_source = self.ds.region(self.center,
+            self.left_edge, self.right_edge)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
-        self._pdata_source = self.ds.region(self.center,
-            self.left_edge, self.right_edge)
-        self._pdata_source.min_level = 0
-        self._pdata_source.max_level = self.level
+        # This triggers "special" behavior in the RegionSelector to ensure we
+        # select *cells* whose bounding boxes overlap with our region, not just
+        # their cell centers.
+        self._data_source.loose_selection = True
 
     def get_data(self, fields = None):
         if fields is None: return
@@ -644,7 +640,7 @@
 
     def _fill_particles(self, part):
         for p in part:
-            self[p] = self._pdata_source[p]
+            self[p] = self._data_source[p]
 
     def _fill_fields(self, fields):
         fields = [f for f in fields if f not in self.field_data]
@@ -1278,14 +1274,13 @@
     def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr,
                            color_field_max, color_field_min, color_field,
                            emit_field_max, emit_field_min, emit_field): # this now holds for obj files
-        from sys import version
         if color_field is not None:
             if color_log: cs = np.log10(cs)
         if emit_field is not None:
             if emit_log: em = np.log10(em)
         if color_field is not None:
             if color_field_min is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     cs = [float(field) for field in cs]
                     cs = np.array(cs)
                 mi = cs.min()
@@ -1293,7 +1288,7 @@
                 mi = color_field_min
                 if color_log: mi = np.log10(mi)
             if color_field_max is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     cs = [float(field) for field in cs]
                     cs = np.array(cs)
                 ma = cs.max()
@@ -1311,7 +1306,7 @@
         # now, get emission
         if emit_field is not None:
             if emit_field_min is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     em = [float(field) for field in em]
                     em = np.array(em)
                 emi = em.min()
@@ -1319,7 +1314,7 @@
                 emi = emit_field_min
                 if emit_log: emi = np.log10(emi)
             if emit_field_max is None:
-                if version >= '3':
+                if sys.version_info > (3, ):
                     em = [float(field) for field in em]
                     em = np.array(em)
                 ema = em.max()
@@ -1339,15 +1334,9 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        from sys import version
-        from io import IOBase
         if plot_index is None:
             plot_index = 0
-        if version < '3':
-            checker = file
-        else:
-            checker = IOBase
-        if isinstance(filename, checker):
+        if isinstance(filename, io.IOBase):
             fobj = filename + '.obj'
             fmtl = filename + '.mtl'
         else:
@@ -1639,7 +1628,7 @@
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
-        if isinstance(filename, file):
+        if isinstance(filename, io.IOBase):
             f = filename
         else:
             f = open(filename, "wb")

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -588,7 +588,7 @@
                         extra_attrs=extra_attrs)
 
         return filename
-        
+
     def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to
@@ -613,10 +613,84 @@
 
     # Numpy-like Operations
     def argmax(self, field, axis=None):
-        raise NotImplementedError
+        r"""Return the values at which the field is maximized.
+
+        This will, in a parallel-aware fashion, find the maximum value and then
+        return to you the values at that maximum location that are requested
+        for "axis".  By default it will return the spatial positions (in the
+        natural coordinate system), but it can be any field.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to maximize.
+        axis : string or list of strings, optional
+            If supplied, the fields to sample along; if not supplied, defaults
+            to the coordinate fields.  This can be the name of the coordinate
+            fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,
+            1, 2.
+
+        Returns
+        -------
+        A list of YTQuantities as specified by the axis argument.
+
+        Examples
+        --------
+
+        >>> temp_at_max_rho = reg.argmax("density", axis="temperature")
+        >>> max_rho_xyz = reg.argmax("density")
+        >>> t_mrho, v_mrho = reg.argmax("density", axis=["temperature",
+        ...                 "velocity_magnitude"])
+        >>> x, y, z = reg.argmax("density")
+
+        """
+        if axis is None:
+            mv, pos0, pos1, pos2 = self.quantities.max_location(field)
+            return pos0, pos1, pos2
+        rv = self.quantities.sample_at_max_field_values(field, axis)
+        if len(rv) == 2:
+            return rv[1]
+        return rv[1:]
 
     def argmin(self, field, axis=None):
-        raise NotImplementedError
+        r"""Return the values at which the field is minimized.
+
+        This will, in a parallel-aware fashion, find the minimum value and then
+        return to you the values at that minimum location that are requested
+        for "axis".  By default it will return the spatial positions (in the
+        natural coordinate system), but it can be any field.
+
+        Parameters
+        ----------
+        field : string or tuple of strings
+            The field to minimize.
+        axis : string or list of strings, optional
+            If supplied, the fields to sample along; if not supplied, defaults
+            to the coordinate fields.  This can be the name of the coordinate
+            fields (i.e., 'x', 'y', 'z') or a list of fields, but cannot be 0,
+            1, 2.
+
+        Returns
+        -------
+        A list of YTQuantities as specified by the axis argument.
+
+        Examples
+        --------
+
+        >>> temp_at_min_rho = reg.argmin("density", axis="temperature")
+        >>> min_rho_xyz = reg.argmin("density")
+        >>> t_mrho, v_mrho = reg.argmin("density", axis=["temperature",
+        ...                 "velocity_magnitude"])
+        >>> x, y, z = reg.argmin("density")
+
+        """
+        if axis is None:
+            mv, pos0, pos1, pos2 = self.quantities.min_location(field)
+            return pos0, pos1, pos2
+        rv = self.quantities.sample_at_min_field_values(field, axis)
+        if len(rv) == 2:
+            return rv[1]
+        return rv[1:]
 
     def _compute_extrema(self, field):
         if self._extrema_cache is None:

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -522,10 +522,57 @@
         return [self.data_source.ds.arr([mis.min(), mas.max()])
                 for mis, mas in zip(values[::2], values[1::2])]
 
-class MaxLocation(DerivedQuantity):
+class SampleAtMaxFieldValues(DerivedQuantity):
     r"""
-    Calculates the maximum value plus the index, x, y, and z position
-    of the maximum.
+    Calculates the maximum value of a field and returns the values of the
+    requested sample fields at that location.
+
+    Parameters
+    ----------
+    field : field
+        The field over which the extrema are to be calculated.
+    sample_fields : list of fields
+        The fields to sample and return at the maximum value.
+
+    Examples
+    --------
+
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> print ad.quantities.sample_at_max_field_values(("gas", "density"),
+    ...         ["temperature", "velocity_magnitude"])
+
+    """
+    def count_values(self, field, sample_fields):
+        # the field itself, then one slot per sample field
+        self.num_vals = 1 + len(sample_fields)
+
+    def __call__(self, field, sample_fields):
+        rv = super(SampleAtMaxFieldValues, self).__call__(field, sample_fields)
+        if len(rv) == 1: rv = rv[0]
+        return rv
+
+    def process_chunk(self, data, field, sample_fields):
+        field = data._determine_fields(field)[0]
+        ma = array_like_field(data, -HUGE, field)
+        vals = [array_like_field(data, -1, sf) for sf in sample_fields]
+        maxi = -1
+        if data[field].size > 0:
+            maxi = self._func(data[field])
+            ma = data[field][maxi]
+            vals = [data[sf][maxi] for sf in sample_fields]
+        return (ma,) + tuple(vals)
+
+    def reduce_intermediate(self, values):
+        i = self._func(values[0]) # ma is values[0]
+        return [val[i] for val in values]
+
+    def _func(self, arr):
+        return np.argmax(arr)
+
+class MaxLocation(SampleAtMaxFieldValues):
+    r"""
+    Calculates the maximum value plus the x, y, and z position of the maximum.
 
     Parameters
     ----------
@@ -540,36 +587,39 @@
     >>> print ad.quantities.max_location(("gas", "density"))
 
     """
-    def count_values(self, *args, **kwargs):
-        self.num_vals = 5
-
     def __call__(self, field):
-        rv = super(MaxLocation, self).__call__(field)
+        sample_fields = get_position_fields(field, self.data_source)
+        rv = super(MaxLocation, self).__call__(field, sample_fields)
         if len(rv) == 1: rv = rv[0]
         return rv
 
-    def process_chunk(self, data, field):
-        field = data._determine_fields(field)[0]
-        ma = array_like_field(data, -HUGE, field)
-        position_fields = get_position_fields(field, data)
-        mx = array_like_field(data, -1, position_fields[0])
-        my = array_like_field(data, -1, position_fields[1])
-        mz = array_like_field(data, -1, position_fields[2])
-        maxi = -1
-        if data[field].size > 0:
-            maxi = np.argmax(data[field])
-            ma = data[field][maxi]
-            mx, my, mz = [data[ax][maxi] for ax in position_fields]
-        return (ma, maxi, mx, my, mz)
+class SampleAtMinFieldValues(SampleAtMaxFieldValues):
+    r"""
+    Calculates the minimum value of a field and returns the values of the
+    requested sample fields at that location.
 
-    def reduce_intermediate(self, values):
-        i = np.argmax(values[0]) # ma is values[0]
-        return [val[i] for val in values]
+    Parameters
+    ----------
+    field : field
+        The field over which the extrema are to be calculated.
+    sample_fields : list of fields
+        The fields to sample and return at the minimum value.
 
-class MinLocation(DerivedQuantity):
+    Examples
+    --------
+
+    >>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> print ad.quantities.sample_at_min_field_values(("gas", "density"),
+    ...         ["temperature", "velocity_magnitude"])
+
+    """
+    def _func(self, arr):
+        return np.argmin(arr)
+
+class MinLocation(SampleAtMinFieldValues):
     r"""
-    Calculates the minimum value plus the index, x, y, and z position
-    of the minimum.
+    Calculates the minimum value plus the x, y, and z position of the minimum.
 
     Parameters
     ----------
@@ -584,32 +634,12 @@
     >>> print ad.quantities.min_location(("gas", "density"))
 
     """
-    def count_values(self, *args, **kwargs):
-        self.num_vals = 5
-
     def __call__(self, field):
-        rv = super(MinLocation, self).__call__(field)
+        sample_fields = get_position_fields(field, self.data_source)
+        rv = super(MinLocation, self).__call__(field, sample_fields)
         if len(rv) == 1: rv = rv[0]
         return rv
 
-    def process_chunk(self, data, field):
-        field = data._determine_fields(field)[0]
-        ma = array_like_field(data, HUGE, field)
-        position_fields = get_position_fields(field, data)
-        mx = array_like_field(data, -1, position_fields[0])
-        my = array_like_field(data, -1, position_fields[1])
-        mz = array_like_field(data, -1, position_fields[2])
-        mini = -1
-        if data[field].size > 0:
-            mini = np.argmin(data[field])
-            ma = data[field][mini]
-            mx, my, mz = [data[ax][mini] for ax in position_fields]
-        return (ma, mini, mx, my, mz)
-
-    def reduce_intermediate(self, values):
-        i = np.argmin(values[0]) # ma is values[0]
-        return [val[i] for val in values]
-
 class SpinParameter(DerivedQuantity):
     r"""
     Calculates the dimensionless spin parameter.

diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -21,7 +21,9 @@
     get_output_filename, \
     ensure_list, \
     iterable
-from yt.units.yt_array import array_like_field
+from yt.units.yt_array import \
+    array_like_field, \
+    YTQuantity
 from yt.units.unit_object import Unit
 from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.lib.misc_utilities import \
@@ -35,6 +37,22 @@
     NGPDeposit_2
 
 
+def _sanitize_min_max_units(amin, amax, finfo, registry):
+    # returns a copy of amin and amax, converted to finfo's output units
+    umin = getattr(amin, 'units', None)
+    umax = getattr(amax, 'units', None)
+    if umin is None:
+        umin = Unit(finfo.output_units, registry=registry)
+        rmin = YTQuantity(amin, umin)
+    else:
+        rmin = amin.in_units(finfo.output_units)
+    if umax is None:
+        umax = Unit(finfo.output_units, registry=registry)
+        rmax = YTQuantity(amax, umax)
+    else:
+        rmax = amax.in_units(finfo.output_units)
+    return rmin, rmax
+
 def preserve_source_parameters(func):
     def save_state(*args, **kwargs):
         # Temporarily replace the 'field_parameters' for a
@@ -67,6 +85,7 @@
         self.data_source = data_source
         self.ds = data_source.ds
         self.field_map = {}
+        self.field_info = {}
         self.field_data = YTFieldData()
         if weight_field is not None:
             self.variance = YTFieldData()
@@ -85,6 +104,8 @@
         
         """
         fields = self.data_source._determine_fields(fields)
+        for f in fields:
+            self.field_info[f] = self.data_source.ds.field_info[f]
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
         citer = self.data_source.chunks([], "io")
         for chunk in parallel_objects(citer):
@@ -372,9 +393,11 @@
     x_n : integer
         The number of bins along the x direction.
     x_min : float
-        The minimum value of the x profile field.
+        The minimum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_max : float
-        The maximum value of the x profile field.
+        The maximum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.
@@ -385,8 +408,12 @@
     def __init__(self, data_source, x_field, x_n, x_min, x_max, x_log,
                  weight_field = None):
         super(Profile1D, self).__init__(data_source, weight_field)
-        self.x_field = x_field
+        self.x_field = data_source._determine_fields(x_field)[0]
+        self.field_info[self.x_field] = \
+            self.data_source.ds.field_info[self.x_field]
         self.x_log = x_log
+        x_min, x_max = _sanitize_min_max_units(
+            x_min, x_max, self.field_info[self.x_field], self.ds.unit_registry)
         self.x_bins = array_like_field(data_source,
                                        self._get_bins(x_min, x_max, x_n, x_log),
                                        self.x_field)
@@ -442,9 +469,11 @@
     x_n : integer
         The number of bins along the x direction.
     x_min : float
-        The minimum value of the x profile field.
+        The minimum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_max : float
-        The maximum value of the x profile field.
+        The maximum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.
@@ -453,9 +482,11 @@
     y_n : integer
         The number of bins along the y direction.
     y_min : float
-        The minimum value of the y profile field.
+        The minimum value of the y profile field. If supplied without units,
+        assumed to be in the output units for y_field.
     y_max : float
-        The maximum value of the y profile field.
+        The maximum value of the y profile field. If supplied without units,
+        assumed to be in the output units for y_field.
     y_log : boolean
         Controls whether or not the bins for the y field are evenly
         spaced in linear (False) or log (True) space.
@@ -469,14 +500,22 @@
                  weight_field = None):
         super(Profile2D, self).__init__(data_source, weight_field)
         # X
-        self.x_field = x_field
+        self.x_field = data_source._determine_fields(x_field)[0]
         self.x_log = x_log
+        self.field_info[self.x_field] = \
+            self.data_source.ds.field_info[self.x_field]
+        x_min, x_max = _sanitize_min_max_units(
+            x_min, x_max, self.field_info[self.x_field], self.ds.unit_registry)
         self.x_bins = array_like_field(data_source,
                                        self._get_bins(x_min, x_max, x_n, x_log),
                                        self.x_field)
         # Y
-        self.y_field = y_field
+        self.y_field = data_source._determine_fields(y_field)[0]
         self.y_log = y_log
+        self.field_info[self.y_field] = \
+            self.data_source.ds.field_info[self.y_field]
+        y_min, y_max = _sanitize_min_max_units(
+            y_min, y_max, self.field_info[self.y_field], self.ds.unit_registry)
         self.y_bins = array_like_field(data_source,
                                        self._get_bins(y_min, y_max, y_n, y_log),
                                        self.y_field)
@@ -550,17 +589,21 @@
     x_n : integer
         The number of bins along the x direction.
     x_min : float
-        The minimum value of the x profile field.
+        The minimum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_max : float
-        The maximum value of the x profile field.
+        The maximum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     y_field : string field name
         The field to profile as a function of along the y axis
     y_n : integer
         The number of bins along the y direction.
     y_min : float
-        The minimum value of the y profile field.
+        The minimum value of the y profile field. If supplied without units,
+        assumed to be in the output units for y_field.
     y_max : float
-        The maximum value of the y profile field.
+        The maximum value of the y profile field. If supplied without units,
+        assumed to be in the output units for y_field.
     weight_field : string field name
         The field to use for weighting. Default is None.
     deposition : string, optional
@@ -661,9 +704,11 @@
     x_n : integer
         The number of bins along the x direction.
     x_min : float
-        The minimum value of the x profile field.
+        The minimum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_max : float
-        The maximum value of the x profile field.
+        The maximum value of the x profile field. If supplied without units,
+        assumed to be in the output units for x_field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.
@@ -672,9 +717,11 @@
     y_n : integer
         The number of bins along the y direction.
     y_min : float
-        The minimum value of the y profile field.
+        The minimum value of the y profile field. If supplied without units,
+        assumed to be in the output units for y_field.
     y_max : float
-        The maximum value of the y profile field.
+        The maximum value of the y profile field. If supplied without units,
+        assumed to be in the output units for y_field.
     y_log : boolean
         Controls whether or not the bins for the y field are evenly
         spaced in linear (False) or log (True) space.
@@ -683,9 +730,11 @@
     z_n : integer
         The number of bins along the z direction.
     z_min : float
-        The minimum value of the z profile field.
+        The minimum value of the z profile field. If supplied without units,
+        assumed to be in the output units for z_field.
     z_max : float
-        The maximum value of thee z profile field.
+        The maximum value of the z profile field. If supplied without units,
+        assumed to be in the output units for z_field.
     z_log : boolean
         Controls whether or not the bins for the z field are evenly
         spaced in linear (False) or log (True) space.
@@ -700,20 +749,32 @@
                  weight_field = None):
         super(Profile3D, self).__init__(data_source, weight_field)
         # X
-        self.x_field = x_field
+        self.x_field = data_source._determine_fields(x_field)[0]
         self.x_log = x_log
+        self.field_info[self.x_field] = \
+            self.data_source.ds.field_info[self.x_field]
+        x_min, x_max = _sanitize_min_max_units(
+            x_min, x_max, self.field_info[self.x_field], self.ds.unit_registry)
         self.x_bins = array_like_field(data_source,
                                        self._get_bins(x_min, x_max, x_n, x_log),
                                        self.x_field)
         # Y
-        self.y_field = y_field
+        self.y_field = data_source._determine_fields(y_field)[0]
         self.y_log = y_log
+        self.field_info[self.y_field] = \
+            self.data_source.ds.field_info[self.y_field]
+        y_min, y_max = _sanitize_min_max_units(
+            y_min, y_max, self.field_info[self.y_field], self.ds.unit_registry)
         self.y_bins = array_like_field(data_source,
                                        self._get_bins(y_min, y_max, y_n, y_log),
                                        self.y_field)
         # Z
-        self.z_field = z_field
+        self.z_field = data_source._determine_fields(z_field)[0]
         self.z_log = z_log
+        self.field_info[self.z_field] = \
+            self.data_source.ds.field_info[self.z_field]
+        z_min, z_max = _sanitize_min_max_units(
+            z_min, z_max, self.field_info[self.z_field], self.ds.unit_registry)
         self.z_bins = array_like_field(data_source,
                                        self._get_bins(z_min, z_max, z_n, z_log),
                                        self.z_field)

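To illustrate the new unit handling (a sketch assuming ds is a loaded dataset;
the field names and bounds are illustrative): bin limits may now be passed
either as bare floats, which are assumed to be in the bin field's output
units, or as unitful quantities, which are converted.

    >>> from yt.data_objects.profiles import Profile1D
    >>> ad = ds.all_data()
    >>> # unitful bounds are converted to the output units of the bin field;
    >>> # bare floats are assumed to already be in those units
    >>> prof = Profile1D(ad, ("gas", "density"), 64,
    ...                  ds.quan(1e-30, "g/cm**3"), ds.quan(1e-24, "g/cm**3"),
    ...                  True, weight_field=("gas", "cell_mass"))
    >>> prof.add_fields([("gas", "temperature")])
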
diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/data_objects/region_expression.py
--- a/yt/data_objects/region_expression.py
+++ b/yt/data_objects/region_expression.py
@@ -12,8 +12,8 @@
 #-----------------------------------------------------------------------------
 
 import weakref
-import types
 
+from yt.extern.six import string_types
 from yt.utilities.exceptions import YTDimensionalityError
 
 class RegionExpression(object):
@@ -31,11 +31,11 @@
         # At first, we will only implement this as accepting a slice that is
         # (optionally) unitful corresponding to a specific set of coordinates
         # that result in a rectangular prism or a slice.
-        if isinstance(item, types.StringTypes):
+        if isinstance(item, string_types):
             # This is some field; we will instead pass this back to the
             # all_data object.
             return self.all_data[item]
-        if isinstance(item, tuple) and isinstance(item[1], types.StringTypes):
+        if isinstance(item, tuple) and isinstance(item[1], string_types):
             return self.all_data[item]
         if len(item) != self.ds.dimensionality:
             # Not the right specification, and we don't want to do anything

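For context, the string check above is what lets the region expression
accessor fall through to all_data when indexed with a field name rather than
a slice.  A short sketch, assuming the accessor is exposed as ds.r:

    >>> rho = ds.r["density"]            # same as ds.all_data()["density"]
    >>> rho = ds.r[("gas", "density")]   # tuple field names take the same path
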
diff -r 7647e5c29854028d8936ee1d1f9b4669fd992cd0 -r 95e93d609ddd831c9f9d05961c421af91515d840 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -625,7 +625,7 @@
         """
         mylog.debug("Searching for maximum value of %s", field)
         source = self.all_data()
-        max_val, maxi, mx, my, mz = \
+        max_val, mx, my, mz = \
             source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
@@ -637,7 +637,7 @@
         """
         mylog.debug("Searching for minimum value of %s", field)
         source = self.all_data()
-        min_val, maxi, mx, my, mz = \
+        min_val, mx, my, mz = \
             source.quantities.min_location(field)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
               min_val, mx, my, mz)
@@ -1141,5 +1141,5 @@
     def _calculate_offsets(self, fields):
         pass
 
-    def __cmp__(self, other):
-        return cmp(self.filename, other.filename)
+    def __lt__(self, other):
+        return self.filename < other.filename

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/d883cddfa337/
Changeset:   d883cddfa337
Branch:      yt
User:        MatthewTurk
Date:        2016-01-24 19:59:21+00:00
Summary:     Merging from upstream
Affected #:  95 files

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -112,5 +112,5 @@
 
 
 .navbar-form.navbar-right:last-child {
-    margin-right: -20px;
+    margin-right: -60px;
 }

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -114,7 +114,7 @@
 
 If you have not done so already (see :ref:`source-installation`), clone a copy of the yt mercurial repository and make it the 'active' installation by doing
 
-.. code-block::bash
+.. code-block:: bash
 
   python setup.py develop
 

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1452,7 +1452,9 @@
 .. note::
    PyX must be installed, which can be accomplished either manually
    with ``pip install pyx`` or with the install script by setting
-   ``INST_PYX=1``.
+   ``INST_PYX=1``. If you are using Python 2, you must install PyX
+   version 0.12.1 with ``pip install pyx==0.12.1``, since that is
+   the last version with Python 2 support.
 
 This module can take any of the plots mentioned above and create an
 EPS or PDF figure.  For example,
@@ -1499,3 +1501,31 @@
 margin, but it can be overridden by providing the keyword
 ``cb_location`` with a dict of either ``right, left, top, bottom``
 with the fields as the keys.
+
+You can also combine slices, projections, and phase plots. Here is
+an example that includes slices and phase plots:
+
+.. code-block:: python
+
+    import yt
+    from yt import SlicePlot, PhasePlot
+    from yt.visualization.eps_writer import multiplot_yt
+   
+    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+    p1 = SlicePlot(ds, 0, 'density')
+    p1.set_width(10, 'kpc')
+   
+    p2 = SlicePlot(ds, 0, 'temperature')
+    p2.set_width(10, 'kpc')
+    p2.set_cmap('temperature', 'hot')
+
+    sph = ds.sphere(ds.domain_center, (10, 'kpc'))
+    p3 = PhasePlot(sph, 'radius', 'density', 'temperature',
+                   weight_field='cell_mass')
+
+    p4 = PhasePlot(sph, 'radius', 'density', 'pressure', 'cell_mass')
+
+    mp = multiplot_yt(2, 2, [p1, p2, p3, p4], savefig="yt", shrink_cb=0.9,
+                      bare_axes=False, yt_nocbar=False, margins=(0.5,0.5))
+
+    mp.save_fig('multi_slice_phase')

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -48,7 +48,7 @@
 
     xpos    = (PyArrayObject *) PyArray_FromAny(oxpos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if(!xpos){
     PyErr_Format(_FOFerror,
              "EnzoFOF: xpos didn't work.");
@@ -58,7 +58,7 @@
 
     ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if((!ypos)||(PyArray_SIZE(ypos) != num_particles)) {
     PyErr_Format(_FOFerror,
              "EnzoFOF: xpos and ypos must be the same length.");
@@ -67,7 +67,7 @@
 
     zpos    = (PyArrayObject *) PyArray_FromAny(ozpos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if((!zpos)||(PyArray_SIZE(zpos) != num_particles)) {
     PyErr_Format(_FOFerror,
              "EnzoFOF: xpos and zpos must be the same length.");
@@ -140,7 +140,8 @@
 
 	kdFinishFoF(kd);
 
-    PyArray_UpdateFlags(particle_group_id, NPY_OWNDATA | particle_group_id->flags);
+    PyArray_UpdateFlags(particle_group_id,
+        NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
     PyObject *return_value = Py_BuildValue("N", particle_group_id);
 
     Py_DECREF(xpos);

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -42,7 +42,7 @@
 
     *xpos    = (PyArrayObject *) PyArray_FromAny(oxpos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if(!*xpos){
     PyErr_Format(_HOPerror,
              "EnzoHop: xpos didn't work.");
@@ -52,7 +52,7 @@
 
     *ypos    = (PyArrayObject *) PyArray_FromAny(oypos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if((!*ypos)||(PyArray_SIZE(*ypos) != num_particles)) {
     PyErr_Format(_HOPerror,
              "EnzoHop: xpos and ypos must be the same length.");
@@ -61,7 +61,7 @@
 
     *zpos    = (PyArrayObject *) PyArray_FromAny(ozpos,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if((!*zpos)||(PyArray_SIZE(*zpos) != num_particles)) {
     PyErr_Format(_HOPerror,
              "EnzoHop: xpos and zpos must be the same length.");
@@ -70,7 +70,7 @@
 
     *mass    = (PyArrayObject *) PyArray_FromAny(omass,
                     PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
+                    NPY_ARRAY_INOUT_ARRAY | NPY_ARRAY_UPDATEIFCOPY, NULL);
     if((!*mass)||(PyArray_SIZE(*mass) != num_particles)) {
     PyErr_Format(_HOPerror,
              "EnzoHop: xpos and mass must be the same length.");
@@ -129,11 +129,11 @@
                     PyArray_DescrFromType(NPY_FLOAT64));
 
     fprintf(stdout, "Copying arrays for %d particles\n", num_particles);
-    kd->np_masses = (npy_float64*) mass->data;
-    kd->np_pos[0] = (npy_float64*) xpos->data;
-    kd->np_pos[1] = (npy_float64*) ypos->data;
-    kd->np_pos[2] = (npy_float64*) zpos->data;
-    kd->np_densities = (npy_float64*) particle_density->data;
+    kd->np_masses = (npy_float64*) PyArray_DATA(mass);
+    kd->np_pos[0] = (npy_float64*) PyArray_DATA(xpos);
+    kd->np_pos[1] = (npy_float64*) PyArray_DATA(ypos);
+    kd->np_pos[2] = (npy_float64*) PyArray_DATA(zpos);
+    kd->np_densities = (npy_float64*) PyArray_DATA(particle_density);
     kd->totalmass = totalmass;
 	for (i = 0; i < num_particles; i++) kd->p[i].np_index = i;
 
@@ -173,8 +173,8 @@
     free(my_comm.gl);
     free_slice(my_comm.s);
 
-    PyArray_UpdateFlags(particle_density, NPY_OWNDATA | particle_density->flags);
-    PyArray_UpdateFlags(particle_group_id, NPY_OWNDATA | particle_group_id->flags);
+    PyArray_UpdateFlags(particle_density, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_density));
+    PyArray_UpdateFlags(particle_group_id, NPY_ARRAY_OWNDATA | PyArray_FLAGS(particle_group_id));
     PyObject *return_value = Py_BuildValue("NN", particle_density, particle_group_id);
 
     Py_DECREF(xpos);
@@ -266,11 +266,11 @@
     totalmass /= normalize_to;
 
 
-    self->kd->np_masses = (npy_float64 *)self->mass->data;
-    self->kd->np_pos[0] = (npy_float64 *)self->xpos->data;
-    self->kd->np_pos[1] = (npy_float64 *)self->ypos->data;
-    self->kd->np_pos[2] = (npy_float64 *)self->zpos->data;
-    self->kd->np_densities = (npy_float64 *)self->densities->data;
+    self->kd->np_masses = (npy_float64 *)PyArray_DATA(self->mass);
+    self->kd->np_pos[0] = (npy_float64 *)PyArray_DATA(self->xpos);
+    self->kd->np_pos[1] = (npy_float64 *)PyArray_DATA(self->ypos);
+    self->kd->np_pos[2] = (npy_float64 *)PyArray_DATA(self->zpos);
+    self->kd->np_densities = (npy_float64 *)PyArray_DATA(self->densities);
     self->kd->totalmass = totalmass;
 
     PrepareKD(self->kd);

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -18,7 +18,7 @@
 from collections import defaultdict
 
 from yt.funcs import mylog, get_pbar
-from yt.utilities.lib.ContourFinding import \
+from yt.utilities.lib.contour_finding import \
     ContourTree, TileContourTree, link_node_contours, \
     update_joins
 from yt.utilities.lib.grid_traversal import \

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -13,7 +13,7 @@
 
 from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.time_series import DatasetSeries
-from yt.utilities.lib.CICDeposit import CICSample_3
+from yt.utilities.lib.particle_mesh_operations import CICSample_3
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.funcs import mylog, get_pbar

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -98,7 +98,8 @@
             key of the unit: (width, 'unit').  If set to a float, code units
             are assumed. Only for off-axis cubes.
         depth_res : integer, optional
-            The resolution of integration along the line of sight for off-axis cubes. Default: 256
+            Deprecated; this remains in the function signature only for API
+            compatibility.
         method : string, optional
             Set the projection method to be used.
             "integrate" : line of sight integration over the line element.
@@ -189,7 +190,7 @@
                 buf = prj.to_frb(width, self.nx, center=self.center)["intensity"]
             else:
                 buf, sc = off_axis_projection(ds, self.center, normal, width,
-                                          (self.nx, self.ny, depth_res), "intensity",
+                                          (self.nx, self.ny), "intensity",
                                           north_vector=north_vector, no_ghost=no_ghost,
                                           method=method, weight=weight_field)
             sto.result_id = i

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -171,7 +171,7 @@
             domains[(dle,dre)] = [halo,]
     #for niceness, let's process the domains in order of 
     #the one with the most halos
-    domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
+    domains_list = [(len(v),k,v) for k,v in domains.items()]
     domains_list.sort() 
     domains_list.reverse() #we want the most populated domains first
     return domains_list

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,7 +19,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.funcs import fix_axis, mylog, get_pbar
+from yt.funcs import fix_axis, get_pbar
 from yt.visualization.volume_rendering.off_axis_projection import \
     off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -236,7 +236,8 @@
         nx : integer, optional
             The dimensions on a side of the projection image.
         nz : integer, optional
-            The number of elements along the integration path length.
+            Deprecated; this remains in the function signature only for API
+            compatibility.
         north_vector : a sequence of floats
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
@@ -250,8 +251,8 @@
             less notable when the transfer function is smooth and
             broad. Default: True
         source : yt.data_objects.data_containers.YTSelectionContainer, optional
-            If specified, this will be the data source used for selecting regions to project.
-            Currently unsupported in yt 2.x.
+            If specified, this will be the data source used for selecting regions 
+            to project.
 
         Examples
         --------
@@ -261,32 +262,31 @@
         wd = self.ds.coordinates.sanitize_width(L, width, depth)
         w = tuple(el.in_units('code_length').v for el in wd)
         ctr, dctr = self.ds.coordinates.sanitize_center(center, L)
-        res = (nx, nx, nz)
+        res = (nx, nx)
 
-        if source is not None:
-            mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
-            raise NotImplementedError
+        if source is None:
+            source = self.ds
 
         beta_par = generate_beta_par(L)
         self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
         setup_sunyaev_zeldovich_fields(self.ds)
 
-        dens = off_axis_projection(self.ds, ctr, L, w, res, "density",
+        dens = off_axis_projection(source, ctr, L, w, res, "density",
                                    north_vector=north_vector, no_ghost=no_ghost)
-        Te = off_axis_projection(self.ds, ctr, L, w, res, "t_sz",
+        Te = off_axis_projection(source, ctr, L, w, res, "t_sz",
                                  north_vector=north_vector, no_ghost=no_ghost)/dens
-        bpar = off_axis_projection(self.ds, ctr, L, w, res, "beta_par",
+        bpar = off_axis_projection(source, ctr, L, w, res, "beta_par",
                                    north_vector=north_vector, no_ghost=no_ghost)/dens
-        omega1 = off_axis_projection(self.ds, ctr, L, w, res, "t_squared",
+        omega1 = off_axis_projection(source, ctr, L, w, res, "t_squared",
                                      north_vector=north_vector, no_ghost=no_ghost)/dens
         omega1 = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2 = off_axis_projection(self.ds, ctr, L, w, res, "beta_perp_squared", 
+            bperp2 = off_axis_projection(source, ctr, L, w, res, "beta_perp_squared", 
                                          north_vector=north_vector, no_ghost=no_ghost)/dens
-            sigma1 = off_axis_projection(self.ds, ctr, L, w, res, "t_beta_par", 
+            sigma1 = off_axis_projection(source, ctr, L, w, res, "t_beta_par", 
                                          north_vector=north_vector, no_ghost=no_ghost)/dens
             sigma1 = sigma1/Te - bpar
-            kappa1 = off_axis_projection(self.ds, ctr, L, w, res, "beta_par_squared", 
+            kappa1 = off_axis_projection(source, ctr, L, w, res, "beta_par_squared", 
                                          north_vector=north_vector, no_ghost=no_ghost)/dens
             kappa1 -= bpar
         else:

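A hypothetical call showing what the change above enables, namely restricting
an off-axis S-Z projection to a data source.  The szprj object is assumed to
be an existing SZProjection instance whose off_axis method is the one patched
here; the normal vector, width, and sphere are illustrative.

    >>> sp = ds.sphere("c", (0.5, "Mpc"))
    >>> L = [0.1, 0.3, 0.9]
    >>> szprj.off_axis(L, width=(1.0, "Mpc"), nx=256, source=sp)
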
diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -19,7 +19,7 @@
 import fileinput
 import io
 from re import finditer
-from tempfile import TemporaryFile
+from tempfile import NamedTemporaryFile, TemporaryFile
 import os
 import sys
 import zipfile
@@ -40,9 +40,9 @@
     YTParticleDepositionNotImplemented, \
     YTNoAPIKey, \
     YTTooManyVertices
-from yt.utilities.lib.QuadTree import \
+from yt.utilities.lib.quad_tree import \
     QuadTree
-from yt.utilities.lib.Interpolators import \
+from yt.utilities.lib.interpolators import \
     ghost_zone_interpolate
 from yt.utilities.lib.misc_utilities import \
     fill_region, fill_region_float
@@ -58,7 +58,7 @@
 from yt.fields.field_exceptions import \
     NeedsOriginalGrid
 from yt.frontends.stream.api import load_uniform_grid
-
+import yt.extern.six as six
 
 class YTStreamline(YTSelectionContainer1D):
     """
@@ -810,7 +810,7 @@
                               int(any(self.ds.periodicity)))
         fi = self.ds._get_field_info(field)
         self[field] = self.ds.arr(dest, fi.units)
-        
+
 
 class LevelState(object):
     current_dx = None
@@ -1628,7 +1628,7 @@
     @parallel_root_only
     def _export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
-        if isinstance(filename, io.IOBase):
+        if hasattr(filename, 'read'):
             f = filename
         else:
             f = open(filename, "wb")
@@ -1641,27 +1641,29 @@
               ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
         fs = [("ni", "uint8"), ("v1", "<i4"), ("v2", "<i4"), ("v3", "<i4"),
               ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
-        f.write("ply\n")
-        f.write("format binary_little_endian 1.0\n")
-        f.write("element vertex %s\n" % (nv))
-        f.write("property float x\n")
-        f.write("property float y\n")
-        f.write("property float z\n")
+        f.write(b"ply\n")
+        f.write(b"format binary_little_endian 1.0\n")
+        line = "element vertex %i\n" % (nv)
+        f.write(six.b(line))
+        f.write(b"property float x\n")
+        f.write(b"property float y\n")
+        f.write(b"property float z\n")
         if color_field is not None and sample_type == "vertex":
-            f.write("property uchar red\n")
-            f.write("property uchar green\n")
-            f.write("property uchar blue\n")
+            f.write(b"property uchar red\n")
+            f.write(b"property uchar green\n")
+            f.write(b"property uchar blue\n")
             v = np.empty(self.vertices.shape[1], dtype=vs)
             cs = self.vertex_samples[color_field]
             self._color_samples(cs, color_log, color_map, v)
         else:
             v = np.empty(self.vertices.shape[1], dtype=vs[:3])
-        f.write("element face %s\n" % (nv/3))
-        f.write("property list uchar int vertex_indices\n")
+        line = "element face %i\n" % (nv / 3)
+        f.write(six.b(line))
+        f.write(b"property list uchar int vertex_indices\n")
         if color_field is not None and sample_type == "face":
-            f.write("property uchar red\n")
-            f.write("property uchar green\n")
-            f.write("property uchar blue\n")
+            f.write(b"property uchar red\n")
+            f.write(b"property uchar green\n")
+            f.write(b"property uchar blue\n")
             # Now we get our samples
             cs = self[color_field]
             arr = np.empty(cs.shape[0], dtype=np.dtype(fs))
@@ -1676,7 +1678,7 @@
             np.divide(tmp, w, tmp)
             np.subtract(tmp, 0.5, tmp) # Center at origin.
             v[ax][:] = tmp
-        f.write("end_header\n")
+        f.write(b"end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3
         vi = np.arange(nv, dtype="<i")
@@ -1765,39 +1767,49 @@
             open(fn, "wb").write(ply_file.read())
             raise YTTooManyVertices(self.vertices.shape[1], fn)
 
-        zfs = TemporaryFile()
+        zfs = NamedTemporaryFile(suffix='.zip')
         with zipfile.ZipFile(zfs, "w", zipfile.ZIP_DEFLATED) as zf:
             zf.writestr("yt_export.ply", ply_file.read())
         zfs.seek(0)
 
         zfs.seek(0)
         data = {
-            'title': title,
             'token': api_key,
+            'name': title,
             'description': description,
-            'fileModel': zfs,
-            'filenameModel': "yt_export.zip",
+            'tags': "yt",
         }
-        upload_id = self._upload_to_sketchfab(data)
+        files = {
+            'modelFile': zfs
+        }
+        upload_id = self._upload_to_sketchfab(data, files)
         upload_id = self.comm.mpi_bcast(upload_id, root = 0)
         return upload_id
 
     @parallel_root_only
-    def _upload_to_sketchfab(self, data):
-        import json
-        from yt.extern.six.moves import urllib
-        from yt.utilities.poster.encode import multipart_encode
-        from yt.utilities.poster.streaminghttp import register_openers
-        register_openers()
-        datamulti, headers = multipart_encode(data)
-        request = urllib.request.Request("https://api.sketchfab.com/v1/models",
-                        datamulti, headers)
-        rv = urllib.request.urlopen(request).read()
-        rv = json.loads(rv)
-        upload_id = rv.get("result", {}).get("id", None)
-        if upload_id:
-            mylog.info("Model uploaded to: https://sketchfab.com/show/%s",
-                       upload_id)
+    def _upload_to_sketchfab(self, data, files):
+        import requests
+        SKETCHFAB_DOMAIN = 'sketchfab.com'
+        SKETCHFAB_API_URL = 'https://api.{}/v2/models'.format(SKETCHFAB_DOMAIN)
+        SKETCHFAB_MODEL_URL = 'https://{}/models/'.format(SKETCHFAB_DOMAIN)
+
+        try:
+            r = requests.post(SKETCHFAB_API_URL, data=data, files=files, verify=False)
+        except requests.exceptions.RequestException as e:
+            mylog.error("An error occurred: {}".format(e))
+            return
+
+        result = r.json()
+
+        if r.status_code != requests.codes.created:
+            mylog.error("Upload to SketchFab failed with error: {}".format(result))
+            return
+
+        model_uid = result['uid']
+        model_url = SKETCHFAB_MODEL_URL + model_uid
+        if model_uid:
+            mylog.info("Model uploaded to: {}".format(model_url))
         else:
             mylog.error("Problem uploading.")
-        return upload_id
+
+        return model_uid

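A hypothetical usage sketch for the reworked upload path; the surface
construction, isocontour value, and API key are illustrative, and the public
entry point is assumed to be the surface object's export_sketchfab method,
which passes title, description, and api_key through to the code above.

    >>> surf = ds.surface(ds.all_data(), ("gas", "density"), 1e-27)
    >>> uid = surf.export_sketchfab(title="density isosurface",
    ...                             description="uploaded from yt",
    ...                             api_key="YOUR_SKETCHFAB_TOKEN")
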
diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -24,7 +24,7 @@
 from yt.utilities.exceptions import \
     YTFieldTypeNotFound, \
     YTParticleDepositionNotImplemented
-from yt.utilities.lib.Interpolators import \
+from yt.utilities.lib.interpolators import \
     ghost_zone_interpolate
 
 class AMRGridPatch(YTSelectionContainer):

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -32,7 +32,7 @@
     new_bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
-from yt.utilities.lib.CICDeposit import \
+from yt.utilities.lib.particle_mesh_operations import \
     CICDeposit_2, \
     NGPDeposit_2
 

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -293,6 +293,22 @@
     def _is_valid(cls, *args, **kwargs):
         return False
 
+    @classmethod
+    def _guess_candidates(cls, base, directories, files):
+        """
+        This is a class method that accepts a directory (base), a list of
+        subdirectories, and a list of files in that directory.  It should
+        return a list of filenames (defined relative to the supplied
+        directory) and a boolean as to whether further directories should be
+        recursed.
+        
+        This function doesn't need to catch all possibilities, nor does it need
+        to filter possibilities.
+        """
+        return [], True
+
+    def close(self):
+        pass
+
     def __getitem__(self, key):
         """ Returns units, parameters, or conversion_factors in that order. """
         return self.parameters[key]

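A hypothetical frontend override illustrating the contract described in the
docstring above (MyFormatDataset and the ".myfmt" suffix are made up):

    >>> class MyFormatDataset(Dataset):
    ...     @classmethod
    ...     def _guess_candidates(cls, base, directories, files):
    ...         # flag files with the made-up ".myfmt" suffix and stop
    ...         # recursing into subdirectories once something is found
    ...         candidates = [f for f in files if f.endswith(".myfmt")]
    ...         return candidates, len(candidates) == 0
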
diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/progressbar.py
--- a/yt/extern/progressbar.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/usr/bin/python
-# -*- coding: iso-8859-1 -*-
-#
-# progressbar  - Text progressbar library for python.
-# Copyright (c) 2005 Nilton Volpato
-# 
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-# 
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-# 
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-
-"""Text progressbar library for python.
-
-This library provides a text mode progressbar. This is tipically used
-to display the progress of a long running operation, providing a
-visual clue that processing is underway.
-
-The ProgressBar class manages the progress, and the format of the line
-is given by a number of widgets. A widget is an object that may
-display diferently depending on the state of the progress. There are
-three types of widget:
-- a string, which always shows itself;
-- a ProgressBarWidget, which may return a diferent value every time
-it's update method is called; and
-- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
-expands to fill the remaining width of the line.
-
-The progressbar module is very easy to use, yet very powerful. And
-automatically supports features like auto-resizing when available.
-"""
-from __future__ import print_function
-
-__author__ = "Nilton Volpato"
-__author_email__ = "first-name dot last-name @ gmail.com"
-__date__ = "2006-05-07"
-__version__ = "2.2"
-
-# Changelog
-#
-# 2006-05-07: v2.2 fixed bug in windows
-# 2005-12-04: v2.1 autodetect terminal width, added start method
-# 2005-12-04: v2.0 everything is now a widget (wow!)
-# 2005-12-03: v1.0 rewrite using widgets
-# 2005-06-02: v0.5 rewrite
-# 2004-??-??: v0.1 first version
-
-
-from yt.extern.six import string_types
-import sys, time
-from array import array
-try:
-    from fcntl import ioctl
-    import termios
-except ImportError:
-    pass
-import signal
-
-class ProgressBarWidget(object):
-    """This is an element of ProgressBar formatting.
-
-    The ProgressBar object will call it's update value when an update
-    is needed. It's size may change between call, but the results will
-    not be good if the size changes drastically and repeatedly.
-    """
-    def update(self, pbar):
-        """Returns the string representing the widget.
-
-        The parameter pbar is a reference to the calling ProgressBar,
-        where one can access attributes of the class for knowing how
-        the update must be made.
-
-        At least this function must be overriden."""
-        pass
-
-class ProgressBarWidgetHFill(object):
-    """This is a variable width element of ProgressBar formatting.
-
-    The ProgressBar object will call it's update value, informing the
-    width this object must the made. This is like TeX \\hfill, it will
-    expand to fill the line. You can use more than one in the same
-    line, and they will all have the same width, and together will
-    fill the line.
-    """
-    def update(self, pbar, width):
-        """Returns the string representing the widget.
-
-        The parameter pbar is a reference to the calling ProgressBar,
-        where one can access attributes of the class for knowing how
-        the update must be made. The parameter width is the total
-        horizontal width the widget must have.
-
-        At least this function must be overriden."""
-        pass
-
-
-class ETA(ProgressBarWidget):
-    "Widget for the Estimated Time of Arrival"
-    def format_time(self, seconds):
-        return time.strftime('%H:%M:%S', time.gmtime(seconds))
-    def update(self, pbar):
-        if pbar.currval == 0:
-            return 'ETA:  --:--:--'
-        elif pbar.finished:
-            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
-        else:
-            elapsed = pbar.seconds_elapsed
-            eta = elapsed * pbar.maxval / pbar.currval - elapsed
-            return 'ETA:  %s' % self.format_time(eta)
-
-class FileTransferSpeed(ProgressBarWidget):
-    "Widget for showing the transfer speed (useful for file transfers)."
-    def __init__(self):
-        self.fmt = '%6.2f %s'
-        self.units = ['B','K','M','G','T','P']
-    def update(self, pbar):
-        if pbar.seconds_elapsed < 2e-6:#== 0:
-            bps = 0.0
-        else:
-            bps = float(pbar.currval) / pbar.seconds_elapsed
-        spd = bps
-        for u in self.units:
-            if spd < 1000:
-                break
-            spd /= 1000
-        return self.fmt % (spd, u+'/s')
-
-class RotatingMarker(ProgressBarWidget):
-    "A rotating marker for filling the bar of progress."
-    def __init__(self, markers='|/-\\'):
-        self.markers = markers
-        self.curmark = -1
-    def update(self, pbar):
-        if pbar.finished:
-            return self.markers[0]
-        self.curmark = (self.curmark + 1)%len(self.markers)
-        return self.markers[self.curmark]
-
-class Percentage(ProgressBarWidget):
-    "Just the percentage done."
-    def update(self, pbar):
-        return '%3d%%' % pbar.percentage()
-
-class Bar(ProgressBarWidgetHFill):
-    "The bar of progress. It will strech to fill the line."
-    def __init__(self, marker='#', left='|', right='|'):
-        self.marker = marker
-        self.left = left
-        self.right = right
-    def _format_marker(self, pbar):
-        if isinstance(self.marker, string_types):
-            return self.marker
-        else:
-            return self.marker.update(pbar)
-    def update(self, pbar, width):
-        percent = pbar.percentage()
-        cwidth = int(width - len(self.left) - len(self.right))
-        marked_width = int(percent * cwidth / 100)
-        m = self._format_marker(pbar)
-        bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
-        return bar
-
-class ReverseBar(Bar):
-    "The reverse bar of progress, or bar of regress. :)"
-    def update(self, pbar, width):
-        percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
-        marked_width = int(percent * cwidth / 100)
-        m = self._format_marker(pbar)
-        bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
-        return bar
-
-default_widgets = [Percentage(), ' ', Bar()]
-class ProgressBar(object):
-    """This is the ProgressBar class, it updates and prints the bar.
-
-    The term_width parameter may be an integer. Or None, in which case
-    it will try to guess it, if it fails it will default to 80 columns.
-
-    The simple use is like this:
-    >>> pbar = ProgressBar().start()
-    >>> for i in xrange(100):
-    ...    # do something
-    ...    pbar.update(i+1)
-    ...
-    >>> pbar.finish()
-
-    But anything you want to do is possible (well, almost anything).
-    You can supply different widgets of any type in any order. And you
-    can even write your own widgets! There are many widgets already
-    shipped and you should experiment with them.
-
-    When implementing a widget update method you may access any
-    attribute or function of the ProgressBar object calling the
-    widget's update method. The most important attributes you would
-    like to access are:
-    - currval: current value of the progress, 0 <= currval <= maxval
-    - maxval: maximum (and final) value of the progress
-    - finished: True if the bar is have finished (reached 100%), False o/w
-    - start_time: first time update() method of ProgressBar was called
-    - seconds_elapsed: seconds elapsed since start_time
-    - percentage(): percentage of the progress (this is a method)
-    """
-    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
-                 fd=sys.stderr):
-        assert maxval > 0
-        self.maxval = maxval
-        self.widgets = widgets
-        self.fd = fd
-        self.signal_set = False
-        if term_width is None:
-            try:
-                self.handle_resize(None,None)
-                signal.signal(signal.SIGWINCH, self.handle_resize)
-                self.signal_set = True
-            except:
-                self.term_width = 79
-        else:
-            self.term_width = term_width
-
-        self.currval = 0
-        self.finished = False
-        self.prev_percentage = -1
-        self.start_time = None
-        self.seconds_elapsed = 0
-
-    def handle_resize(self, signum, frame):
-        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
-        self.term_width = w
-
-    def percentage(self):
-        "Returns the percentage of the progress."
-        return self.currval*100.0 / self.maxval
-
-    def _format_widgets(self):
-        r = []
-        hfill_inds = []
-        num_hfill = 0
-        currwidth = 0
-        for i, w in enumerate(self.widgets):
-            if isinstance(w, ProgressBarWidgetHFill):
-                r.append(w)
-                hfill_inds.append(i)
-                num_hfill += 1
-            elif isinstance(w, string_types):
-                r.append(w)
-                currwidth += len(w)
-            else:
-                weval = w.update(self)
-                currwidth += len(weval)
-                r.append(weval)
-        for iw in hfill_inds:
-            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
-        return r
-
-    def _format_line(self):
-        return ''.join(self._format_widgets()).ljust(self.term_width)
-
-    def _need_update(self):
-        return int(self.percentage()) != int(self.prev_percentage)
-
-    def update(self, value):
-        "Updates the progress bar to a new value."
-        assert 0 <= value <= self.maxval
-        self.currval = value
-        if not self._need_update() or self.finished:
-            return
-        if not self.start_time:
-            self.start_time = time.time()
-        self.seconds_elapsed = time.time() - self.start_time
-        self.prev_percentage = self.percentage()
-        if value != self.maxval:
-            self.fd.write(self._format_line() + '\r')
-        else:
-            self.finished = True
-            self.fd.write(self._format_line() + '\n')
-
-    def start(self):
-        """Start measuring time, and prints the bar at 0%.
-
-        It returns self so you can use it like this:
-        >>> pbar = ProgressBar().start()
-        >>> for i in xrange(100):
-        ...    # do something
-        ...    pbar.update(i+1)
-        ...
-        >>> pbar.finish()
-        """
-        self.update(0)
-        return self
-
-    def finish(self):
-        """Used to tell the progress is finished."""
-        self.update(self.maxval)
-        if self.signal_set:
-            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
-        
-
-
-
-
-
-if __name__=='__main__':
-    import os
-
-    def example1():
-        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
-                   ' ', ETA(), ' ', FileTransferSpeed()]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
-        for i in range(1000000):
-            # do something
-            pbar.update(10*i+1)
-        pbar.finish()
-        print()
-
-    def example2():
-        class CrazyFileTransferSpeed(FileTransferSpeed):
-            "It's bigger between 45 and 80 percent"
-            def update(self, pbar):
-                if 45 < pbar.percentage() < 80:
-                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
-                else:
-                    return FileTransferSpeed.update(self,pbar)
-
-        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000)
-        # maybe do something
-        pbar.start()
-        for i in range(2000000):
-            # do something
-            pbar.update(5*i+1)
-        pbar.finish()
-        print()
-
-    def example3():
-        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
-        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
-        for i in range(1000000):
-            # do something
-            pbar.update(10*i+1)
-        pbar.finish()
-        print()
-
-    def example4():
-        widgets = ['Test: ', Percentage(), ' ',
-                   Bar(marker='0',left='[',right=']'),
-                   ' ', ETA(), ' ', FileTransferSpeed()]
-        pbar = ProgressBar(widgets=widgets, maxval=500)
-        pbar.start()
-        for i in range(100,500+1,50):
-            time.sleep(0.2)
-            pbar.update(i)
-        pbar.finish()
-        print()
-
-
-    example1()
-    example2()
-    example3()
-    example4()
-

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/setup.py
--- a/yt/extern/setup.py
+++ b/yt/extern/setup.py
@@ -11,5 +11,6 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('extern', parent_package, top_path)
+    config.add_subpackage("tqdm")
     config.make_config_py()
     return config
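
For context, a minimal sketch of how a numpy.distutils configuration() like the
one above is consumed at build time; the standalone setup() driver below is
illustrative and not part of this changeset.

    # Hypothetical driver: numpy.distutils picks up the "tqdm" subpackage
    # registered by configuration() when building yt.extern.
    from numpy.distutils.core import setup

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('extern', parent_package, top_path)
        config.add_subpackage("tqdm")  # the newly vendored package
        config.make_config_py()
        return config

    if __name__ == '__main__':
        setup(configuration=configuration)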

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/tqdm/LICENSE
--- /dev/null
+++ b/yt/extern/tqdm/LICENSE
@@ -0,0 +1,22 @@
+https://github.com/tqdm/tqdm
+
+The MIT License (MIT)
+
+Copyright (c) 2013 noamraph
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/tqdm/__init__.py
--- /dev/null
+++ b/yt/extern/tqdm/__init__.py
@@ -0,0 +1,11 @@
+from ._tqdm import tqdm
+from ._tqdm import trange
+from ._tqdm import format_interval
+from ._tqdm import format_meter
+from ._tqdm_gui import tqdm_gui
+from ._tqdm_gui import tgrange
+from ._tqdm_pandas import tqdm_pandas
+from ._version import __version__  # NOQA
+
+__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'format_interval',
+           'format_meter', 'tqdm_pandas', '__version__']
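
With the re-exports above in place, code elsewhere in yt can use the vendored
copy without requiring a system-wide tqdm. A small illustrative loop (not part
of this changeset):

    # Illustrative only: the vendored tqdm wraps any iterable.
    from yt.extern.tqdm import tqdm

    total = 0
    for i in tqdm(range(1000), desc="summing"):
        total += i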

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/tqdm/_tqdm.py
--- /dev/null
+++ b/yt/extern/tqdm/_tqdm.py
@@ -0,0 +1,562 @@
+"""
+Customisable progressbar decorator for iterators.
+Includes a default (x)range iterator printing to stderr.
+
+Usage:
+  >>> from tqdm import trange[, tqdm]
+  >>> for i in trange(10): #same as: for i in tqdm(xrange(10))
+  ...     ...
+"""
+# future division is important to divide integers and get as
+# a result precise floating numbers (instead of truncated int)
+from __future__ import division, absolute_import
+# import compatibility functions and utilities
+from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
+    _term_move_up
+import sys
+from time import time
+
+
+__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
+                              "casperdcl", "lrq3000"]}
+__all__ = ['tqdm', 'trange', 'format_interval', 'format_meter']
+
+
+def format_sizeof(num, suffix=''):
+    """
+    Formats a number (greater than unity) with SI Order of Magnitude prefixes.
+
+    Parameters
+    ----------
+    num  : float
+        Number ( >= 1) to format.
+    suffix  : str, optional
+        Post-postfix [default: ''].
+
+    Returns
+    -------
+    out  : str
+        Number with Order of Magnitude SI unit postfix.
+    """
+    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
+        if abs(num) < 999.95:
+            if abs(num) < 99.95:
+                if abs(num) < 9.995:
+                    return '{0:1.2f}'.format(num) + unit + suffix
+                return '{0:2.1f}'.format(num) + unit + suffix
+            return '{0:3.0f}'.format(num) + unit + suffix
+        num /= 1000.0
+    return '{0:3.1f}Y'.format(num) + suffix
+
+
+def format_interval(t):
+    """
+    Formats a number of seconds as a clock time, [H:]MM:SS
+
+    Parameters
+    ----------
+    t  : int
+        Number of seconds.
+    Returns
+    -------
+    out  : str
+        [H:]MM:SS
+    """
+    mins, s = divmod(int(t), 60)
+    h, m = divmod(mins, 60)
+    if h:
+        return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
+    else:
+        return '{0:02d}:{1:02d}'.format(m, s)
+
+
+def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
+                 unit='it', unit_scale=False, rate=None):
+    """
+    Return a string-based progress bar given some parameters
+
+    Parameters
+    ----------
+    n  : int
+        Number of finished iterations.
+    total  : int
+        The expected total number of iterations. If meaningless (e.g. None
+        or 0), only basic progress statistics are displayed (no ETA).
+    elapsed  : float
+        Number of seconds passed since start.
+    ncols  : int, optional
+        The width of the entire output message. If specified, dynamically
+        resizes the progress meter to stay within this bound
+        [default: None]. The fallback meter width is 10 for the progress bar
+        + no limit for the iterations counter and statistics. If 0, will not
+        print any meter (only stats).
+    prefix  : str, optional
+        Prefix message (included in total width) [default: ''].
+    ascii  : bool, optional
+        If not set, use unicode (smooth blocks) to fill the meter
+        [default: False]. The fallback is to use ASCII characters (1-9 #).
+    unit  : str, optional
+        The iteration unit [default: 'it'].
+    unit_scale  : bool, optional
+        If set, the number of iterations will be printed with an appropriate
+        SI metric prefix (K = 10^3, M = 10^6, etc.) [default: False].
+    rate  : float, optional
+        Manual override for iteration rate.
+        If [default: None], uses n/elapsed.
+
+    Returns
+    -------
+    out  : Formatted meter and stats, ready to display.
+    """
+
+    # sanity check: total
+    if total and n > total:
+        total = None
+
+    elapsed_str = format_interval(elapsed)
+
+    # if unspecified, attempt to use rate = average speed
+    # (we allow manual override since predicting time is an arcane art)
+    if rate is None and elapsed:
+        rate = n / elapsed
+    rate_fmt = ((format_sizeof(rate) if unit_scale else
+                 '{0:5.2f}'.format(rate)) if elapsed else
+                '?') \
+        + unit + '/s'
+
+    if unit_scale:
+        n_fmt = format_sizeof(n)
+        total_fmt = format_sizeof(total) if total else None
+    else:
+        n_fmt = str(n)
+        total_fmt = str(total)
+
+    # total is known: we can predict some stats
+    if total:
+        # fractional and percentage progress
+        frac = n / total
+        percentage = frac * 100
+
+        remaining_str = format_interval((total - n) / rate) if rate else '?'
+
+        # format the stats displayed to the left and right sides of the bar
+        l_bar = (prefix if prefix else '') + '{0:3.0f}%|'.format(percentage)
+        r_bar = '| {0}/{1} [{2}<{3}, {4}]'.format(
+                n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt)
+
+        if ncols == 0:
+            return l_bar[:-1] + r_bar[1:]
+
+        # space available for bar's display
+        N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
+            else 10
+
+        # format bar depending on availability of unicode/ascii chars
+        if ascii:
+            bar_length, frac_bar_length = divmod(
+                int(frac * N_BARS * 10), 10)
+
+            bar = '#' * bar_length
+            frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
+                else ' '
+
+        else:
+            bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
+
+            bar = _unich(0x2588) * bar_length
+            frac_bar = _unich(0x2590 - frac_bar_length) \
+                if frac_bar_length else ' '
+
+        # whitespace padding
+        if bar_length < N_BARS:
+            full_bar = bar + frac_bar + \
+                ' ' * max(N_BARS - bar_length - 1, 0)
+        else:
+            full_bar = bar + \
+                ' ' * max(N_BARS - bar_length, 0)
+
+        return l_bar + full_bar + r_bar
+
+    # no total: no progressbar, ETA, just progress stats
+    else:
+        return (prefix if prefix else '') + '{0}{1} [{2}, {3}]'.format(
+            n_fmt, unit, elapsed_str, rate_fmt)
+
+
+def StatusPrinter(file):
+    """
+    Manage the printing and in-place updating of a line of characters.
+    Note that if the string is longer than a line, then in-place updating
+    may not work (it will print a new line at each refresh).
+    """
+    fp = file
+    if not getattr(fp, 'flush', False):  # pragma: no cover
+        fp.flush = lambda: None
+
+    last_printed_len = [0]  # closure over mutable variable (fast)
+
+    def print_status(s):
+        len_s = len(s)
+        fp.write('\r' + s + (' ' * max(last_printed_len[0] - len_s, 0)))
+        fp.flush()
+        last_printed_len[0] = len_s
+    return print_status
+
+
+class tqdm(object):
+    """
+    Decorate an iterable object, returning an iterator which acts exactly
+    like the original iterable, but prints a dynamically updating
+    progressbar every time a value is requested.
+    """
+    def __init__(self, iterable=None, desc=None, total=None, leave=False,
+                 file=sys.stderr, ncols=None, mininterval=0.1,
+                 maxinterval=10.0, miniters=None, ascii=None, disable=False,
+                 unit='it', unit_scale=False, dynamic_ncols=False,
+                 smoothing=0.3, nested=False, gui=False):
+        """
+        Parameters
+        ----------
+        iterable  : iterable, optional
+            Iterable to decorate with a progressbar.
+            Leave blank [default: None] to manually manage the updates.
+        desc  : str, optional
+            Prefix for the progressbar [default: None].
+        total  : int, optional
+            The number of expected iterations. If not given, len(iterable)
+            is used if possible. As a last resort, only basic progress
+            statistics are displayed (no ETA, no progressbar). If `gui` is
+            True and this parameter needs subsequent updating, specify an
+            initial arbitrary large positive integer, e.g. int(9e9).
+        leave  : bool, optional
+            If [default: False], removes all traces of the progressbar
+            upon termination of iteration.
+        file  : `io.TextIOWrapper` or `io.StringIO`, optional
+            Specifies where to output the progress messages
+            [default: sys.stderr]. Uses `file.write(str)` and `file.flush()`
+            methods.
+        ncols  : int, optional
+            The width of the entire output message. If specified,
+            dynamically resizes the progressbar to stay within this bound.
+            If [default: None], attempts to use environment width. The
+            fallback is a meter width of 10 and no limit for the counter and
+            statistics. If 0, will not print any meter (only stats).
+        mininterval  : float, optional
+            Minimum progress update interval, in seconds [default: 0.1].
+        maxinterval  : float, optional
+            Maximum progress update interval, in seconds [default: 10.0].
+        miniters  : int, optional
+            Minimum progress update interval, in iterations [default: None].
+        ascii  : bool, optional
+            If [default: None] or False, use unicode (smooth blocks) to fill
+            the meter. The fallback is to use ASCII characters `1-9 #`.
+        disable : bool
+            Whether to disable the entire progressbar wrapper
+            [default: False].
+        unit  : str, optional
+            String that will be used to define the unit of each iteration
+            [default: 'it'].
+        unit_scale  : bool, optional
+            If set, the number of iterations will be reduced/scaled
+            automatically and a metric prefix following the
+            International System of Units standard will be added
+            (kilo, mega, etc.) [default: False].
+        dynamic_ncols  : bool, optional
+            If set, constantly alters `ncols` to the environment (allowing
+            for window resizes) [default: False].
+        smoothing  : float
+            Exponential moving average smoothing factor for speed estimates
+            (ignored in GUI mode). Ranges from 0 (average speed) to 1
+            (current/instantaneous speed) [default: 0.3].
+        nested  : bool, optional
+            Whether this iterable is nested in another one also managed by
+            `tqdm` [default: False]. Allows display of multiple, nested
+            progress bars.
+        gui  : bool, optional
+            WARNING: internal parameter - do not use.
+            Use tqdm_gui(...) instead. If set, will attempt to use
+            matplotlib animations for a graphical output [default: False].
+
+        Returns
+        -------
+        out  : decorated iterator.
+        """
+        # Preprocess the arguments
+        if total is None and iterable is not None:
+            try:
+                total = len(iterable)
+            except (TypeError, AttributeError):
+                total = None
+
+        if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
+                dynamic_ncols:
+            if dynamic_ncols:  # pragma: no cover
+                dynamic_ncols = _environ_cols_wrapper()
+                ncols = dynamic_ncols(file)
+            else:
+                ncols = _environ_cols_wrapper()(file)
+
+        if miniters is None:
+            miniters = 0
+            dynamic_miniters = True
+        else:
+            dynamic_miniters = False
+
+        if mininterval is None:
+            mininterval = 0
+
+        if maxinterval is None:
+            maxinterval = 0
+
+        if ascii is None:
+            ascii = not _supports_unicode(file)
+
+        if smoothing is None:
+            smoothing = 0
+
+        # Store the arguments
+        self.iterable = iterable
+        self.desc = desc + ': ' if desc else ''
+        self.total = total
+        self.leave = leave
+        self.fp = file
+        self.ncols = ncols
+        self.mininterval = mininterval
+        self.maxinterval = maxinterval
+        self.miniters = miniters
+        self.dynamic_miniters = dynamic_miniters
+        self.ascii = ascii
+        self.disable = disable
+        self.unit = unit
+        self.unit_scale = unit_scale
+        self.gui = gui
+        self.dynamic_ncols = dynamic_ncols
+        self.smoothing = smoothing
+        self.avg_rate = None
+        # if nested, at initial sp() call we replace '\r' by '\n' to
+        # not overwrite the outer progress bar
+        self.nested = nested
+
+        if not gui:
+            # Initialize the screen printer
+            self.sp = StatusPrinter(self.fp)
+            if not disable:
+                if self.nested:
+                    self.fp.write('\n')
+                self.sp(format_meter(0, total, 0,
+                        (dynamic_ncols(file) if dynamic_ncols else ncols),
+                        self.desc, ascii, unit, unit_scale))
+
+        # Init the time/iterations counters
+        self.start_t = self.last_print_t = time()
+        self.last_print_n = 0
+        self.n = 0
+
+    def __len__(self):
+        return len(self.iterable) if self.iterable else self.total
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc):
+        self.close()
+        return False
+
+    def __iter__(self):
+        ''' Backward-compatibility to use: for x in tqdm(iterable) '''
+
+        # Inlining instance variables as locals (speed optimisation)
+        iterable = self.iterable
+
+        # If the bar is disabled, then just walk the iterable
+        # (note: keep this check outside the loop for performance)
+        if self.disable:
+            for obj in iterable:
+                yield obj
+        else:
+            ncols = self.ncols
+            mininterval = self.mininterval
+            maxinterval = self.maxinterval
+            miniters = self.miniters
+            dynamic_miniters = self.dynamic_miniters
+            unit = self.unit
+            unit_scale = self.unit_scale
+            ascii = self.ascii
+            start_t = self.start_t
+            last_print_t = self.last_print_t
+            last_print_n = self.last_print_n
+            n = self.n
+            dynamic_ncols = self.dynamic_ncols
+            smoothing = self.smoothing
+            avg_rate = self.avg_rate
+
+            try:
+                sp = self.sp
+            except AttributeError:
+                raise DeprecationWarning('Please use tqdm_gui(...)'
+                                         ' instead of tqdm(..., gui=True)')
+
+            for obj in iterable:
+                yield obj
+                # Update and print the progressbar.
+                # Note: does not call self.update(1) for speed optimisation.
+                n += 1
+                delta_it = n - last_print_n
+                # check the counter first (avoid calls to time())
+                if delta_it >= miniters:
+                    cur_t = time()
+                    delta_t = cur_t - last_print_t
+                    if delta_t >= mininterval:
+                        elapsed = cur_t - start_t
+                        # EMA (not just overall average)
+                        if smoothing and delta_t:
+                            avg_rate = delta_it / delta_t \
+                                if avg_rate is None \
+                                else smoothing * delta_it / delta_t + \
+                                (1 - smoothing) * avg_rate
+
+                        sp(format_meter(
+                            n, self.total, elapsed,
+                            (dynamic_ncols(self.fp) if dynamic_ncols
+                             else ncols),
+                            self.desc, ascii, unit, unit_scale, avg_rate))
+
+                        # If no `miniters` was specified, adjust automatically
+                        # to the maximum iteration rate seen so far.
+                        if dynamic_miniters:
+                            if maxinterval and delta_t > maxinterval:
+                                # Set miniters to correspond to maxinterval
+                                miniters = delta_it * maxinterval / delta_t
+                            elif mininterval and delta_t:
+                                # EMA-weight miniters to converge
+                                # towards the timeframe of mininterval
+                                miniters = smoothing * delta_it * mininterval \
+                                    / delta_t + (1 - smoothing) * miniters
+                            else:
+                                miniters = smoothing * delta_it + \
+                                    (1 - smoothing) * miniters
+
+                        # Store old values for next call
+                        last_print_n = n
+                        last_print_t = cur_t
+
+            # Closing the progress bar.
+            # Update some internal variables for close().
+            self.last_print_n = last_print_n
+            self.n = n
+            self.close()
+
+    def update(self, n=1):
+        """
+        Manually update the progress bar, useful for streams
+        such as reading files.
+        E.g.:
+        >>> t = tqdm(total=filesize) # Initialise
+        >>> for current_buffer in stream:
+        ...    ...
+        ...    t.update(len(current_buffer))
+        >>> t.close()
+        The last line is highly recommended, but possibly not necessary if
+        `t.update()` will be called in such a way that `filesize` will be
+        exactly reached and printed.
+
+        Parameters
+        ----------
+        n  : int
+            Increment to add to the internal counter of iterations
+            [default: 1].
+        """
+        if self.disable:
+            return
+
+        if n < 1:
+            n = 1
+        self.n += n
+
+        delta_it = self.n - self.last_print_n  # should be n?
+        if delta_it >= self.miniters:
+            # We check the counter first, to reduce the overhead of time()
+            cur_t = time()
+            delta_t = cur_t - self.last_print_t
+            if delta_t >= self.mininterval:
+                elapsed = cur_t - self.start_t
+                # EMA (not just overall average)
+                if self.smoothing and delta_t:
+                    self.avg_rate = delta_it / delta_t \
+                        if self.avg_rate is None \
+                        else self.smoothing * delta_it / delta_t + \
+                        (1 - self.smoothing) * self.avg_rate
+
+                if not hasattr(self, "sp"):
+                    raise DeprecationWarning('Please use tqdm_gui(...)'
+                                             ' instead of tqdm(..., gui=True)')
+
+                self.sp(format_meter(
+                    self.n, self.total, elapsed,
+                    (self.dynamic_ncols(self.fp) if self.dynamic_ncols
+                     else self.ncols),
+                    self.desc, self.ascii, self.unit, self.unit_scale,
+                    self.avg_rate))
+
+                # If no `miniters` was specified, adjust automatically to the
+                # maximum iteration rate seen so far.
+                # e.g.: After running `tqdm.update(5)`, subsequent
+                # calls to `tqdm.update()` will only cause an update after
+                # at least 5 more iterations.
+                if self.dynamic_miniters:
+                    if self.maxinterval and delta_t > self.maxinterval:
+                        self.miniters = self.miniters * self.maxinterval \
+                            / delta_t
+                    elif self.mininterval and delta_t:
+                        self.miniters = self.smoothing * delta_it \
+                            * self.mininterval / delta_t + \
+                            (1 - self.smoothing) * self.miniters
+                    else:
+                        self.miniters = self.smoothing * delta_it + \
+                            (1 - self.smoothing) * self.miniters
+
+                # Store old values for next call
+                self.last_print_n = self.n
+                self.last_print_t = cur_t
+
+    def close(self):
+        """
+        Cleanup and (if leave=False) close the progressbar.
+        """
+        if self.disable:
+            return
+
+        endchar = '\r'
+        if self.nested:
+            endchar += _term_move_up()
+
+        if self.leave:
+            if self.last_print_n < self.n:
+                cur_t = time()
+                # stats for overall rate (no weighted average)
+                self.sp(format_meter(
+                    self.n, self.total, cur_t - self.start_t,
+                    (self.dynamic_ncols(self.fp) if self.dynamic_ncols
+                     else self.ncols),
+                    self.desc, self.ascii, self.unit, self.unit_scale))
+            if self.nested:
+                self.fp.write(endchar)
+            else:
+                self.fp.write('\n')
+        else:
+            self.sp('')
+            self.fp.write(endchar)
+
+    def set_description(self, desc=None):
+        """
+        Set/modify description of the progress bar.
+        """
+        self.desc = desc + ': ' if desc else ''
+
+
+def trange(*args, **kwargs):
+    """
+    A shortcut for tqdm(xrange(*args), **kwargs).
+    On Python3+ range is used instead of xrange.
+    """
+    return tqdm(_range(*args), **kwargs)
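
The docstrings above describe two usage patterns: wrapping an iterable
directly, and driving the bar manually with update() when only a running count
(e.g. bytes read) is available. A rough sketch of both, under the assumption
that the vendored module is imported as yt.extern.tqdm:

    from yt.extern.tqdm import tqdm

    # 1) Wrap an iterable; total is taken from len() when available.
    for chunk in tqdm([b"a" * 512] * 64, unit="chunk"):
        pass

    # 2) Manual updates against a known total, as in the update() docstring.
    filesize = 64 * 512
    t = tqdm(total=filesize, unit="B", unit_scale=True)
    for chunk in [b"a" * 512] * 64:
        t.update(len(chunk))
    t.close()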

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/tqdm/_tqdm_gui.py
--- /dev/null
+++ b/yt/extern/tqdm/_tqdm_gui.py
@@ -0,0 +1,308 @@
+"""
+GUI progressbar decorator for iterators.
+Includes a default (x)range iterator printing to stderr.
+
+Usage:
+  >>> from tqdm_gui import tgrange[, tqdm_gui]
+  >>> for i in tgrange(10): #same as: for i in tqdm_gui(xrange(10))
+  ...     ...
+"""
+# future division is important to divide integers and get as
+# a result precise floating numbers (instead of truncated int)
+from __future__ import division, absolute_import
+# import compatibility functions and utilities
+from time import time
+from ._utils import _range
+# to inherit from the tqdm class
+from ._tqdm import tqdm, format_meter
+
+
+__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
+__all__ = ['tqdm_gui', 'tgrange']
+
+
+class tqdm_gui(tqdm):  # pragma: no cover
+    """
+    Experimental GUI version of tqdm!
+    """
+    def __init__(self, *args, **kwargs):
+
+        # try:  # pragma: no cover
+        import matplotlib as mpl
+        import matplotlib.pyplot as plt
+        from collections import deque
+        # except ImportError:  # gui not available
+        #   kwargs['gui'] = False
+        # else:
+        kwargs['gui'] = True
+
+        super(tqdm_gui, self).__init__(*args, **kwargs)
+
+        # Initialize the GUI display
+        if self.disable or not kwargs['gui']:
+            return
+
+        self.fp.write('Warning: GUI is experimental/alpha\n')
+        self.mpl = mpl
+        self.plt = plt
+        self.sp = None
+
+        # Remember if external environment uses toolbars
+        self.toolbar = self.mpl.rcParams['toolbar']
+        self.mpl.rcParams['toolbar'] = 'None'
+
+        self.mininterval = max(self.mininterval, 0.5)
+        self.fig, ax = plt.subplots(figsize=(9, 2.2))
+        # self.fig.subplots_adjust(bottom=0.2)
+        if self.total:
+            self.xdata = []
+            self.ydata = []
+            self.zdata = []
+        else:
+            self.xdata = deque([])
+            self.ydata = deque([])
+            self.zdata = deque([])
+        self.line1, = ax.plot(self.xdata, self.ydata, color='b')
+        self.line2, = ax.plot(self.xdata, self.zdata, color='k')
+        ax.set_ylim(0, 0.001)
+        if self.total:
+            ax.set_xlim(0, 100)
+            ax.set_xlabel('percent')
+            self.fig.legend((self.line1, self.line2), ('cur', 'est'),
+                            loc='center right')
+            # progressbar
+            self.hspan = plt.axhspan(0, 0.001,
+                                     xmin=0, xmax=0, color='g')
+        else:
+            # ax.set_xlim(-60, 0)
+            ax.set_xlim(0, 60)
+            ax.invert_xaxis()
+            ax.set_xlabel('seconds')
+            ax.legend(('cur', 'est'), loc='lower left')
+        ax.grid()
+        # ax.set_xlabel('seconds')
+        ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
+        if self.unit_scale:
+            plt.ticklabel_format(style='sci', axis='y',
+                                 scilimits=(0, 0))
+            ax.yaxis.get_offset_text().set_x(-0.15)
+
+        # Remember if external environment is interactive
+        self.wasion = plt.isinteractive()
+        plt.ion()
+        self.ax = ax
+
+    def __iter__(self):
+        # TODO: somehow allow the following:
+        # if not self.gui:
+        #   return super(tqdm_gui, self).__iter__()
+        iterable = self.iterable
+        if self.disable:
+            for obj in iterable:
+                yield obj
+            return
+
+        # ncols = self.ncols
+        mininterval = self.mininterval
+        miniters = self.miniters
+        dynamic_miniters = self.dynamic_miniters
+        unit = self.unit
+        unit_scale = self.unit_scale
+        ascii = self.ascii
+        start_t = self.start_t
+        last_print_t = self.last_print_t
+        last_print_n = self.last_print_n
+        n = self.n
+        # dynamic_ncols = self.dynamic_ncols
+        # smoothing = self.smoothing
+        # avg_rate = self.avg_rate
+
+        plt = self.plt
+        ax = self.ax
+        xdata = self.xdata
+        ydata = self.ydata
+        zdata = self.zdata
+        line1 = self.line1
+        line2 = self.line2
+
+        for obj in iterable:
+            yield obj
+            # Update and print the progressbar.
+            # Note: does not call self.update(1) for speed optimisation.
+            n += 1
+            delta_it = n - last_print_n
+            # check the counter first (avoid calls to time())
+            if delta_it >= miniters:
+                cur_t = time()
+                delta_t = cur_t - last_print_t
+                if delta_t >= mininterval:  # pragma: no cover
+                    elapsed = cur_t - start_t
+                    # Inline due to multiple calls
+                    total = self.total
+                    # instantaneous rate
+                    y = delta_it / delta_t
+                    # overall rate
+                    z = n / elapsed
+                    # update line data
+                    xdata.append(n * 100.0 / total if total else cur_t)
+                    ydata.append(y)
+                    zdata.append(z)
+
+                    # Discard old values
+                    # xmin, xmax = ax.get_xlim()
+                    # if (not total) and elapsed > xmin * 1.1:
+                    if (not total) and elapsed > 66:
+                        xdata.popleft()
+                        ydata.popleft()
+                        zdata.popleft()
+
+                    ymin, ymax = ax.get_ylim()
+                    if y > ymax or z > ymax:
+                        ymax = 1.1 * y
+                        ax.set_ylim(ymin, ymax)
+                        ax.figure.canvas.draw()
+
+                    if total:
+                        line1.set_data(xdata, ydata)
+                        line2.set_data(xdata, zdata)
+                        try:
+                            poly_lims = self.hspan.get_xy()
+                        except AttributeError:
+                            self.hspan = plt.axhspan(0, 0.001, xmin=0,
+                                                     xmax=0, color='g')
+                            poly_lims = self.hspan.get_xy()
+                        poly_lims[0, 1] = ymin
+                        poly_lims[1, 1] = ymax
+                        poly_lims[2] = [n / total, ymax]
+                        poly_lims[3] = [poly_lims[2, 0], ymin]
+                        if len(poly_lims) > 4:
+                            poly_lims[4, 1] = ymin
+                        self.hspan.set_xy(poly_lims)
+                    else:
+                        t_ago = [cur_t - i for i in xdata]
+                        line1.set_data(t_ago, ydata)
+                        line2.set_data(t_ago, zdata)
+
+                    ax.set_title(format_meter(
+                        n, total, elapsed, 0,
+                        self.desc, ascii, unit, unit_scale),
+                        fontname="DejaVu Sans Mono",
+                        fontsize=11)
+                    plt.pause(1e-9)
+
+                    # If no `miniters` was specified, adjust automatically
+                    # to the maximum iteration rate seen so far.
+                    if dynamic_miniters:
+                        miniters = max(miniters, delta_it)
+
+                    # Store old values for next call
+                    last_print_n = n
+                    last_print_t = cur_t
+
+        # Closing the progress bar.
+        # Update some internal variables for close().
+        self.last_print_n = last_print_n
+        self.n = n
+        self.close()
+
+    def update(self, n=1):
+        # if not self.gui:
+        #   return super(tqdm_gui, self).close()
+        if self.disable:
+            return
+
+        if n < 1:
+            n = 1
+        self.n += n
+
+        delta_it = self.n - self.last_print_n  # should be n?
+        if delta_it >= self.miniters:
+            # We check the counter first, to reduce the overhead of time()
+            cur_t = time()
+            delta_t = cur_t - self.last_print_t
+            if delta_t >= self.mininterval:
+                elapsed = cur_t - self.start_t
+                # Inline due to multiple calls
+                total = self.total
+                ax = self.ax
+
+                # instantaneous rate
+                y = delta_it / delta_t
+                # overall rate
+                z = self.n / elapsed
+                # update line data
+                self.xdata.append(self.n * 100.0 / total
+                                  if total else cur_t)
+                self.ydata.append(y)
+                self.zdata.append(z)
+
+                # Discard old values
+                if (not total) and elapsed > 66:
+                    self.xdata.popleft()
+                    self.ydata.popleft()
+                    self.zdata.popleft()
+
+                ymin, ymax = ax.get_ylim()
+                if y > ymax or z > ymax:
+                    ymax = 1.1 * y
+                    ax.set_ylim(ymin, ymax)
+                    ax.figure.canvas.draw()
+
+                if total:
+                    self.line1.set_data(self.xdata, self.ydata)
+                    self.line2.set_data(self.xdata, self.zdata)
+                    try:
+                        poly_lims = self.hspan.get_xy()
+                    except AttributeError:
+                        self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
+                                                      xmax=0, color='g')
+                        poly_lims = self.hspan.get_xy()
+                    poly_lims[0, 1] = ymin
+                    poly_lims[1, 1] = ymax
+                    poly_lims[2] = [self.n / total, ymax]
+                    poly_lims[3] = [poly_lims[2, 0], ymin]
+                    if len(poly_lims) > 4:
+                        poly_lims[4, 1] = ymin
+                    self.hspan.set_xy(poly_lims)
+                else:
+                    t_ago = [cur_t - i for i in self.xdata]
+                    self.line1.set_data(t_ago, self.ydata)
+                    self.line2.set_data(t_ago, self.zdata)
+
+                ax.set_title(format_meter(
+                    self.n, total, elapsed, 0,
+                    self.desc, self.ascii, self.unit, self.unit_scale),
+                    fontname="DejaVu Sans Mono",
+                    fontsize=11)
+                self.plt.pause(1e-9)
+
+                # If no `miniters` was specified, adjust automatically to the
+                # maximum iteration rate seen so far.
+                if self.dynamic_miniters:
+                    self.miniters = max(self.miniters, delta_it)
+
+                # Store old values for next call
+                self.last_print_n = self.n
+                self.last_print_t = cur_t
+
+    def close(self):
+        # if not self.gui:
+        #   return super(tqdm_gui, self).close()
+        if self.disable:
+            return
+
+        # Restore toolbars
+        self.mpl.rcParams['toolbar'] = self.toolbar
+        # Return to non-interactive mode
+        if not self.wasion:
+            self.plt.ioff()
+        if not self.leave:
+            self.plt.close(self.fig)
+
+
+def tgrange(*args, **kwargs):
+    """
+    A shortcut for tqdm_gui(xrange(*args), **kwargs).
+    On Python3+ range is used instead of xrange.
+    """
+    return tqdm_gui(_range(*args), **kwargs)
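
The GUI variant mirrors the text interface but plots the instantaneous and
overall rates with matplotlib. A minimal sketch, assuming matplotlib is
available (the class itself warns that the GUI backend is experimental):

    # Illustrative only; tgrange(n) is shorthand for tqdm_gui(range(n)).
    from yt.extern.tqdm import tgrange

    for i in tgrange(10000, desc="gui demo"):
        pass  # same loop body as with trange, but progress is plotted live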

diff -r 95e93d609ddd831c9f9d05961c421af91515d840 -r d883cddfa337b71f6440d9faff1bc13758bec589 yt/extern/tqdm/_tqdm_pandas.py
--- /dev/null
+++ b/yt/extern/tqdm/_tqdm_pandas.py
@@ -0,0 +1,58 @@
+# future division is important to divide integers and get as
+# a result precise floating numbers (instead of truncated int)
+from __future__ import absolute_import
+
+
+__author__ = "github.com/casperdcl"
+__all__ = ['tqdm_pandas']
+
+
+def tqdm_pandas(t):  # pragma: no cover
+    """
+    Registers the given `tqdm` instance with
+    `pandas.core.groupby.DataFrameGroupBy.progress_apply`.
+    It will even close() the `tqdm` instance upon completion.
+
+    Examples
+    --------
+    >>> import pandas as pd
+    >>> import numpy as np
+    >>> from tqdm import tqdm, tqdm_pandas
+    >>>
+    >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
+    >>> tqdm_pandas(tqdm())  # can use tqdm_gui, optional kwargs, etc
+    >>> # Now you can use `progress_apply` instead of `apply`
+    >>> df.groupby(0).progress_apply(lambda x: x**2)
+
+    References
+    ----------
+    https://stackoverflow.com/questions/18603270/
+    progress-indicator-during-pandas-operations-python
+    """
+    from pandas.core.groupby import DataFrameGroupBy
+
+    def inner(groups, func, *args, **kwargs):
+        """
+        Parameters
+        ----------
+        groups  : DataFrameGroupBy
+            Grouped data.
+        func  : function
+            To be applied on the grouped data.
+
+        *args and **kwargs are passed through to DataFrameGroupBy.apply().
+        """
+        t.total = len(groups) + 1  # pandas calls update once too many
+
+        def wrapper(*args, **kwargs):
+            t.update()
+            return func(*args, **kwargs)
+
+        result = groups.apply(wrapper, *args, **kwargs)
+
+        t.close()
+
+        return result
+
+    # Enable custom tqdm progress in pandas!
+    DataFrameGroupBy.progress_apply = inner

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/b9768c9cce41/
Changeset:   b9768c9cce41
Branch:      yt
User:        MatthewTurk
Date:        2016-01-25 16:04:02+00:00
Summary:     Switching to F order for the octs
Affected #:  1 file

diff -r d883cddfa337b71f6440d9faff1bc13758bec589 -r b9768c9cce41430596bc192290f0f90481bd6e94 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -50,7 +50,7 @@
         if selected == 0: return
         # There are this many records between "octs"
         self.dest[self.index, :] = self.source[
-                self.ind[0], self.ind[1], self.ind[2],
+                self.ind[2], self.ind[1], self.ind[0],
                 self.global_index, :]
         self.index += 1
 
@@ -63,7 +63,7 @@
         if selected == 0: return
         # There are this many records between "octs"
         self.dest[self.index, :] = self.source[
-                self.ind[0], self.ind[1], self.ind[2],
+                self.ind[2], self.ind[1], self.ind[0],
                 self.global_index, :]
         self.index += 1
 
@@ -92,14 +92,14 @@
         if self.last != o.domain_ind:
             self.last = o.domain_ind
             self.index += 1
-        self.mark[self.index, self.ind[0], self.ind[1], self.ind[2]] = 1
+        self.mark[self.index, self.ind[2], self.ind[1], self.ind[0]] = 1
 
 cdef class MaskOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         if selected == 0: return
-        self.mask[self.global_index, self.ind[0], self.ind[1], self.ind[2]] = 1
+        self.mask[self.global_index, self.ind[2], self.ind[1], self.ind[0]] = 1
 
 cdef class IndexOcts(OctVisitor):
     @cython.boundscheck(False)
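
The index swap above (ind[0], ind[1], ind[2] becoming ind[2], ind[1], ind[0])
is consistent with reading Fortran-ordered per-oct data through C-style
indexing. For a cubic block the two views are related by reversing the
indices, as this small numpy sketch shows:

    import numpy as np

    # For equal-sized axes, element (i, j, k) of the Fortran-ordered reshape
    # equals element (k, j, i) of the C-ordered reshape of the same buffer.
    flat = np.arange(8)
    c = flat.reshape((2, 2, 2), order="C")
    f = flat.reshape((2, 2, 2), order="F")
    for i in range(2):
        for j in range(2):
            for k in range(2):
                assert f[i, j, k] == c[k, j, i]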


https://bitbucket.org/yt_analysis/yt/commits/3516e7d7bc27/
Changeset:   3516e7d7bc27
Branch:      yt
User:        MatthewTurk
Date:        2016-01-25 19:07:13+00:00
Summary:     Adding a comment about fill_style
Affected #:  1 file

diff -r b9768c9cce41430596bc192290f0f90481bd6e94 -r 3516e7d7bc274156aa3b445c47c87ded9838da9b yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -82,6 +82,8 @@
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void append_domain(self, np.int64_t domain_count)
+    # The fill_style is the ordering, C or F, of the octs in the file.  "o"
+    # corresponds to C, and "r" is for Fortran.
     cdef public object fill_style
 
 cdef class SparseOctreeContainer(OctreeContainer):
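
Schematically, the new comment pins down the two fill_style values; the
mapping below only summarizes that comment, and the real dispatch happens in
the Cython fill visitors:

    # "o" selects the C-order fill visitor, "r" the Fortran-order one.
    FILL_STYLES = {
        "o": "C order (FillFileIndicesO): last cell index varies fastest",
        "r": "Fortran order (FillFileIndicesR): first cell index varies fastest",
    }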


https://bitbucket.org/yt_analysis/yt/commits/fc521ecbd44a/
Changeset:   fc521ecbd44a
Branch:      yt
User:        MatthewTurk
Date:        2016-01-25 19:23:28+00:00
Summary:     Adding comments
Affected #:  1 file

diff -r 3516e7d7bc274156aa3b445c47c87ded9838da9b -r fc521ecbd44a3f9b033c9622e054278ae3b20fad yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -41,6 +41,8 @@
     cdef void visit(self, Oct* o, np.uint8_t selected):
         raise NotImplementedError
 
+# This copies an integer array from the source to the destination, based on the
+# selection criteria.
 cdef class CopyArrayI64(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -54,6 +56,8 @@
                 self.global_index, :]
         self.index += 1
 
+# This copies a floating point array from the source to the destination, based
+# on the selection criteria.
 cdef class CopyArrayF64(OctVisitor):
     #@cython.boundscheck(False)
     #@cython.initializedcheck(False)
@@ -67,6 +71,9 @@
                 self.global_index, :]
         self.index += 1
 
+# This counts the number of octs, selected or not, that the selector hits.
+# Note that the selector will not recursively visit unselected octs, so this is
+# still useful.
 cdef class CountTotalOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -77,6 +84,7 @@
             self.index += 1
             self.last = o.domain_ind
 
+# This counts the number of selected cells.
 cdef class CountTotalCells(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -84,6 +92,7 @@
         # Number of *cells* visited and selected.
         self.index += selected
 
+# Every time a cell is visited, mark it.  This will be for all visited octs.
 cdef class MarkOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -94,6 +103,7 @@
             self.index += 1
         self.mark[self.index, self.ind[2], self.ind[1], self.ind[0]] = 1
 
+# Mask all the selected cells.
 cdef class MaskOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -101,6 +111,7 @@
         if selected == 0: return
         self.mask[self.global_index, self.ind[2], self.ind[1], self.ind[0]] = 1
 
+# Compute a mapping from domain_ind to flattened index.
 cdef class IndexOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -111,6 +122,7 @@
             self.oct_index[o.domain_ind] = self.index
             self.index += 1
 
+# Integer coordinates
 cdef class ICoordsOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -121,6 +133,7 @@
             self.icoords[self.index,i] = (self.pos[i] << self.oref) + self.ind[i]
         self.index += 1
 
+# Level
 cdef class IResOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -129,6 +142,7 @@
         self.ires[self.index] = self.level
         self.index += 1
 
+# Floating point coordinates
 cdef class FCoordsOcts(OctVisitor):
     @cython.cdivision(True)
     @cython.boundscheck(False)
@@ -146,6 +160,7 @@
             self.fcoords[self.index,i] = (c + 0.5) * dx
         self.index += 1
 
+# Floating point widths; domain modifications are done later.
 cdef class FWidthOcts(OctVisitor):
     @cython.cdivision(True)
     @cython.boundscheck(False)
@@ -162,6 +177,7 @@
             self.fwidth[self.index,i] = dx
         self.index += 1
 
+# Mark which domains are touched by a selector.
 cdef class IdentifyOcts(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -172,6 +188,7 @@
         if selected == 0: return
         self.domain_mask[o.domain - 1] = 1
 
+# Assign domain indices to octs
 cdef class AssignDomainInd(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -179,6 +196,7 @@
         o.domain_ind = self.global_index
         self.index += 1
 
+# From the file, fill in C order
 cdef class FillFileIndicesO(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -191,6 +209,7 @@
         self.cell_inds[self.index] = self.oind()
         self.index +=1
 
+# From the file, fill in F order
 cdef class FillFileIndicesR(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -203,6 +222,7 @@
         self.cell_inds[self.index] = self.rind()
         self.index +=1
 
+# Count octs by domain
 cdef class CountByDomain(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -211,6 +231,7 @@
         # NOTE: We do this for every *cell*.
         self.domain_counts[o.domain - 1] += 1
 
+# Store the refinement mapping of the octree to be loaded later
 cdef class StoreOctree(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
@@ -225,6 +246,7 @@
         self.ref_mask[self.index] = res
         self.index += 1
 
+# Go from a refinement mapping to a new octree
 cdef class LoadOctree(OctVisitor):
     @cython.boundscheck(False)
     @cython.initializedcheck(False)
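
The comments added above spell out the pattern these classes share: the octree
walk hands every visited cell (with its oct and a selected flag) to a visitor
object that accumulates whatever it needs. A toy pure-Python stand-in for that
control flow, not the Cython classes themselves:

    # CountSelectedCells mirrors what CountTotalCells does: add 0 or 1 per cell.
    class CountSelectedCells:
        def __init__(self):
            self.index = 0

        def visit(self, o, selected):
            self.index += selected

    def visit_all_cells(octs, visitor, selector):
        # One call per cell of every oct; 8 cells per oct when oref == 1.
        for o in octs:
            for cell in range(8):
                visitor.visit(o, 1 if selector(o, cell) else 0)

    counter = CountSelectedCells()
    visit_all_cells(range(4), counter, lambda o, cell: cell % 2 == 0)
    assert counter.index == 16  # 4 octs x 4 selected cells each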


https://bitbucket.org/yt_analysis/yt/commits/7268e755205a/
Changeset:   7268e755205a
Branch:      yt
User:        MatthewTurk
Date:        2016-02-02 22:46:12+00:00
Summary:     Reverting commenting out of arrays.
Affected #:  1 file

diff -r fc521ecbd44a3f9b033c9622e054278ae3b20fad -r 7268e755205a18223488ebacd8be89c9fa252572 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -59,8 +59,8 @@
 # This copies a floating point array from the source to the destination, based
 # on the selection criteria.
 cdef class CopyArrayF64(OctVisitor):
-    #@cython.boundscheck(False)
-    #@cython.initializedcheck(False)
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
     cdef void visit(self, Oct* o, np.uint8_t selected):
         # We should always have global_index less than our source.
         # "last" here tells us the dimensionality of the array.


https://bitbucket.org/yt_analysis/yt/commits/45dc609be9e8/
Changeset:   45dc609be9e8
Branch:      yt
User:        xarthisius
Date:        2016-02-03 17:06:20+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1901)

Octree visitor refactoring
Affected #:  9 files

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -90,10 +90,11 @@
         return self._num_zones + 2*self._num_ghost_zones
 
     def _reshape_vals(self, arr):
-        if len(arr.shape) == 4 and arr.flags["F_CONTIGUOUS"]:
-            return arr
         nz = self.nz
-        n_oct = arr.shape[0] / (nz**3.0)
+        if len(arr.shape) <= 2:
+            n_oct = arr.shape[0] / (nz**3.0)
+        else:
+            n_oct = max(arr.shape)
         if arr.size == nz*nz*nz*n_oct:
             new_shape = (nz, nz, nz, n_oct)
         elif arr.size == nz*nz*nz*n_oct * 3:
@@ -115,10 +116,9 @@
 
     def select_blocks(self, selector):
         mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
-        mask = self._reshape_vals(mask)
         slicer = OctreeSubsetBlockSlice(self)
         for i, sl in slicer:
-            yield sl, mask[:,:,:,i]
+            yield sl, mask[i,...]
 
     def select_tcoords(self, dobj):
         # These will not be pre-allocated, which can be a problem for speed and
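
The reworked _reshape_vals recovers the number of octs from a flat field and
reshapes it into per-oct blocks. A quick sketch of that arithmetic; the
order="F" keyword is an assumption for illustration, since the reshape call
itself is outside this hunk:

    import numpy as np

    nz, n_oct = 2, 5
    arr = np.arange(nz**3 * n_oct, dtype="float64")  # flat, one value per cell
    assert arr.shape[0] / (nz**3.0) == n_oct         # as in _reshape_vals
    blocks = arr.reshape((nz, nz, nz, n_oct), order="F")
    assert blocks.shape == (2, 2, 2, 5)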

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -8,9 +8,7 @@
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
-from yt.geometry.oct_visitors cimport \
-    OctVisitorData, oct_visitor_function, Oct, \
-    fill_file_indices_oind, fill_file_indices_rind
+from yt.geometry.oct_visitors cimport Oct
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
 from libc.stdint cimport int32_t, int64_t

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -19,8 +19,7 @@
 from fp_utils cimport *
 cimport oct_visitors
 cimport selection_routines
-from .oct_visitors cimport \
-    OctVisitorData, oct_visitor_function, Oct, cind
+from .oct_visitors cimport OctVisitor, Oct, cind
 from libc.stdlib cimport bsearch, qsort, realloc, malloc, free
 from libc.math cimport floor
 
@@ -59,7 +58,6 @@
     cdef OctAllocationContainer *cont
     cdef OctAllocationContainer **domains
     cdef Oct ****root_mesh
-    cdef oct_visitor_function *fill_func
     cdef int partial_coverage
     cdef int level_offset
     cdef int nn[3]
@@ -79,13 +77,14 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self,
                         selection_routines.SelectorObject selector,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
-    cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
     cdef void append_domain(self, np.int64_t domain_count)
+    # The fill_style is the ordering, C or F, of the octs in the file.  "o"
+    # corresponds to C, and "r" is for Fortran.
+    cdef public object fill_style
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -99,7 +99,7 @@
             self.DLE[i] = domain_left_edge[i] #0
             self.DRE[i] = domain_right_edge[i] #num_grid
         self._initialize_root_mesh()
-        self.fill_func = oct_visitors.fill_file_indices_oind
+        self.fill_style = "o"
 
     def _initialize_root_mesh(self):
         self.root_mesh = <Oct****> malloc(sizeof(void*) * self.nn[0])
@@ -132,16 +132,16 @@
                 partial_coverage = header['partial_coverage'])
         # NOTE: We do not allow domain/file indices to be specified.
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
-        cdef OctVisitorData data
-        obj.setup_data(&data, -1)
+        cdef oct_visitors.LoadOctree visitor
+        visitor = oct_visitors.LoadOctree(obj, -1)
         cdef int i, j, k, n
-        data.global_index = -1
-        data.level = 0
-        data.oref = 0
-        data.nz = 1
-        assert(ref_mask.shape[0] / float(data.nz) ==
-            <int>(ref_mask.shape[0]/float(data.nz)))
-        obj.allocate_domains([ref_mask.shape[0] / data.nz])
+        visitor.global_index = -1
+        visitor.level = 0
+        visitor.oref = 0
+        visitor.nz = 1
+        assert(ref_mask.shape[0] / float(visitor.nz) ==
+            <int>(ref_mask.shape[0]/float(visitor.nz)))
+        obj.allocate_domains([ref_mask.shape[0] / visitor.nz])
         cdef np.float64_t pos[3]
         cdef np.float64_t dds[3]
         # This dds is the oct-width
@@ -150,13 +150,11 @@
         # Pos is the center of the octs
         cdef OctAllocationContainer *cur = obj.domains[0]
         cdef Oct *o
-        cdef void *p[4]
         cdef np.int64_t nfinest = 0
-        p[0] = ref_mask.data
-        p[1] = <void *> cur.my_octs
-        p[2] = <void *> &cur.n_assigned
-        p[3] = <void *> &nfinest
-        data.array = p
+        visitor.ref_mask = ref_mask
+        visitor.octs = cur.my_octs
+        visitor.nocts = &cur.n_assigned
+        visitor.nfinest = &nfinest
         pos[0] = obj.DLE[0] + dds[0]/2.0
         for i in range(obj.nn[0]):
             pos[1] = obj.DLE[1] + dds[1]/2.0
@@ -170,38 +168,22 @@
                     o.domain = 1
                     obj.root_mesh[i][j][k] = o
                     cur.n_assigned += 1
-                    data.pos[0] = i
-                    data.pos[1] = j
-                    data.pos[2] = k
+                    visitor.pos[0] = i
+                    visitor.pos[1] = j
+                    visitor.pos[2] = k
                     # Always visit covered
                     selector.recursively_visit_octs(
                         obj.root_mesh[i][j][k],
-                        pos, dds, 0, oct_visitors.load_octree,
-                        &data, 1)
+                        pos, dds, 0, visitor, 1)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
         obj.nocts = cur.n_assigned
-        if obj.nocts * data.nz != ref_mask.size:
+        if obj.nocts * visitor.nz != ref_mask.size:
             raise KeyError(ref_mask.size, obj.nocts, obj.oref,
                 obj.partial_coverage)
         return obj
 
-    cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
-        cdef int i
-        data.index = 0
-        data.last = -1
-        data.global_index = -1
-        for i in range(3):
-            data.pos[i] = -1
-            data.ind[i] = -1
-        data.array = NULL
-        data.dims = 0
-        data.domain = domain_id
-        data.level = -1
-        data.oref = self.oref
-        data.nz = (1 << (data.oref*3))
-
     def __dealloc__(self):
         free_octs(self.cont)
         if self.root_mesh == NULL: return
@@ -229,14 +211,12 @@
 
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
-                        int vc = -1):
+                        OctVisitor visitor, int vc = -1):
         cdef int i, j, k, n
         if vc == -1:
             vc = self.partial_coverage
-        data.global_index = -1
-        data.level = 0
+        visitor.global_index = -1
+        visitor.level = 0
         cdef np.float64_t pos[3]
         cdef np.float64_t dds[3]
         # This dds is the oct-width
@@ -251,12 +231,12 @@
                 for k in range(self.nn[2]):
                     if self.root_mesh[i][j][k] == NULL:
                         raise RuntimeError
-                    data.pos[0] = i
-                    data.pos[1] = j
-                    data.pos[2] = k
+                    visitor.pos[0] = i
+                    visitor.pos[1] = j
+                    visitor.pos[2] = k
                     selector.recursively_visit_octs(
                         self.root_mesh[i][j][k],
-                        pos, dds, 0, func, data, vc)
+                        pos, dds, 0, visitor, vc)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
@@ -340,10 +320,10 @@
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.num_domains, dtype="uint8")
-        cdef OctVisitorData data
-        self.setup_data(&data)
-        data.array = domain_mask.data
-        self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
+        cdef oct_visitors.IdentifyOcts visitor
+        visitor = oct_visitors.IdentifyOcts(self)
+        visitor.domain_mask = domain_mask
+        self.visit_all_octs(selector, visitor)
         cdef int i
         domain_ids = []
         for i in range(self.num_domains):
@@ -441,13 +421,14 @@
              int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_octs(self, domain_id)
-        cdef np.ndarray[np.uint8_t, ndim=1] coords
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells*data.nz), dtype="uint8")
-        data.array = <void *> coords.data
-        self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
-        return coords.astype("bool")
+        cdef np.ndarray[np.uint8_t, ndim=4] mask
+        cdef oct_visitors.MaskOcts visitor
+        visitor = oct_visitors.MaskOcts(self, domain_id)
+        cdef int ns = 1 << self.oref
+        mask = np.zeros((num_cells, ns, ns, ns), dtype="uint8")
+        visitor.mask = mask
+        self.visit_all_octs(selector, visitor)
+        return mask.astype("bool")
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -456,12 +437,12 @@
                 int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.ICoordsOcts visitor
+        visitor = oct_visitors.ICoordsOcts(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="int64")
-        data.array = <void *> coords.data
-        self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
+        visitor.icoords = coords
+        self.visit_all_octs(selector, visitor)
         return coords
 
     @cython.boundscheck(False)
@@ -472,13 +453,13 @@
         cdef int i
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.IResOcts visitor
+        visitor = oct_visitors.IResOcts(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(num_cells, dtype="int64")
-        data.array = <void *> res.data
-        self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
+        visitor.ires = res
+        self.visit_all_octs(selector, visitor)
         if self.level_offset > 0:
             for i in range(num_cells):
                 res[i] += self.level_offset
@@ -491,12 +472,12 @@
                 int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.FWidthOcts visitor
+        visitor = oct_visitors.FWidthOcts(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
         fwidth = np.empty((num_cells, 3), dtype="float64")
-        data.array = <void *> fwidth.data
-        self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
+        visitor.fwidth = fwidth
+        self.visit_all_octs(selector, visitor)
         cdef np.float64_t base_dx
         for i in range(3):
             base_dx = (self.DRE[i] - self.DLE[i])/self.nn[i]
@@ -510,13 +491,13 @@
                 int domain_id = -1):
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        cdef oct_visitors.FCoordsOcts visitor
+        visitor = oct_visitors.FCoordsOcts(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((num_cells, 3), dtype="float64")
-        data.array = <void *> coords.data
-        self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
+        visitor.fcoords = coords
+        self.visit_all_octs(selector, visitor)
         cdef int i
         cdef np.float64_t base_dx
         for i in range(3):
@@ -534,17 +515,15 @@
                       partial_coverage = self.partial_coverage)
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         # domain_id = -1 here, because we want *every* oct
-        cdef OctVisitorData data
-        self.setup_data(&data, -1)
-        data.oref = 0
-        data.nz = 1
+        cdef oct_visitors.StoreOctree visitor
+        visitor = oct_visitors.StoreOctree(self, -1)
+        visitor.oref = 0
+        visitor.nz = 1
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
-        ref_mask = np.zeros(self.nocts * data.nz, dtype="uint8") - 1
-        cdef void *p[1]
-        p[0] = ref_mask.data
-        data.array = p
+        ref_mask = np.zeros(self.nocts * visitor.nz, dtype="uint8") - 1
+        visitor.ref_mask = ref_mask
         # Enforce partial_coverage here
-        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        self.visit_all_octs(selector, visitor, 1)
         header['octree'] = ref_mask
         return header
 
@@ -562,53 +541,60 @@
             # means we actually do want the number of Octs, not the number of
             # cells.
             num_cells = selector.count_oct_cells(self, domain_id)
-            if dims > 1:
-                dest = np.zeros((num_cells, dims), dtype=source.dtype,
-                    order='C')
-            else:
-                dest = np.zeros(num_cells, dtype=source.dtype, order='C')
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        data.index = offset
-        # We only need this so we can continue calculating the offset
-        data.dims = dims
-        cdef void *p[2]
-        p[0] = source.data
-        p[1] = dest.data
-        data.array = &p
-        cdef oct_visitor_function *func
+            dest = np.zeros((num_cells, dims), dtype=source.dtype,
+                            order='C')
+        if dims != 1:
+            raise RuntimeError
+        # Just make sure that we're in the right shape.  Ideally this will not
+        # duplicate memory.  Since we're in Cython, we want to avoid modifying
+        # the .shape attributes directly.
+        dest = dest.reshape((num_cells, 1))
+        source = source.reshape((source.shape[0], source.shape[1],
+                    source.shape[2], source.shape[3], dims))
+        cdef OctVisitor visitor
+        cdef oct_visitors.CopyArrayI64 visitor_i64
+        cdef oct_visitors.CopyArrayF64 visitor_f64
         if source.dtype != dest.dtype:
             raise RuntimeError
         if source.dtype == np.int64:
-            func = oct_visitors.copy_array_i64
+            visitor_i64 = oct_visitors.CopyArrayI64(self, domain_id)
+            visitor_i64.source = source
+            visitor_i64.dest = dest
+            visitor = visitor_i64
         elif source.dtype == np.float64:
-            func = oct_visitors.copy_array_f64
+            visitor_f64 = oct_visitors.CopyArrayF64(self, domain_id)
+            visitor_f64.source = source
+            visitor_f64.dest = dest
+            visitor = visitor_f64
         else:
             raise NotImplementedError
-        self.visit_all_octs(selector, func, &data)
-        if (data.global_index + 1) * data.nz * data.dims > source.size:
+        visitor.index = offset
+        # We only need this so we can continue calculating the offset
+        visitor.dims = dims
+        self.visit_all_octs(selector, visitor)
+        if (visitor.global_index + 1) * visitor.nz * visitor.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
-            print (data.global_index + 1) * data.nz * data.dims - source.size
+            print (visitor.global_index + 1) * visitor.nz * visitor.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
-        if data.index > dest.size:
+        if visitor.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
-            print data.index - dest.size
-            print (data.global_index + 1) * data.nz * data.dims, source.size
+            print visitor.index - dest.size
+            print (visitor.global_index + 1) * visitor.nz * visitor.dims, source.size
             print num_cells
             raise RuntimeError
         if num_cells >= 0:
             return dest
-        return data.index - offset
+        return visitor.index - offset
 
     def domain_ind(self, selector, int domain_id = -1):
         cdef np.ndarray[np.int64_t, ndim=1] ind
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        data.array = ind.data
-        self.visit_all_octs(selector, oct_visitors.index_octs, &data)
+        cdef oct_visitors.IndexOcts visitor
+        visitor = oct_visitors.IndexOcts(self, domain_id)
+        visitor.oct_index = ind
+        self.visit_all_octs(selector, visitor)
         return ind
 
     @cython.boundscheck(False)
@@ -740,14 +726,23 @@
             levels[i] = 100
             file_inds[i] = -1
             cell_inds[i] = 9
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
-        cdef void *p[3]
-        p[0] = levels.data
-        p[1] = file_inds.data
-        p[2] = cell_inds.data
-        data.array = p
-        self.visit_all_octs(selector, self.fill_func, &data)
+        cdef oct_visitors.FillFileIndicesO visitor_o
+        cdef oct_visitors.FillFileIndicesR visitor_r
+        if self.fill_style == "r":
+            visitor_r = oct_visitors.FillFileIndicesR(self, domain_id)
+            visitor_r.levels = levels
+            visitor_r.file_inds = file_inds
+            visitor_r.cell_inds = cell_inds
+            visitor = visitor_r
+        elif self.fill_style == "o":
+            visitor_o = oct_visitors.FillFileIndicesO(self, domain_id)
+            visitor_o.levels = levels
+            visitor_o.file_inds = file_inds
+            visitor_o.cell_inds = cell_inds
+            visitor = visitor_o
+        else:
+            raise RuntimeError
+        self.visit_all_octs(selector, visitor)
         return levels, cell_inds, file_inds
 
     def domain_count(self, SelectorObject selector):
@@ -755,10 +750,10 @@
         cdef np.int64_t i, num_octs
         cdef np.ndarray[np.int64_t, ndim=1] domain_counts
         domain_counts = np.zeros(self.num_domains, dtype="int64")
-        cdef OctVisitorData data
-        self.setup_data(&data, -1)
-        data.array = <void*> domain_counts.data
-        self.visit_all_octs(selector, oct_visitors.count_by_domain, &data)
+        cdef oct_visitors.CountByDomain visitor
+        visitor = oct_visitors.CountByDomain(self, -1)
+        visitor.domain_counts = domain_counts
+        self.visit_all_octs(selector, visitor)
         return domain_counts
 
     @cython.boundscheck(False)
@@ -786,10 +781,10 @@
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
-        cdef OctVisitorData data
-        self.setup_data(&data, 1)
-        self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        assert ((data.global_index+1)*data.nz == data.index)
+        cdef oct_visitors.AssignDomainInd visitor
+        visitor = oct_visitors.AssignDomainInd(self, 1)
+        self.visit_all_octs(selector, visitor)
+        assert ((visitor.global_index+1)*visitor.nz == visitor.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao
@@ -824,7 +819,7 @@
         for i in range(3):
             self.DLE[i] = domain_left_edge[i] #0
             self.DRE[i] = domain_right_edge[i] #num_grid
-        self.fill_func = oct_visitors.fill_file_indices_rind
+        self.fill_style = "r"
 
     @classmethod
     def load_octree(self, header):
@@ -869,13 +864,12 @@
 
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int vc = -1):
         cdef int i, j, k, n
         cdef np.int64_t key, ukey
-        data.global_index = -1
-        data.level = 0
+        visitor.global_index = -1
+        visitor.level = 0
         if vc == -1:
             vc = self.partial_coverage
         cdef np.float64_t pos[3]
@@ -888,11 +882,11 @@
         for i in range(self.num_root):
             o = self.root_nodes[i].node
             key = self.root_nodes[i].key
-            self.key_to_ipos(key, data.pos)
+            self.key_to_ipos(key, visitor.pos)
             for j in range(3):
-                pos[j] = self.DLE[j] + (data.pos[j] + 0.5) * dds[j]
+                pos[j] = self.DLE[j] + (visitor.pos[j] + 0.5) * dds[j]
             selector.recursively_visit_octs(
-                o, pos, dds, 0, func, data, vc)
+                o, pos, dds, 0, visitor, vc)
 
     cdef np.int64_t get_domain_offset(self, int domain_id):
         return 0 # We no longer have a domain offset.
@@ -946,7 +940,7 @@
         OctreeContainer.__init__(self, oct_domain_dimensions,
                 domain_left_edge, domain_right_edge, partial_coverage,
                  over_refine)
-        self.fill_func = oct_visitors.fill_file_indices_rind
+        self.fill_style = "r"
 
 cdef OctList *OctList_subneighbor_find(OctList *olist, Oct *top,
                                        int i, int j, int k):
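
The oct_container.pyx changes above replace the function-pointer-plus-void* calling convention with typed visitor objects: each caller now constructs a concrete OctVisitor subclass, attaches its output buffer as a typed attribute, and hands the object to visit_all_octs. Below is a minimal pure-Python sketch of that pattern; the class and function names are illustrative stand-ins, not the yt API.

    import numpy as np

    class VisitorSketch:
        # Stand-in for the cdef OctVisitor base class: traversal state only.
        def __init__(self, domain_id=-1):
            self.index = 0
            self.domain = domain_id

        def visit(self, oct_node, selected):
            raise NotImplementedError

    class CountCellsSketch(VisitorSketch):
        # Mirrors CountTotalCells: count every selected cell.
        def visit(self, oct_node, selected):
            self.index += selected

    class ICoordsSketch(VisitorSketch):
        # Mirrors ICoordsOcts: the output buffer is a typed attribute
        # instead of an entry behind an untyped void* slot.
        def __init__(self, num_cells, domain_id=-1):
            super().__init__(domain_id)
            self.icoords = np.empty((num_cells, 3), dtype="int64")

        def visit(self, oct_node, selected):
            if not selected:
                return
            self.icoords[self.index] = oct_node["pos"]
            self.index += 1

    def visit_all_octs(octs, visitor):
        # Stand-in for the Cython traversal: every (oct, selected) pair is
        # handed to the visitor object rather than to a bare function pointer.
        for o in octs:
            visitor.visit(o, o["selected"])

    octs = [{"pos": (i, i, i), "selected": i % 2} for i in range(6)]
    counter = CountCellsSketch()
    visit_all_octs(octs, counter)
    filler = ICoordsSketch(num_cells=counter.index)
    visit_all_octs(octs, filler)
    print(counter.index, filler.icoords)

The design trade-off is the same one the diff makes: an extra Python-level attribute lookup per visit in exchange for bounds-checked, typed buffers and no manual casting from void*.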

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -30,50 +30,97 @@
     np.int64_t domain
     np.int64_t padding
 
-cdef struct OctVisitorData:
-    np.uint64_t index
-    np.uint64_t last
-    np.int64_t global_index
-    np.int64_t pos[3]       # position in ints
-    np.uint8_t ind[3]              # cell position
-    void *array
-    int dims
-    np.int32_t domain
-    np.int8_t level
-    np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
-                   # To calculate nzones, 1 << (oref * 3)
-    np.int32_t nz
-                            
-ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
-                                   np.uint8_t selected)
+cdef class OctVisitor:
+    cdef np.uint64_t index
+    cdef np.uint64_t last
+    cdef np.int64_t global_index
+    cdef np.int64_t pos[3]       # position in ints
+    cdef np.uint8_t ind[3]              # cell position
+    cdef int dims
+    cdef np.int32_t domain
+    cdef np.int8_t level
+    cdef np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
+                        # To calculate nzones, 1 << (oref * 3)
+    cdef np.int32_t nz
 
-cdef oct_visitor_function count_total_octs
-cdef oct_visitor_function count_total_cells
-cdef oct_visitor_function mark_octs
-cdef oct_visitor_function mask_octs
-cdef oct_visitor_function index_octs
-cdef oct_visitor_function icoords_octs
-cdef oct_visitor_function ires_octs
-cdef oct_visitor_function fcoords_octs
-cdef oct_visitor_function fwidth_octs
-cdef oct_visitor_function copy_array_f64
-cdef oct_visitor_function copy_array_i64
-cdef oct_visitor_function identify_octs
-cdef oct_visitor_function assign_domain_ind
-cdef oct_visitor_function fill_file_indices_oind
-cdef oct_visitor_function fill_file_indices_rind
-cdef oct_visitor_function count_by_domain
-cdef oct_visitor_function store_octree
-cdef oct_visitor_function load_octree
+    # There will also be overrides for the memoryviews associated with the
+    # specific instance.
+
+    cdef void visit(self, Oct*, np.uint8_t selected)
+
+    cdef inline int oind(self):
+        cdef int d = (1 << self.oref)
+        return (((self.ind[0]*d)+self.ind[1])*d+self.ind[2])
+
+    cdef inline int rind(self):
+        cdef int d = (1 << self.oref)
+        return (((self.ind[2]*d)+self.ind[1])*d+self.ind[0])
+
+cdef class CountTotalOcts(OctVisitor):
+    pass
+
+cdef class CountTotalCells(OctVisitor):
+    pass
+
+cdef class MarkOcts(OctVisitor):
+    # Unused
+    cdef np.uint8_t[:,:,:,:] mark
+
+cdef class MaskOcts(OctVisitor):
+    cdef np.uint8_t[:,:,:,:] mask
+
+cdef class IndexOcts(OctVisitor):
+    cdef np.int64_t[:] oct_index
+
+cdef class ICoordsOcts(OctVisitor):
+    cdef np.int64_t[:,:] icoords
+
+cdef class IResOcts(OctVisitor):
+    cdef np.int64_t[:] ires
+
+cdef class FCoordsOcts(OctVisitor):
+    cdef np.float64_t[:,:] fcoords
+
+cdef class FWidthOcts(OctVisitor):
+    cdef np.float64_t[:,:] fwidth
+
+cdef class CopyArrayI64(OctVisitor):
+    cdef np.int64_t[:,:,:,:,:,:] source
+    cdef np.int64_t[:,:] dest
+
+cdef class CopyArrayF64(OctVisitor):
+    cdef np.float64_t[:,:,:,:,:] source
+    cdef np.float64_t[:,:] dest
+
+cdef class IdentifyOcts(OctVisitor):
+    cdef np.uint8_t[:] domain_mask
+
+cdef class AssignDomainInd(OctVisitor):
+    pass
+
+cdef class FillFileIndicesO(OctVisitor):
+    cdef np.uint8_t[:] levels
+    cdef np.uint8_t[:] file_inds
+    cdef np.uint8_t[:] cell_inds
+
+cdef class FillFileIndicesR(OctVisitor):
+    cdef np.uint8_t[:] levels
+    cdef np.int64_t[:] file_inds
+    cdef np.uint8_t[:] cell_inds
+
+cdef class CountByDomain(OctVisitor):
+    cdef np.int64_t[:] domain_counts
+
+cdef class StoreOctree(OctVisitor):
+    cdef np.uint8_t[:] ref_mask
+
+cdef class LoadOctree(OctVisitor):
+    cdef np.uint8_t[:] ref_mask
+    cdef Oct* octs
+    cdef np.int64_t *nocts
+    cdef np.int64_t *nfinest
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
     return (((i*2)+j)*2+k)
 
-cdef inline int oind(OctVisitorData *data):
-    cdef int d = (1 << data.oref)
-    return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
-
-cdef inline int rind(OctVisitorData *data):
-    cdef int d = (1 << data.oref)
-    return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
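
The oind()/rind() helpers moved onto the visitor flatten a cell's (i, j, k) index within its oct, where each edge holds d = 1 << oref cells: oind uses C ordering, rind the reversed (Fortran-like) ordering. A small, hedged check of that arithmetic in plain Python:

    import numpy as np

    def oind(ind, oref):
        # C-order flattening, as in OctVisitor.oind().
        d = 1 << oref
        return ((ind[0] * d) + ind[1]) * d + ind[2]

    def rind(ind, oref):
        # Reversed-axis flattening, as in OctVisitor.rind().
        d = 1 << oref
        return ((ind[2] * d) + ind[1]) * d + ind[0]

    oref = 2
    d = 1 << oref
    for ijk in np.ndindex(d, d, d):
        assert oind(ijk, oref) == np.ravel_multi_index(ijk, (d, d, d))
        assert rind(ijk, oref) == np.ravel_multi_index(ijk[::-1], (d, d, d))
    print("cells per oct:", 1 << (oref * 3))  # nz = 1 << (oref*3), 64 here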

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -19,209 +19,264 @@
 import numpy
 from fp_utils cimport *
 from libc.stdlib cimport malloc, free
+from yt.geometry.oct_container cimport OctreeContainer
 
 # Now some visitor functions
 
-cdef void copy_array_f64(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We should always have global_index less than our source.
-    # "last" here tells us the dimensionality of the array.
-    if selected == 0: return
-    cdef int i
-    # There are this many records between "octs"
-    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
-    cdef np.float64_t **p = <np.float64_t**> data.array
-    index += oind(data)*data.dims
-    for i in range(data.dims):
-        p[1][data.index + i] = p[0][index + i]
-    data.index += data.dims
+cdef class OctVisitor:
+    def __init__(self, OctreeContainer octree, int domain_id = -1):
+        cdef int i
+        self.index = 0
+        self.last = -1
+        self.global_index = -1
+        for i in range(3):
+            self.pos[i] = -1
+            self.ind[i] = -1
+        self.dims = 0
+        self.domain = domain_id
+        self.level = -1
+        self.oref = octree.oref
+        self.nz = (1 << (self.oref*3))
 
-cdef void copy_array_i64(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We should always have global_index less than our source.
-    # "last" here tells us the dimensionality of the array.
-    if selected == 0: return
-    cdef int i
-    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
-    cdef np.int64_t **p = <np.int64_t**> data.array
-    index += oind(data)*data.dims
-    for i in range(data.dims):
-        p[1][data.index + i] = p[0][index + i]
-    data.index += data.dims
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        raise NotImplementedError
 
-cdef void count_total_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Count even if not selected.
-    # Number of *octs* visited.
-    if data.last != o.domain_ind:
-        data.index += 1
-        data.last = o.domain_ind
+# This copies an integer array from the source to the destination, based on the
+# selection criteria.
+cdef class CopyArrayI64(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We should always have global_index less than our source.
+        # "last" here tells us the dimensionality of the array.
+        if selected == 0: return
+        # There are this many records between "octs"
+        self.dest[self.index, :] = self.source[
+                self.ind[2], self.ind[1], self.ind[0],
+                self.global_index, :]
+        self.index += 1
 
-cdef void count_total_cells(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Number of *cells* visited and selected.
-    data.index += selected
+# This copies a floating point array from the source to the destination, based
+# on the selection criteria.
+cdef class CopyArrayF64(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We should always have global_index less than our source.
+        # "last" here tells us the dimensionality of the array.
+        if selected == 0: return
+        # There are this many records between "octs"
+        self.dest[self.index, :] = self.source[
+                self.ind[2], self.ind[1], self.ind[0],
+                self.global_index, :]
+        self.index += 1
 
-cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We mark them even if they are not selected
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        data.index += 1
-    cdef np.int64_t index = data.index * data.nz
-    index += oind(data)
-    arr[index] = 1
+# This counts the number of octs, selected or not, that the selector hits.
+# Note that the selector will not recursively visit unselected octs, so this is
+# still useful.
+cdef class CountTotalOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Count even if not selected.
+        # Number of *octs* visited.
+        if self.last != o.domain_ind:
+            self.index += 1
+            self.last = o.domain_ind
 
-cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * data.nz
-    index += oind(data)
-    arr[index] = 1
+# This counts the number of selected cells.
+cdef class CountTotalCells(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Number of *cells* visited and selected.
+        self.index += selected
 
-cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that we provide an index even if the cell is not selected.
-    cdef int i
-    cdef np.int64_t *arr
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        arr = <np.int64_t *> data.array
-        arr[o.domain_ind] = data.index
-        data.index += 1
+# Every time a cell is visited, mark it.  This will be for all visited octs.
+cdef class MarkOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We mark them even if they are not selected
+        if self.last != o.domain_ind:
+            self.last = o.domain_ind
+            self.index += 1
+        self.mark[self.index, self.ind[2], self.ind[1], self.ind[0]] = 1
 
-cdef void icoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef np.int64_t *coords = <np.int64_t*> data.array
-    cdef int i
-    for i in range(3):
-        coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
-    data.index += 1
+# Mask all the selected cells.
+cdef class MaskOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        self.mask[self.global_index, self.ind[2], self.ind[1], self.ind[0]] = 1
 
-cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef np.int64_t *ires = <np.int64_t*> data.array
-    ires[data.index] = data.level
-    data.index += 1
+# Compute a mapping from domain_ind to flattened index.
+cdef class IndexOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Note that we provide an index even if the cell is not selected.
+        if self.last != o.domain_ind:
+            self.last = o.domain_ind
+            self.oct_index[o.domain_ind] = self.index
+            self.index += 1
 
-@cython.cdivision(True)

-cdef void fcoords_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that this does not actually give the correct floating point
-    # coordinates.  It gives them in some unit system where the domain is 1.0
-    # in all directions, and assumes that they will be scaled later.
-    if selected == 0: return
-    cdef np.float64_t *fcoords = <np.float64_t*> data.array
-    cdef int i
-    cdef np.float64_t c, dx
-    dx = 1.0 / ((1 << data.oref) << data.level)
-    for i in range(3):
-        c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i])
-        fcoords[data.index * 3 + i] = (c + 0.5) * dx
-    data.index += 1
+# Integer coordinates
+cdef class ICoordsOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        cdef int i
+        for i in range(3):
+            self.icoords[self.index,i] = (self.pos[i] << self.oref) + self.ind[i]
+        self.index += 1
 
-@cython.cdivision(True)
-cdef void fwidth_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # Note that this does not actually give the correct floating point
-    # coordinates.  It gives them in some unit system where the domain is 1.0
-    # in all directions, and assumes that they will be scaled later.
-    if selected == 0: return
-    cdef np.float64_t *fwidth = <np.float64_t*> data.array
-    cdef int i
-    cdef np.float64_t dx
-    dx = 1.0 / ((1 << data.oref) << data.level)
-    for i in range(3):
-        fwidth[data.index * 3 + i] = dx
-    data.index += 1
+# Level
+cdef class IResOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        self.ires[self.index] = self.level
+        self.index += 1
 
-cdef void identify_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We assume that our domain has *already* been selected by, which means
-    # we'll get all cells within the domain for a by-domain selector and all
-    # cells within the domain *and* selector for the selector itself.
-    if selected == 0: return
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    arr[o.domain - 1] = 1
+# Floating point coordinates
+cdef class FCoordsOcts(OctVisitor):
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Note that this does not actually give the correct floating point
+        # coordinates.  It gives them in some unit system where the domain is 1.0
+        # in all directions, and assumes that they will be scaled later.
+        if selected == 0: return
+        cdef int i
+        cdef np.float64_t c, dx
+        dx = 1.0 / ((1 << self.oref) << self.level)
+        for i in range(3):
+            c = <np.float64_t> ((self.pos[i] << self.oref ) + self.ind[i])
+            self.fcoords[self.index,i] = (c + 0.5) * dx
+        self.index += 1
 
-cdef void assign_domain_ind(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    o.domain_ind = data.global_index
-    data.index += 1
+# Floating point widths; domain modifications are done later.
+cdef class FWidthOcts(OctVisitor):
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # Note that this does not actually give the correct floating point
+        # coordinates.  It gives them in some unit system where the domain is 1.0
+        # in all directions, and assumes that they will be scaled later.
+        if selected == 0: return
+        cdef int i
+        cdef np.float64_t dx
+        dx = 1.0 / ((1 << self.oref) << self.level)
+        for i in range(3):
+            self.fwidth[self.index,i] = dx
+        self.index += 1
 
-cdef void fill_file_indices_oind(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We fill these arrays, then inside the level filler we use these as
-    # indices as we fill a second array from the data.
-    if selected == 0: return
-    cdef void **p = <void **> data.array
-    cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
-    cdef np.int64_t *find_arr = <np.int64_t *> p[1]
-    cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
-    level_arr[data.index] = data.level
-    find_arr[data.index] = o.file_ind
-    cell_arr[data.index] = oind(data)
-    data.index +=1
+# Mark which domains are touched by a selector.
+cdef class IdentifyOcts(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We assume that our domain has *already* been selected by, which means
+        # we'll get all cells within the domain for a by-domain selector and all
+        # cells within the domain *and* selector for the selector itself.
+        if selected == 0: return
+        self.domain_mask[o.domain - 1] = 1
 
-cdef void fill_file_indices_rind(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We fill these arrays, then inside the level filler we use these as
-    # indices as we fill a second array from the data.
-    if selected == 0: return
-    cdef void **p = <void **> data.array
-    cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
-    cdef np.int64_t *find_arr = <np.int64_t *> p[1]
-    cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
-    level_arr[data.index] = data.level
-    find_arr[data.index] = o.file_ind
-    cell_arr[data.index] = rind(data)
-    data.index +=1
+# Assign domain indices to octs
+cdef class AssignDomainInd(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        o.domain_ind = self.global_index
+        self.index += 1
 
-cdef void count_by_domain(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef np.int64_t *arr
-    if selected == 0: return
-    # NOTE: We do this for every *cell*.
-    arr = <np.int64_t *> data.array
-    arr[o.domain - 1] += 1
+# From the file, fill in C order
+cdef class FillFileIndicesO(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We fill these arrays, then inside the level filler we use these as
+        # indices as we fill a second array from the data.
+        if selected == 0: return
+        self.levels[self.index] = self.level
+        self.file_inds[self.index] = o.file_ind
+        self.cell_inds[self.index] = self.oind()
+        self.index +=1
 
-cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef np.uint8_t res, ii
-    cdef np.uint8_t *arr
-    cdef np.uint8_t *always_descend
-    ii = cind(data.ind[0], data.ind[1], data.ind[2])
-    cdef void **p = <void **> data.array
-    arr = <np.uint8_t *> p[0]
-    if o.children == NULL:
-        # Not refined.
-        res = 0
-    else:
-        res = 1
-    arr[data.index] = res
-    data.index += 1
+# From the file, fill in F order
+cdef class FillFileIndicesR(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        # We fill these arrays, then inside the level filler we use these as
+        # indices as we fill a second array from the data.
+        if selected == 0: return
+        self.levels[self.index] = self.level
+        self.file_inds[self.index] = o.file_ind
+        self.cell_inds[self.index] = self.rind()
+        self.index +=1
 
-cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef void **p = <void **> data.array
-    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
-    cdef Oct* octs = <Oct*> p[1]
-    cdef np.int64_t *nocts = <np.int64_t*> p[2]
-    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
-    cdef int i, ii
-    ii = cind(data.ind[0], data.ind[1], data.ind[2])
-    if arr[data.index] == 0:
-        # We only want to do this once.  Otherwise we end up with way too many
-        # nfinest for our tastes.
-        if o.file_ind == -1:
-            o.children = NULL
-            o.file_ind = nfinest[0]
-            o.domain = 1
-            nfinest[0] += 1
-    elif arr[data.index] > 0:
-        if arr[data.index] != 1 and arr[data.index] != 8:
-            print "ARRAY CLUE: ", arr[data.index], "UNKNOWN"
+# Count octs by domain
+cdef class CountByDomain(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        if selected == 0: return
+        # NOTE: We do this for every *cell*.
+        self.domain_counts[o.domain - 1] += 1
+
+# Store the refinement mapping of the octree to be loaded later
+cdef class StoreOctree(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        cdef np.uint8_t res, ii
+        ii = cind(self.ind[0], self.ind[1], self.ind[2])
+        if o.children == NULL:
+            # Not refined.
+            res = 0
+        else:
+            res = 1
+        self.ref_mask[self.index] = res
+        self.index += 1
+
+# Go from a refinement mapping to a new octree
+cdef class LoadOctree(OctVisitor):
+    @cython.boundscheck(False)
+    @cython.initializedcheck(False)
+    cdef void visit(self, Oct* o, np.uint8_t selected):
+        cdef int i, ii
+        ii = cind(self.ind[0], self.ind[1], self.ind[2])
+        if self.ref_mask[self.index] == 0:
+            # We only want to do this once.  Otherwise we end up with way too many
+            # nfinest for our tastes.
+            if o.file_ind == -1:
+                o.children = NULL
+                o.file_ind = self.nfinest[0]
+                o.domain = 1
+                self.nfinest[0] += 1
+        elif self.ref_mask[self.index] > 0:
+            if self.ref_mask[self.index] != 1 and self.ref_mask[self.index] != 8:
+                print "ARRAY CLUE: ", self.ref_mask[self.index], "UNKNOWN"
+                raise RuntimeError
+            if o.children == NULL:
+                o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+                for i in range(8):
+                    o.children[i] = NULL
+            for i in range(8):
+                o.children[ii + i] = &self.octs[self.nocts[0]]
+                o.children[ii + i].domain_ind = self.nocts[0]
+                o.children[ii + i].file_ind = -1
+                o.children[ii + i].domain = -1
+                o.children[ii + i].children = NULL
+                self.nocts[0] += 1
+        else:
+            print "SOMETHING IS AMISS", self.index
             raise RuntimeError
-        if o.children == NULL:
-            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
-            for i in range(8):
-                o.children[i] = NULL
-        for i in range(8):
-            o.children[ii + i] = &octs[nocts[0]]
-            o.children[ii + i].domain_ind = nocts[0]
-            o.children[ii + i].file_ind = -1
-            o.children[ii + i].domain = -1
-            o.children[ii + i].children = NULL
-            nocts[0] += 1
-    else:
-        print "SOMETHING IS AMISS", data.index
-        raise RuntimeError
-    data.index += 1
+        self.index += 1
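
StoreOctree and LoadOctree above serialize and replay the tree's refinement pattern: the traversal emits one flag per visited oct (0 for a leaf, nonzero for a refined oct), and replaying those flags in the same order allocates children and rebuilds an octree of the same shape. A hedged pure-Python sketch of that round trip, with a dict-based node layout that is illustrative only:

    def store(node, ref_mask):
        # Depth-first walk: record whether each oct is refined.
        refined = node["children"] is not None
        ref_mask.append(1 if refined else 0)
        if refined:
            for child in node["children"]:
                store(child, ref_mask)

    def load(flags):
        # Replay the flags in the same order to rebuild the tree shape.
        node = {"children": None}
        if next(flags):
            node["children"] = [load(flags) for _ in range(8)]
        return node

    leaf = lambda: {"children": None}
    root = {"children": [leaf() for _ in range(8)]}
    root["children"][3] = {"children": [leaf() for _ in range(8)]}

    mask = []
    store(root, mask)
    rebuilt = load(iter(mask))
    check = []
    store(rebuilt, check)
    assert mask == check
    print(mask)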

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -15,15 +15,13 @@
 #-----------------------------------------------------------------------------
 
 from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX
-cimport oct_visitors
 from oct_visitors cimport cind
 from libc.stdlib cimport malloc, free, qsort
 from libc.math cimport floor
 from fp_utils cimport *
 cimport numpy as np
 import numpy as np
-from selection_routines cimport SelectorObject, \
-    OctVisitorData, oct_visitor_function
+from selection_routines cimport SelectorObject
 cimport cython
 
 cdef class ParticleOctreeContainer(OctreeContainer):

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -15,8 +15,7 @@
 #-----------------------------------------------------------------------------
 
 cimport numpy as np
-from oct_visitors cimport Oct, OctVisitorData, \
-    oct_visitor_function
+from oct_visitors cimport Oct, OctVisitor
 from grid_visitors cimport GridTreeNode, GridVisitorData, \
     grid_visitor_function, check_child_masked
 
@@ -43,12 +42,11 @@
     cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int visit_covered = ?)
-    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+    cdef void visit_oct_cells(self, Oct *root, Oct *ch,
                               np.float64_t spos[3], np.float64_t sdds[3],
-                              oct_visitor_function *func, int i, int j, int k)
+                              OctVisitor visitor, int i, int j, int k)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r 45dc609be9e893c48cdfc2e568eb26ca0a0f8cbe yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -164,16 +164,16 @@
         return gridi.astype("bool")
 
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
-        cdef OctVisitorData data
-        octree.setup_data(&data, domain_id)
-        octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
-        return data.index
+        cdef oct_visitors.CountTotalOcts visitor
+        visitor = oct_visitors.CountTotalOcts(octree, domain_id)
+        octree.visit_all_octs(self, visitor)
+        return visitor.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
-        cdef OctVisitorData data
-        octree.setup_data(&data, domain_id)
-        octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
-        return data.index
+        cdef oct_visitors.CountTotalCells visitor
+        visitor = oct_visitors.CountTotalCells(octree, domain_id)
+        octree.visit_all_octs(self, visitor)
+        return visitor.index
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -181,8 +181,7 @@
     cdef void recursively_visit_octs(self, Oct *root,
                         np.float64_t pos[3], np.float64_t dds[3],
                         int level,
-                        oct_visitor_function *func,
-                        OctVisitorData *data,
+                        OctVisitor visitor,
                         int visit_covered = 0):
         # visit_covered tells us whether this octree supports partial
         # refinement.  If it does, we need to handle this specially -- first
@@ -203,7 +202,7 @@
             RE[i] = pos[i] + dds[i]/2.0
         #print LE[0], RE[0], LE[1], RE[1], LE[2], RE[2]
         res = self.select_grid(LE, RE, level, root)
-        if res == 1 and data.domain > 0 and root.domain != data.domain:
+        if res == 1 and visitor.domain > 0 and root.domain != visitor.domain:
             res = -1
         cdef int increment = 1
         cdef int next_level, this_level
@@ -243,52 +242,52 @@
                         if root.children != NULL:
                             ch = root.children[cind(i, j, k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
-                            # Note that data.pos is always going to be the
+                            # Note that visitor.pos is always going to be the
                             # position of the Oct -- it is *not* always going
                             # to be the same as the position of the cell under
                             # investigation.
-                            data.pos[0] = (data.pos[0] << 1) + i
-                            data.pos[1] = (data.pos[1] << 1) + j
-                            data.pos[2] = (data.pos[2] << 1) + k
-                            data.level += 1
+                            visitor.pos[0] = (visitor.pos[0] << 1) + i
+                            visitor.pos[1] = (visitor.pos[1] << 1) + j
+                            visitor.pos[2] = (visitor.pos[2] << 1) + k
+                            visitor.level += 1
                             self.recursively_visit_octs(
-                                ch, spos, sdds, level + 1, func, data,
+                                ch, spos, sdds, level + 1, visitor,
                                 visit_covered)
-                            data.pos[0] = (data.pos[0] >> 1)
-                            data.pos[1] = (data.pos[1] >> 1)
-                            data.pos[2] = (data.pos[2] >> 1)
-                            data.level -= 1
-                        elif this_level == 1 and data.oref > 0:
-                            data.global_index += increment
+                            visitor.pos[0] = (visitor.pos[0] >> 1)
+                            visitor.pos[1] = (visitor.pos[1] >> 1)
+                            visitor.pos[2] = (visitor.pos[2] >> 1)
+                            visitor.level -= 1
+                        elif this_level == 1 and visitor.oref > 0:
+                            visitor.global_index += increment
                             increment = 0
-                            self.visit_oct_cells(data, root, ch, spos, sdds,
-                                                 func, i, j, k)
+                            self.visit_oct_cells(root, ch, spos, sdds,
+                                                 visitor, i, j, k)
                         elif this_level == 1 and increment == 1:
-                            data.global_index += increment
+                            visitor.global_index += increment
                             increment = 0
-                            data.ind[0] = data.ind[1] = data.ind[2] = 0
-                            func(root, data, 1)
+                            visitor.ind[0] = visitor.ind[1] = visitor.ind[2] = 0
+                            visitor.visit(root, 1)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
             this_level = 0 # We turn this off for the second pass.
             iter += 1
 
-    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+    cdef void visit_oct_cells(self, Oct *root, Oct *ch,
                               np.float64_t spos[3], np.float64_t sdds[3],
-                              oct_visitor_function *func, int i, int j, int k):
+                              OctVisitor visitor, int i, int j, int k):
         # We can short-circuit the whole process if data.oref == 1.
         # This saves us some funny-business.
         cdef int selected
-        if data.oref == 1:
+        if visitor.oref == 1:
             selected = self.select_cell(spos, sdds)
             if ch != NULL:
                 selected *= self.overlap_cells
-            # data.ind refers to the cell, not to the oct.
-            data.ind[0] = i
-            data.ind[1] = j
-            data.ind[2] = k
-            func(root, data, selected)
+            # visitor.ind refers to the cell, not to the oct.
+            visitor.ind[0] = i
+            visitor.ind[1] = j
+            visitor.ind[2] = k
+            visitor.visit(root, selected)
             return
         # Okay, now that we've got that out of the way, we have to do some
         # other checks here.  In this case, spos[] is the position of the
@@ -298,7 +297,7 @@
         cdef np.float64_t dds[3]
         cdef np.float64_t pos[3]
         cdef int ci, cj, ck
-        cdef int nr = (1 << (data.oref - 1))
+        cdef int nr = (1 << (visitor.oref - 1))
         for ci in range(3):
             dds[ci] = sdds[ci] / nr
         # Boot strap at the first index.
@@ -311,10 +310,10 @@
                     selected = self.select_cell(pos, dds)
                     if ch != NULL:
                         selected *= self.overlap_cells
-                    data.ind[0] = ci + i * nr
-                    data.ind[1] = cj + j * nr
-                    data.ind[2] = ck + k * nr
-                    func(root, data, selected)
+                    visitor.ind[0] = ci + i * nr
+                    visitor.ind[1] = cj + j * nr
+                    visitor.ind[2] = ck + k * nr
+                    visitor.visit(root, selected)
                     pos[2] += dds[2]
                 pos[1] += dds[1]
             pos[0] += dds[0]
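
count_octs() and count_oct_cells() above illustrate the two-pass idiom the visitor classes support: a counting visitor (CountTotalOcts / CountTotalCells) sizes the output first, then a filling visitor writes into a preallocated array of exactly that size. A hedged sketch with made-up data:

    import numpy as np

    # (level, selected) pairs standing in for the cells a selector visits.
    cells = [(0, 1), (1, 0), (1, 1), (2, 1), (2, 0), (2, 1)]

    # Pass 1: count selected cells, as CountTotalCells does via visitor.index.
    num_cells = sum(sel for _, sel in cells)

    # Pass 2: fill a preallocated array, mirroring ires()/IResOcts.
    ires = np.empty(num_cells, dtype="int64")
    index = 0
    for level, sel in cells:
        if sel:
            ires[index] = level
            index += 1

    assert index == num_cells
    print(ires)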

Repository URL: https://bitbucket.org/yt_analysis/yt/
