[yt-svn] commit/yt: 88 new changesets

commits-noreply at bitbucket.org
Mon Oct 14 05:33:10 PDT 2013


88 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/69c0b3816ba1/
Changeset:   69c0b3816ba1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-18 22:38:07
Summary:     First draft of ionization species calculation for RAMSES.
Affected #:  2 files

diff -r 6793ff95963ec411086d25ef94c7aff7985a8984 -r 69c0b3816ba156bc9eb7d9b04f739eb2320ffd61 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -50,7 +50,10 @@
 import yt.utilities.fortran_utils as fpu
 from yt.geometry.oct_container import \
     RAMSESOctreeContainer
-from .fields import RAMSESFieldInfo, KnownRAMSESFields
+from .fields import \
+    RAMSESFieldInfo, \
+    KnownRAMSESFields, \
+    create_cooling_fields
 
 class RAMSESDomainFile(object):
     _last_mask = None
@@ -317,6 +320,10 @@
             pfl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(pfl)
         self.field_list = self.fluid_field_list + self.particle_field_list
+
+    def _setup_derived_fields(self):
+        self._parse_cooling()
+        super(RAMSESGeometryHandler, self)._setup_derived_fields()
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -353,6 +360,14 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
+    def _parse_cooling(self):
+        pf = self.parameter_file
+        num = os.path.basename(pf.parameter_filename).split("."
+                )[0].split("_")[1]
+        basename = "%s/cooling_%05i.out" % (
+            os.path.dirname(pf.parameter_filename), int(num))
+        create_cooling_fields(basename, pf.field_info)
+
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESGeometryHandler
     _fieldinfo_fallback = RAMSESFieldInfo

diff -r 6793ff95963ec411086d25ef94c7aff7985a8984 -r 69c0b3816ba156bc9eb7d9b04f739eb2320ffd61 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -23,6 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import os
+
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -42,6 +44,10 @@
     mass_hydrogen_cgs, \
     mass_sun_cgs, \
     mh
+from yt.utilities.linear_interpolators import \
+    BilinearFieldInterpolator
+import yt.utilities.fortran_utils as fpu
+from yt.funcs import mylog
 import numpy as np
 
 RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo, "RFI")
@@ -205,3 +211,42 @@
                           RAMSESFieldInfo)
 particle_deposition_functions("all", "Coordinates", "particle_mass",
                                RAMSESFieldInfo)
+_cool_axes = ("lognH", "logT", "logTeq")
+_cool_arrs = ("metal", "cool", "heat", "metal_prime", "cool_prime",
+              "heat_prime", "mu", "abundances")
+_cool_species = ("Electron_Fraction", "HI_Fraction", "HII_Fraction",
+                 "HeI_Fraction", "HeII_Fraction", "HeIII_Fraction")
+
+def create_cooling_fields(filename, field_info):
+    if not os.path.exists(filename): return
+    def _create_field(name, interp_object):
+        def _func(field, data):
+            shape = data["Temperature"].shape
+            d = {'lognH': np.log10(data["Density"]/mh).ravel(),
+                 'logT' : np.log10(data["Temperature"]).ravel()}
+            rv = 10**interp_object(d).reshape(shape)
+            return rv
+        field_info.add_field(name = name, function=_func,
+                             units = r"\rm{g}/\rm{cm}^3",
+                             projected_units = r"\rm{g}/\rm{cm}^2")
+    avals = {}
+    tvals = {}
+    with open(filename, "rb") as f:
+        n1, n2 = fpu.read_vector(f, 'i')
+        n = n1 * n2
+        for ax in _cool_axes:
+            avals[ax] = fpu.read_vector(f, 'd')
+        for tname in _cool_arrs:
+            var = fpu.read_vector(f, 'd')
+            if var.size == n1*n2:
+                tvals[tname] = var.reshape((n1, n2), order='F')
+            else:
+                var = var.reshape((n1, n2, var.size / (n1*n2)), order='F')
+                for i in range(var.shape[-1]):
+                    tvals[_cool_species[i]] = var[:,:,i]
+    
+    for n in tvals:
+        interp = BilinearFieldInterpolator(tvals[n],
+                    (avals["lognH"], avals["logT"]),
+                    ["lognH", "logT"], truncate = True)
+        _create_field(n, interp)

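As an illustration of what these derived fields do at lookup time, the bilinear interpolation over the (lognH, logT) axes can be sketched in plain NumPy. Everything below is illustrative (names, table contents) rather than yt API; the clamping mirrors the truncate=True passed to BilinearFieldInterpolator above.

import numpy as np

def bilinear_lookup(table, x_axis, y_axis, x, y):
    # Clamp query points to the table bounds, as truncate=True does above.
    x = np.clip(x, x_axis[0], x_axis[-1])
    y = np.clip(y, y_axis[0], y_axis[-1])
    # Left-hand bin index along each axis.
    i = np.clip(np.searchsorted(x_axis, x) - 1, 0, len(x_axis) - 2)
    j = np.clip(np.searchsorted(y_axis, y) - 1, 0, len(y_axis) - 2)
    # Fractional position inside the bin.
    tx = (x - x_axis[i]) / (x_axis[i + 1] - x_axis[i])
    ty = (y - y_axis[j]) / (y_axis[j + 1] - y_axis[j])
    return ((1 - tx) * (1 - ty) * table[i, j] +
            tx * (1 - ty) * table[i + 1, j] +
            (1 - tx) * ty * table[i, j + 1] +
            tx * ty * table[i + 1, j + 1])

# Example: evaluate a stand-in table at nH = 1e-3 cm^-3, T = 1e4 K.
lognH = np.linspace(-6.0, 0.0, 61)
logT = np.linspace(1.0, 9.0, 81)
table = np.random.rand(61, 81)   # stand-in for one cooling-table array
value = 10 ** bilinear_lookup(table, lognH, logT,
                              np.array([-3.0]), np.array([4.0]))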

https://bitbucket.org/yt_analysis/yt/commits/e54521a262de/
Changeset:   e54521a262de
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-18 22:55:03
Summary:     Converted the species fractions to use the cooling file explicitly.
Affected #:  1 file

diff -r 69c0b3816ba156bc9eb7d9b04f739eb2320ffd61 -r e54521a262de7a62fa1c5e8e05e121719e870693 yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -157,10 +157,6 @@
     ef = (1.0 + data.pf.current_redshift)**3.0
     return data[sp] / ef
 
-def _SpeciesFraction(field, data):
-    sp = field.name.split("_")[0] + "_Density"
-    return data[sp] / data["Density"]
-
 def _SpeciesMass(field, data):
     sp = field.name.split("_")[0] + "_Density"
     return data[sp] * data["CellVolume"]
@@ -170,22 +166,22 @@
     sp = field.name.split("_")[0] + "_Density"
     return data[sp] / _speciesMass[species]
 
+def _SpeciesDensity(field, data):
+    species = field.name.split("_")[0]
+    sp = field.name.split("_")[0] + "_Fraction"
+    return data[sp] * data["Density"]
+
 def _convertCellMassMsun(data):
     return 1.0/mass_sun_cgs # g^-1
 def _ConvertNumberDensity(data):
     return 1.0/mh
 
 for species in _speciesList:
-    add_ramses_field("%s_Density" % species,
-             function = NullFunc,
+    add_field("%s_Density" % species,
+             function = _SpeciesDensity,
              display_name = "%s\/Density" % species,
-             convert_function = _convertDensity,
              units = r"\rm{g}/\rm{cm}^3",
              projected_units = r"\rm{g}/\rm{cm}^2")
-    add_field("%s_Fraction" % species,
-             function=_SpeciesFraction,
-             validators=ValidateDataField("%s_Density" % species),
-             display_name="%s\/Fraction" % species)
     add_field("Comoving_%s_Density" % species,
              function=_SpeciesComovingDensity,
              validators=ValidateDataField("%s_Density" % species),

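This changeset inverts the direction of the derivation: with the cooling table in place, the fraction becomes the primary quantity and the density is derived from it. A one-line sketch of the new relationship (illustrative, not yt code):

def species_density(species_fraction, gas_density):
    # e.g. HI_Density = HI_Fraction * Density, with HI_Fraction now
    # interpolated from the cooling table instead of read from disk.
    return species_fraction * gas_density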

https://bitbucket.org/yt_analysis/yt/commits/7326a8d3469f/
Changeset:   7326a8d3469f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-29 00:44:45
Summary:     Adding OctList object and beginning a neighbor search.
Affected #:  3 files

diff -r b95e8f2a3c0e10f8b10c60b606886c54523a9df9 -r 7326a8d3469f1377e7604c5454a65876cd130f26 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -40,6 +40,8 @@
 cdef struct OctInfo:
     np.float64_t left_edge[3]
     np.float64_t dds[3]
+    np.int64_t ipos[3]
+    np.int32_t level
 
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
@@ -49,6 +51,16 @@
     OctAllocationContainer *next
     Oct *my_octs
 
+cdef struct OctList
+
+cdef struct OctList:
+    OctList *next
+    Oct *o
+
+cdef OctList *OctList_append(OctList *list, Oct *o)
+cdef int OctList_count(OctList *list)
+cdef void OctList_delete(OctList *list)
+
 cdef class OctreeContainer:
     cdef OctAllocationContainer *cont
     cdef OctAllocationContainer **domains
@@ -60,7 +72,7 @@
     cdef public int max_domain
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
-    cdef void neighbors(self, Oct *, Oct **)
+    cdef int neighbors(self, OctInfo *oinfo, Oct **neighbors)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.

diff -r b95e8f2a3c0e10f8b10c60b606886c54523a9df9 -r 7326a8d3469f1377e7604c5454a65876cd130f26 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -190,18 +190,22 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
+                  ):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3]
+        cdef int ind[3], level
+        cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
+        cdef int i
         cur = next = NULL
-        cdef int i
+        level = 0
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+            ipos[i] = ind[i]
         self.get_root(ind, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
@@ -216,6 +220,9 @@
                     cp[i] += dds[i]/2.0
             if cur.children != NULL:
                 next = cur.children[cind(ind[0],ind[1],ind[2])]
+                for i in range(3):
+                    ipos[i] = (ipos[i] << 1) + ind[i]
+                level += 1
             else:
                 next = NULL
         if oinfo == NULL: return cur
@@ -230,6 +237,8 @@
             # oct width, thus making it already the cell width
             oinfo.dds[i] = dds[i] # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
+            oinfo.ipos[i] = ipos[i]
+            oinfo.level = level
         return cur
 
     def domain_identify(self, SelectorObject selector):
@@ -249,99 +258,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
-        #Get 3x3x3 neighbors, although the 1,1,1 oct is the
-        #central one. 
-        #Return an array of Octs
-        cdef np.int64_t curopos[3]
-        cdef np.int64_t curnpos[3]
-        cdef np.int64_t npos[3]
-        cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
-        cdef np.float64_t dds[3], cp[3], pp[3]
+    cdef int neighbors(self, OctInfo *oinfo, Oct** neighbors):
         cdef Oct* candidate
-        for i in range(27): neighbors[i] = NULL
         nn = 0
-        raise RuntimeError
-        #for ni in range(3):
-        #    for nj in range(3):
-        #        for nk in range(3):
-        #            if ni == nj == nk == 1:
-        #                neighbors[nn] = o
-        #                nn += 1
-        #                continue
-        #            npos[0] = o.pos[0] + (ni - 1)
-        #            npos[1] = o.pos[1] + (nj - 1)
-        #            npos[2] = o.pos[2] + (nk - 1)
-        #            for i in range(3):
-        #                # Periodicity
-        #                if npos[i] == -1:
-        #                    npos[i] = (self.nn[i]  << o.level) - 1
-        #                elif npos[i] == (self.nn[i] << o.level):
-        #                    npos[i] = 0
-        #                curopos[i] = o.pos[i]
-        #                curnpos[i] = npos[i] 
-        #            # Now we have our neighbor position and a safe place to
-        #            # keep it.  curnpos will be the root index of the neighbor
-        #            # at a given level, and npos will be constant.  curopos is
-        #            # the candidate root at a level.
-        #            candidate = o
-        #            while candidate != NULL:
-        #                if ((curopos[0] == curnpos[0]) and 
-        #                    (curopos[1] == curnpos[1]) and
-        #                    (curopos[2] == curnpos[2])):
-        #                    break
-        #                # This one doesn't meet it, so we pop up a level.
-        #                # First we update our positions, then we update our
-        #                # candidate.
-        #                for i in range(3):
-        #                    # We strip a digit off the right
-        #                    curopos[i] = (curopos[i] >> 1)
-        #                    curnpos[i] = (curnpos[i] >> 1)
-        #                # Now we update to the candidate's parent, which should
-        #                # have a matching position to curopos[]
-        #                # TODO: This has not survived the transition to
-        #                # mostly-stateless Octs!
-        #                raise RuntimeError
-        #                candidate = candidate.parent
-        #            if candidate == NULL:
-        #                # Worst case scenario
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> (o.level))
-        #                candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        #            # Now we have the common root, which may be NULL
-        #            while candidate.level < o.level:
-        #                dl = o.level - (candidate.level + 1)
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> dl) & 1
-        #                if candidate.children[cind(ind[0],ind[1],ind[2])] \
-        #                        == NULL:
-        #                    break
-        #                candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
-        #            neighbors[nn] = candidate
-        #            nn += 1
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def get_neighbor_boundaries(self, oppos):
-        cdef int i, ii
-        cdef np.float64_t ppos[3]
-        for i in range(3):
-            ppos[i] = oppos[i]
-        cdef Oct *main = self.get(ppos)
-        cdef Oct* neighbors[27]
-        self.neighbors(main, neighbors)
-        cdef np.ndarray[np.float64_t, ndim=2] bounds
-        cdef np.float64_t corner[3], size[3]
-        bounds = np.zeros((27,6), dtype="float64")
-        tnp = 0
-        raise RuntimeError
-        for i in range(27):
-            self.oct_bounds(neighbors[i], corner, size)
-            for ii in range(3):
-                bounds[i, ii] = corner[ii]
-                bounds[i, 3+ii] = size[ii]
-        return bounds
+        return 0
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -786,3 +706,26 @@
                             dest[local_filled + offset] = source[ox,oy,oz]
                             local_filled += 1
         return local_filled
+
+cdef OctList *OctList_append(OctList *list, Oct *o):
+    cdef OctList *this = list
+    while this.next != NULL:
+        this = this.next
+    this.next = <OctList*> malloc(sizeof(OctList))
+    this.next.o = o
+    return this.next
+
+cdef int OctList_count(OctList *list):
+    cdef OctList *this = list
+    cdef int i = 0 # Count the list
+    while this != NULL:
+        i += 1
+        this = this.next
+    return i
+
+cdef void OctList_delete(OctList *list):
+    cdef OctList *next, *this = list
+    while this != NULL:
+        next = this.next
+        free(this)
+        this = next

diff -r b95e8f2a3c0e10f8b10c60b606886c54523a9df9 -r 7326a8d3469f1377e7604c5454a65876cd130f26 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 

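The OctList added here is a plain singly linked list over Oct pointers. A Python mirror of its operations shows the intended semantics; note that this first draft of OctList_append assumes a non-NULL head and leaves the new node's next pointer uninitialized, both of which the following changeset corrects. (OctList_delete needs no analogue below; Python's garbage collector frees the chain.)

class OctList:
    def __init__(self, o):
        self.o = o         # payload: an Oct
        self.next = None   # following node, or None at the tail

def octlist_append(head, o):
    # Walk to the tail and attach a new node; return the (possibly new) head.
    node = OctList(o)
    if head is None:
        return node
    tail = head
    while tail.next is not None:
        tail = tail.next
    tail.next = node
    return head

def octlist_count(head):
    n = 0
    while head is not None:
        n += 1
        head = head.next
    return n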

https://bitbucket.org/yt_analysis/yt/commits/f13ec690598b/
Changeset:   f13ec690598b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-29 01:36:17
Summary:     Implement first draft of neighbor-finding.
Affected #:  1 file

diff -r 7326a8d3469f1377e7604c5454a65876cd130f26 -r f13ec690598b6e11068438e7ae099ae8caf6b15c yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -261,7 +261,50 @@
     cdef int neighbors(self, OctInfo *oinfo, Oct** neighbors):
         cdef Oct* candidate
         nn = 0
-        return 0
+        # We are going to do a brute-force search here.
+        # This is not the most efficient -- in fact, it's relatively bad.  But
+        # we will attempt to improve it in a future iteration, where we will
+        # grow a stack of parent Octs.
+        # Note that in the first iteration, we will just find the up-to-27
+        # neighbors, including the main oct.
+        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef OctList *olist, *my_list
+        my_list = olist = NULL
+        cdef Oct *cand
+        cdef np.int64_t npos[3]
+        for i in range(3):
+            npos[0] = oinfo.ipos[0] + (1 - i)
+            for j in range(3):
+                nj = 1 - j
+                npos[1] = oinfo.ipos[1] + (1 - j)
+                for k in range(3):
+                    nk = 1 - k
+                    npos[2] = oinfo.ipos[2] + (1 - k)
+                    # Now we have our npos, which we just need to find.
+                    cand = NULL
+                    for level in range(oinfo.level + 1):
+                        for n in range(3):
+                            ind[n] = ((npos[n] >> (oinfo.level - level)) & 1)
+                        if level == 0:
+                            self.get_root(ind, &cand)
+                            if cand == NULL: break
+                            continue
+                        if cand.children == NULL: break
+                        ii = cind(ind[0],ind[1],ind[2])
+                        if cand.children[ii] == NULL: break
+                        cand = cand.children[ii]
+                    if cand != NULL:
+                        nfound += 1
+                        olist = OctList_append(olist, cand)
+                        if my_list == NULL: my_list = olist
+        olist = my_list
+        cdef int noct = OctList_count(olist)
+        neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+        for i in range(noct):
+            neighbors[i] = olist.o
+            olist = olist.next
+        OctList_delete(my_list)
+        return noct
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -707,24 +750,31 @@
                             local_filled += 1
         return local_filled
 
-cdef OctList *OctList_append(OctList *list, Oct *o):
-    cdef OctList *this = list
+cdef OctList *OctList_append(OctList *olist, Oct *o):
+    cdef OctList *this = olist
+    if olist == NULL:
+        this = <OctList *> malloc(sizeof(OctList))
+        this.next = NULL
+        this.o = o
+        return this
     while this.next != NULL:
         this = this.next
     this.next = <OctList*> malloc(sizeof(OctList))
-    this.next.o = o
-    return this.next
+    this = this.next
+    this.o = o
+    this.next = NULL
+    return this
 
-cdef int OctList_count(OctList *list):
-    cdef OctList *this = list
+cdef int OctList_count(OctList *olist):
+    cdef OctList *this = olist
     cdef int i = 0 # Count the list
     while this != NULL:
         i += 1
         this = this.next
     return i
 
-cdef void OctList_delete(OctList *list):
-    cdef OctList *next, *this = list
+cdef void OctList_delete(OctList *olist):
+    cdef OctList *next, *this = olist
     while this != NULL:
         next = this.next
         free(this)

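The search above leans on the integer position that get() now records: each level of descent appends one bit per axis to ipos, so a neighbor can be found again by walking down from the root and peeling those bits off. A rough Python rendering of that descent, with hypothetical containers standing in for the C structs:

def find_oct(root_mesh, ipos, level):
    # Root index: the bits of ipos above the `level` refinement bits.
    ind = tuple(ipos[n] >> level for n in range(3))
    cand = root_mesh.get(ind)     # None when ind falls off the root mesh
    for l in range(1, level + 1):
        if cand is None or cand.children is None:
            break                 # the neighbor is coarser than we are
        # Child selection at this level: one bit per axis.
        ind = tuple((ipos[n] >> (level - l)) & 1 for n in range(3))
        cand = cand.children[ind]
    return cand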

https://bitbucket.org/yt_analysis/yt/commits/02aa58342c33/
Changeset:   02aa58342c33
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-29 23:44:00
Summary:     Finishing neighbors() implementation.
Affected #:  1 file

diff -r f13ec690598b6e11068438e7ae099ae8caf6b15c -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -184,8 +184,13 @@
         return 0
 
     cdef int get_root(self, int ind[3], Oct **o):
+        cdef int i
+        for i in range(3):
+            if ind[i] < 0 or ind[i] >= self.nn[i]:
+                o[0] = NULL
+                return 1
         o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        return 1
+        return 0
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -200,15 +205,18 @@
         cdef Oct *cur, *next
         cdef int i
         cur = next = NULL
-        level = 0
+        level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
-            ipos[i] = ind[i]
+            ipos[i] = 0
         self.get_root(ind, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
+            level += 1
+            for i in range(3):
+                ipos[i] = (ipos[i] << 1) + ind[i]
             cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
@@ -220,9 +228,6 @@
                     cp[i] += dds[i]/2.0
             if cur.children != NULL:
                 next = cur.children[cind(ind[0],ind[1],ind[2])]
-                for i in range(3):
-                    ipos[i] = (ipos[i] << 1) + ind[i]
-                level += 1
             else:
                 next = NULL
         if oinfo == NULL: return cur
@@ -258,7 +263,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int neighbors(self, OctInfo *oinfo, Oct** neighbors):
+    cdef int neighbors(self, OctInfo *oi, Oct** neighbors):
         cdef Oct* candidate
         nn = 0
         # We are going to do a brute-force search here.
@@ -271,25 +276,38 @@
         cdef OctList *olist, *my_list
         my_list = olist = NULL
         cdef Oct *cand
-        cdef np.int64_t npos[3]
+        cdef np.int64_t npos[3], ndim[3]
+        # Now we get our boundaries for this level, so that we can wrap around
+        # if need be.
         for i in range(3):
-            npos[0] = oinfo.ipos[0] + (1 - i)
+            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/oi.dds[i])
+        for i in range(3):
+            npos[0] = (oi.ipos[0] + (1 - i))
+            if npos[0] < 0: npos[0] += ndim[0]
+            if npos[0] >= ndim[0]: npos[0] -= ndim[0]
             for j in range(3):
                 nj = 1 - j
-                npos[1] = oinfo.ipos[1] + (1 - j)
+                npos[1] = (oi.ipos[1] + (1 - j))
+                if npos[1] < 0: npos[1] += ndim[1]
+                if npos[1] >= ndim[1]: npos[1] -= ndim[1]
                 for k in range(3):
                     nk = 1 - k
-                    npos[2] = oinfo.ipos[2] + (1 - k)
+                    npos[2] = (oi.ipos[2] + (1 - k))
+                    if npos[2] < 0: npos[2] += ndim[2]
+                    if npos[2] >= ndim[2]: npos[2] -= ndim[2]
                     # Now we have our npos, which we just need to find.
+                    # Level 0 gets bootstrapped
+                    for n in range(3):
+                        ind[n] = ((npos[n] >> (oi.level + 1)) & 1)
                     cand = NULL
-                    for level in range(oinfo.level + 1):
+                    self.get_root(ind, &cand)
+                    # We should not get a NULL if we handle periodicity
+                    # correctly, but we might.
+                    if cand == NULL: continue
+                    for level in range(1, oi.level+1):
+                        if cand.children == NULL: break
                         for n in range(3):
-                            ind[n] = ((npos[n] >> (oinfo.level - level)) & 1)
-                        if level == 0:
-                            self.get_root(ind, &cand)
-                            if cand == NULL: break
-                            continue
-                        if cand.children == NULL: break
+                            ind[n] = (npos[n] >> (oi.level - (level + 1))) & 1
                         ii = cind(ind[0],ind[1],ind[2])
                         if cand.children[ii] == NULL: break
                         cand = cand.children[ii]

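Periodicity is handled by folding the neighbor's integer position back into the index range of the current level before descending, exactly as the three pairs of if-tests above do. In isolation:

def wrap_index(npos, ndim):
    # ndim: number of cells along this axis at the oct's level.
    if npos < 0:
        npos += ndim
    elif npos >= ndim:
        npos -= ndim
    return npos

assert wrap_index(-1, 8) == 7   # stepping off the left edge wraps right
assert wrap_index(8, 8) == 0    # stepping off the right edge wraps left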

https://bitbucket.org/yt_analysis/yt/commits/0e78005bde3d/
Changeset:   0e78005bde3d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 02:38:50
Summary:     First, untested draft of smoothing support.
Affected #:  5 files

diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -5,7 +5,7 @@
 Affiliation: UC Santa Cruz
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -32,7 +32,7 @@
 from libc.math cimport sqrt
 
 from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 
 cdef extern from "alloca.h":
     void *alloca(int)
@@ -62,8 +62,7 @@
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
-    cdef public int bad_indices
-    cdef int update_values
+    cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
                       np.float64_t ppos[3], np.float64_t *fields,

diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -54,7 +54,6 @@
                      fields = None, int domain_id = -1,
                      int domain_offset = 0):
         cdef int nf, i, j
-        self.bad_indices = 0
         if fields is None:
             fields = []
         nf = len(fields)

diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_smooth.pxd
--- /dev/null
+++ b/yt/geometry/particle_smooth.pxd
@@ -0,0 +1,49 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .particle_deposit cimport sph_kernel, gind
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef class ParticleSmoothOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef public object nvals
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.float64_t *ppos,
+                      np.float64_t **fields, np.int64_t nneighbors,
+                      np.int64_t *nind, np.int64_t *doffs,
+                      np.int64_t *pinds, np.int64_t *pcounts,
+                      np.int64_t offset)

diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/particle_smooth.pyx
--- /dev/null
+++ b/yt/geometry/particle_smooth.pyx
@@ -0,0 +1,164 @@
+"""
+Particle smoothing in cells
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, realloc
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, OctInfo
+
+cdef class ParticleSmoothOperation:
+    def __init__(self, nvals):
+        # This is the set of cells, in grids, blocks or octs, we are handling.
+        self.nvals = nvals 
+
+    def initialize(self, *args):
+        raise NotImplementedError
+
+    def finalize(self, *args):
+        raise NotImplementedError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_octree(self, OctreeContainer octree,
+                     np.ndarray[np.int64_t, ndim=1] dom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0,
+                     int test_neighbors = 0):
+        # This will be a several-step operation.
+        #
+        # We first take all of our particles and assign them to Octs.  If they
+        # are not in an Oct, we will assume they are out of bounds.  Note that
+        # this means that if we have loaded neighbor particles for which an Oct
+        # does not exist, we are going to be discarding them -- so sparse
+        # octrees will need to ensure that neighbor octs *exist*.  Particles
+        # will be assigned in a new NumPy array.  Note that this incurs
+        # overhead, but reduces complexity as we will now be able to use
+        # argsort.
+        #
+        # After the particles have been assigned to Octs, we process each Oct
+        # individually.  We will do this by calling "get" for the *first*
+        # particle in each set of Octs in the sorted list.  After this, we get
+        # neighbors for each Oct.
+        #
+        # Now, with the set of neighbors (and thus their indices) we allocate
+        # an array of particles and their fields, fill these in, and call our
+        # process function.
+        #
+        # This is not terribly efficient -- for starters, the neighbor function
+        # is not the most efficient yet.  We will also need to handle some
+        # mechanism of an expandable array for holding pointers to Octs, so
+        # that we can deal with >27 neighbors.  As I write this comment,
+        # neighbors() only returns 27 neighbors.
+        cdef int nf, i, j, dims[3]
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos
+        cdef int nsize = 0
+        cdef np.int64_t *nind = NULL
+        cdef OctInfo oi
+        cdef Oct *oct, **neighbors = NULL
+        cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
+        cdef np.int64_t *doffs, *pinds, *pcounts
+        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        dims[0] = dims[1] = dims[2] = 2
+        numpart = positions.shape[0]
+        pcount = np.zeros_like(dom_ind)
+        doff = np.zeros_like(dom_ind) - 1
+        moff = octree.get_domain_offset(domain_id + domain_offset)
+        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        if fields is None:
+            fields = []
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = octree.get(pos)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            # Note that this has to be our local index, not our in-file index.
+            # This is the particle count, which we'll use once we have sorted
+            # the particles to calculate the offsets into each oct's particles.
+            pcount[oct.domain_ind - moff] += 1
+            pdoms[i] = oct.domain_ind - moff # We store the *actual* offset.
+        # Now we have oct assignments.  Let's sort them.
+        # Note that what we will be providing to our processing functions will
+        # actually be indirectly-sorted fields.  This preserves memory at the
+        # expense of additional pointer lookups.
+        pind = np.argsort(pdoms)
+        # This now gives us the indices to the particles for each domain.
+        for i in range(positions.shape[0]):
+            # This is the domain_ind (minus moff) for this particle
+            offset = pdoms[pind[i]] 
+            if doff[offset] < 0:
+                doff[offset] = i
+        # Now doff is full of offsets to the first entry in the pind that
+        # refers to that oct's particles.
+        ppos = <np.float64_t *> positions.data
+        doffs = <np.int64_t*> doff.data
+        pinds = <np.int64_t*> pind.data
+        pcounts = <np.int64_t*> pcount.data
+        for i in range(doff.shape[0]):
+            for j in range(3):
+                pos[j] = positions[pind[doff[i]], j]
+            oct = octree.get(pos, &oi)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            offset = dom_ind[oct.domain_ind - moff] * 8
+            nneighbors = octree.neighbors(&oi, neighbors)
+            # Now we have all our neighbors.  And, we should be set for what
+            # else we need to do.
+            if nneighbors > nsize:
+                nind = <np.int64_t *> realloc(nind, nneighbors)
+                nsize = nneighbors
+            for j in range(nneighbors):
+                nind[j] = neighbors[j].domain_ind - moff
+            self.process(dims, oi.left_edge, oi.dds,
+                         ppos, field_pointers, nneighbors, nind, doffs,
+                         pinds, pcounts, offset)
+        if nind != NULL:
+            free(nind)
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_grid(self, gobj,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None):
+        raise NotImplementedError
+
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.float64_t *ppos,
+                      np.float64_t **fields, np.int64_t nneighbors,
+                      np.int64_t *nind, np.int64_t *doffs,
+                      np.int64_t *pinds, np.int64_t *pcounts,
+                      np.int64_t offset):
+        raise NotImplementedError

diff -r 02aa58342c331aab0fbbfa83acb4a0154e0e23a3 -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -43,6 +43,15 @@
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd",
                          "yt/geometry/particle_deposit.pxd"])
+    config.add_extension("particle_smooth", 
+                ["yt/geometry/particle_smooth.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd",
+                         "yt/geometry/particle_deposit.pxd",
+                         "yt/geometry/particle_smooth.pxd"])
     config.add_extension("fake_octree", 
                 ["yt/geometry/fake_octree.pyx"],
                 include_dirs=["yt/utilities/lib/"],

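The long comment in process_octree describes the bookkeeping pass: count particles per oct, argsort their oct assignments so each oct's particles become contiguous, then record the first sorted index per oct. The same pass in plain NumPy, with illustrative names only:

import numpy as np

def bucket_particles(oct_ids, n_octs):
    # oct_ids[i]: index of the oct particle i landed in, or -1 if out of bounds.
    pcount = np.bincount(oct_ids[oct_ids >= 0], minlength=n_octs)
    pind = np.argsort(oct_ids)               # indirect sort, as in the diff
    doff = np.full(n_octs, -1, dtype="int64")
    for i, oid in enumerate(oct_ids[pind]):
        if oid >= 0 and doff[oid] < 0:
            doff[oid] = i                    # first sorted index for this oct
    return pind, doff, pcount

# Particles belonging to oct o are then pind[doff[o] : doff[o] + pcount[o]].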

https://bitbucket.org/yt_analysis/yt/commits/970ba2d35422/
Changeset:   970ba2d35422
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 02:46:08
Summary:     First "SimpleNeighborSmooth" object.

Next we need a heap-based priority queue for distance and neighbor
calculations.
Affected #:  2 files

diff -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -41,6 +41,7 @@
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
+    cdef int nfields
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.float64_t *ppos,
                       np.float64_t **fields, np.int64_t nneighbors,

diff -r 0e78005bde3d2e92c16bfc12377cd2c0dedc92b2 -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -34,9 +34,10 @@
     OctreeContainer, OctInfo
 
 cdef class ParticleSmoothOperation:
-    def __init__(self, nvals):
+    def __init__(self, nvals, nfields):
         # This is the set of cells, in grids, blocks or octs, we are handling.
         self.nvals = nvals 
+        self.nfields = nfields
 
     def initialize(self, *args):
         raise NotImplementedError
@@ -162,3 +163,33 @@
                       np.int64_t *pinds, np.int64_t *pcounts,
                       np.int64_t offset):
         raise NotImplementedError
+
+cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
+    cdef np.float64_t **fp
+    def initialize(self):
+        if self.nvals < 2:
+            # We need at least two fields, the smoothing length and the 
+            # field to smooth, to operate.
+            raise RuntimeError
+        self.vals = [np.zeros(self.nvals, dtype="float64")
+                     for i in range(self.nfields)]
+        self.fp = <np.float64_t **> malloc(
+            sizeof(np.float64_t *) * self.nfields)
+
+    def finalize(self):
+        free(self.fp)
+        return self.vals
+
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.float64_t *ppos,
+                      np.float64_t **fields, np.int64_t nneighbors,
+                      np.int64_t *nind, np.int64_t *doffs,
+                      np.int64_t *pinds, np.int64_t *pcounts,
+                      np.int64_t offset):
+        # Note that we assume that fields[0] == smoothing length in the native
+        # units supplied.  We can now iterate over every cell in the block and
+        # every particle to find the nearest.  We will use a priority heap.
+        raise NotImplementedError
+
+simple_neighbor_smooth = SimpleNeighborSmooth
+

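The commit message calls for a heap-based priority queue. One standard construction, sketched here with Python's heapq rather than taken from yt, keeps the maxn closest particles in a max-heap keyed on negative squared distance, so the root is always the farthest particle currently kept:

import heapq

def keep_nearest(heap, maxn, pn, r2):
    # heap entries are (-r2, pn); heap[0] is the farthest kept so far.
    if len(heap) < maxn:
        heapq.heappush(heap, (-r2, pn))
    elif r2 < -heap[0][0]:
        heapq.heapreplace(heap, (-r2, pn))   # evict the current farthest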

https://bitbucket.org/yt_analysis/yt/commits/916c066ad226/
Changeset:   916c066ad226
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 03:36:09
Summary:     Simplifying API a bit and adding neighbor finding.
Affected #:  2 files

diff -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 -r 916c066ad2267c40e3a222adc4b0bbc21965a709 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -38,13 +38,52 @@
 cdef extern from "alloca.h":
     void *alloca(int)
 
+cdef struct NeighborList
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef inline np.float64_t r2dist(np.float64_t ppos[3],
+                                np.float64_t cpos[3],
+                                np.float64_t DW[3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if (DR > DW[i]/2.0):
+            DR -= DW[i]/2.0
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]/2.0
+        r2 += DR * DR
+    return r2
+
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
+    cdef np.float64_t DW[3]
     cdef int nfields
-    cdef void process(self, int dim[3], np.float64_t left_edge[3],
-                      np.float64_t dds[3], np.float64_t *ppos,
-                      np.float64_t **fields, np.int64_t nneighbors,
-                      np.int64_t *nind, np.int64_t *doffs,
-                      np.int64_t *pinds, np.int64_t *pcounts,
-                      np.int64_t offset)
+    cdef int maxn
+    cdef int curn
+    # Note that we are preallocating here, so this is *not* threadsafe.
+    cdef NeighborList *neighbors
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset)
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3])
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3])
+

diff -r 970ba2d35422d407e947b7b2a61fc88c28f98f83 -r 916c066ad2267c40e3a222adc4b0bbc21965a709 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -34,10 +34,15 @@
     OctreeContainer, OctInfo
 
 cdef class ParticleSmoothOperation:
-    def __init__(self, nvals, nfields):
+    def __init__(self, nvals, nfields, max_neighbors):
         # This is the set of cells, in grids, blocks or octs, we are handling.
+        cdef int i
         self.nvals = nvals 
         self.nfields = nfields
+        self.maxn = max_neighbors
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
 
     def initialize(self, *args):
         raise NotImplementedError
@@ -94,12 +99,15 @@
         doff = np.zeros_like(dom_ind) - 1
         moff = octree.get_domain_offset(domain_id + domain_offset)
         pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        nf = len(fields)
         if fields is None:
             fields = []
         field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
         for i in range(nf):
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
+        for i in range(3):
+            self.DW[i] = (octree.DRE[i] - octree.DLE[i])
         for i in range(positions.shape[0]):
             for j in range(3):
                 pos[j] = positions[i, j]
@@ -143,7 +151,7 @@
                 nsize = nneighbors
             for j in range(nneighbors):
                 nind[j] = neighbors[j].domain_ind - moff
-            self.process(dims, oi.left_edge, oi.dds,
+            self.neighbor_process(dims, oi.left_edge, oi.dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset)
         if nind != NULL:
@@ -156,14 +164,103 @@
                      fields = None):
         raise NotImplementedError
 
-    cdef void process(self, int dim[3], np.float64_t left_edge[3],
-                      np.float64_t dds[3], np.float64_t *ppos,
-                      np.float64_t **fields, np.int64_t nneighbors,
-                      np.int64_t *nind, np.int64_t *doffs,
-                      np.int64_t *pinds, np.int64_t *pcounts,
-                      np.int64_t offset):
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3]):
         raise NotImplementedError
 
+    cdef void neighbor_reset(self):
+        self.curn = 0
+        for i in range(self.maxn):
+            self.neighbors[i].pn = -1
+            self.neighbors[i].r2 = 1e300
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        cdef NeighborList *cur
+        cdef int i
+        # _c means candidate (what we're evaluating)
+        # _o means other (the item in the list)
+        cdef np.float64_t r2_c, r2_o
+        cdef np.int64_t pn_c, pn_o
+        if self.curn < self.maxn:
+            cur = &self.neighbors[self.curn]
+            cur.pn = pn
+            cur.r2 = r2dist(ppos, cpos, self.DW)
+            self.curn += 1
+            return
+        # This will go (curn - 1) through 0.
+        r2_c = r2dist(ppos, cpos, self.DW)
+        pn_c = pn
+        for i in range((self.curn - 1), -1, -1):
+            # First we evaluate against i.  If our candidate radius is greater
+            # than the one we're inspecting, we quit early.
+            cur = &self.neighbors[i]
+            r2_o = cur.r2
+            pn_o = cur.pn
+            if r2_c >= r2_o:
+                break
+            # Now we know we need to swap them.  First we assign our candidate
+            # values to cur.
+            cur.r2 = r2_c
+            cur.pn = pn_c
+            if i + 1 >= self.maxn:
+                continue # No swapping
+            cur = &self.neighbors[i + 1]
+            cur.r2 = r2_o
+            cur.pn = pn_o
+        # At this point, we've evaluated all the particles and we should have a
+        # sorted set of values.  So, we're done.
+
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3]
+                            ):
+        # We are now given the number of neighbors, the indices into the
+        # domains for them, and the number of particles for each.
+        cdef int ni, i, j
+        cdef np.int64_t offset, pn, pc
+        cdef np.float64_t pos[3]
+        self.neighbor_reset()
+        for ni in range(nneighbors):
+            offset = doffs[nind[ni]]
+            pc = pcounts[nind[ni]]
+            for i in range(pc):
+                pn = pinds[offset + i]
+                for j in range(3):
+                    pos[j] = ppos[pn * 3 + j]
+                self.neighbor_eval(pn, pos, cpos)
+
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset):
+        # Note that we assume that fields[0] == smoothing length in the native
+        # units supplied.  We can now iterate over every cell in the block and
+        # every particle to find the nearest.  We will use a priority heap.
+        cdef int i, j, k
+        cdef np.float64_t cpos[3]
+        cpos[0] = left_edge[0] + 0.5*dds[0]
+        for i in range(dim[0]):
+            cpos[1] = left_edge[1] + 0.5*dds[1]
+            for j in range(dim[1]):
+                cpos[2] = left_edge[2] + 0.5*dds[2]
+                for k in range(dim[2]):
+                    self.neighbor_find(nneighbors, nind, doffs, pcounts,
+                        pinds, ppos, cpos)
+                    # Now we have all our neighbors in our neighbor list.
+                    self.process(offset, i, j, k, dim, cpos)
+                    cpos[2] += dds[2]
+                cpos[1] += dds[1]
+            cpos[0] += dds[0]
+
+
 cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
     cdef np.float64_t **fp
     def initialize(self):
@@ -180,15 +277,9 @@
         free(self.fp)
         return self.vals
 
-    cdef void process(self, int dim[3], np.float64_t left_edge[3],
-                      np.float64_t dds[3], np.float64_t *ppos,
-                      np.float64_t **fields, np.int64_t nneighbors,
-                      np.int64_t *nind, np.int64_t *doffs,
-                      np.int64_t *pinds, np.int64_t *pcounts,
-                      np.int64_t offset):
-        # Note that we assume that fields[0] == smoothing length in the native
-        # units supplied.  We can now iterate over every cell in the block and
-        # every particle to find the nearest.  We will use a priority heap.
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3]):
+        # We have our i, j, k for our cell 
         raise NotImplementedError
 
 simple_neighbor_smooth = SimpleNeighborSmooth

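Two pieces do the geometric work here: r2dist folds particle-to-cell displacements across the periodic box, and neighbor_eval keeps a fixed-size candidate list ordered by squared distance. The sketch below restates both in standalone Python; the distance uses the conventional minimum-image form, and none of the names are yt API.

import bisect
import numpy as np

def r2dist(ppos, cpos, DW):
    # Minimum-image squared distance in a periodic box of widths DW.
    dw = np.asarray(DW, dtype=float)
    dr = np.asarray(ppos, dtype=float) - np.asarray(cpos, dtype=float)
    dr -= dw * np.round(dr / dw)
    return float(np.dot(dr, dr))

def neighbor_eval(neighbors, maxn, pn, r2):
    # neighbors: list of (r2, pn) pairs, nearest first, at most maxn long.
    if len(neighbors) < maxn or r2 < neighbors[-1][0]:
        bisect.insort(neighbors, (r2, pn))
        del neighbors[maxn:]   # drop anything pushed past the cap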

https://bitbucket.org/yt_analysis/yt/commits/de63479c8fb3/
Changeset:   de63479c8fb3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 06:39:37
Summary:     Implement first pass at .smooth() for Octree.
Affected #:  5 files

diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,6 +36,7 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
@@ -124,6 +125,23 @@
         if vals is None: return
         return np.asfortranarray(vals)
 
+    def smooth(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+        if fields is None: fields = []
+        op = cls(nvals, len(fields), 64)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset)
+        vals = op.finalize()
+        if vals is None: return
+        return np.asfortranarray(vals)
+
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)

diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -72,7 +72,7 @@
     cdef public int max_domain
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
-    cdef int neighbors(self, OctInfo *oinfo, Oct **neighbors)
+    cdef int neighbors(self, OctInfo *oinfo, Oct ***neighbors)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.

diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -263,7 +263,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int neighbors(self, OctInfo *oi, Oct** neighbors):
+    cdef int neighbors(self, OctInfo *oi, Oct*** neighbors):
         cdef Oct* candidate
         nn = 0
         # We are going to do a brute-force search here.
@@ -317,9 +317,9 @@
                         if my_list == NULL: my_list = olist
         olist = my_list
         cdef int noct = OctList_count(olist)
-        neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+        neighbors[0] = <Oct **> malloc(sizeof(Oct*)*noct)
         for i in range(noct):
-            neighbors[i] = olist.o
+            neighbors[0][i] = olist.o
             olist = olist.next
         OctList_delete(my_list)
         return noct

diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -65,6 +65,10 @@
     cdef int nfields
     cdef int maxn
     cdef int curn
+    cdef np.int64_t *doffs
+    cdef np.int64_t *pinds
+    cdef np.int64_t *pcounts
+    cdef np.float64_t *ppos
     # Note that we are preallocating here, so this is *not* threadsafe.
     cdef NeighborList *neighbors
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],

diff -r 916c066ad2267c40e3a222adc4b0bbc21965a709 -r de63479c8fb3f514ce9fd127b584192390bd2a87 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -40,6 +40,7 @@
         self.nvals = nvals 
         self.nfields = nfields
         self.maxn = max_neighbors
+        print "CREATED", nvals, nfields, max_neighbors
         self.neighbors = <NeighborList *> malloc(
             sizeof(NeighborList) * self.maxn)
         self.neighbor_reset()
@@ -128,8 +129,7 @@
         for i in range(positions.shape[0]):
             # This is the domain_ind (minus moff) for this particle
             offset = pdoms[pind[i]] 
-            if doff[offset] < 0:
-                doff[offset] = i
+            if doff[offset] < 0: doff[offset] = i
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
         ppos = <np.float64_t *> positions.data
@@ -143,7 +143,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             offset = dom_ind[oct.domain_ind - moff] * 8
-            nneighbors = octree.neighbors(&oi, neighbors)
+            nneighbors = octree.neighbors(&oi, &neighbors)
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.
             if nneighbors > nsize:
@@ -154,6 +154,9 @@
             self.neighbor_process(dims, oi.left_edge, oi.dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset)
+            # This is allocated by the neighbors function, so we deallocate it.
+            free(neighbors)
+            neighbors = NULL
         if nind != NULL:
             free(nind)
         
@@ -263,6 +266,7 @@
 
 cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
     cdef np.float64_t **fp
+    cdef public object vals
     def initialize(self):
         if self.nvals < 2:
             # We need at least two fields, the smoothing length and the 
@@ -280,7 +284,7 @@
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3]):
         # We have our i, j, k for our cell 
-        raise NotImplementedError
+        #print "Offset", offset, i, j, k, self.curn
+        return
 
 simple_neighbor_smooth = SimpleNeighborSmooth
-


https://bitbucket.org/yt_analysis/yt/commits/187c82b93c73/
Changeset:   187c82b93c73
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 06:51:59
Summary:     If domain offset < 0, no particles.
Affected #:  1 file

diff -r de63479c8fb3f514ce9fd127b584192390bd2a87 -r 187c82b93c733aa697f94474667f345657d4cac9 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -51,6 +51,7 @@
     def finalize(self, *args):
         raise NotImplementedError
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def process_octree(self, OctreeContainer octree,
@@ -136,7 +137,10 @@
         doffs = <np.int64_t*> doff.data
         pinds = <np.int64_t*> pind.data
         pcounts = <np.int64_t*> pcount.data
+        cdef np.int64_t pn
         for i in range(doff.shape[0]):
+            # Nothing assigned.
+            if doff[i] < 0: continue
             for j in range(3):
                 pos[j] = positions[pind[doff[i]], j]
             oct = octree.get(pos, &oi)
@@ -160,6 +164,7 @@
         if nind != NULL:
             free(nind)
         
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def process_grid(self, gobj,
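
For reference, the doff array this guard tests comes from bucketing the
particles by oct: pcount counts particles per oct, pind is the argsort of the
per-particle oct assignments, and doff records the index into pind of each
oct's first particle, staying -1 for octs that received none. A rough numpy
sketch of that bookkeeping (bucket_particles is a hypothetical helper, not
yt API):

    import numpy as np

    def bucket_particles(pdoms, n_octs):
        # pdoms[i] is the local oct index of particle i (-1 if unassigned).
        pcount = np.bincount(pdoms[pdoms >= 0], minlength=n_octs)
        pind = np.argsort(pdoms)         # particle indices, grouped by oct
        doff = np.full(n_octs, -1, dtype=np.int64)
        for i in range(pind.shape[0]):
            o = pdoms[pind[i]]
            if o >= 0 and doff[o] < 0:   # first particle of this oct
                doff[o] = i
        return pcount, doff, pind

With this layout, doff[i] < 0 is exactly the "nothing assigned" case the new
continue skips.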


https://bitbucket.org/yt_analysis/yt/commits/781176f368f9/
Changeset:   781176f368f9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 17:22:59
Summary:     It makes more sense to return a neighbors** than to fill one in
place.  Also fix a segfault caused by the undeclared nsize.
Affected #:  3 files

diff -r 187c82b93c733aa697f94474667f345657d4cac9 -r 781176f368f9c2cdf164576e7aa345b7cc266b6e yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -72,7 +72,7 @@
     cdef public int max_domain
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
-    cdef int neighbors(self, OctInfo *oinfo, Oct ***neighbors)
+    cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.

diff -r 187c82b93c733aa697f94474667f345657d4cac9 -r 781176f368f9c2cdf164576e7aa345b7cc266b6e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -263,7 +263,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef int neighbors(self, OctInfo *oi, Oct*** neighbors):
+    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors):
         cdef Oct* candidate
         nn = 0
         # We are going to do a brute-force search here.
@@ -317,12 +317,14 @@
                         if my_list == NULL: my_list = olist
         olist = my_list
         cdef int noct = OctList_count(olist)
-        neighbors[0] = <Oct **> malloc(sizeof(Oct*)*noct)
+        cdef Oct **neighbors
+        neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
         for i in range(noct):
-            neighbors[0][i] = olist.o
+            neighbors[i] = olist.o
             olist = olist.next
         OctList_delete(my_list)
-        return noct
+        nneighbors[0] = noct
+        return neighbors
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -770,7 +772,7 @@
 
 cdef OctList *OctList_append(OctList *olist, Oct *o):
     cdef OctList *this = olist
-    if olist == NULL:
+    if this == NULL:
         this = <OctList *> malloc(sizeof(OctList))
         this.next = NULL
         this.o = o

diff -r 187c82b93c733aa697f94474667f345657d4cac9 -r 781176f368f9c2cdf164576e7aa345b7cc266b6e yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -138,6 +138,8 @@
         pinds = <np.int64_t*> pind.data
         pcounts = <np.int64_t*> pcount.data
         cdef np.int64_t pn
+        nsize = 27
+        nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
         for i in range(doff.shape[0]):
             # Nothing assigned.
             if doff[i] < 0: continue
@@ -147,20 +149,20 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             offset = dom_ind[oct.domain_ind - moff] * 8
-            nneighbors = octree.neighbors(&oi, &neighbors)
+            neighbors = octree.neighbors(&oi, &nneighbors)
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.
             if nneighbors > nsize:
-                nind = <np.int64_t *> realloc(nind, nneighbors)
+                nind = <np.int64_t *> realloc(
+                    nind, sizeof(np.int64_t)*nneighbors)
                 nsize = nneighbors
             for j in range(nneighbors):
                 nind[j] = neighbors[j].domain_ind - moff
+            free(neighbors)
             self.neighbor_process(dims, oi.left_edge, oi.dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset)
             # This is allocated by the neighbors function, so we deallocate it.
-            free(neighbors)
-            neighbors = NULL
         if nind != NULL:
             free(nind)
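
Two fixes land together here: neighbors() now allocates and returns its own
buffer, reporting the count through the nneighbors out-parameter, and the
realloc() call is sized in bytes; the earlier realloc(nind, nneighbors)
requested nneighbors bytes rather than nneighbors int64 slots. A hypothetical
pure-Python stand-in for the caller's grow-only scratch buffer:

    import numpy as np

    def ensure_capacity(nind, nsize, nneighbors):
        # Mirror of the fixed realloc: grow only when needed, counted in
        # elements (the C code multiplies by sizeof(np.int64_t) for bytes).
        if nneighbors > nsize:
            grown = np.empty(nneighbors, dtype=np.int64)
            grown[:nsize] = nind     # realloc preserves the old contents
            nind, nsize = grown, nneighbors
        return nind, nsize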
         


https://bitbucket.org/yt_analysis/yt/commits/eaa322474306/
Changeset:   eaa322474306
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-30 17:38:20
Summary:     Removed print statement.
Affected #:  1 file

diff -r 781176f368f9c2cdf164576e7aa345b7cc266b6e -r eaa32247430624efb2a3f32c99447c6d9f576117 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -40,7 +40,6 @@
         self.nvals = nvals 
         self.nfields = nfields
         self.maxn = max_neighbors
-        print "CREATED", nvals, nfields, max_neighbors
         self.neighbors = <NeighborList *> malloc(
             sizeof(NeighborList) * self.maxn)
         self.neighbor_reset()


https://bitbucket.org/yt_analysis/yt/commits/c2a86529276e/
Changeset:   c2a86529276e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-31 00:49:33
Summary:     Continuing simple neighbor smoothing implementation.

Currently gives more or less garbage results.  They look vaguely like data,
but not like the right data.
Affected #:  3 files

diff -r eaa32247430624efb2a3f32c99447c6d9f576117 -r c2a86529276e5f2accddb40027deb47da07a8dea yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -140,7 +140,11 @@
             self.domain_id, self._domain_offset)
         vals = op.finalize()
         if vals is None: return
-        return np.asfortranarray(vals)
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
 
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,

diff -r eaa32247430624efb2a3f32c99447c6d9f576117 -r c2a86529276e5f2accddb40027deb47da07a8dea yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -89,5 +89,4 @@
                             np.float64_t *ppos,
                             np.float64_t cpos[3])
     cdef void process(self, np.int64_t offset, int i, int j, int k,
-                      int dim[3], np.float64_t cpos[3])
-
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields)

diff -r eaa32247430624efb2a3f32c99447c6d9f576117 -r c2a86529276e5f2accddb40027deb47da07a8dea yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -174,7 +174,7 @@
         raise NotImplementedError
 
     cdef void process(self, np.int64_t offset, int i, int j, int k,
-                      int dim[3], np.float64_t cpos[3]):
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
         raise NotImplementedError
 
     cdef void neighbor_reset(self):
@@ -264,7 +264,7 @@
                     self.neighbor_find(nneighbors, nind, doffs, pcounts,
                         pinds, ppos, cpos)
                     # Now we have all our neighbors in our neighbor list.
-                    self.process(offset, i, j, k, dim, cpos)
+                    self.process(offset, i, j, k, dim, cpos, fields)
                     cpos[2] += dds[2]
                 cpos[1] += dds[1]
             cpos[0] += dds[0]
@@ -274,23 +274,42 @@
     cdef np.float64_t **fp
     cdef public object vals
     def initialize(self):
-        if self.nvals < 2:
+        cdef int i
+        if self.nfields < 4:
             # We need at least two fields, the smoothing length and the 
             # field to smooth, to operate.
             raise RuntimeError
-        self.vals = [np.zeros(self.nvals, dtype="float64")
-                     for i in range(self.nfields)]
+        cdef np.ndarray tarr
         self.fp = <np.float64_t **> malloc(
             sizeof(np.float64_t *) * self.nfields)
+        self.vals = []
+        for i in range(self.nfields):
+            tarr = np.zeros(self.nvals, dtype="float64", order="F")
+            self.vals.append(tarr)
+            self.fp[i] = <np.float64_t *> tarr.data
 
     def finalize(self):
         free(self.fp)
         return self.vals
 
     cdef void process(self, np.int64_t offset, int i, int j, int k,
-                      int dim[3], np.float64_t cpos[3]):
-        # We have our i, j, k for our cell 
-        #print "Offset", offset, i, j, k, self.curn
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef int n, fi
+        cdef np.float64_t weight, r2, val
+        cdef np.int64_t pn
+        for n in range(self.curn):
+            # No normalization for the moment.
+            # fields[0] is the smoothing length.
+            r2 = self.neighbors[n].r2
+            pn = self.neighbors[n].pn
+            # Smoothing kernel weight function
+            weight = sph_kernel(sqrt(r2) / fields[0][pn])
+            # Mass of the particle times the value divided by the Density
+            for fi in range(self.nfields - 3):
+                val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val
         return
 
 simple_neighbor_smooth = SimpleNeighborSmooth
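
The quantity process() is assembling is the standard SPH gather estimate,
f(x) ~ sum_j (m_j / rho_j) f_j W(|x - x_j| / h_j), with fields[0] the
smoothing length, fields[1] the mass, and fields[2] the density. A minimal
numpy sketch of the intended sum (the commit itself still assigns per
neighbor rather than accumulating, consistent with the "garbage results"
caveat in the summary; the kernel shape below is the usual unnormalized
cubic spline, and, as the comment above notes, no normalization is applied
yet):

    import numpy as np

    def sph_kernel(q):
        # Unnormalized cubic-spline shape; q = r / h, support q <= 1.
        w = np.zeros_like(q)
        inner = q <= 0.5
        w[inner] = 1.0 - 6.0 * q[inner]**2 + 6.0 * q[inner]**3
        outer = (q > 0.5) & (q <= 1.0)
        w[outer] = 2.0 * (1.0 - q[outer])**3
        return w

    def gather_at_cell(r2, h, m, rho, f):
        # r2: squared neighbor distances to the cell center; h, m, rho, f:
        # per-neighbor smoothing length, mass, density, and field value.
        w = sph_kernel(np.sqrt(r2) / h)
        return np.sum(m * f / rho * w)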


https://bitbucket.org/yt_analysis/yt/commits/af666312fecd/
Changeset:   af666312fecd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-31 00:58:20
Summary:     Add a quick check that we don't double-process an oct and its neighbors.
Affected #:  1 file

diff -r c2a86529276e5f2accddb40027deb47da07a8dea -r af666312fecdf5fee9752b3f8447cf64e282744c yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -84,7 +84,7 @@
         # mechanism of an expandable array for holding pointers to Octs, so
         # that we can deal with >27 neighbors.  As I write this comment,
         # neighbors() only returns 27 neighbors.
-        cdef int nf, i, j, dims[3]
+        cdef int nf, i, j, dims[3], n
         cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos
         cdef int nsize = 0
         cdef np.int64_t *nind = NULL
@@ -157,6 +157,10 @@
                 nsize = nneighbors
             for j in range(nneighbors):
                 nind[j] = neighbors[j].domain_ind - moff
+                for n in range(j):
+                    if nind[j] == nind[n]:
+                        nind[j] = -1
+                    break
             free(neighbors)
             self.neighbor_process(dims, oi.left_edge, oi.dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
@@ -236,6 +240,7 @@
         cdef np.float64_t pos[3]
         self.neighbor_reset()
         for ni in range(nneighbors):
+            if nind[ni] == -1: continue
             offset = doffs[nind[ni]]
             pc = pcounts[nind[ni]]
             for i in range(pc):
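
A pure-Python sketch of the duplicate guard: repeated oct indices get
flagged with -1 so that neighbor_find() can skip them (mark_duplicates is a
hypothetical name; note that the committed break sits in the inner loop
body, so as written each entry is only compared against the first one):

    def mark_duplicates(nind, n):
        # Flag repeated oct indices with -1; neighbor_find skips those.
        for j in range(n):
            for k in range(j):
                if nind[j] == nind[k]:
                    nind[j] = -1
                    break
        return nind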


https://bitbucket.org/yt_analysis/yt/commits/fedd682f5272/
Changeset:   fedd682f5272
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-31 03:34:31
Summary:     Adding smarter sorting, fixing neighbor finding.

This fixes the odd behavior seen at higher levels.  I verified it by
checking whether (when nneighbors == 27) the 13th item is the oct
itself.
Affected #:  3 files

diff -r af666312fecdf5fee9752b3f8447cf64e282744c -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -243,7 +243,7 @@
             oinfo.dds[i] = dds[i] # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
             oinfo.ipos[i] = ipos[i]
-            oinfo.level = level
+        oinfo.level = level
         return cur
 
     def domain_identify(self, SelectorObject selector):
@@ -280,25 +280,23 @@
         # Now we get our boundaries for this level, so that we can wrap around
         # if need be.
         for i in range(3):
-            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/oi.dds[i])
+            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/(2*oi.dds[i]))
         for i in range(3):
             npos[0] = (oi.ipos[0] + (1 - i))
             if npos[0] < 0: npos[0] += ndim[0]
             if npos[0] >= ndim[0]: npos[0] -= ndim[0]
             for j in range(3):
-                nj = 1 - j
                 npos[1] = (oi.ipos[1] + (1 - j))
                 if npos[1] < 0: npos[1] += ndim[1]
                 if npos[1] >= ndim[1]: npos[1] -= ndim[1]
                 for k in range(3):
-                    nk = 1 - k
                     npos[2] = (oi.ipos[2] + (1 - k))
                     if npos[2] < 0: npos[2] += ndim[2]
                     if npos[2] >= ndim[2]: npos[2] -= ndim[2]
                     # Now we have our npos, which we just need to find.
                     # Level 0 gets bootstrapped
                     for n in range(3):
-                        ind[n] = ((npos[n] >> (oi.level + 1)) & 1)
+                        ind[n] = ((npos[n] >> (oi.level)) & 1)
                     cand = NULL
                     self.get_root(ind, &cand)
                     # We should not get a NULL if we handle periodicity
@@ -307,7 +305,7 @@
                     for level in range(1, oi.level+1):
                         if cand.children == NULL: break
                         for n in range(3):
-                            ind[n] = (npos[n] >> (oi.level - (level + 1))) & 1
+                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
                         ii = cind(ind[0],ind[1],ind[2])
                         if cand.children[ii] == NULL: break
                         cand = cand.children[ii]

diff -r af666312fecdf5fee9752b3f8447cf64e282744c -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -27,7 +27,7 @@
 
 cimport numpy as np
 import numpy as np
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport malloc, free, qsort
 cimport cython
 from libc.math cimport sqrt
 

diff -r af666312fecdf5fee9752b3f8447cf64e282744c -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -33,6 +33,19 @@
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, OctInfo
 
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1, *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
 cdef class ParticleSmoothOperation:
     def __init__(self, nvals, nfields, max_neighbors):
         # This is the set of cells, in grids, blocks or octs, we are handling.
@@ -91,14 +104,18 @@
         cdef OctInfo oi
         cdef Oct *oct, **neighbors = NULL
         cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
-        cdef np.int64_t *doffs, *pinds, *pcounts
+        cdef np.int64_t *doffs, *pinds, *pcounts, poff
         cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
         dims[0] = dims[1] = dims[2] = 2
         numpart = positions.shape[0]
+        # pcount is the number of particles per oct.
         pcount = np.zeros_like(dom_ind)
+        # doff is the offset to a given oct in the sorted particles.
         doff = np.zeros_like(dom_ind) - 1
         moff = octree.get_domain_offset(domain_id + domain_offset)
+        # pdoms points particles at their octs.  So the value in this array, for
+        # a given index, is the local oct index.
         pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
         nf = len(fields)
         if fields is None:
@@ -118,17 +135,24 @@
             # Note that this has to be our local index, not our in-file index.
             # This is the particle count, which we'll use once we have sorted
             # the particles to calculate the offsets into each oct's particles.
-            pcount[oct.domain_ind - moff] += 1
-            pdoms[i] = oct.domain_ind - moff # We store the *actual* offset.
+            offset = oct.domain_ind - moff
+            pcount[offset] += 1
+            pdoms[i] = offset # We store the *actual* offset.
         # Now we have oct assignments.  Let's sort them.
         # Note that what we will be providing to our processing functions will
         # actually be indirectly-sorted fields.  This preserves memory at the
         # expense of additional pointer lookups.
         pind = np.argsort(pdoms)
+        # So what this means is that we now have all the oct-0 particle indices
+        # in order, then the oct-1, etc etc.
         # This now gives us the indices to the particles for each domain.
         for i in range(positions.shape[0]):
-            # This is the domain_ind (minus moff) for this particle
-            offset = pdoms[pind[i]] 
+            # This value, poff, is the index of the particle in the *unsorted*
+            # arrays.
+            poff = pind[i] 
+            offset = pdoms[poff] 
+            # If we have yet to assign the starting index to this oct, we do so
+            # now.
             if doff[offset] < 0: doff[offset] = i
         # Now doff is full of offsets to the first entry in the pind that
         # refers to that oct's particles.
@@ -136,14 +160,16 @@
         doffs = <np.int64_t*> doff.data
         pinds = <np.int64_t*> pind.data
         pcounts = <np.int64_t*> pcount.data
-        cdef np.int64_t pn
         nsize = 27
         nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
         for i in range(doff.shape[0]):
             # Nothing assigned.
             if doff[i] < 0: continue
+            # The first particle assigned to this oct should be the one we
+            # want.
+            poff = pind[doff[i]]
             for j in range(3):
-                pos[j] = positions[pind[doff[i]], j]
+                pos[j] = positions[poff, j]
             oct = octree.get(pos, &oi)
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
@@ -161,11 +187,11 @@
                     if nind[j] == nind[n]:
                         nind[j] = -1
                     break
+            # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
             self.neighbor_process(dims, oi.left_edge, oi.dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset)
-            # This is allocated by the neighbors function, so we deallocate it.
         if nind != NULL:
             free(nind)
         
@@ -195,18 +221,26 @@
         # _o means other (the item in the list)
         cdef np.float64_t r2_c, r2_o
         cdef np.int64_t pn_c, pn_o
+        # If we're less than the maximum number of neighbors, we simply append.
+        # After that, we will sort, and then only compare against the rightmost
+        # entries.
         if self.curn < self.maxn:
             cur = &self.neighbors[self.curn]
             cur.pn = pn
             cur.r2 = r2dist(ppos, cpos, self.DW)
             self.curn += 1
+            if self.curn == self.maxn:
+                # This time we sort it, so that future insertions will be able
+                # to be done in order.
+                qsort(self.neighbors, self.curn, sizeof(NeighborList), 
+                      Neighbor_compare)
             return
         # This will go (curn - 1) through 0.
         r2_c = r2dist(ppos, cpos, self.DW)
         pn_c = pn
         for i in range((self.curn - 1), -1, -1):
             # First we evaluate against i.  If our candidate radius is greater
-            # than the one we're inspecting, we quit early.
+            # than the one we're inspecting, we quit.
             cur = &self.neighbors[i]
             r2_o = cur.r2
             pn_o = cur.pn
@@ -314,7 +348,7 @@
             # Mass of the particle times the value divided by the Density
             for fi in range(self.nfields - 3):
                 val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
-                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
         return
 
 simple_neighbor_smooth = SimpleNeighborSmooth
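
The "smarter sorting" works as follows: candidates are appended until the
list holds maxn entries, a single qsort() puts them in ascending r2 order,
and every later candidate is either rejected outright (its r2 is at least
the current worst) or inserted in order while the worst entry is dropped. A
hypothetical pure-Python equivalent using a list of (r2, pn) tuples:

    import bisect

    def neighbor_eval(neigh, maxn, pn, r2):
        # neigh: list of (r2, pn), kept sorted ascending once it is full.
        if len(neigh) < maxn:
            neigh.append((r2, pn))
            if len(neigh) == maxn:
                neigh.sort()         # the one-time qsort() above
            return
        if r2 >= neigh[-1][0]:
            return                   # farther than the current worst: skip
        neigh.pop()                  # drop the worst entry
        bisect.insort(neigh, (r2, pn))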

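On the neighbor-finding side, the fix halves ndim: since oi.dds is the cell
width and an oct spans two cells per side, the number of octs per side at
this level is (DRE - DLE) / (2 * dds). The 27 candidate positions are then
wrapped periodically, which Python's modulo expresses directly (a sketch
with a hypothetical helper; the (1 - i) offsets in the loop above run over
+1, 0, -1):

    def wrapped_neighbor_positions(ipos, ndim):
        # Yield the 27 periodic neighbor oct positions, the oct included.
        for di in (1, 0, -1):
            for dj in (1, 0, -1):
                for dk in (1, 0, -1):
                    yield ((ipos[0] + di) % ndim[0],
                           (ipos[1] + dj) % ndim[1],
                           (ipos[2] + dk) % ndim[2])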

https://bitbucket.org/yt_analysis/yt/commits/be5daa03f219/
Changeset:   be5daa03f219
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-13 16:08:26
Summary:     If passed None, assume we have no periodicity.
Affected #:  1 file

diff -r fedd682f5272d3d6813b5ef8152a5a7a13b6a68d -r be5daa03f219683784d5e415beb3b626ba18fb59 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -121,15 +121,19 @@
         self.min_level = getattr(dobj, "min_level", 0)
         self.max_level = getattr(dobj, "max_level", 99)
         self.overlap_cells = 0
+        if dobj is None:
+            for i in range(3):
+                self.periodicity[i] = False
+                self.domain_width[i] = 0.0
+        else:
+            for i in range(3) :
+                if dobj.pf.periodicity[i] and dobj.pf.domain_left_edge[i] != 0.0 :
+                    print "SelectorObject periodicity assumes left_edge == 0"
+                    raise RuntimeError
 
-        for i in range(3) :
-            if dobj.pf.periodicity[i] and dobj.pf.domain_left_edge[i] != 0.0 :
-                print "SelectorObject periodicity assumes left_edge == 0"
-                raise RuntimeError
-
-            self.domain_width[i] = dobj.pf.domain_right_edge[i] - \
-                                   dobj.pf.domain_left_edge[i]
-            self.periodicity[i] = dobj.pf.periodicity[i]
+                self.domain_width[i] = dobj.pf.domain_right_edge[i] - \
+                                       dobj.pf.domain_left_edge[i]
+                self.periodicity[i] = dobj.pf.periodicity[i]
 
     @cython.boundscheck(False)
     @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/d02eca0d4b95/
Changeset:   d02eca0d4b95
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 21:30:15
Summary:     Removing unused functions and adding TODO notes.
Affected #:  4 files

diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -332,6 +332,7 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
+        # TODO: This *8 needs to be made generic.
         coords = np.empty((num_octs * 8, 3), dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
@@ -349,6 +350,7 @@
             num_octs = selector.count_octs(self, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
+        # TODO: This *8 needs to be made generic.
         res = np.empty(num_octs * 8, dtype="int64")
         cdef OctVisitorData data
         data.array = <void *> res.data
@@ -365,6 +367,7 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
+        # TODO: This *8 needs to be made generic.
         fwidth = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> fwidth.data
@@ -386,6 +389,7 @@
             num_octs = selector.count_octs(self, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
+        # TODO: This *8 needs to be made generic.
         coords = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
         data.array = <void *> coords.data
@@ -438,8 +442,10 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
+        # TODO: This *8 needs to be made generic.
         if (data.global_index + 1) * 8 * data.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
+            # TODO: This *8 needs to be made generic.
             print (data.global_index + 1) * 8 * data.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
@@ -542,6 +548,7 @@
         if parent.children != NULL:
             next = parent.children[cind(ind[0],ind[1],ind[2])]
         else:
+            # TODO: This *8 does NOT need to be made generic.
             parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 parent.children[i] = NULL
@@ -608,6 +615,7 @@
         data.index = 0
         data.domain = 1
         self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
+        # TODO: This *8 needs to be made generic.
         assert ((data.global_index+1)*8 == data.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:

diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -49,8 +49,6 @@
 
 cdef oct_visitor_function count_total_octs
 cdef oct_visitor_function count_total_cells
-cdef oct_visitor_function mark_octs
-cdef oct_visitor_function mask_octs
 cdef oct_visitor_function index_octs
 cdef oct_visitor_function icoords_octs
 cdef oct_visitor_function ires_octs

diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,6 +38,7 @@
     if selected == 0: return
     cdef int i
     # There are this many records between "octs"
+    # TODO: This 8 needs to be made into a generic value.
     cdef np.int64_t index = (data.global_index * 8)*data.dims
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += oind(data)*data.dims
@@ -50,6 +51,7 @@
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
     cdef int i
+    # TODO: This 8 needs to be made into a generic value.
     cdef np.int64_t index = (data.global_index * 8)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
     index += oind(data)*data.dims
@@ -68,25 +70,6 @@
     # Number of *cells* visited and selected.
     data.index += selected
 
-cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    # We mark them even if they are not selected
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    if data.last != o.domain_ind:
-        data.last = o.domain_ind
-        data.index += 1
-    cdef np.int64_t index = data.index * 8
-    index += oind(data)
-    arr[index] = 1
-
-cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    if selected == 0: return
-    cdef int i
-    cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * 8
-    index += oind(data)
-    arr[index] = 1
-
 cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that we provide an index even if the cell is not selected.
     cdef int i
@@ -101,6 +84,8 @@
     if selected == 0: return
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
+    # TODO: data.ind and the number of bits we shift need to be made general
+    # for octrees with > 8 zones.
     for i in range(3):
         coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
     data.index += 1
@@ -120,6 +105,8 @@
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
+    # TODO: data.ind and the number of bits we shift in dx and in data.pos need
+    # to be made general for octrees with > 8 zones.
     dx = 1.0 / (2 << data.level)
     for i in range(3):
         c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i]) 

diff -r be5daa03f219683784d5e415beb3b626ba18fb59 -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -231,6 +231,10 @@
                         if root.children != NULL:
                             ch = root.children[cind(i,j,k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
+                            # Note that data.pos is always going to be the
+                            # position of the Oct -- it is *not* always going
+                            # to be the same as the position of the cell under
+                            # investigation.
                             data.pos[0] = (data.pos[0] << 1) + i
                             data.pos[1] = (data.pos[1] << 1) + j
                             data.pos[2] = (data.pos[2] << 1) + k
@@ -243,11 +247,15 @@
                             data.pos[2] = (data.pos[2] >> 1)
                             data.level -= 1
                         elif this_level == 1:
+                            # TODO: Refactor to enable multiple cells
+                            #       This code should be able to iterate over
+                            #       cells, even though the rest cannot.
                             selected = self.select_cell(spos, sdds)
                             if ch != NULL:
                                 selected *= self.overlap_cells
                             data.global_index += increment
                             increment = 0
+                            # data.ind refers to the cell, not to the oct.
                             data.ind[0] = i
                             data.ind[1] = j
                             data.ind[2] = k
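
The recurring *8 these TODOs flag is the cells-per-oct count, hard-coded for
the 2x2x2 case; an octree with nz zones per oct side has nz**3 cells. A
one-line sketch of the generalized index math (cell_index is a hypothetical
helper):

    def cell_index(global_index, oind, dims, nz=2):
        # Generic form of "(global_index * 8) * dims + oind * dims":
        # nz**3 cells per oct, which is 8 when nz == 2.
        return (global_index * nz**3 + oind) * dims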


https://bitbucket.org/yt_analysis/yt/commits/0849d317e494/
Changeset:   0849d317e494
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 21:34:50
Summary:     Merging from main yt-3.0 branch into 'smoothing' bookmark.
Affected #:  82 files

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,16 @@
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
 sam.skillman at gmail.com = samskillman at gmail.com
 casey at thestarkeffect.com = caseywstark at gmail.com
 chiffre = chiffre at posteo.de
 Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,13 +12,16 @@
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c
 yt/geometry/oct_container.c
+yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
+yt/geometry/particle_oct_container.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
+yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5158,3 +5158,4 @@
 0000000000000000000000000000000000000000 hop callback
 a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
+f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
 include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -473,11 +473,18 @@
 function do_setup_py
 {
     [ -e $1/done ] && return
-    echo "Installing $1 (arguments: '$*')"
-    [ ! -e $1/extracted ] && tar xfz $1.tar.gz
-    touch $1/extracted
-    cd $1
-    if [ ! -z `echo $1 | grep h5py` ]
+    LIB=$1
+    shift
+    if [ -z "$@" ]
+    then
+        echo "Installing $LIB"
+    else
+        echo "Installing $LIB (arguments: '$@')"
+    fi
+    [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+    touch $LIB/extracted
+    cd $LIB
+    if [ ! -z `echo $LIB | grep h5py` ]
     then
         shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
 
 function get_ytproject
 {
+    [ -e $1 ] && return
     echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
@@ -551,67 +558,93 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
 # Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.6/done ]
+    if [ ! -e $BZLIB/done ]
     then
-        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+        [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.6
+        cd $BZLIB
         if [ `uname` = "Darwin" ]
         then
             if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.7/done ]
+    if [ ! -e $ZLIB/done ]
     then
-        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+        [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.7
+        cd $ZLIB
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.6.1/done ]
+    if [ ! -e $PNG/done ]
     then
-        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+        [ ! -e $PNG ] && tar xfz $PNG.tar.gz
         echo "Installing PNG"
-        cd libpng-1.6.1
+        cd $PNG
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.11/done ]
+    if [ ! -e $FREETYPE_VER/done ]
     then
-        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+        [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.11
+        cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -688,11 +722,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.9/done ]
+    if [ ! -e $HDF5/done ]
     then
-        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+        [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.9
+        cd $HDF5
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3071601/done ]
+    if [ ! -e $SQLITE/done ]
     then
-        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+        [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3071601
+        cd $SQLITE
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
-    cd Python-2.7.4
+    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+    cd $PYTHON
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    do_setup_py mercurial-2.5.4
+    do_setup_py $MERCURIAL
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    do_setup_py $NUMPY ${NUMPY_ARGS}
 else
-    if [ ! -e scipy-0.11.0/done ]
+    if [ ! -e $SCIPY/done ]
     then
 	if [ ! -e BLAS/done ]
 	then
@@ -798,17 +832,17 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o 1>> ${LOG_FILE}
+	    ar r libfblas.a *.o &>> ${LOG_FILE}
 	    ranlib libfblas.a 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
 	fi
-	if [ ! -e lapack-3.4.2/done ]
+	if [ ! -e $LAPACK/done ]
 	then
-	    tar xfz lapack-3.4.2.tar.gz
+	    tar xfz $LAPACK.tar.gz
 	    echo "Building LAPACK"
-	    cd lapack-3.4.2/
+	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
 	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
 	    touch done
@@ -816,9 +850,9 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
-    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+    export LAPACK=$PWD/$LAPACK/liblapack.a
+    do_setup_py $NUMPY ${NUMPY_ARGS}
+    do_setup_py $SCIPY ${NUMPY_ARGS}
 fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +889,36 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-3.2.2/done ]
+    if [ ! -e $ZEROMQ/done ]
     then
-        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+        [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-3.2.2
+        cd $ZEROMQ
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
-    do_setup_py tornado-3.0
+    do_setup_py $PYZMQ --zmq=${DEST_DIR}
+    do_setup_py $TORNADO
 fi
 
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
 #!python
 import os, re
-from distutils import version
+from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import YTDataContainer
 namespace = locals().copy()
@@ -23,10 +23,12 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
     api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+    api_version = '0.11'
 else:
-    api_version = '0.11'
+    api_version = '1.0'
 
 if api_version == "0.10" and "DISPLAY" in os.environ:
     from matplotlib import rcParams
@@ -42,13 +44,18 @@
         ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
 elif api_version == "0.10":
     ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
-    from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+    if api_version == "0.11":
+        from IPython.frontend.terminal.interactiveshell import \
+            TerminalInteractiveShell
+    elif api_version == "1.0":
+        from IPython.terminal.interactiveshell import TerminalInteractiveShell
+    else:
+        raise RuntimeError
     ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
                     display_banner = True)
     if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
-    raise RuntimeError
+
 
 # The rest is a modified version of the IPython default profile code
 
@@ -77,7 +84,7 @@
     ip = ip_shell.IP.getapi()
     try_next = IPython.ipapi.TryNext
     kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
     ip = ip_shell
     try_next = IPython.core.error.TryNext
     kwargs = dict()
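
For reference, the version gating above relies on the component-wise
ordering of distutils' LooseVersion; a minimal sketch of the comparisons
it performs (the version strings are illustrative):

    from distutils.version import LooseVersion

    # LooseVersion compares version strings component-wise, so the branch
    # above selects the '0.11' API for any IPython newer than 0.10 up to
    # and including 1.0, and the '1.0' API for anything beyond that.
    assert LooseVersion('0.10') <= LooseVersion('0.10')    # api 0.10
    assert LooseVersion('0.13.1') <= LooseVersion('1.0')   # api 0.11
    assert not LooseVersion('1.1') <= LooseVersion('1.0')  # api 1.0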

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -96,7 +96,7 @@
     if answer_big_data:
         nose_argv.append('--answer-big-data')
     log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg["yt","suppressStreamLogging"] = 'True'
+    ytcfg.set("yt","suppressStreamLogging", 'True')
     initial_dir = os.getcwd()
     yt_file = os.path.abspath(__file__)
     yt_dir = os.path.dirname(yt_file)
@@ -105,4 +105,4 @@
         nose.run(argv=nose_argv)
     finally:
         os.chdir(initial_dir)
-        ytcfg["yt","suppressStreamLogging"] = log_suppress
+        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
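
The str() call matters because ytcfg is built on Python 2's ConfigParser,
which stores option values as strings; getboolean() returns a bool, and
SafeConfigParser.set() raises TypeError for non-string values. A minimal
sketch of the round-trip, using a plain SafeConfigParser as a stand-in
for ytcfg:

    from ConfigParser import SafeConfigParser  # Python 2

    cfg = SafeConfigParser()
    cfg.add_section("yt")
    cfg.set("yt", "suppressStreamLogging", "False")
    flag = cfg.getboolean("yt", "suppressStreamLogging")  # a bool
    cfg.set("yt", "suppressStreamLogging", str(flag))     # back to a str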

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- /dev/null
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -0,0 +1,809 @@
+from scipy import optimize
+import numpy as na
+import h5py
+from yt.analysis_modules.absorption_spectrum.absorption_line \
+        import voigt
+
+
+def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
+        minError=1E-5, complexLim=.999,
+        fitLim=.99, minLength=3, 
+        maxLength=1000, splitLim=.99,
+        output_file=None):
+
+    """
+    This function is designed to fit an absorption spectrum by breaking 
+    the spectrum up into absorption complexes, and iteratively adding
+    and optimizing voigt profiles to each complex.
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        1d array of wavelengths
+    fluxData : (N) ndarray
+        array of flux corresponding to the wavelengths given
+        in x. (needs to be the same size as x)
+    orderFits : list
+        list of the names of the species in the order that they 
+        should be fit. Names should correspond to the names of the species
+        given in speciesDicts. (ex: ['lya','OVI'])
+    speciesDicts : dictionary
+        Dictionary of dictionaries. Top level keys should be the names of
+        all the species given in orderFits. The entries should be
+        dictionaries containing all relevant parameters needed to create
+        an absorption line of a given species (f, Gamma, lambda0) as well
+        as max and min values for the parameters to be fit.
+    complexLim : float, optional
+        Maximum flux to start the edge of an absorption complex. Different 
+        from fitLim because it decides extent of a complex rather than 
+        whether or not a complex is accepted. 
+    fitLim : float, optional
+        Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim=.99, a region where all the flux stays above .99
+        will not be separated out to be fit as an absorbing complex, but
+        a region that contains a point where the flux is .97 will be fit
+        as an absorbing complex.)
+    minLength : int, optional
+        Minimum number of cells required for a complex to be included.
+        Default is 3 cells.
+    maxLength : int, optional
+        Maximum number of cells a complex may span before it is split up.
+        Default is 1000 cells.
+    splitLim : float, optional
+        If attempting to split a region for being larger than maxLength,
+        the point of the split must have a flux greater than splitLim
+        (ie: absorption less than 1 - splitLim). Default = .99.
+    output_file : string, optional
+        location to save the results of the fit. 
+
+    Returns
+    -------
+    allSpeciesLines : dictionary
+        Dictionary of dictionaries representing the fit lines. 
+        Top level keys are the species given in orderFits and the corresponding
+        entries are dictionaries with the keys 'N','b','z', and 'group#'. 
+        Each of these corresponds to a list of the parameters for every
+        accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
+        fits some part of the absorption spectrum). 'group#' is a similar list
+        but identifies which absorbing complex each line belongs to. Lines
+        with the same group# were fit at the same time. group#'s do not
+        correlate between species (ie: an lya line with group number 1 and
+        an OVI line with group number 1 were not fit together and do
+        not necessarily correspond to the same region)
+    yFit : (N) ndarray
+        array of flux corresponding to the combination of all fitted
+        absorption profiles. Same size as x.
+    """
+
+    #Empty dictionary for fitted lines
+    allSpeciesLines = {}
+
+    #Wavelength of beginning of array, wavelength resolution
+    x0,xRes=x[0],x[1]-x[0]
+
+    #Empty fit without any lines
+    yFit = na.ones(len(fluxData))
+
+    #Find all regions where lines/groups of lines are present
+    cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
+            complexLim=complexLim, minLength=minLength,
+            maxLength=maxLength, splitLim=splitLim)
+
+    #Fit all species one at a time in given order from low to high wavelength
+    for species in orderFits:
+        speciesDict = speciesDicts[species]
+        speciesLines = {'N':na.array([]),
+                        'b':na.array([]),
+                        'z':na.array([]),
+                        'group#':na.array([])}
+
+        #Set up wavelengths for species
+        initWl = speciesDict['wavelength'][0]
+
+        for b_i,b in enumerate(cBounds):
+            xBounded=x[b[1]:b[2]]
+            yDatBounded=fluxData[b[1]:b[2]]
+            yFitBounded=yFit[b[1]:b[2]]
+
+            #Find init redshift
+            z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
+
+            #Check if any flux at partner sites
+            if not _line_exists(speciesDict['wavelength'],
+                    fluxData,z,x0,xRes,fitLim): 
+                continue 
+
+            #Fit Using complex tools
+            newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
+                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+
+            #Check existence of partner lines if applicable
+            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                    b, minError*(b[2]-b[1]),
+                    x0, xRes, speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+                newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        fitLim,minError*(b[2]-b[1]))
+
+            #Adjust total current fit
+            yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
+            #Add new group to all fitted lines
+            if na.size(newLinesP)>0:
+                speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
+                speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
+                speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
+                groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
+                speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+
+        allSpeciesLines[species]=speciesLines
+
+    if output_file:
+        _output_fit(allSpeciesLines, output_file)
+
+    return (allSpeciesLines,yFit)
+
+def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict, 
+        initP=None):
+    """ Fit an absorption complex by iteratively adding and optimizing
+    voigt profiles.
+    
+    A complex is defined as a region where some number of lines may be
+    present, i.e. a region of non-zero absorption. Lines are iteratively
+    added and optimized until the least squares difference between the
+    flux generated from the optimized parameters and the desired flux
+    profile falls below the error bound.
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        array of wavelength
+    yDat : (N) ndarray
+        array of desired flux profile to be fitted for the wavelength
+        space given by x. Same size as x.
+    yFit : (N) ndarray
+        array of flux profile fitted for the wavelength
+        space given by x already. Same size as x.
+    initz : float
+        redshift to try putting first line at 
+        (maximum absorption for region)
+    minSize : float
+        minimum absorption allowed for a line to still count as a line
+        given in normalized flux (ie: for minSize=.9, only lines with minimum
+        flux less than .9 will be fitted)
+    errBound : float
+        maximum total error allowed for an acceptable fit
+    speciesDict : dictionary
+        dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+        as well as max and min values for parameters to be fit
+    initP : (N,3) ndarray, optional
+        initial guess to try for line parameters to fit the region. Used
+        by _large_flag_fit. Default = None, in which case an initial
+        guess is generated automatically.
+
+    Returns
+    -------
+    linesP : (N,3) ndarray
+        Array of best parameters if a good enough fit is found in 
+        the form [[N1,b1,z1], [N2,b2,z2],...]
+    flag : bool
+        boolean value indicating the success of the fit (True if unsuccessful)
+    """
+
+    #Setup initial line guesses
+    if initP is None: #Regular fit
+        initP = [0,0,0] 
+        if min(yDat)<.5: #Large lines get larger initial guess 
+            initP[0] = 10**16
+        elif min(yDat)>.9: #Small lines get smaller initial guess
+            initP[0] = 10**12.5
+        else:
+            initP[0] = speciesDict['init_N']
+        initP[1] = speciesDict['init_b']
+        initP[2]=initz
+        initP=na.array([initP])
+
+    linesP = initP
+
+    #For generating new z guesses
+    wl0 = speciesDict['wavelength'][0]
+
+    #Check if first line exists still
+    if min(yDat-yFit+1)>minSize: 
+        return [],False
+    
+    #Values to proceed through first run
+    errSq,prevErrSq=1,1000
+
+    while True:
+        #Initial parameter guess from joining parameters from all lines
+        #   in lines into a single array
+        initP = linesP.flatten()
+
+        #Optimize line
+        fitP,success=optimize.leastsq(_voigt_error,initP,
+                args=(x,yDat,yFit,speciesDict),
+                epsfcn=1E-10,maxfev=1000)
+
+        #Set results of optimization
+        linesP = na.reshape(fitP,(-1,3))
+
+        #Generate difference between current best fit and data
+        yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+        dif = yFit*yNewFit-yDat
+
+        #Sum to get idea of goodness of fit
+        errSq=sum(dif**2)
+
+        #If good enough, break
+        if errSq < errBound: 
+            break
+
+        #If last fit was worse, reject the last line and revert to last fit
+        if errSq > prevErrSq*10:
+            #If it's still far above the error bound, cut losses and try the flag fit tools
+            if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
+                return [],True
+            else:
+                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                break
+
+        #If too many lines 
+        if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+            #If it's fittable by the flag tools and still bad, use them
+            if errSq >1E2*errBound and speciesDict['name']=='HI lya':
+                return [],True
+            else:
+                break 
+
+        #Store previous data in case the next fit is rejected
+        prevErrSq = errSq
+        prevLinesP = linesP
+
+
+        #Set up initial condition for new line
+        newP = [0,0,0] 
+        if min(dif)<.1:
+            newP[0]=10**12
+        elif min(dif)>.9:
+            newP[0]=10**16
+        else:
+            newP[0]=10**14
+        newP[1] = speciesDict['init_b']
+        newP[2]=(x[dif.argmax()]-wl0)/wl0
+        linesP=na.append(linesP,[newP],axis=0)
+
+
+    #Check the parameters of all lines to see if they fall in an
+    #   acceptable range, as given in dict ref
+    remove=[]
+    for i,p in enumerate(linesP):
+        check=_check_params(na.array([p]),speciesDict)
+        if check: 
+            remove.append(i)
+    linesP = na.delete(linesP,remove,axis=0)
+
+    return linesP,False
+
+def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
+    """
+    Attempts to more robustly fit saturated lyman alpha regions that have
+    not converged to satisfactory fits using the standard tools.
+
+    Uses a preselected sample of a wide range of initial parameter guesses
+    designed to fit saturated lines (see get_test_lines).
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        array of wavelength
+    yDat : (N) ndarray
+        array of desired flux profile to be fitted for the wavelength
+        space given by x. Same size as x.
+    yFit : (N) ndarray
+        array of flux profile fitted for the wavelength
+        space given by x already. Same size as x.
+    initz : float
+        redshift to try putting first line at 
+        (maximum absorption for region)
+    speciesDict : dictionary
+        dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+        as well as max and min values for parameters to be fit
+    minSize : float
+        minimum absorption allowed for a line to still count as a line
+        given in normalized flux (ie: for minSize=.9, only lines with minimum
+        flux less than .9 will be fitted)
+    errBound : float
+        maximum total error allowed for an acceptable fit
+
+    Returns
+    -------
+    bestP : (N,3) ndarray
+        array of best parameters if a good enough fit is found, in
+        the form [[N1,b1,z1], [N2,b2,z2],...]
+    """
+
+    #Set up some initial line guesses
+    lineTests = _get_test_lines(initz)
+
+    #Keep track of the lowest achieved error and its parameters
+    bestError = 1000
+    bestP = []
+
+    #Iterate through test line guesses
+    for initLines in lineTests:
+        if initLines[1,0]==0:
+            initLines = na.delete(initLines,1,axis=0)
+
+        #Do fitting with initLines as first guess
+        linesP,flag=_complex_fit(x,yDat,yFit,initz,
+                minSize,errBound,speciesDict,initP=initLines)
+
+        #Find error of last fit
+        yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+        dif = yFit*yNewFit-yDat
+        errSq=sum(dif**2)
+
+        #If error lower, keep track of the lines used to make that fit
+        if errSq < bestError:
+            bestError = errSq
+            bestP = linesP
+
+    if bestError>10*errBound*len(x): 
+        return []
+    else:
+        return bestP
+
+def _get_test_lines(initz):
+    """
+    Returns a 3d numpy array of lines to test as initial guesses for
+    saturated lyman alpha absorbers that are difficult to fit.
+    
+    The array is 3d because the first dimension gives separate initial
+    guesses, the second dimension holds multiple lines for the same guess
+    (trying a broad line plus a saturated line), and the third dimension
+    contains the 3 fit parameters (N,b,z).
+
+    Parameters
+    ----------
+    initz : float
+        redshift to give all the test lines
+
+    Returns
+    -------
+    testP : (10,2,3) ndarray
+        numpy array of the form 
+        [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
+    """
+
+    #Set up a bunch of empty lines
+    testP = na.zeros((10,2,3))
+
+    testP[0,0,:]=[1E18,20,initz]
+    testP[1,0,:]=[1E18,40,initz]
+    testP[2,0,:]=[1E16,5, initz]
+    testP[3,0,:]=[1E16,20,initz]
+    testP[4,0,:]=[1E16,80,initz]
+
+    testP[5,0,:]=[1E18,20,initz]
+    testP[6,0,:]=[1E18,40,initz]
+    testP[7,0,:]=[1E16,5, initz]
+    testP[8,0,:]=[1E16,20,initz]
+    testP[9,0,:]=[1E16,80,initz]
+
+    testP[5,1,:]=[1E13,100,initz]
+    testP[6,1,:]=[1E13,100,initz]
+    testP[7,1,:]=[1E13,100,initz]
+    testP[8,1,:]=[1E13,100,initz]
+    testP[9,1,:]=[1E13,100,initz]
+
+    return testP
+
+def _get_bounds(z, b, wl, x0, xRes):
+    """ 
+    Gets the indices of the wavelength range containing the redshifted
+    wavelength wl, sized to match the initial wavelength range given by b.
+
+    Used for checking if species with multiple lines (as in the OVI doublet)
+    fit all lines appropriately.
+
+    Parameters
+    ----------
+    z : float
+        redshift
+    b : (3) ndarray/list
+        initial bounds in form [i0,i1,i2] where i0 is the index of the 
+        minimum flux for the complex, i1 is index of the lower wavelength 
+        edge of the complex, and i2 is the index of the higher wavelength
+        edge of the complex.
+    wl : float
+        unredshifted wavelength of the peak of the new region 
+    x0 : float
+        wavelength at index 0
+    xRes : float
+        difference in wavelength for two consecutive indices
+    
+    Returns
+    -------
+    indices : (2) tuple
+        Tuple (i1,i2) where i1 is the index of the lower wavelength bound of 
+        the new region and i2 is the index of the higher wavelength bound of
+        the new region
+    """
+
+    r=[-b[1]+100+b[0],b[2]+100-b[0]]
+    redWl = (z+1)*wl
+    iRedWl=int((redWl-x0)/xRes)
+    indices = (iRedWl-r[0],iRedWl+r[1])
+
+    return indices
+
+def _remove_unaccepted_partners(linesP, x, y, b, errBound, 
+        x0, xRes, speciesDict):
+    """
+    Given a set of parameters [N,b,z] that form multiple lines for a given
+    species (as in the OVI doublet), remove any set of parameters where
+    not all transition wavelengths have a line that matches the fit.
+
+    (ex: if a fit is determined based on the first line of the OVI doublet,
+    but the given parameters give a bad fit of the wavelength space of
+    the second line then that set of parameters is removed from the array
+    of line parameters.)
+
+    Parameters
+    ----------
+    linesP : (N,3) ndarray
+        array giving sets of line parameters in 
+        form [[N1, b1, z1], ...]
+    x : (N) ndarray
+        wavelength array [nm]
+    y : (N) ndarray
+        normalized flux array of original data
+    b : (3) tuple/list/ndarray
+        indices that give the bounds of the original region so that another 
+        region of similar size can be used to determine the goodness
+        of fit of the other wavelengths
+    errBound : float
+        size of the error that is appropriate for a given region, 
+        adjusted to account for the size of the region.
+
+    Returns
+    -------
+    linesP : (N,3) ndarray
+        array similar to linesP that only contains lines with
+        appropriate fits of all transition wavelengths.
+    """
+
+    #List of lines to remove
+    removeLines=[]
+
+    #Iterate through all sets of line parameters
+    for i,p in enumerate(linesP):
+
+        #iterate over all transition wavelengths
+        for wl in speciesDict['wavelength']:
+
+            #Get the bounds of a similar sized region around the
+            #   appropriate wavelength, and then get the appropriate
+            #   region of wavelength and flux
+            lb = _get_bounds(p[2],b,wl,x0,xRes)
+            xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+
+            #Generate a fit and find the difference to data
+            yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+            dif =yb-yFitb
+
+            #Only count as error where the fitted line absorbs more than the data
+            dif = [k for k in dif if k>0]
+            err = sum(dif)
+
+            #If the fit is too bad then add the line to list of removed lines
+            if err > errBound*1E2:
+                removeLines.append(i)
+                break
+
+    #Remove all bad line fits
+    linesP = na.delete(linesP,removeLines,axis=0)
+
+    return linesP 
+
+
+
+def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
+    """For a group of lines finds if the there is some change in flux greater
+    than some minimum at the same redshift with different initial wavelengths
+
+    Parameters
+    ----------
+    wavelengths : (N) ndarray
+        array of initial wavelengths to check
+    y : (N) ndarray
+        flux array to check
+    z : float
+        redshift at which to check for the lines
+    x0 : float
+        wavelength of the first value in y
+    xRes : float
+        difference in wavelength between consecutive cells in flux array
+    fluxMin : float
+        maximum flux to count as a line existing. 
+
+    Returns
+    -------
+
+    flag : boolean 
+        value indicating whether all lines exist. True if all lines exist
+    """
+
+    #Iterate through initial wavelengths
+    for wl in wavelengths:
+        #Redshifted wavelength
+        redWl = (z+1)*wl
+
+        #Index of the redshifted wavelength
+        indexRedWl = (redWl-x0)/xRes
+
+        #Check if surpasses minimum absorption bound
+        if y[int(indexRedWl)]>fluxMin:
+            return False
+
+    return True
+
+def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
+        minLength =3, maxLength=1000, splitLim=.99):
+    """Breaks up the wavelength space into groups
+    where there is some absorption. 
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        array of wavelengths
+    yDat : (N) ndarray
+        array of flux corresponding to the wavelengths given
+        in x. (needs to be the same size as x)
+    complexLim : float, optional
+        Maximum flux to start the edge of an absorption complex. Different 
+        from fitLim because it decides extent of a complex rather than 
+        whether or not a complex is accepted. 
+    fitLim : float, optional
+        Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim=.99, a region where all the flux stays above .99
+        will not be separated out to be fit as an absorbing complex, but
+        a region that contains a point where the flux is .97 will be fit
+        as an absorbing complex.)
+    minLength : int, optional
+        Minimum number of cells required for a complex to be included.
+        Default is 3 cells.
+    maxLength : int, optional
+        Maximum number of cells a complex may span before it is split up.
+        Default is 1000 cells.
+    splitLim : float, optional
+        If attempting to split a region for being larger than maxLength,
+        the point of the split must have a flux greater than splitLim
+        (ie: absorption less than 1 - splitLim). Default = .99.
+
+    Returns
+    -------
+    cBounds : list
+        list of bounds in the form [[i0,i1,i2],...] where i0 is the
+        index of the minimum flux (peak absorption) for a complex, i1 is
+        the index of the beginning of the complex, and i2 is the index of
+        the end of the complex. Indices refer to the indices of x and yDat.
+    """
+
+    #Initialize empty list of bounds
+    cBounds=[]
+
+    #Iterate through cells of flux
+    i=0
+    while (i<len(x)):
+
+        #Start tracking where the flux drops below the complex edge threshold
+        if yDat[i]<complexLim:
+
+            #Iterate through until we reach the next edge
+            j=0
+            while i+j<len(x) and yDat[i+j]<complexLim: j=j+1
+
+            #Check if the complex is big enough
+            if j >minLength:
+
+                #Check if there is enough absorption for the complex to
+                #   be included
+                cPeak = yDat[i:i+j].argmin()
+                if yDat[cPeak+i]<fitLim:
+                    cBounds.append([cPeak+i,i,i+j])
+
+            i=i+j
+        i=i+1
+
+    i=0
+    #Iterate through the bounds
+    while i < len(cBounds):
+        b=cBounds[i]
+
+        #Check if the region needs to be divided
+        if b[2]-b[1]>maxLength:
+
+            #Find the minimum absorption in the middle two quartiles of
+            #   the large complex
+            q=(b[2]-b[1])/4
+            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+            #Only break it up if the minimum absorption is actually low enough
+            if yDat[cut]>splitLim:
+
+                #Get the new two peaks
+                b1Peak = yDat[b[1]:cut].argmin()+b[1]
+                b2Peak = yDat[cut:b[2]].argmin()+cut
+
+                #add the two regions separately
+                cBounds.insert(i+1,[b1Peak,b[1],cut])
+                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+
+                #Remove the original region
+                cBounds.pop(i)
+                i=i+1
+        i=i+1
+
+    return cBounds
+
+def _gen_flux_lines(x, linesP, speciesDict):
+    """
+    Calculates the normalized flux for a region of wavelength space
+    generated by a set of absorption lines.
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        Array of wavelength
+    linesP: (N,3) ndarray
+        Array giving sets of line parameters in 
+        form [[N1, b1, z1], ...]
+    speciesDict : dictionary
+        Dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+
+    Returns
+    -------
+    flux : (N) ndarray
+        Array of normalized flux generated by the line parameters
+        given in linesP over the wavelength space given in x. Same size as x.
+    """
+    y=0
+    for p in linesP:
+        for i in range(speciesDict['numLines']):
+            f=speciesDict['f'][i]
+            g=speciesDict['Gamma'][i]
+            wl=speciesDict['wavelength'][i]
+            y = y+ _gen_tau(x,p,f,g,wl)
+    flux = na.exp(-y)
+    return flux
+
+def _gen_tau(t, p, f, Gamma, lambda_unshifted):
+    """This calculates a flux distribution for given parameters using the yt
+    voigt profile generator"""
+    N,b,z= p
+    
+    #Calculating quantities
+    tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
+    a=7.95774715459E-15*Gamma*lambda_unshifted/b
+    x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
+    
+    H = voigt(a,x)
+    
+    tau = tau_o*H
+
+    return tau
+
+def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
+    """
+    Gives the error at each point used to optimize the fit of a group
+    of absorption lines to a given flux profile.
+
+    If the parameters are not in the acceptable range as defined
+    in speciesDict, the first value of the error array will
+    contain a large value (999), to prevent the optimizer from running
+    into negative number problems.
+
+    Parameters
+    ----------
+    pTotal : (3N,) ndarray
+        Flattened array of line parameters [N1, b1, z1, N2, b2, z2, ...],
+        reshaped internally to (N,3).
+    x : (N) ndarray
+        array of wavelengths [nm]
+    yDat : (N) ndarray
+        desired normalized flux from fits of lines in wavelength
+        space given by x
+    yFit : (N) ndarray
+        previous fit over the wavelength space given by x.
+    speciesDict : dictionary
+        dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+        as well as max and min values for parameters to be fit
+
+    Returns
+    -------
+    error : (N) ndarray
+        the difference between the fit generated by the parameters
+        given in pTotal multiplied by the previous fit and the desired
+        flux profile, w/ first index modified appropriately for bad 
+        parameter choices
+    """
+
+    pTotal.shape = (-1,3)
+    yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
+
+    error = yDat-yFit*yNewFit
+    error[0] = _check_params(pTotal,speciesDict)
+
+    return error
+
+def _check_params(p, speciesDict):
+    """
+    Check to see if any of the parameters in p fall outside the range
+    given in speciesDict.
+
+    Parameters
+    ----------
+    p : (N,3) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+
+    Returns
+    -------
+    check : int
+        0 if all values are fine
+        999 if any values fall outside acceptable range
+    """
+    check = 0
+    if any(p[:,0] > speciesDict['maxN']) or\
+          any(p[:,0] < speciesDict['minN']) or\
+          any(p[:,1] > speciesDict['maxb']) or\
+          any(p[:,1] < speciesDict['minb']) or\
+          any(p[:,2] > speciesDict['maxz']) or\
+          any(p[:,2] < speciesDict['minz']):
+              check = 999
+    return check
+
+
+def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
+    """
+    This function is designed to output the parameters of the series
+    of lines used to fit an absorption spectrum. 
+
+    The output file contains datasets named species/N, species/b,
+    species/z, and species/complex. The ith entry in each of the datasets
+    is the fitted parameter for the ith line fitted to the spectrum for
+    the given species. The species names come from the fitted line
+    dictionary.
+
+    Parameters
+    ----------
+    lineDic : dictionary
+        Dictionary of dictionaries representing the fit lines. 
+        Top level keys are the species given in orderFits and the corresponding
+        entries are dictionaries with the keys 'N','b','z', and 'group#'. 
+        Each of these corresponds to a list of the parameters for every
+        accepted fitted line. 
+    file_name : string, optional
+        Name of the file to output fit to. Default = 'spectrum_fit.h5'
+
+    """
+    f = h5py.File(file_name, 'w')
+    for ion, params in lineDic.iteritems():
+        f.create_dataset("{0}/N".format(ion),data=params['N'])
+        f.create_dataset("{0}/b".format(ion),data=params['b'])
+        f.create_dataset("{0}/z".format(ion),data=params['z'])
+        f.create_dataset("{0}/complex".format(ion),data=params['group#'])
+    print 'Writing spectrum fit to {0}'.format(file_name)
+
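
For reference, a hedged sketch of driving this new module; the
speciesDict keys below are exactly those the fitter reads (wavelength,
f, Gamma, numLines, init_N, init_b, name, plus the min/max bounds checked
in _check_params), but the numerical values are placeholders rather than
recommended fitting parameters:

    import numpy as na
    from yt.analysis_modules.absorption_spectrum.api import \
        generate_total_fit

    # Hypothetical Lyman alpha species dictionary; values illustrative.
    lya = dict(name='HI lya', numLines=1,
               wavelength=na.array([121.567]),  # nm, to match x below
               f=na.array([0.4164]),
               Gamma=na.array([6.265E8]),
               init_N=1E14, init_b=20.,
               minN=1E11, maxN=1E22,
               minb=1., maxb=300.,
               minz=0., maxz=6.)

    # x and fluxData would normally come from an AbsorptionSpectrum run.
    x = na.linspace(118., 128., 1000)
    fluxData = na.ones(1000)
    fits, yFit = generate_total_fit(x, fluxData, ['lya'], {'lya': lya},
                                    output_file='spectrum_fit.h5')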

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -30,3 +30,6 @@
 
 from .absorption_spectrum import \
     AbsorptionSpectrum
+
+from .absorption_spectrum_fit import \
+    generate_total_fit

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -30,6 +30,7 @@
 
 class StandardRadialAnalysis(object):
     def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
+        raise NotImplementedError  # see TODO
         self.pf = pf
         # We actually don't want to replicate the handling of setting the
         # center here, so we will pass it to the sphere creator.
@@ -53,6 +54,7 @@
         prof = BinnedProfile1D(self.obj, self.n_bins, "Radius",
                                self.inner_radius, self.outer_radius)
         by_weights = defaultdict(list)
+        # TODO: analysis_field_list is undefined
         for fspec in analysis_field_list:
             if isinstance(fspec, types.TupleType) and len(fspec) == 2:
                 field, weight = fspec

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
 import ConfigParser, os, os.path, types
 
 ytcfgDefaults = dict(
-    serialize = 'True',
+    serialize = 'False',
     onlydeserialize = 'False',
     timefunctions = 'False',
     logfile = 'False',
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold008',
+    gold_standard_filename = 'gold010',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -80,7 +80,7 @@
 
     def eval(self, pf):
         slc = self.SlicePlot(pf, self.axis, self.field, center = self.center)
-        return pc.save()
+        return slc.save()
 
 class QuantityProxy(AnalysisTask):
     _params = None

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -89,3 +89,6 @@
 
 from particle_trajectories import \
     ParticleTrajectoryCollection
+
+from particle_filters import \
+    particle_filter

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -36,6 +36,7 @@
 import fileinput
 from re import finditer
 
+from yt.config import ytcfg
 from yt.funcs import *
 from yt.utilities.logger import ytLogger
 from .data_containers import \

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -411,10 +411,12 @@
     def blocks(self):
         for io_chunk in self.chunks([], "io"):
             for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0)):
-                g = self._current_chunk.objs[0]
-                mask = g._get_selector_mask(self.selector)
-                if mask is None: continue
-                yield g, mask
+                # For grids this will be a grid object, and for octrees it will
+                # be an OctreeSubset.  Note that we delegate to the sub-object.
+                o = self._current_chunk.objs[0]
+                for b, m in o.select_blocks(self.selector):
+                    if m is None: continue
+                    yield b, m
 
 class GenerationInProgress(Exception):
     def __init__(self, fields):
@@ -433,7 +435,9 @@
     @property
     def selector(self):
         if self._selector is not None: return self._selector
-        sclass = getattr(yt.geometry.selection_routines,
+        s_module = getattr(self, '_selector_module',
+                           yt.geometry.selection_routines)
+        sclass = getattr(s_module,
                          "%s_selector" % self._type_name, None)
         if sclass is None:
             raise YTDataSelectorNotImplemented(self._type_name)
@@ -456,7 +460,9 @@
         for field in itertools.cycle(fields_to_get):
             if inspected >= len(fields_to_get): break
             inspected += 1
-            if field not in self.pf.field_dependencies: continue
+            fd = self.pf.field_dependencies.get(field, None) or \
+                 self.pf.field_dependencies.get(field[1], None)
+            if fd is None: continue
-            fd = self.pf.field_dependencies[field]
             requested = self._determine_fields(list(set(fd.requested)))
             deps = [d for d in requested if d not in fields_to_get]
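
The reworked blocks property above now yields (block, mask) pairs for
grid patches and octree subsets alike by delegating to each sub-object's
select_blocks. A hedged usage sketch (the dataset path and field name
are placeholders):

    from yt.mods import load

    pf = load("output_00001/info_00001.txt")  # placeholder dataset
    dd = pf.h.all_data()
    total = 0.0
    # Each iteration yields a grid (or an OctreeSubsetBlockSlice) plus
    # the boolean mask of its selected cells.
    for block, mask in dd.blocks:
        total += block["Density"][mask].sum()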

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -276,6 +276,13 @@
             else:
                 field = item
             finfo = self.pf._get_field_info(*field)
+            # For those cases where we are guessing the field type, we will
+            # need to re-update -- otherwise, our item will always not have the
+            # field type.  This can lead to, for instance, "unknown" particle
+            # types not getting correctly identified.
+            # Note that the *only* way this works is if we also fix our field
+            # dependencies during checking.  Bug #627 talks about this.
+            item = self.pf._last_freq
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
             if item in FI:
@@ -444,7 +451,7 @@
         dd['units'] = self._units
         dd['projected_units'] = self._projected_units,
         dd['take_log'] = self.take_log
-        dd['validators'] = self.validators.copy()
+        dd['validators'] = list(self.validators)
         dd['particle_type'] = self.particle_type
         dd['vector_field'] = self.vector_field
         dd['display_field'] = True

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -492,6 +492,10 @@
         if vals is None: return
         return vals.reshape(self.ActiveDimensions, order="C")
 
+    def select_blocks(self, selector):
+        mask = self._get_selector_mask(selector)
+        yield self, mask
+
     def _get_selector_mask(self, selector):
         if hash(selector) == self._last_selector_id:
             mask = self._last_mask

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -92,16 +92,32 @@
             return tr
         return tr
 
+    @property
+    def nz(self):
+        return self._num_zones + 2*self._num_ghost_zones
+
     def _reshape_vals(self, arr):
         if len(arr.shape) == 4: return arr
-        nz = self._num_zones + 2*self._num_ghost_zones
+        nz = self.nz
         n_oct = arr.shape[0] / (nz**3.0)
-        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        if arr.size == nz*nz*nz*n_oct:
+            arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        elif arr.size == nz*nz*nz*n_oct * 3:
+            arr = arr.reshape((nz, nz, nz, n_oct, 3), order="F")
+        else:
+            raise RuntimeError
         arr = np.asfortranarray(arr)
         return arr
 
     _domain_ind = None
 
+    def select_blocks(self, selector):
+        mask = self.oct_handler.mask(selector)
+        mask = self._reshape_vals(mask)
+        slicer = OctreeSubsetBlockSlice(self)
+        for i, sl in slicer:
+            yield sl, mask[:,:,:,i]
+
     @property
     def domain_ind(self):
         if self._domain_ind is None:
@@ -114,12 +130,17 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
-        mylog.debug("Depositing %s particles into %s Octs",
-            positions.shape[0], nvals[-1])
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+        mylog.debug("Depositing %s (%s^3) particles into %s Octs",
+            positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])
+        pos = np.array(positions, dtype="float64")
+        # We should not need the following if we know in advance all our fields
+        # need no casting.
+        fields = [np.asarray(f, dtype="float64") for f in fields]
+        op.process_octree(self.oct_handler, self.domain_ind, pos, fields,
             self.domain_id, self._domain_offset)
         vals = op.finalize()
         if vals is None: return
@@ -149,7 +170,7 @@
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -157,7 +178,7 @@
     def select_fcoords(self, dobj):
         d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -165,7 +186,7 @@
     def select_fwidth(self, dobj):
         d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
                                   num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -173,7 +194,7 @@
     def select_ires(self, dobj):
         d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
                                   num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
                                             domain_id = self.domain_id)
         return tr
@@ -204,7 +225,7 @@
     # This is some subset of an octree.  Note that the sum of subsets of an
     # octree may multiply include data files.  While we can attempt to mitigate
     # this, it's unavoidable for many types of data storage on disk.
-    _type_name = 'particle_octree_subset'
+    _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
     def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
@@ -225,3 +246,49 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
+class OctreeSubsetBlockSlice(object):
+    def __init__(self, octree_subset):
+        self.ind = None
+        self.octree_subset = octree_subset
+        # Cache some attributes
+        nz = octree_subset.nz
+        self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
+        for attr in ["ires", "icoords", "fcoords", "fwidth"]:
+            v = getattr(octree_subset, attr)
+            setattr(self, "_%s" % attr, octree_subset._reshape_vals(v))
+
+    def __iter__(self):
+        for i in range(self._ires.shape[-1]):
+            self.ind = i
+            yield i, self
+
+    def clear_data(self):
+        pass
+
+    def __getitem__(self, key):
+        return self.octree_subset[key][:,:,:,self.ind]
+
+    def get_vertex_centered_data(self, *args, **kwargs):
+        raise NotImplementedError
+
+    @property
+    def id(self):
+        return np.random.randint(1)
+
+    @property
+    def Level(self):
+        return self._ires[0,0,0,self.ind]
+
+    @property
+    def LeftEdge(self):
+        LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+        return LE
+
+    @property
+    def RightEdge(self):
+        RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+        return RE
+
+    @property
+    def dds(self):
+        return self._fwidth[0,0,0,self.ind,:]

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -41,6 +41,32 @@
     mass_sun_cgs, \
     mh
 
+def _field_concat(fname):
+    def _AllFields(field, data):
+        v = []
+        for ptype in data.pf.particle_types:
+            data.pf._last_freq = (ptype, None)
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
+            v.append(data[ptype, fname].copy())
+        rv = np.concatenate(v, axis=0)
+        return rv
+    return _AllFields
+
+def _field_concat_slice(fname, axi):
+    def _AllFields(field, data):
+        v = []
+        for ptype in data.pf.particle_types:
+            data.pf._last_freq = (ptype, None)
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
+            v.append(data[ptype, fname][:,axi])
+        rv = np.concatenate(v, axis=0)
+        return rv
+    return _AllFields
+
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
     orig = set(registry.keys())
     def particle_count(field, data):
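
The _field_concat closures above assemble an "all"-type field by
concatenating the named particle field across every concrete particle
type, skipping "all" itself and filtered types. A hedged sketch of how
such a closure might be registered (the registry call and field name are
illustrative, not taken from this diff):

    from yt.data_objects.particle_fields import _field_concat

    # 'registry' stands in for a FieldInfoContainer instance; expose
    # ("all", "particle_mass") as the concatenation of particle_mass
    # over every concrete particle type.
    registry.add_field(("all", "particle_mass"),
                       function=_field_concat("particle_mass"),
                       particle_type=True)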

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -183,6 +183,8 @@
 
         # Get our bins
         if log_space:
+            if lower_bound <= 0.0 or upper_bound <= 0.0:
+                raise YTIllDefinedBounds(lower_bound, upper_bound)
             func = np.logspace
             lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
@@ -522,7 +524,10 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return np.log10(upper), np.log10(lower)
+    if logit:
+        if lower <= 0.0 or upper <= 0.0:
+            raise YTIllDefinedBounds(lower, upper)
+        return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -545,6 +550,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1][self.indices].ravel().astype('int64')
@@ -553,8 +560,8 @@
         weight_data = weight_data[mi][self.indices]
         nx = bin_indices_x.size
         #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+                      weight_field, binned_field, m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
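
The new YTIllDefinedBounds guards exist because log-spaced bins take
log10 of both bounds before calling np.logspace, and non-positive bounds
would produce nan or -inf bin edges instead of a clean error. A minimal
illustration:

    import numpy as np

    lower_bound, upper_bound = 1e-3, 1e2
    # Both bounds must be strictly positive for this to be well defined.
    edges = np.logspace(np.log10(lower_bound), np.log10(upper_bound), 65)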

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -194,7 +194,7 @@
             ts = np.abs(ts)
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
-        self._masks[grid.id] = masks
+        self._masks[grid.id] = mask
         return mask
 
 
@@ -644,38 +644,6 @@
             raise SyntaxError("Making a fixed resolution slice with "
                               "particles isn't supported yet.")
 
-    def reslice(self, normal, center, width):
-
-        # Cleanup
-        del self._coord
-        del self._pixelmask
-
-        self.center = center
-        self.width = width
-        self.dds = self.width / self.dims
-        self.set_field_parameter('center', center)
-        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
-        self._d = -1.0 * np.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = np.identity(3)
-        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
-        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
-        self.set_field_parameter('cp_x_vec',self._x_vec)
-        self.set_field_parameter('cp_y_vec',self._y_vec)
-        self.set_field_parameter('cp_z_vec',self._norm_vec)
-        # Calculate coordinates of each pixel
-        _co = self.dds * \
-              (np.mgrid[-self.dims/2 : self.dims/2,
-                        -self.dims/2 : self.dims/2] + 0.5)
-
-        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
-                      np.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
-
     def get_data(self, fields):
         """
         Iterates over the list of fields and generates/reads them all.
@@ -860,7 +828,6 @@
     """
     _type_name = "region"
     _con_args = ('center', 'left_edge', 'right_edge')
-    _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
         YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -8,7 +8,7 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
-    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -86,8 +86,11 @@
         if not os.path.exists(apath): raise IOError(filename)
         if apath not in _cached_pfs:
             obj = object.__new__(cls)
-            _cached_pfs[apath] = obj
-        return _cached_pfs[apath]
+            if obj._skip_cache is False:
+                _cached_pfs[apath] = obj
+        else:
+            obj = _cached_pfs[apath]
+        return obj
 
     def __init__(self, filename, data_style=None, file_style=None):
         """
@@ -157,6 +160,10 @@
     def _mrep(self):
         return MinimalStaticOutput(self)
 
+    @property
+    def _skip_cache(self):
+        return False
+
     def hub_upload(self):
         self._mrep.upload()
 
@@ -261,6 +268,10 @@
             raise YTGeometryNotSupported(self.geometry)
 
     def add_particle_filter(self, filter):
+        # This is a dummy, which we set up to enable passthrough of "all"
+        # concatenation fields.
+        n = getattr(filter, "name", filter)
+        self.known_filters[n] = None
         if isinstance(filter, types.StringTypes):
             used = False
             for f in filter_registry[filter]:
@@ -271,6 +282,7 @@
         else:
             used = self.h._setup_filtered_type(filter)
         if not used:
+            self.known_filters.pop(n, None)
             return False
         self.known_filters[filter.name] = filter
         return True
@@ -290,20 +302,25 @@
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
         if fname == self._last_freq[1]:
-            mylog.debug("Guessing field %s is (%s, %s)", fname,
-                        self._last_freq[0], self._last_freq[1])
             return self._last_finfo
         if fname in self.field_info:
+            # Sometimes, if guessing_type == True, this will be switched for
+            # the type of field it is.  So we look at the field type and
+            # determine if we need to change the type.
+            fi = self._last_finfo = self.field_info[fname]
+            if fi.particle_type and self._last_freq[0] \
+                not in self.particle_types:
+                    field = "all", field[1]
+            elif not fi.particle_type and self._last_freq[0] \
+                not in self.fluid_types:
+                    field = self.default_fluid_type, field[1]
             self._last_freq = field
-            self._last_finfo = self.field_info[fname]
             return self._last_finfo
         # We also should check "all" for particles, which can show up if you're
         # mixing deposition/gas fields with particle fields.
         if guessing_type and ("all", fname) in self.field_info:
             self._last_freq = ("all", fname)
             self._last_finfo = self.field_info["all", fname]
-            mylog.debug("Guessing field %s is (%s, %s)", fname,
-                        "all", fname)
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
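
The new _skip_cache property gives subclasses an opt-out from the
parameter-file cache consulted in __new__ above. A hedged sketch of a
frontend using it (the class is hypothetical):

    from yt.data_objects.static_output import StaticOutput

    class InMemoryStaticOutput(StaticOutput):
        # A frontend whose underlying state can change between
        # instantiations must not be served from _cached_pfs.
        @property
        def _skip_cache(self):
            return True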
 

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -99,6 +99,7 @@
         if fname.startswith("Overdensity"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
+            test_all_fields.__name__ = "%s_%s" % (field, nproc)
             yield TestFieldAccess(field, nproc)
 
 if __name__ == "__main__":

diff -r d02eca0d4b95c3fc8e898eba92016e74e6ec58f4 -r 0849d317e494957569b94d9b982c492064c34483 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -753,6 +753,8 @@
         rdw = radius.copy()
     for i, ax in enumerate('xyz'):
         np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+        if data.pf.dimensionality < i+1:
+            break
         if data.pf.periodicity[i] == True:
             np.abs(r, r)
             np.subtract(r, DW[i], rdw)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/74a519fdf95b/
Changeset:   74a519fdf95b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 21:49:27
Summary:     Rolling back removal of the masking functions.
Affected #:  2 files

diff -r 0849d317e494957569b94d9b982c492064c34483 -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -49,6 +49,8 @@
 
 cdef oct_visitor_function count_total_octs
 cdef oct_visitor_function count_total_cells
+cdef oct_visitor_function mark_octs
+cdef oct_visitor_function mask_octs
 cdef oct_visitor_function index_octs
 cdef oct_visitor_function icoords_octs
 cdef oct_visitor_function ires_octs

diff -r 0849d317e494957569b94d9b982c492064c34483 -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -70,6 +70,25 @@
     # Number of *cells* visited and selected.
     data.index += selected
 
+cdef void mark_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    # We mark them even if they are not selected
+    cdef int i
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        data.index += 1
+    cdef np.int64_t index = data.index * 8
+    index += oind(data)
+    arr[index] = 1
+
+cdef void mask_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    if selected == 0: return
+    cdef int i
+    cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    cdef np.int64_t index = data.global_index * 8
+    index += oind(data)
+    arr[index] = 1
+
 cdef void index_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
     # Note that we provide an index even if the cell is not selected.
     cdef int i


https://bitbucket.org/yt_analysis/yt/commits/f25eb47bca30/
Changeset:   f25eb47bca30
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 21:57:00
Summary:     Adding an over_refine_factor and abstracting num_zones somewhat.  Added setup_data().
Affected #:  7 files

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -42,7 +42,6 @@
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
-    _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
@@ -50,7 +49,8 @@
     _domain_offset = 0
     _num_octs = -1
 
-    def __init__(self, base_region, domain, pf):
+    def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.domain = domain
@@ -228,8 +228,10 @@
     _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
-    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+                 over_refine_factor = 2):
         # The first attempt at this will not work in parallel.
+        self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files
         self.field_data = YTFieldData()
         self.field_parameters = {}

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -68,6 +68,7 @@
     cdef oct_visitor_function *fill_func
     cdef int partial_coverage
     cdef int nn[3]
+    cdef np.uint8_t oref
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int max_domain
@@ -83,6 +84,7 @@
                         OctVisitorData *data)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -96,8 +96,10 @@
 cdef class OctreeContainer:
 
     def __init__(self, oct_domain_dimensions, domain_left_edge,
-                 domain_right_edge, partial_coverage = 0):
+                 domain_right_edge, partial_coverage = 0,
+                 over_refine = 1):
         # This will just initialize the root mesh octs
+        self.oref = over_refine
         self.partial_coverage = partial_coverage
         cdef int i, j, k, p
         for i in range(3):
@@ -120,6 +122,12 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+        data.index = 0
+        data.last = -1
+        data.domain = domain_id
+        data.oref = self.oref
+
     def __dealloc__(self):
         free_octs(self.cont)
         if self.root_mesh == NULL: return
@@ -251,8 +259,8 @@
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.max_domain, dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data)
         data.array = domain_mask.data
-        data.domain = -1
         self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
         cdef int i
         domain_ids = []
@@ -335,9 +343,8 @@
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         coords = np.zeros((num_octs * 8), dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
 
@@ -352,9 +359,8 @@
         # TODO: This *8 needs to be made generic.
         coords = np.empty((num_octs * 8, 3), dtype="int64")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
 
@@ -370,9 +376,8 @@
         # TODO: This *8 needs to be made generic.
         res = np.empty(num_octs * 8, dtype="int64")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.array = <void *> res.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
 
@@ -387,9 +392,8 @@
         # TODO: This *8 needs to be made generic.
         fwidth = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.array = <void *> fwidth.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
         for i in range(3):
@@ -409,9 +413,8 @@
         # TODO: This *8 needs to be made generic.
         coords = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
         cdef np.float64_t base_dx
@@ -441,8 +444,8 @@
             else:
                 dest = np.zeros(num_cells, dtype=source.dtype, order='C')
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.index = offset
-        data.domain = domain_id
         # We only need this so we can continue calculating the offset
         data.dims = dims
         cdef void *p[2]
@@ -479,10 +482,8 @@
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
         cdef OctVisitorData data
-        data.domain = domain_id
+        self.setup_data(&data, domain_id)
         data.array = ind.data
-        data.index = 0
-        data.last = -1
         self.visit_all_octs(selector, oct_visitors.index_octs, &data)
         return ind
 
@@ -595,13 +596,12 @@
             file_inds[i] = -1
             cell_inds[i] = 9
         cdef OctVisitorData data
-        data.index = 0
+        self.setup_data(&data, domain_id)
         cdef void *p[3]
         p[0] = levels.data
         p[1] = file_inds.data
         p[2] = cell_inds.data
         data.array = p
-        data.domain = domain_id
         self.visit_all_octs(selector, self.fill_func, &data)
         return levels, cell_inds, file_inds
 
@@ -629,8 +629,7 @@
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = 1
+        self.setup_data(&data, 1)
         self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
         # TODO: This *8 needs to be made generic.
         assert ((data.global_index+1)*8 == data.index)
@@ -648,9 +647,11 @@
 
 cdef class SparseOctreeContainer(OctreeContainer):
 
-    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge,
+                 over_refine = 1):
         cdef int i, j, k, p
         self.partial_coverage = 1
+        self.oref = over_refine
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
         self.max_domain = -1

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -43,6 +43,9 @@
     int dims
     np.int32_t domain
     np.int8_t level
+    np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
+                   # To calculate nzones, 1 << (oref * 3)
+                            
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -77,6 +77,7 @@
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         data.index += 1
+    # TODO: This 8 needs to be made into a generic value.
     cdef np.int64_t index = data.index * 8
     index += oind(data)
     arr[index] = 1
@@ -85,6 +86,7 @@
     if selected == 0: return
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
+    # TODO: This 8 needs to be made into a generic value.
     cdef np.int64_t index = data.global_index * 8
     index += oind(data)
     arr[index] = 1

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -205,6 +205,7 @@
         cdef int i, j, k, m, n, ind[3]
         cdef Oct *noct
         cdef np.uint64_t prefix1, prefix2
+        # TODO: This does not need to be changed.
         o.children = <Oct **> malloc(sizeof(Oct *)*8)
         for i in range(2):
             for j in range(2):

diff -r 74a519fdf95b0cf651646e0cc19da33b86e9c8ea -r f25eb47bca303667357c097b4f400a23bebd0d08 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -155,16 +155,13 @@
 
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.last = -1
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
         return data.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
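
The arithmetic being threaded through here is worth spelling out: the
over_refine_factor (stored as oref) gives 1 << oref zones along each oct
edge and, per the comment in oct_visitors.pxd, 1 << (oref * 3) zones per
oct in total.  A quick sanity check in plain Python (not yt code):

    # oref = 1 -> 2 zones per edge, 8 cells; oref = 2 -> 4 per edge, 64.
    for oref in (1, 2, 3):
        num_zones = 1 << oref      # matches self._num_zones in OctreeSubset
        nzones = 1 << (oref * 3)   # the nzones formula from the pxd comment
        assert nzones == num_zones ** 3
        print((oref, num_zones, nzones))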
 


https://bitbucket.org/yt_analysis/yt/commits/c37cad5b6b0a/
Changeset:   c37cad5b6b0a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 23:02:46
Summary:     First pass at generalization of cell count in octs.
Affected #:  6 files

diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -229,7 +229,7 @@
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
     def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
-                 over_refine_factor = 2):
+                 over_refine_factor = 1):
         # The first attempt at this will not work in parallel.
         self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files

diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -127,6 +127,7 @@
         data.last = -1
         data.domain = domain_id
         data.oref = self.oref
+        data.nz = (1 << (data.oref*3))
 
     def __dealloc__(self):
         free_octs(self.cont)
@@ -341,9 +342,9 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
-        coords = np.zeros((num_octs * 8), dtype="uint8")
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
+        coords = np.zeros((num_octs * data.nz), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
@@ -355,11 +356,10 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        # TODO: This *8 needs to be made generic.
-        coords = np.empty((num_octs * 8, 3), dtype="int64")
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
+        cdef np.ndarray[np.int64_t, ndim=2] coords
+        coords = np.empty((num_octs * data.nz, 3), dtype="int64")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
@@ -371,12 +371,11 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        # TODO: This *8 needs to be made generic.
-        res = np.empty(num_octs * 8, dtype="int64")
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        res = np.empty(num_octs * data.nz, dtype="int64")
         data.array = <void *> res.data
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
@@ -388,11 +387,10 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
-        cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        # TODO: This *8 needs to be made generic.
-        fwidth = np.empty((num_octs * 8, 3), dtype="float64")
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
+        cdef np.ndarray[np.float64_t, ndim=2] fwidth
+        fwidth = np.empty((num_octs * data.nz, 3), dtype="float64")
         data.array = <void *> fwidth.data
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
@@ -408,12 +406,11 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        # TODO: This *8 needs to be made generic.
-        coords = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
-        self.setup_data(&data, domain_id)
+        coords = np.empty((num_octs * data.nz, 3), dtype="float64")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
@@ -462,11 +459,9 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
-        # TODO: This *8 needs to be made generic.
-        if (data.global_index + 1) * 8 * data.dims > source.size:
+        if (data.global_index + 1) * data.nz * data.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
-            # TODO: This *8 needs to be made generic.
-            print (data.global_index + 1) * 8 * data.dims - source.size
+            print (data.global_index + 1) * data.nz * data.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
         if data.index > dest.size:
@@ -566,7 +561,7 @@
         if parent.children != NULL:
             next = parent.children[cind(ind[0],ind[1],ind[2])]
         else:
-            # TODO: This *8 does NOT need to be made generic.
+            # This *8 does NOT need to be made generic.
             parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 parent.children[i] = NULL
@@ -631,8 +626,7 @@
         cdef OctVisitorData data
         self.setup_data(&data, 1)
         self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        # TODO: This *8 needs to be made generic.
-        assert ((data.global_index+1)*8 == data.index)
+        assert ((data.global_index+1)*data.nz == data.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo

diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -45,6 +45,7 @@
     np.int8_t level
     np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
                    # To calculate nzones, 1 << (oref * 3)
+    np.int32_t nz
                             
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
@@ -67,10 +68,15 @@
 cdef oct_visitor_function fill_file_indices_rind
 
 cdef inline int cind(int i, int j, int k):
+    # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
     return (((i*2)+j)*2+k)
 
 cdef inline int oind(OctVisitorData *data):
-    return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+    return (((data.ind[0]*(1<<data.oref))
+             +data.ind[1])*(1<<data.oref)
+             +data.ind[2])
 
 cdef inline int rind(OctVisitorData *data):
-    return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])
+    return (((data.ind[2]*(1<<data.oref))
+             +data.ind[1])*(1<<data.oref)
+             +data.ind[0])

diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,8 +38,7 @@
     if selected == 0: return
     cdef int i
     # There are this many records between "octs"
-    # TODO: This 8 needs to be made into a generic value.
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -51,8 +50,7 @@
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
     cdef int i
-    # TODO: This 8 needs to be made into a generic value.
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -77,8 +75,7 @@
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         data.index += 1
-    # TODO: This 8 needs to be made into a generic value.
-    cdef np.int64_t index = data.index * 8
+    cdef np.int64_t index = data.index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -86,8 +83,7 @@
     if selected == 0: return
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    # TODO: This 8 needs to be made into a generic value.
-    cdef np.int64_t index = data.global_index * 8
+    cdef np.int64_t index = data.global_index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -105,10 +101,8 @@
     if selected == 0: return
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
-    # TODO: data.ind and the number of bits we shift need to be made general
-    # for octrees with > 8 zones.
     for i in range(3):
-        coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
+        coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
     data.index += 1
 
 cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -126,11 +120,9 @@
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
-    # TODO: data.ind and the number of bits we shift in dx and in data.pos need
-    # to be made general for octrees with > 8 zones.
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
-        c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i]) 
+        c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i]) 
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
@@ -143,7 +135,7 @@
     cdef np.float64_t *fwidth = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
         fwidth[data.index * 3 + i] = dx
     data.index += 1

diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,6 +40,9 @@
                         oct_visitor_function *func,
                         OctVisitorData *data,
                         int visit_covered = ?)
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r f25eb47bca303667357c097b4f400a23bebd0d08 -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -241,25 +241,59 @@
                             data.pos[2] = (data.pos[2] >> 1)
                             data.level -= 1
                         elif this_level == 1:
-                            # TODO: Refactor to enable multiple cells
-                            #       This code should be able to iterate over
-                            #       cells, even though the rest cannot.
-                            selected = self.select_cell(spos, sdds)
-                            if ch != NULL:
-                                selected *= self.overlap_cells
                             data.global_index += increment
                             increment = 0
-                            # data.ind refers to the cell, not to the oct.
-                            data.ind[0] = i
-                            data.ind[1] = j
-                            data.ind[2] = k
-                            func(root, data, selected)
+                            self.visit_oct_cells(data, root, ch, spos, sdds,
+                                                 func, i, j, k)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
             this_level = 0 # We turn this off for the second pass.
             iter += 1
 
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k):
+        # We can short-circuit the whole process if data.oref == 1.
+        # This saves us some funny-business.
+        cdef int selected
+        if data.oref == 1:
+            selected = self.select_cell(spos, sdds)
+            if ch != NULL:
+                selected *= self.overlap_cells
+            # data.ind refers to the cell, not to the oct.
+            data.ind[0] = i
+            data.ind[1] = j
+            data.ind[2] = k
+            func(root, data, selected)
+            return
+        # Okay, now that we've got that out of the way, we have to do some
+        # other checks here.  In this case, spos[] is the position of the
+        # center of a *possible* oct child, which means it is the center of a
+        # cluster of cells.  That cluster might have 1, 8, 64, ... cells in it.
+        # But, we can figure it out by calculating the cell dds.
+        cdef np.float64_t dds[3], pos[3]
+        cdef int ci, cj, ck
+        for i in range(3):
+            dds[i] = sdds[i] / data.oref
+        # Boot strap at the first index.
+        pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
+        for ci in range(data.oref):
+            pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
+            for cj in range(data.oref):
+                pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
+                for ck in range(data.oref):
+                    selected = self.select_cell(pos, dds)
+                    if ch != NULL:
+                        selected *= self.overlap_cells
+                    data.ind[0] = i
+                    data.ind[1] = j
+                    data.ind[2] = k
+                    pos[2] += dds[2]
+                    func(root, data, selected)
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
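
The generalized oind() above is plain row-major flattening with a side
length of 1 << oref zones.  An equivalent in pure Python (a sketch for
checking the arithmetic, not the yt implementation):

    def oind(ind, oref=1):
        # Row-major flattening of an (s, s, s) cell index within one oct,
        # where s = 1 << oref is the number of zones per edge.
        s = 1 << oref
        return (ind[0] * s + ind[1]) * s + ind[2]

    # With oref = 1 this reduces to the old ((i*2)+j)*2+k layout:
    assert oind((1, 0, 1), oref=1) == 5
    # With oref = 2, (3, 3, 3) is the last of the 64 zones:
    assert oind((3, 3, 3), oref=2) == 63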


https://bitbucket.org/yt_analysis/yt/commits/53eafdeb8919/
Changeset:   53eafdeb8919
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 23:14:27
Summary:     Starting to thread over_refine_factor through constructors.
Affected #:  3 files

diff -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,6 +96,7 @@
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None
+    over_refine_factor = 1
 
     def _set_units(self):
         self.units = {}
@@ -154,8 +155,10 @@
 
     def __init__(self, filename, data_style="gadget_binary",
                  additional_fields = (),
-                 unit_base = None, n_ref = 64):
+                 unit_base = None, n_ref = 64,
+                 over_refine_factor = 1):
         self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -268,11 +271,13 @@
     _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", n_ref = 64):
+    def __init__(self, filename, data_style="OWLS", n_ref = 64,
+                 over_refine_factor = 1):
         self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               unit_base = None,
-                                               n_ref = n_ref)
+        super(OWLSStaticOutput, self).__init__(
+                               filename, data_style,
+                               unit_base = None, n_ref = n_ref,
+                               over_refine_factor = over_refine_factor)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]

diff -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -738,10 +738,11 @@
     file_count = 1
     filename_template = "stream_file"
     n_ref = 64
+    over_refine_factor = 1
 
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
-                      n_ref = 64):
+                      n_ref = 64, over_refine_factor = 1):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -828,6 +829,7 @@
 
     spf = StreamParticlesStaticOutput(handler)
     spf.n_ref = n_ref
+    spf.over_refine_factor = over_refine_factor
     spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0

diff -r c37cad5b6b0a9152ca507e57ae225e9227840cf4 -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -86,7 +86,8 @@
                 sum(d.total_particles.values()) for d in self.data_files)
         pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
-            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
+            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge,
+            over_refine = pf.over_refine_factor)
         self.oct_handler.n_ref = pf.n_ref
         mylog.info("Allocating for %0.3e particles", self.total_particles)
         # No more than 256^3 in the region finder.
@@ -148,7 +149,8 @@
                               self.regions.identify_data_files(dobj.selector)]
             base_region = getattr(dobj, "base_region", dobj)
             subset = [ParticleOctreeSubset(base_region, data_files, 
-                        self.parameter_file)]
+                        self.parameter_file,
+                        self.parameter_file.over_refine_factor)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 


https://bitbucket.org/yt_analysis/yt/commits/e9032910ce34/
Changeset:   e9032910ce34
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 23:27:47
Summary:     Initial implementation of the smoothing with over_refine.

Works for over_refine = 1, but not 2 or higher.

http://paste.yt-project.org/show/3797/
Affected #:  4 files

diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -151,7 +151,8 @@
         cls = getattr(particle_smooth, "%s_smooth" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
         if fields is None: fields = []
         op = cls(nvals, len(fields), 64)
         op.initialize()

diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -248,9 +248,11 @@
                 cp[i] -= dds[i]/2.0 # Now centered
             else:
                 cp[i] += dds[i]/2.0
-            # We don't need to change dds[i] as it has been halved from the
-            # oct width, thus making it already the cell width
-            oinfo.dds[i] = dds[i] # Cell width
+            # We don't normally need to change dds[i] as it has been halved
+            # from the oct width, thus making it already the cell width.
+            # But, for some cases where the oref != 1, this needs to be
+            # changed.
+            oinfo.dds[i] = dds[i] / self.oref # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
             oinfo.ipos[i] = ipos[i]
         oinfo.level = level

diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -107,7 +107,8 @@
         cdef np.int64_t *doffs, *pinds, *pcounts, poff
         cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
         cdef np.ndarray[np.float64_t, ndim=1] tarr
-        dims[0] = dims[1] = dims[2] = 2
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
         numpart = positions.shape[0]
         # pcount is the number of particles per oct.
         pcount = np.zeros_like(dom_ind)
@@ -173,7 +174,7 @@
             oct = octree.get(pos, &oi)
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
-            offset = dom_ind[oct.domain_ind - moff] * 8
+            offset = dom_ind[oct.domain_ind - moff] * nz
             neighbors = octree.neighbors(&oi, &nneighbors)
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.

diff -r 53eafdeb8919b8a8bcf86020667d8a1dba866fc1 -r e9032910ce34ce257354ff126a0d2b6f94979d48 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -286,9 +286,9 @@
                     selected = self.select_cell(pos, dds)
                     if ch != NULL:
                         selected *= self.overlap_cells
-                    data.ind[0] = i
-                    data.ind[1] = j
-                    data.ind[2] = k
+                    data.ind[0] = ci
+                    data.ind[1] = cj
+                    data.ind[2] = ck
                     pos[2] += dds[2]
                     func(root, data, selected)
                 pos[1] += dds[1]
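
With the smoothing buffer now sized from self.nz, the allocation scales
with the over-refinement instead of being hard-coded to (2, 2, 2, n).  A
small sketch of the shape arithmetic (n_octs is a stand-in value):

    import numpy as np

    def smooth_buffer(oref, n_octs):
        # Mirrors nvals = (nz, nz, nz, (self.domain_ind >= 0).sum()),
        # with nz = 1 << oref zones along each axis of an oct.
        nz = 1 << oref
        return np.zeros((nz, nz, nz, n_octs), dtype="float64")

    print(smooth_buffer(1, 100).shape)  # (2, 2, 2, 100)
    print(smooth_buffer(2, 100).shape)  # (4, 4, 4, 100)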


https://bitbucket.org/yt_analysis/yt/commits/d1c15c019b54/
Changeset:   d1c15c019b54
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-21 23:40:36
Summary:     Fixing domain_dimensions to match number of zones.
Affected #:  1 file

diff -r e9032910ce34ce257354ff126a0d2b6f94979d48 -r d1c15c019b54a2cd720f378896cb6f069778acce yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -191,7 +191,8 @@
 
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -297,7 +298,8 @@
         self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.cosmological_simulation = 1
         self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
@@ -443,7 +445,8 @@
                 self.parameters[param] = val
 
         self.current_time = hvals["time"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         if self.parameters.get('bPeriodic', True):
             self.periodicity = (True, True, True)
         else:


https://bitbucket.org/yt_analysis/yt/commits/b282b6c89e67/
Changeset:   b282b6c89e67
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-22 19:53:05
Summary:     Fixed argument order; also fixed the cell/oct width in oi.
Affected #:  3 files

diff -r d1c15c019b54a2cd720f378896cb6f069778acce -r b282b6c89e67388f40f1a970b1e375757eae5f4d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -291,8 +291,10 @@
         cdef np.int64_t npos[3], ndim[3]
         # Now we get our boundaries for this level, so that we can wrap around
         # if need be.
+        # ndim is the oct dimensions of the level, not the cell dimensions.
         for i in range(3):
-            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i])/(2*oi.dds[i]))
+            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            ndim[i] = (ndim[i] >> self.oref)
         for i in range(3):
             npos[0] = (oi.ipos[0] + (1 - i))
             if npos[0] < 0: npos[0] += ndim[0]
@@ -325,6 +327,7 @@
                         nfound += 1
                         olist = OctList_append(olist, cand)
                         if my_list == NULL: my_list = olist
+
         olist = my_list
         cdef int noct = OctList_count(olist)
         cdef Oct **neighbors

diff -r d1c15c019b54a2cd720f378896cb6f069778acce -r b282b6c89e67388f40f1a970b1e375757eae5f4d yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -148,9 +148,9 @@
                 data_files = [self.data_files[i] for i in
                               self.regions.identify_data_files(dobj.selector)]
             base_region = getattr(dobj, "base_region", dobj)
+            oref = self.parameter_file.over_refine_factor
             subset = [ParticleOctreeSubset(base_region, data_files, 
-                        self.parameter_file,
-                        self.parameter_file.over_refine_factor)]
+                        self.parameter_file, over_refine_factor = oref)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 

diff -r d1c15c019b54a2cd720f378896cb6f069778acce -r b282b6c89e67388f40f1a970b1e375757eae5f4d yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -98,7 +98,7 @@
         # that we can deal with >27 neighbors.  As I write this comment,
         # neighbors() only returns 27 neighbors.
         cdef int nf, i, j, dims[3], n
-        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
         cdef int nsize = 0
         cdef np.int64_t *nind = NULL
         cdef OctInfo oi
@@ -144,6 +144,7 @@
         # actually be indirectly-sorted fields.  This preserves memory at the
         # expense of additional pointer lookups.
         pind = np.argsort(pdoms)
+        pind = np.asarray(pind, dtype='int64', order='C')
         # So what this means is that we now have all the oct-0 particle indices
         # in order, then the oct-1, etc etc.
         # This now gives us the indices to the particles for each domain.
@@ -176,6 +177,8 @@
                 continue
             offset = dom_ind[oct.domain_ind - moff] * nz
             neighbors = octree.neighbors(&oi, &nneighbors)
+            for j in range(3):
+                dds[j] = oi.dds[j] / octree.oref
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.
             if nneighbors > nsize:
@@ -190,7 +193,7 @@
                     break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
-            self.neighbor_process(dims, oi.left_edge, oi.dds,
+            self.neighbor_process(dims, oi.left_edge, dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset)
         if nind != NULL:
@@ -332,6 +335,9 @@
         free(self.fp)
         return self.vals
 
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields):
         # We have our i, j, k for our cell, as well as the cell position.
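
The ndim fix above is a units issue: oi.dds is a cell width, so dividing
the domain width by it counts cells, and shifting by oref converts that
cell count into the oct count the neighbor search needs.  In plain Python
(a sketch of the arithmetic only):

    def level_oct_dims(domain_width, cell_width, oref):
        # Cells across the domain at this level, then shift out the
        # 1 << oref zones per oct edge to get oct dimensions.
        ncells = int(domain_width / cell_width)
        return ncells >> oref

    # A level spanning 16 cells across with oref = 1 is 8 octs across.
    assert level_oct_dims(1.0, 1.0 / 16, oref=1) == 8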


https://bitbucket.org/yt_analysis/yt/commits/e36b3ff3f5d5/
Changeset:   e36b3ff3f5d5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-22 21:17:13
Summary:     Fixing typo in setting of dds.  Over refine now works, at least preliminarily.
Affected #:  2 files

diff -r b282b6c89e67388f40f1a970b1e375757eae5f4d -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -123,9 +123,17 @@
                     self.root_mesh[i][j][k] = NULL
 
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+        cdef int i
         data.index = 0
         data.last = -1
+        data.global_index = -1
+        for i in range(3):
+            data.pos[i] = -1
+            data.ind[i] = -1
+        data.array = NULL
+        data.dims = 0
         data.domain = domain_id
+        data.level = -1
         data.oref = self.oref
         data.nz = (1 << (data.oref*3))
 
@@ -472,6 +480,8 @@
         if data.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
             print data.index - dest.size
+            print (data.global_index + 1) * data.nz * data.dims, source.size
+            print num_cells
             raise RuntimeError
         if num_cells >= 0:
             return dest

diff -r b282b6c89e67388f40f1a970b1e375757eae5f4d -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -274,23 +274,24 @@
         # But, we can figure it out by calculating the cell dds.
         cdef np.float64_t dds[3], pos[3]
         cdef int ci, cj, ck
-        for i in range(3):
-            dds[i] = sdds[i] / data.oref
+        cdef int nr = (1 << (data.oref - 1))
+        for ci in range(3):
+            dds[ci] = sdds[ci] / nr
         # Boot strap at the first index.
         pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
-        for ci in range(data.oref):
+        for ci in range(nr):
             pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
-            for cj in range(data.oref):
+            for cj in range(nr):
                 pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
-                for ck in range(data.oref):
+                for ck in range(nr):
                     selected = self.select_cell(pos, dds)
                     if ch != NULL:
                         selected *= self.overlap_cells
-                    data.ind[0] = ci
-                    data.ind[1] = cj
-                    data.ind[2] = ck
+                    data.ind[0] = ci + i * nr
+                    data.ind[1] = cj + j * nr
+                    data.ind[2] = ck + k * nr
+                    func(root, data, selected)
                     pos[2] += dds[2]
-                    func(root, data, selected)
                 pos[1] += dds[1]
             pos[0] += dds[0]
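
After this fix, the sweep divides each oct child into nr = 1 << (oref - 1)
cells per axis and offsets the cell indices by the child's position within
the oct.  The same loop in plain Python (a sketch, with hypothetical
argument names):

    def cell_centers(spos, sdds, oref, child_ijk):
        # spos/sdds: center and width of one oct child; child_ijk is the
        # child's (i, j, k) within its oct.
        nr = 1 << (oref - 1)             # cells per axis in this child
        dds = [w / nr for w in sdds]
        cells = []
        for ci in range(nr):
            for cj in range(nr):
                for ck in range(nr):
                    ind = tuple(c + child_ijk[a] * nr
                                for a, c in enumerate((ci, cj, ck)))
                    pos = tuple(spos[a] - sdds[a] / 2.0 + dds[a] * (c + 0.5)
                                for a, c in enumerate((ci, cj, ck)))
                    cells.append((ind, pos))
        return cells

    # oref = 1: a single cell, centered on the child itself.
    print(cell_centers((0.25,) * 3, (0.5,) * 3, 1, (0, 0, 0)))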
 


https://bitbucket.org/yt_analysis/yt/commits/89a35feb1443/
Changeset:   89a35feb1443
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-23 02:47:30
Summary:     This enables find_max for RAMSES.

Note that in many cases, find_max already worked!  In fact, if finest_levels
were specified to be false, it would have worked just fine as-is.  What this
change does is ensure that find_max works in all cases, specifically those
cases where the maximum "level" of the Octree is not the same as levelmax in
the header file -- that is, those simulations which have not yet refined to
their maximum level.  In those cases, no domains would be selected in the
inclusion check for the "all_data" selector, and the max would not be found.

This change distinguishes between two attributes:

ds.h.max_level
ds.max_level

These are no longer the same.  The former is the maximum *reached* level of
refinement, whereas the latter is the maximum *possible* level of refinement.
I believe that this behavior is valid, although I am not terribly fond of it.
Affected #:  1 file

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 89a35feb1443686c3e18ed7f251cdc6f81bdf3de yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -215,6 +215,7 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
+        max_level = min_level
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -248,6 +249,8 @@
                     assert(pos.shape[0] == ng)
                     n = self.oct_handler.add(cpu + 1, level - min_level, pos)
                     assert(n == ng)
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
         self.oct_handler.finalize()
 
     def included(self, selector):
@@ -297,7 +300,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.max_level = pf.max_level
+        self.max_level = None
 
         self.float_type = np.float64
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
@@ -308,6 +311,7 @@
                         for i in range(self.parameter_file['ncpu'])]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
         self.num_grids = total_octs
 
     def _detect_fields(self):


https://bitbucket.org/yt_analysis/yt/commits/6cdf51a81849/
Changeset:   6cdf51a81849
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-23 20:56:47
Summary:     Initial creation of ChunkDataCache object.
Affected #:  1 file

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -607,3 +607,31 @@
             cdt[ind:ind+gdt.size] = gdt
             ind += gt.size
         return cdt
+
+class ChunkDataCache(object):
+    def __init__(self, base_iter, preload_fields, geometry_handler,
+                 max_length = 256):
+        # At some point, max_length should instead become a heuristic function,
+        # potentially looking at estimated memory usage.  Note that this never
+        # initializes the iterator; it assumes the iterator is already created,
+        # and it calls next() on it.
+        self.base_iter = base_iter
+        self.queue = []
+        self.max_length = max_length
+        self.preload_fields = preload_fields
+        self.geometry_handler = geometry_handler
+
+    def __iter__(self):
+        return self
+    
+    def next(self):
+        if len(self.queue) == 0:
+            for i in range(self.max_length):
+                try:
+                    self.queue.append(self.base_iter.next())
+                except StopIteration:
+                    break
+        if len(self.queue) == 0:
+            # If it's still zero ...
+            raise StopIteration
+        return self.queue.pop(0)
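
The queue logic is independent of yt, so the batch-and-pop pattern can be
sketched standalone (Batcher is a stand-in for ChunkDataCache, minus the
preloading; Python 2 iterator protocol, matching the code above):

    class Batcher(object):
        def __init__(self, base_iter, max_length=256):
            self.base_iter = iter(base_iter)
            self.queue = []
            self.max_length = max_length

        def __iter__(self):
            return self

        def next(self):
            # Refill the queue in batches of up to max_length, then hand
            # items out one at a time.
            if len(self.queue) == 0:
                for _ in range(self.max_length):
                    try:
                        self.queue.append(self.base_iter.next())
                    except StopIteration:
                        break
                if len(self.queue) == 0:
                    raise StopIteration
            return self.queue.pop(0)

    print(list(Batcher(range(5), max_length=2)))  # [0, 1, 2, 3, 4]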


https://bitbucket.org/yt_analysis/yt/commits/21835af6a5dd/
Changeset:   21835af6a5dd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-23 22:54:43
Summary:     Implementing caching that works for Enzo.
Affected #:  6 files

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -47,7 +47,7 @@
     ParameterFileStore
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
-    NeedsGridType
+    NeedsGridType, ValidateSpatial
 import yt.geometry.selection_routines
 
 def force_array(item, shape):
@@ -92,6 +92,7 @@
     _con_args = ()
     _skip_add = False
     _container_fields = ()
+    _field_cache = None
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -192,13 +193,17 @@
         Returns a single field.  Will add if necessary.
         """
         f = self._determine_fields(key)[0]
-        if f not in self.field_data:
+        if f not in self.field_data and key not in self.field_data:
             if f in self._container_fields:
                 self.field_data[f] = self._generate_container_field(f)
                 return self.field_data[f]
             else:
                 self.get_data(f)
-        return self.field_data[f]
+        # Note that this is less succinct so that we can account for the case
+        # when there are, for example, no elements in the object.
+        rv = self.field_data.get(f, None)
+        if rv is None: rv = self.field_data[key]
+        return rv
 
     def __setitem__(self, key, val):
         """
@@ -249,10 +254,14 @@
         rv = np.empty(self.ires.size, dtype="float64")
         ind = 0
         if ngz == 0:
+            deps = self._identify_dependencies([field], spatial = True)
+            deps = self._determine_fields(deps)
             for io_chunk in self.chunks([], "io", cache = False):
-                for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
-                    ind += self._current_chunk.objs[0].select(
-                            self.selector, self[field], rv, ind)
+                for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0,
+                                                    preload_fields = deps)):
+                    o = self._current_chunk.objs[0]
+                    with o._activate_cache():
+                        ind += o.select(self.selector, self[field], rv, ind)
         else:
             chunks = self.hierarchy._chunk(self, "spatial", ngz = ngz)
             for i, chunk in enumerate(chunks):
@@ -454,12 +463,18 @@
                 # NOTE: we yield before releasing the context
                 yield self
 
-    def _identify_dependencies(self, fields_to_get):
+    def _identify_dependencies(self, fields_to_get, spatial = False):
         inspected = 0
         fields_to_get = fields_to_get[:]
         for field in itertools.cycle(fields_to_get):
             if inspected >= len(fields_to_get): break
             inspected += 1
+            fi = self.pf._get_field_info(*field)
+            if not spatial and any(
+                    isinstance(v, ValidateSpatial) for v in fi.validators):
+                # We don't want to pre-fetch anything that's spatial, as that
+                # will be done later.
+                continue
             fd = self.pf.field_dependencies.get(field, None) or \
                  self.pf.field_dependencies.get(field[1], None)
             if fd is None: continue
@@ -570,6 +585,25 @@
         self._current_chunk = old_chunk
         self._locked = old_locked
 
+    @contextmanager
+    def _activate_cache(self):
+        cache = self._field_cache or {}
+        old_fields = {}
+        for field in (f for f in cache if f in self.field_data):
+            old_fields[field] = self.field_data[field]
+        self.field_data.update(cache)
+        yield
+        for field in cache:
+            self.field_data.pop(field)
+            if field in old_fields:
+                self.field_data[field] = old_fields.pop(field)
+        self._field_cache = None
+
+    def _initialize_cache(self, cache):
+        # Wipe out what came before
+        self._field_cache = {}
+        self._field_cache.update(cache)
+
     @property
     def icoords(self):
         if self._current_chunk is None:

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -201,6 +201,7 @@
 
     _strip_path = False
     grid = EnzoGrid
+    _preload_implemented = True
 
     def __init__(self, pf, data_style):
         

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -196,6 +196,8 @@
             elif g.filename is None:
                 continue
             grids_by_file[g.filename].append(g.id)
+        #if len(chunk.objs) == 1 and len(grids_by_file) > 0:
+        #    raise RuntimeError
         sets = [fname for ftype, fname in fields]
         for filename in grids_by_file:
             nodes = grids_by_file[filename]

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -615,11 +615,12 @@
         # potentially looking at estimated memory usage.  Note that this never
         # initializes the iterator; it assumes the iterator is already created,
         # and it calls next() on it.
-        self.base_iter = base_iter
+        self.base_iter = base_iter.__iter__()
         self.queue = []
         self.max_length = max_length
         self.preload_fields = preload_fields
         self.geometry_handler = geometry_handler
+        self.cache = {}
 
     def __iter__(self):
         return self
@@ -631,7 +632,11 @@
                     self.queue.append(self.base_iter.next())
                 except StopIteration:
                     break
-        if len(self.queue) == 0:
             # If it's still zero ...
-            raise StopIteration
-        return self.queue.pop(0)
+            if len(self.queue) == 0: raise StopIteration
+            chunk = YTDataChunk(None, "cache", self.queue, cache=False)
+            self.cache = self.geometry_handler.io._read_chunk_data(
+                chunk, self.preload_fields)
+        g = self.queue.pop(0)
+        g._initialize_cache(self.cache.pop(g.id, {}))
+        return g

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -35,7 +35,8 @@
 from yt.arraytypes import blankRecordArray
 from yt.config import ytcfg
 from yt.data_objects.field_info_container import NullFunc
-from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
+from yt.geometry.geometry_handler import \
+    GeometryHandler, YTDataChunk, ChunkDataCache
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.physical_constants import sec_per_year
 from yt.utilities.io_handler import io_registry
@@ -47,6 +48,7 @@
 
 class GridGeometryHandler(GeometryHandler):
     float_type = 'float64'
+    _preload_implemented = False
 
     def _setup_geometry(self):
         mylog.debug("Counting grids.")
@@ -256,7 +258,7 @@
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
         
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         if sort in ("+level", "level"):
             giter = sorted(gobjs, key = g.Level)
@@ -264,7 +266,9 @@
             giter = sorted(gobjs, key = -g.Level)
         elif sort is None:
             giter = gobjs
-        for i,og in enumerate(giter):
+        if self._preload_implemented and preload_fields is not None and ngz == 0:
+            giter = ChunkDataCache(list(giter), preload_fields, self)
+        for i, og in enumerate(giter):
             if ngz > 0:
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
@@ -284,3 +288,4 @@
             gs = gfiles[fn]
             yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
                               cache = cache)
+

diff -r 6cdf51a81849b0a3179f0200df58182c2e72c6cf -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -118,6 +118,9 @@
     def _read_exception(self):
         return None
 
+    def _read_chunk_data(self, chunk, fields):
+        return None
+
 class IOHandlerExtracted(BaseIOHandler):
 
     _data_style = 'extracted'
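
The _activate_cache context manager above is a save/overlay/restore
pattern: preloaded fields shadow field_data for the duration of the block,
and whatever they shadowed is put back afterwards.  A self-contained
sketch of the same idea (not the yt implementation):

    from contextlib import contextmanager

    @contextmanager
    def activate_cache(field_data, cache):
        # Save fields the cache would shadow, overlay the cache, and
        # restore the originals on exit.
        old = dict((f, field_data[f]) for f in cache if f in field_data)
        field_data.update(cache)
        try:
            yield
        finally:
            for f in cache:
                field_data.pop(f, None)
            field_data.update(old)

    fd = {"Density": "disk"}
    with activate_cache(fd, {"Density": "cached", "x-velocity": "cached"}):
        print(fd["Density"])  # "cached" while active
    print(fd)                 # {"Density": "disk"} afterwards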


https://bitbucket.org/yt_analysis/yt/commits/3e72bfc43c5e/
Changeset:   3e72bfc43c5e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-26 20:39:47
Summary:     Merging from yt2x branch
Affected #:  24 files

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -832,8 +832,8 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o &>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
@@ -844,7 +844,7 @@
 	    echo "Building LAPACK"
 	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
@@ -943,10 +943,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,8 +1062,9 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
+        *dm_only* is True (default), only run it on the dark matter particles, 
+        otherwise on all particles.  Returns an iterable collection of 
+        *HopGroup* items.
         """
         self._data_source = data_source
         self.dm_only = dm_only

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_cutting_plane():
     for nprocs in [8, 1]:
@@ -23,7 +27,9 @@
         yield assert_equal, cut["Ones"].min(), 1.0
         yield assert_equal, cut["Ones"].max(), 1.0
         pw = cut.to_pw()
-        fns += pw.save()
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
             yield assert_equal, frb[cut_field].info['data_source'], \
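
The mkstemp pattern used here creates the output file securely and returns
an open descriptor, which must be closed before pw.save() reopens the path.
The same pattern in isolation (save_image is a hypothetical stand-in for
pw.save):

    import os
    import tempfile

    def save_to_unique_png(save_image):
        # save_image: any callable that writes an image to a given path
        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
        os.close(tmpfd)          # keep only the unique pathname
        try:
            save_image(tmpname)
        finally:
            os.remove(tmpname)   # mirrors the hardened teardown_func above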

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -1,130 +1,94 @@
-from yt.testing import *
-from yt.data_objects.image_array import ImageArray
 import numpy as np
 import os
 import tempfile
 import shutil
+import unittest
+from yt.data_objects.image_array import ImageArray
+from yt.testing import \
+    assert_equal
+
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
+    ytcfg["yt", "__withintesting"] = "True"
+    np.seterr(all='ignore')
+
+
+def dummy_image(kstep, nlayers):
+    im = np.zeros([64, 128, nlayers])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i, :, k] = np.linspace(0.0, kstep * k, im.shape[1])
+    return im
+
 
 def test_rgba_rescale():
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-    im_arr = ImageArray(im)
+    im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
-    yield assert_equal, im_arr[:,:,3].max(), 3*10.
-    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
-    yield assert_equal, new_im[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
+    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
+    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, new_im[:, :, 3].max(), 1.0
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:, :, 3].max(), 1.0
 
-def test_image_array_hdf5():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+class TestImageArray(unittest.TestCase):
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+    tmpdir = None
+    curdir = None
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+    def test_image_array_hdf5(self):
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+        im_arr = ImageArray(dummy_image(0.3, 3), info=myinfo)
+        im_arr.save('test_3d_ImageArray')
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
+        im = np.zeros([64, 128])
+        for i in xrange(im.shape[0]):
+            im[i, :] = np.linspace(0., 0.3 * 2, im.shape[1])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-def test_image_array_rgb_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+        im_arr = ImageArray(im, info=myinfo)
+        im_arr.save('test_2d_ImageArray')
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    def test_image_array_rgb_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 3))
+        im_arr.write_png('standard.png')
 
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
+    def test_image_array_rgba_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.write_png('standard.png')
+        im_arr.write_png('non-scaled.png', rescale=False)
+        im_arr.write_png('black_bg.png', background='black')
+        im_arr.write_png('white_bg.png', background='white')
+        im_arr.write_png('green_bg.png', background=[0., 1., 0., 1.])
+        im_arr.write_png('transparent_bg.png', background=None)
 
-def test_image_array_rgba_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    def test_image_array_background(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.rescale()
+        new_im = im_arr.add_background_color([1., 0., 0., 1.], inline=False)
+        new_im.write_png('red_bg.png')
+        im_arr.add_background_color('black')
+        im_arr.write_png('black_bg2.png')
 
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
-    im_arr.write_png('non-scaled.png', rescale=False)
-    im_arr.write_png('black_bg.png', background='black')
-    im_arr.write_png('white_bg.png', background='white')
-    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
-    im_arr.write_png('transparent_bg.png', background=None)
-
-
-def test_image_array_background():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
-
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.rescale()
-    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
-    new_im.write_png('red_bg.png')
-    im_arr.add_background_color('black')
-    im_arr.write_png('black_bg2.png')
- 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def tearDown(self):
+        os.chdir(self.curdir)
+        # clean up
+        shutil.rmtree(self.tmpdir)
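
Rewriting the module-level tests as a TestCase moves the working-directory
dance into setUp/tearDown, so the scratch directory is removed even when an
assertion fails.  The pattern in isolation:

    import os
    import shutil
    import tempfile
    import unittest

    class TmpdirTestCase(unittest.TestCase):
        def setUp(self):
            self.curdir = os.getcwd()
            self.tmpdir = tempfile.mkdtemp()
            os.chdir(self.tmpdir)   # run the test body in a scratch dir

        def tearDown(self):
            # executed even if the test body raised
            os.chdir(self.curdir)
            shutil.rmtree(self.tmpdir)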

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_projection():
     for nprocs in [8, 1]:
@@ -37,7 +41,9 @@
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
                 pw = proj.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -27,6 +27,7 @@
 """
 import os
 import numpy as np
+import tempfile
 from nose.tools import raises
 from yt.testing import \
     fake_random_pf, assert_equal, assert_array_equal
@@ -42,7 +43,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 
 def test_slice():
@@ -72,7 +76,9 @@
                 yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
                 yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/extern/__init__.py
--- /dev/null
+++ b/yt/extern/__init__.py
@@ -0,0 +1,4 @@
+"""
+This package contains Python packages that are bundled with yt
+but are developed by third-party upstream projects.
+"""

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/extern/parameterized.py
--- /dev/null
+++ b/yt/extern/parameterized.py
@@ -0,0 +1,226 @@
+import re
+import inspect
+from functools import wraps
+from collections import namedtuple
+
+from nose.tools import nottest
+from unittest import TestCase
+
+from . import six
+
+if six.PY3:
+    def new_instancemethod(f, *args):
+        return f
+else:
+    import new
+    new_instancemethod = new.instancemethod
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+    """ Represents a single parameter to a test case.
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+        """
+
+    def __new__(cls, *args , **kwargs):
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3))
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{"foo": 42})
+            """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(args=(42, ), kwargs={})
+                >>> param.from_decorator("foo")
+                param(args=("foo", ), kwargs={})
+            """
+        if isinstance(args, param):
+            return args
+        if isinstance(args, six.string_types):
+            args = (args, )
+        return cls(*args)
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+class parameterized(object):
+    """ Parameterize a test case::
+
+            class TestInt(object):
+                @parameterized([
+                    ("A", 10),
+                    ("F", 15),
+                    param("10", 42, base=42)
+                ])
+                def test_int(self, input, expected, base=16):
+                    actual = int(input, base=base)
+                    assert_equal(actual, expected)
+
+            @parameterized([
+                (2, 3, 5),
+                (3, 5, 8),
+            ])
+            def test_add(a, b, expected):
+                assert_equal(a + b, expected)
+        """
+
+    def __init__(self, input):
+        self.get_input = self.input_as_callable(input)
+
+    def __call__(self, test_func):
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def parameterized_helper_method(test_self=None):
+            f = test_func
+            if test_self is not None:
+                # If we are a test method (which we suppose to be true if we
+                # are being passed a "self" argument), we first need to create
+                # an instance method, attach it to the instance of the test
+                # class, then pull it back off to turn it into a bound method.
+                # If we don't do this, Nose gets cranky.
+                f = self.make_bound_method(test_self, test_func)
+            # Note: because nose is so very picky, the more obvious
+            # ``return self.yield_nose_tuples(f)`` won't work here.
+            for nose_tuple in self.yield_nose_tuples(f):
+                yield nose_tuple
+
+        test_func.__name__ = "_helper_for_%s" %(test_func.__name__, )
+        parameterized_helper_method.parameterized_input = self.get_input
+        parameterized_helper_method.parameterized_func = test_func
+        return parameterized_helper_method
+
+    def yield_nose_tuples(self, func):
+        for args in self.get_input():
+            p = param.from_decorator(args)
+            # ... then yield that as a tuple. If those steps aren't
+            # followed precisely, Nose gets upset and doesn't run the test
+            # or doesn't run setup methods.
+            yield self.param_as_nose_tuple(p, func)
+
+    def param_as_nose_tuple(self, p, func):
+        nose_func = func
+        nose_args = p.args
+        if p.kwargs:
+            nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
+            nose_args = (p.args, p.kwargs)
+        return (nose_func, ) + nose_args
+
+    def make_bound_method(self, instance, func):
+        cls = type(instance)
+        im_f = new_instancemethod(func, None, cls)
+        setattr(cls, func.__name__, im_f)
+        return getattr(instance, func.__name__)
+
+    def assert_not_in_testcase_subclass(self):
+        parent_classes = self._terrible_magic_get_defining_classes()
+        if any(issubclass(cls, TestCase) for cls in parent_classes):
+            raise Exception("Warning: '@parameterized' tests won't work "
+                            "inside subclasses of 'TestCase' - use "
+                            "'@parameterized.expand' instead")
+
+    def _terrible_magic_get_defining_classes(self):
+        """ Returns the set of parent classes of the class currently being defined.
+            Will likely only work if called from the ``parameterized`` decorator.
+            This function is entirely @brandon_rhodes's fault, as he suggested
+            the implementation: http://stackoverflow.com/a/8793684/71522
+            """
+        stack = inspect.stack()
+        if len(stack) <= 4:
+            return []
+        frame = stack[4]
+        code_context = frame[4] and frame[4][0].strip()
+        if not (code_context and code_context.startswith("class ")):
+            return []
+        _, parents = code_context.split("(", 1)
+        parents, _ = parents.rsplit(")", 1)
+        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+    @classmethod
+    def input_as_callable(cls, input):
+        if callable(input):
+            return lambda: cls.check_input_values(input())
+        input_values = cls.check_input_values(input)
+        return lambda: input_values
+
+    @classmethod
+    def check_input_values(cls, input_values):
+        if not hasattr(input_values, "__iter__"):
+            raise ValueError("expected iterable input; got %r" %(input_values, ))
+        return input_values
+
+    @classmethod
+    def expand(cls, input):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of 'TestCase', where Nose test generators don't work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_foo_0': <function ...> ...
+            >>>
+            """
+
+        def parameterized_expand_wrapper(f):
+            stack = inspect.stack()
+            frame = stack[1]
+            frame_locals = frame[0].f_locals
+
+            base_name = f.__name__
+            get_input = cls.input_as_callable(input)
+            for num, args in enumerate(get_input()):
+                p = param.from_decorator(args)
+                name_suffix = "_%s" %(num, )
+                if len(p.args) > 0 and isinstance(p.args[0], six.string_types):
+                    name_suffix += "_" + cls.to_safe_name(p.args[0])
+                name = base_name + name_suffix
+                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+            return nottest(f)
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
+        return str(re.sub("[^a-zA-Z0-9_]", "", s))
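
Following the docstrings above, a minimal usage sketch of the vendored
decorator inside a TestCase (the test values here are made up for
illustration):

    import unittest
    from yt.extern.parameterized import parameterized, param
    from yt.testing import assert_equal

    class TestIntParsing(unittest.TestCase):
        @parameterized.expand([
            ("decimal", "10", 10),
            ("hex", "0xa", 10),
            param("binary", "0b1010", 10),
        ])
        def test_parse(self, name, text, expected):
            # int(x, 0) infers the base from the literal's prefix
            assert_equal(int(text, 0), expected)

Because plain @parameterized raises inside TestCase subclasses (see
assert_not_in_testcase_subclass above), the expand form injects one named
test method per parameter set instead.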

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/extern/six.py
--- /dev/null
+++ b/yt/extern/six.py
@@ -0,0 +1,404 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin at python.org>"
+__version__ = "1.3.0"
+
+
+# True if we are running on Python 3.
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+            del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)
+        # This is a bit ugly, but it avoids running this again.
+        delattr(tp, self.name)
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+
+class _MovedItems(types.ModuleType):
+    """Lazy loading of moved objects"""
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+del attr
+
+moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+
+    _iterkeys = "keys"
+    _itervalues = "values"
+    _iteritems = "items"
+    _iterlists = "lists"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+    _iterkeys = "iterkeys"
+    _itervalues = "itervalues"
+    _iteritems = "iteritems"
+    _iterlists = "iterlists"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+def iterkeys(d, **kw):
+    """Return an iterator over the keys of a dictionary."""
+    return iter(getattr(d, _iterkeys)(**kw))
+
+def itervalues(d, **kw):
+    """Return an iterator over the values of a dictionary."""
+    return iter(getattr(d, _itervalues)(**kw))
+
+def iteritems(d, **kw):
+    """Return an iterator over the (key, value) pairs of a dictionary."""
+    return iter(getattr(d, _iteritems)(**kw))
+
+def iterlists(d, **kw):
+    """Return an iterator over the (key, [values]) pairs of a dictionary."""
+    return iter(getattr(d, _iterlists)(**kw))
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+    def u(s):
+        return s
+    if sys.version_info[1] <= 1:
+        def int2byte(i):
+            return bytes((i,))
+    else:
+        # This is about 2x faster than the implementation above on 3.2+
+        int2byte = operator.methodcaller("to_bytes", 1, "big")
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+else:
+    def b(s):
+        return s
+    def u(s):
+        return unicode(s, "unicode_escape")
+    int2byte = chr
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+
+    print_ = getattr(builtins, "print")
+    del builtins
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+    def print_(*args, **kwargs):
+        """The new-style print function."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+
+def with_metaclass(meta, base=object):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", (base,), {})
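
Vendoring six lets yt modules stay 2/3-neutral without adding a hard
dependency.  A few representative idioms, assuming the module is imported
from its new vendored location:

    from yt.extern import six

    def describe(value):
        # covers str and unicode on Python 2, str on Python 3
        if isinstance(value, six.string_types):
            return "string: " + value
        return "other: %r" % (value, )

    settings = {"cmap": "algae", "take_log": True}
    for key, val in six.iteritems(settings):
        six.print_(key, "=", val)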

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -68,9 +68,9 @@
                 data = data[2::3].reshape(grid_dims,order='F').copy()
         f.close()
         if grid.pf.field_ordering == 1:
-            return data.T
+            return data.T.astype("float64")
         else:
-            return data
+            return data.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -224,7 +224,10 @@
             else:
                 self.units[field_name] = 1.0
             if 'field_units' in current_field.attrs:
-                current_fields_unit = just_one(current_field.attrs['field_units'])
+                if type(current_field.attrs['field_units']) == str:
+                    current_fields_unit = current_field.attrs['field_units']
+                else:
+                    current_fields_unit = just_one(current_field.attrs['field_units'])
             else:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
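
The guard above handles h5py handing back 'field_units' either as a plain
string or as an array of strings; only the latter needs just_one to collapse
it.  The same normalization in isolation (the helper name is illustrative):

    def normalize_units(attr_value):
        # Attributes can arrive as a scalar string or as a sequence of
        # strings; reduce either form to a single unit string.
        if isinstance(attr_value, str):
            return attr_value
        return attr_value[0] if len(attr_value) else ""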

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -84,8 +84,11 @@
           units=r"\rm{cm}/\rm{s}")
 
 for f,v in log_translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=True)
+    add_field(f, TranslationFunc(v), take_log=True,
+              units=KnownGDFFields[v].get_units(),
+              projected_units=KnownGDFFields[v].get_projected_units())
 
 for f,v in translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=False)
-
+    add_field(f, TranslationFunc(v), take_log=False,
+              units=KnownGDFFields[v].get_units(),
+              projected_units=KnownGDFFields[v].get_projected_units())

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -144,7 +144,8 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -9,6 +9,7 @@
     config = Configuration('yt', parent_package, top_path)
     config.add_subpackage('analysis_modules')
     config.add_subpackage('data_objects')
+    config.add_subpackage('extern')
     config.add_subpackage('frontends')
     config.add_subpackage('geometry')
     config.add_subpackage('gui')

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -33,6 +33,7 @@
 import cPickle
 import shelve
 import zlib
+import tempfile
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
@@ -604,9 +605,11 @@
                                 self.plot_axis, self.plot_kwargs)
         attr = getattr(plot, self.attr_name)
         attr(*self.attr_args[0], **self.attr_args[1])
-        fn = plot.save()[0]
-        image = mpimg.imread(fn)
-        os.remove(fn)
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        plot.save(name=tmpname)
+        image = mpimg.imread(tmpname)
+        os.remove(tmpname)
         return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/utilities/grid_data_format/tests/test_writer.py
--- a/yt/utilities/grid_data_format/tests/test_writer.py
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -50,17 +50,18 @@
     tmpdir = tempfile.mkdtemp()
     tmpfile = os.path.join(tmpdir, 'test_gdf.h5')
 
-    test_pf = fake_random_pf(64)
-    write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
-                 data_comment=TEST_COMMENT)
-    del test_pf
+    try:
+        test_pf = fake_random_pf(64)
+        write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
+                     data_comment=TEST_COMMENT)
+        del test_pf
+        assert isinstance(load(tmpfile), GDFStaticOutput)
 
-    assert isinstance(load(tmpfile), GDFStaticOutput)
+        h5f = h5.File(tmpfile, 'r')
+        gdf = h5f['gridded_data_format'].attrs
+        assert_equal(gdf['data_author'], TEST_AUTHOR)
+        assert_equal(gdf['data_comment'], TEST_COMMENT)
+        h5f.close()
 
-    h5f = h5.File(tmpfile, 'r')
-    gdf = h5f['gridded_data_format'].attrs
-    assert_equal(gdf['data_author'], TEST_AUTHOR)
-    assert_equal(gdf['data_comment'], TEST_COMMENT)
-    h5f.close()
-
-    shutil.rmtree(tmpdir)
+    finally:
+        shutil.rmtree(tmpdir)

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -20,36 +20,37 @@
     # Create a temporary directory
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
-    os.chdir(tmpdir)
+    exit_code = 1
 
-    # Get compiler invocation
-    compiler = os.getenv('CC', 'cc')
+    try:
+        os.chdir(tmpdir)
 
-    # Attempt to compile a test script.
-    # See http://openmp.org/wp/openmp-compilers/
-    filename = r'test.c'
-    file = open(filename,'w', 0)
-    file.write(
-        "#include <omp.h>\n"
-        "#include <stdio.h>\n"
-        "int main() {\n"
-        "#pragma omp parallel\n"
-        "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
-        "}"
-        )
-    with open(os.devnull, 'w') as fnull:
-        exit_code = subprocess.call([compiler, '-fopenmp', filename],
-                                    stdout=fnull, stderr=fnull)
+        # Get compiler invocation
+        compiler = os.getenv('CC', 'cc')
 
-    # Clean up
-    file.close()
-    os.chdir(curdir)
-    shutil.rmtree(tmpdir)
+        # Attempt to compile a test script.
+        # See http://openmp.org/wp/openmp-compilers/
+        filename = r'test.c'
+        file = open(filename,'w', 0)
+        file.write(
+            "#include <omp.h>\n"
+            "#include <stdio.h>\n"
+            "int main() {\n"
+            "#pragma omp parallel\n"
+            "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
+            "}"
+            )
+        with open(os.devnull, 'w') as fnull:
+            exit_code = subprocess.call([compiler, '-fopenmp', filename],
+                                        stdout=fnull, stderr=fnull)
 
-    if exit_code == 0:
-        return True
-    else:
-        return False
+        # Clean up
+        file.close()
+    finally:
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+    return exit_code == 0
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7798,3 +7798,11 @@
 color_map_luts['Rainbow18'] = color_map_luts['idl38']
 color_map_luts['Rainbow + white'] = color_map_luts['idl39']
 color_map_luts['Rainbow + black'] = color_map_luts['idl40']
+
+# Create a reversed LUT for each of the LUTs defined above and append
+# a "_r" suffix (for "reversed", consistent with the MPL convention).
+# So, for example, the reversal of "Waves" is "Waves_r".
+temp = {}
+for k,v in color_map_luts.iteritems():
+    temp[k+"_r"] = (v[0][::-1], v[1][::-1], v[2][::-1], v[3][::-1])
+color_map_luts.update(temp)
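
Each LUT is a 4-tuple of channel arrays, so reversing a colormap is just
reversing each channel.  The same construction on a toy table:

    import numpy as np

    # name -> (r, g, b, a) channel arrays, as in color_map_luts
    luts = {"toy": tuple(np.linspace(0.0, 1.0, 256) for _ in range(4))}
    reversed_luts = {}
    for name, (r, g, b, a) in luts.items():
        reversed_luts[name + "_r"] = (r[::-1], g[::-1], b[::-1], a[::-1])
    luts.update(reversed_luts)   # "toy_r" now maps to the reversed channels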

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -29,7 +29,8 @@
 """
 
 from color_maps import \
-    add_cmap
+    add_cmap, \
+    show_colormaps
 
 from plot_collection import \
     PlotCollection, \

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -145,3 +145,56 @@
     b = cmap._lut[:-3, 2]
     a = np.ones(b.shape)
     return [r, g, b, a]
+
+def show_colormaps(subset = "all", filename=None):
+    """
+    Displays the colormaps available to yt.  Note that most functions can use
+    both the matplotlib and the native yt colormaps; however, some special
+    functions within image_writer.py (e.g. write_image(), write_fits(),
+    write_bitmap(), etc.) cannot access the matplotlib colormaps.
+
+    In addition to the colormaps listed, one can access the reverse of each
+    colormap by appending a "_r" to its name.
+
+    Parameters
+    ----------
+
+    subset : string, optional
+
+        valid values : "all", "yt_native"
+        default : "all"
+
+        As mentioned above, a few functions can only access the yt_native
+        colormaps.  To display only those, set this to "yt_native".
+
+    filename : string, optional
+
+        default : None
+
+        If filename is set, the colormaps are saved to that output file.
+        If it is not set, the result is shown interactively.
+    """
+    import pylab as pl
+
+    a=np.outer(np.arange(0,1,0.01), np.ones(10))
+    if (subset == "all"):
+        maps = [ m for m in pl.cm.datad if (not m.startswith("idl")) & (not m.endswith("_r"))]
+    if (subset == "yt_native"):
+        maps = [ m for m in _cm.color_map_luts if (not m.startswith("idl")) & (not m.endswith("_r"))]
+    maps = list(set(maps))
+    maps.sort()
+    # scale the image size by the number of cmaps
+    pl.figure(figsize=(2.*len(maps)/10.,6))
+    pl.subplots_adjust(top=0.7,bottom=0.05,left=0.01,right=0.99)
+    l = len(maps)+1
+    for i,m in enumerate(maps):
+        pl.subplot(1,l,i+1)
+        pl.axis("off")
+        pl.imshow(a, aspect='auto',cmap=pl.get_cmap(m),origin="lower")      
+        pl.title(m,rotation=90, fontsize=10, verticalalignment='bottom')
+    if filename is not None:
+        pl.savefig(filename, dpi=100, facecolor='gray') 
+    else:  
+        pl.show()
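
With show_colormaps exported through the yt/mods.py and yt/visualization/api.py
hunks above, typical usage following its docstring would be:

    from yt.mods import show_colormaps

    # interactive display of all non-idl, non-reversed colormaps
    show_colormaps()
    # only yt's native maps, written to a file instead of shown
    show_colormaps(subset="yt_native", filename="yt_cmaps.png")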

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -45,6 +45,7 @@
     sec_per_Gyr, sec_per_Myr, \
     sec_per_kyr, sec_per_year, \
     sec_per_day, sec_per_hr
+from yt.visualization.image_writer import apply_colormap
 
 import _MPL
 
@@ -176,7 +177,8 @@
 
 class QuiverCallback(PlotCallback):
     """
-    annotate_quiver(field_x, field_y, factor, scale=None, scale_units=None, normalize=False):
+    annotate_quiver(field_x, field_y, factor=16, scale=None, scale_units=None, 
+                    normalize=False, bv_x=0, bv_y=0):
 
     Adds a 'quiver' plot to any plot, using the *field_x* and *field_y*
     from the associated data, skipping every *factor* datapoints
@@ -230,8 +232,8 @@
 
 class ContourCallback(PlotCallback):
     """
-    annotate_contour(self, field, ncont=5, factor=4, take_log=None, clim=None,
-                     plot_args = None):
+    annotate_contour(field, ncont=5, factor=4, take_log=None, clim=None,
+                     plot_args=None, label=False, label_args=None):
 
     Add contours in *field* to the plot.  *ncont* governs the number of
     contours generated, *factor* governs the number of points used in the
@@ -338,18 +340,21 @@
 
 class GridBoundaryCallback(PlotCallback):
     """
-    annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
+    annotate_grids(alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
+                 min_level=None, max_level=None, cmap='B-W LINEAR_r'):
 
-    Adds grid boundaries to a plot, optionally with *alpha*-blending.
-    Cuttoff for display is at *min_pix* wide.
-    *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
-    Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.  If *min_level* 
-    is specified, only draw grids at or above min_level.  If *max_level* is specified, only 
-    draw grids at or below max_level.
+    Draws grid boundaries on an existing PlotWindow plot, optionally with
+    alpha-blending.  By default, different grid levels are colored from white
+    to black; the cmap keyword selects any other colormap, and cmap=None
+    draws all levels in black.  The cutoff for display is min_pix wide.
+    draw_ids puts the grid id in the corner of each grid (not so useful in
+    projections).  min_level and max_level restrict the range of grid levels
+    drawn.
     """
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
-                 min_level=None, max_level=None):
+    def __init__(self, alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
+                 min_level=None, max_level=None, cmap='B-W LINEAR_r'):
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
@@ -358,6 +363,7 @@
         self.periodic = periodic
         self.min_level = min_level
         self.max_level = max_level
+        self.cmap = cmap
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -375,15 +381,16 @@
             pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
             pxs, pys = np.mgrid[0:0:1j,0:0:1j]
-        GLE = plot.data.pf.h.grid_left_edge
-        GRE = plot.data.pf.h.grid_right_edge
-        grid_levels = plot.data.pf.h.grid_levels[:,0]
+        GLE = plot.data.grid_left_edge
+        GRE = plot.data.grid_right_edge
+        levels = plot.data.grid_levels[:,0]
         min_level = self.min_level
-        max_level = self.min_level
+        max_level = self.max_level
+        if max_level is None:
+            max_level = plot.data.pf.h.max_level
         if min_level is None:
             min_level = 0
-        if max_level is None:
-            max_level = plot.data.pf.h.max_level
+
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
@@ -393,19 +400,28 @@
             right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
                        ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix ) & \
-                       ( grid_levels >= min_level) & \
-                       ( grid_levels <= max_level)
+                       ( levels >= min_level) & \
+                       ( levels <= max_level)
+
+            if self.cmap is not None: 
+                edgecolors = apply_colormap(levels[(levels <= max_level) & (levels >= min_level)]*1.0,
+                                  color_bounds=[0,plot.data.pf.h.max_level],
+                                  cmap_name=self.cmap)[0,:,:]*1.0/255.
+                edgecolors[:,3] = self.alpha
+            else:
+                edgecolors = (0.0,0.0,0.0,self.alpha)
+
             if visible.nonzero()[0].size == 0: continue
             verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            edgecolors = (0.0,0.0,0.0,self.alpha)
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
+
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -148,15 +148,19 @@
         else:
             norm = mpl.matplotlib.colors.Normalize()
         if use_mesh:
-            pcm = axes.pcolormesh(x_bins, y_bins, self.image, norm=norm,
+            mappable = axes.pcolormesh(
+                                  x_bins, y_bins, self.image, norm=norm,
                                   shading='flat', cmap = self.cbar.cmap,
                                   rasterized=True)
             if self.x_spec.scale == 'log': axes.set_xscale("log")
             if self.y_spec.scale == 'log': axes.set_yscale("log")
         else:
-            axes.imshow(self.image, origin='lower', interpolation='nearest',
+            mappable = axes.imshow(
+                        self.image, origin='lower', interpolation='nearest',
                         cmap = self.cbar.cmap, extent = [xmi,xma,ymi,yma],
                         norm = norm)
+        cbar = figure.colorbar(mappable)
+        cbar.set_label(self.cbar.title)
         if self.x_spec.title is not None:
             axes.set_xlabel(self.x_spec.title)
         if self.y_spec.title is not None:

diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -22,9 +22,12 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+import itertools
 import os
 import tempfile
 import shutil
+import unittest
+from yt.extern.parameterized import parameterized, param
 from yt.testing import \
     fake_random_pf, assert_equal, assert_rel_equal
 from yt.utilities.answer_testing.framework import \
@@ -65,132 +68,163 @@
 
     return image_type == os.path.splitext(fname)[1]
 
-attr_args ={ "pan"             : [( ((0.1, 0.1),), {} )],
-             "pan_rel"         : [( ((0.1, 0.1),), {} )],
-             "set_axes_unit"   : [( ("kpc",), {} ),
-                                  ( ("Mpc",), {} ),
-                                  ( (("kpc", "kpc"),), {} ),
-                                  ( (("kpc", "Mpc"),), {} )],
-             "set_buff_size"   : [( (1600,), {} ),
-                                  ( ((600, 800),), {} )],
-             "set_center"      : [( ((0.4, 0.3),), {} )],
-             "set_cmap"        : [( ('Density', 'RdBu'), {} ),
-                                  ( ('Density', 'kamae'), {} )],
-             "set_font"        : [( ({'family':'sans-serif', 'style':'italic',
-                                      'weight':'bold', 'size':24},), {} )],
-             "set_log"         : [( ('Density', False), {} )],
-             "set_window_size" : [( (7.0,), {} )],
-             "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
-                           ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
-             "zoom" : [( (10,), {} )] }
 
-m7 = "DD0010/moving7_0010"
-wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
-@requires_pf(m7)
-@requires_pf(wt)
+TEST_FLNMS = [None, 'test.png', 'test.eps',
+              'test.ps', 'test.pdf']
+M7 = "DD0010/moving7_0010"
+WT = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+
+ATTR_ARGS = {"pan": [(((0.1, 0.1), ), {})],
+             "pan_rel": [(((0.1, 0.1), ), {})],
+             "set_axes_unit": [(("kpc", ), {}),
+                               (("Mpc", ), {}),
+                               ((("kpc", "kpc"),), {}),
+                               ((("kpc", "Mpc"),), {})],
+             "set_buff_size": [((1600, ), {}),
+                               (((600, 800), ), {})],
+             "set_center": [(((0.4, 0.3), ), {})],
+             "set_cmap": [(('Density', 'RdBu'), {}),
+                          (('Density', 'kamae'), {})],
+             "set_font": [(({'family': 'sans-serif', 'style': 'italic',
+                             'weight': 'bold', 'size': 24}, ), {})],
+             "set_log": [(('Density', False), {})],
+             "set_window_size": [((7.0, ), {})],
+             "set_zlim": [(('Density', 1e-25, 1e-23), {}),
+                          (('Density', 1e-25, None), {'dynamic_range': 4})],
+             "zoom": [((10, ), {})]}
+
+
+@requires_pf(M7)
 def test_attributes():
     """Test plot member functions that aren't callbacks"""
     plot_field = 'Density'
     decimals = 3
 
-    pf = data_dir_load(m7)
+    pf = data_dir_load(M7)
     for ax in 'xyz':
-        for attr_name in attr_args.keys():
-            for args in attr_args[attr_name]:
+        for attr_name in ATTR_ARGS.keys():
+            for args in ATTR_ARGS[attr_name]:
                 yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
                                               args, decimals)
-    pf = data_dir_load(wt)
+
+
+@requires_pf(WT)
+def test_attributes_wt():
+    plot_field = 'Density'
+    decimals = 3
+
+    pf = data_dir_load(WT)
     ax = 'z'
-    for attr_name in attr_args.keys():
-        for args in attr_args[attr_name]:
+    for attr_name in ATTR_ARGS.keys():
+        for args in ATTR_ARGS[attr_name]:
             yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
                                           args, decimals)
 
-def test_setwidth():
-    pf = fake_random_pf(64)
 
-    slc = SlicePlot(pf, 0, 'Density')
+class TestSetWidth(unittest.TestCase):
 
-    yield assert_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)]
+    pf = None
 
-    slc.set_width((0.5,0.8))
+    def setUp(self):
+        if self.pf is None:
+            self.pf = fake_random_pf(64)
+            self.slc = SlicePlot(self.pf, 0, 'Density')
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15
+    def _assert_15kpc(self):
+        assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                         [(-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+                          (-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+                          (15.0 / self.pf['kpc'], 15. / self.pf['kpc'])], 15)
 
-    slc.set_width(15,'kpc')
+    def _assert_15_10kpc(self):
+        assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                         [(-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+                          (-5.0 / self.pf['kpc'], 5.0 / self.pf['kpc']),
+                          (15.0 / self.pf['kpc'], 10. / self.pf['kpc'])], 15)
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (15/pf['kpc'], 15/pf['kpc'])], 15
+    def test_set_width_one(self):
+        assert_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                     [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)])
 
-    slc.set_width((15,'kpc'))
+    def test_set_width_nonequal(self):
+        self.slc.set_width((0.5, 0.8))
+        assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                         [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15)
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (15/pf['kpc'], 15/pf['kpc'])], 15
+    def test_twoargs_eq(self):
+        self.slc.set_width(15, 'kpc')
+        self._assert_15kpc()
 
-    slc.set_width(((15,'kpc'),(10,'kpc')))
+    def test_tuple_eq(self):
+        self.slc.set_width((15, 'kpc'))
+        self._assert_15kpc()
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-5/pf['kpc'], 5/pf['kpc']),
-         (15/pf['kpc'], 10/pf['kpc'])], 15
+    def test_tuple_of_tuples_neq(self):
+        self.slc.set_width(((15, 'kpc'), (10, 'kpc')))
+        self._assert_15_10kpc()
 
-    slc.set_width(((15,'kpc'),(10000,'pc')))
+    def test_tuple_of_tuples_neq2(self):
+        self.slc.set_width(((15, 'kpc'), (10000, 'pc')))
+        self._assert_15_10kpc()
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-5/pf['kpc'], 5/pf['kpc']),
-         (15/pf['kpc'], 10/pf['kpc'])], 15
+    def test_pair_of_tuples_neq(self):
+        self.slc.set_width((15, 'kpc'), (10000, 'pc'))
+        self._assert_15_10kpc()
 
-    slc.set_width((15,'kpc'),(10000,'pc'))
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-5/pf['kpc'], 5/pf['kpc']),
-         (15/pf['kpc'], 10/pf['kpc'])], 15
+class TestPlotWindowSave(unittest.TestCase):
 
-def test_save():
-    """Test plot window creation and saving to disk."""
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    @classmethod
+    def setUpClass(cls):
+        test_pf = fake_random_pf(64)
+        normal = [1, 1, 1]
+        ds_region = test_pf.h.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
+        projections = []
+        projections_ds = []
+        for dim in range(3):
+            projections.append(ProjectionPlot(test_pf, dim, 'Density'))
+            projections_ds.append(ProjectionPlot(test_pf, dim, 'Density',
+                                                 data_source=ds_region))
 
-    normal = [1, 1, 1]
+        cls.slices = [SlicePlot(test_pf, dim, 'Density') for dim in range(3)]
+        cls.projections = projections
+        cls.projections_ds = projections_ds
+        cls.offaxis_slice = OffAxisSlicePlot(test_pf, normal, 'Density')
+        cls.offaxis_proj = OffAxisProjectionPlot(test_pf, normal, 'Density')
 
-    test_pf = fake_random_pf(64)
-    test_flnms = [None, 'test.png', 'test.eps',
-                  'test.ps', 'test.pdf']
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    ds_region = test_pf.h.region([0.5]*3,[0.4]*3,[0.6]*3)
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
 
-    for dim in [0, 1, 2]:
-        obj = SlicePlot(test_pf, dim, 'Density')
-        for fname in test_flnms:
-            yield assert_equal, assert_fname(obj.save(fname)[0]), True
+    @parameterized.expand(
+        param.explicit(item)
+        for item in itertools.product(range(3), TEST_FLNMS))
+    def test_slice_plot(self, dim, fname):
+        assert assert_fname(self.slices[dim].save(fname)[0])
 
-    for dim in [0, 1, 2]:
-        obj = ProjectionPlot(test_pf, dim, 'Density')
-        for fname in test_flnms:
-            yield assert_equal, assert_fname(obj.save(fname)[0]), True
-        # Test ProjectionPlot's data_source keyword
-        obj = ProjectionPlot(test_pf, dim, 'Density',
-                             data_source=ds_region)
-        obj.save()
+    @parameterized.expand(
+        param.explicit(item)
+        for item in itertools.product(range(3), TEST_FLNMS))
+    def test_projection_plot(self, dim, fname):
+        assert assert_fname(self.projections[dim].save(fname)[0])
 
-    obj = OffAxisSlicePlot(test_pf, normal, 'Density')
-    for fname in test_flnms:
-        yield assert_equal, assert_fname(obj.save(fname)[0]), True
+    @parameterized.expand([(0, ), (1, ), (2, )])
+    def test_projection_plot_ds(self, dim):
+        self.projections_ds[dim].save()
 
-    obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
-    for fname in test_flnms:
-        yield assert_equal, assert_fname(obj.save(fname)[0]), True
+    @parameterized.expand(
+        param.explicit((fname, ))
+        for fname in TEST_FLNMS)
+    def test_offaxis_slice_plot(self, fname):
+        assert assert_fname(self.offaxis_slice.save(fname)[0])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+    @parameterized.expand(
+        param.explicit((fname, ))
+        for fname in TEST_FLNMS)
+    def test_offaxis_projection_plot(self, fname):
+        assert assert_fname(self.offaxis_proj.save(fname)[0])

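The TestSetWidth cases above pin down set_width's calling conventions: a number plus a unit string, a single (value, unit) tuple, and a pair of per-axis (value, unit) tuples all normalize to the same axis limits. A minimal sketch restating those expectations (fake_random_pf and assert_rel_equal as in the test; pf['kpc'] is the kpc-to-code-units conversion factor):

    from yt.mods import SlicePlot
    from yt.testing import fake_random_pf, assert_rel_equal

    pf = fake_random_pf(64)
    slc = SlicePlot(pf, 0, 'Density')

    # One width applies to both axes.
    slc.set_width((15, 'kpc'))
    assert_rel_equal(slc.width, (15.0 / pf['kpc'], 15.0 / pf['kpc']), 15)

    # Per-axis widths, with unit conversion: 10000 pc == 10 kpc.
    slc.set_width(((15, 'kpc'), (10000, 'pc')))
    assert_rel_equal(slc.width, (15.0 / pf['kpc'], 10.0 / pf['kpc']), 15)
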
diff -r 2c8e6279eb37d4966d28f951e3831e1aefea2baa -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -237,7 +237,7 @@
                    max_level=None):
         r"""Draws Grids on an existing volume rendering.
 
-        By mapping grid level to a color, drawes edges of grids on 
+        By mapping grid level to a color, draws edges of grids on 
         a volume rendering using the camera orientation.
 
         Parameters


https://bitbucket.org/yt_analysis/yt/commits/36d4de9d6782/
Changeset:   36d4de9d6782
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-27 22:27:52
Summary:     Minor optimizations for ARTIO.
Affected #:  2 files

diff -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 -r 36d4de9d67827150246ecb1f5a52e583457988f7 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -975,12 +975,16 @@
     cdef artio_fileset_handle *handle
     cdef np.uint64_t sfc_start
     cdef np.uint64_t sfc_end
+    cdef public object _last_mask
+    cdef public object _last_selector_id
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
                  domain_right_edge,
                  artio_fileset artio_handle,
                  sfc_start, sfc_end):
+        self._last_selector_id = None
+        self._last_mask = None
         self.artio_handle = artio_handle
         self.handle = artio_handle.handle
         cdef int i
@@ -1086,6 +1090,9 @@
         res = np.zeros(num_octs, dtype="int64")
         return res
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -1130,15 +1137,21 @@
             return dest
         return filled
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def mask(self, SelectorObject selector, np.int64_t num_octs = -1):
         cdef int i, status
         cdef double dpos[3]
         cdef np.float64_t pos[3]
+        cdef np.int64_t sfc
+        if self._last_selector_id == hash(selector):
+            return self._last_mask
         if num_octs == -1:
             # We need to count, but this process will only occur one time,
             # since num_octs will later be cached.
             num_octs = self.sfc_end - self.sfc_start + 1
-        assert(num_octs == (self.sfc_end - self.sfc_start + 1))
+        #assert(num_octs == (self.sfc_end - self.sfc_start + 1))
         cdef np.ndarray[np.uint8_t, ndim=1] mask
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
@@ -1165,7 +1178,9 @@
             mask[sfc - self.sfc_start] = 1
         artio_grid_clear_sfc_cache(self.handle)
         free(num_octs_per_level)
-        return mask.astype("bool")
+        self._last_mask = mask.astype("bool")
+        self._last_selector_id = hash(selector)
+        return self._last_mask
 
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,

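The mask caching added to the ARTIO root-mesh container keys on hash(selector), so repeated queries with the same selector skip the expensive SFC walk. A hypothetical pure-Python rendering of the same single-entry memoization (MaskCache and compute_mask are names invented for illustration):

    class MaskCache(object):
        def __init__(self, compute_mask):
            self._compute_mask = compute_mask  # the expensive walk
            self._last_selector_id = None
            self._last_mask = None

        def mask(self, selector):
            # Reuse the previous result when the same selector recurs.
            if self._last_selector_id == hash(selector):
                return self._last_mask
            self._last_mask = self._compute_mask(selector)
            self._last_selector_id = hash(selector)
            return self._last_mask

    cache = MaskCache(lambda sel: [True])
    m1 = cache.mask("sphere")   # computed
    m2 = cache.mask("sphere")   # served from the cache; m2 is m1

A one-entry cache is presumably enough here because chunking tends to apply the same selector many times in a row.
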
diff -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 -r 36d4de9d67827150246ecb1f5a52e583457988f7 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -375,7 +375,7 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, None)
+        yield YTDataChunk(dobj, "all", oobjs, None, cache = True)
 
     def _chunk_spatial(self, dobj, ngz):
         if ngz > 0:
@@ -387,7 +387,7 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            yield YTDataChunk(dobj, "spatial", [g], None)
+            yield YTDataChunk(dobj, "spatial", [g], None, cache = True)
 
     def _chunk_io(self, dobj, cache = True):
         # _current_chunk is made from identify_base_chunk


https://bitbucket.org/yt_analysis/yt/commits/7830ab86ae14/
Changeset:   7830ab86ae14
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-23 02:15:23
Summary:     Fixes for Oct dds and making a few things more clear.
Affected #:  4 files

diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -260,7 +260,7 @@
             # from the oct width, thus making it already the cell width.
             # But, for some cases where the oref != 1, this needs to be
             # changed.
-            oinfo.dds[i] = dds[i] / self.oref # Cell width
+            oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
             oinfo.ipos[i] = ipos[i]
         oinfo.level = level

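The dds fix changes the divisor from oref to 1 << (oref - 1): an oct at over-refinement oref holds 2**oref cells per dimension, and the incoming dds is already half the oct width (the left edge is computed as center minus dds), so only the remaining factor of 2**(oref - 1) is needed. A small worked check:

    # Assumes dds == half the oct width, as in the surrounding code.
    def cell_width(half_oct_width, oref):
        return half_oct_width / (1 << (oref - 1))

    assert cell_width(0.5, 1) == 0.5    # 2 cells across an oct of width 1.0
    assert cell_width(0.5, 2) == 0.25   # 4 cells across
    assert cell_width(0.5, 3) == 0.125  # 8 cells; the old /oref gave 0.5/3 here
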
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -72,11 +72,9 @@
     return (((i*2)+j)*2+k)
 
 cdef inline int oind(OctVisitorData *data):
-    return (((data.ind[0]*(1<<data.oref))
-             +data.ind[1])*(1<<data.oref)
-             +data.ind[2])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
 
 cdef inline int rind(OctVisitorData *data):
-    return (((data.ind[2]*(1<<data.oref))
-             +data.ind[1])*(1<<data.oref)
-             +data.ind[0])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])

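oind and rind flatten an (i, j, k) cell index within an oct into a linear offset in C order and Fortran order respectively; hoisting d = 1 << oref just names the per-dimension cell count once. The same arithmetic in plain Python:

    def oind(ind, oref):
        d = 1 << oref  # cells per dimension
        return (ind[0] * d + ind[1]) * d + ind[2]   # row-major

    def rind(ind, oref):
        d = 1 << oref
        return (ind[2] * d + ind[1]) * d + ind[0]   # column-major

    assert oind((1, 0, 0), 1) == 4  # i varies slowest in C order
    assert rind((1, 0, 0), 1) == 1  # i varies fastest in Fortran order
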
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -65,7 +65,8 @@
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
         cdef int dims[3]
-        dims[0] = dims[1] = dims[2] = 2
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
         cdef OctInfo oi
         cdef np.int64_t offset, moff
         cdef Oct *oct
@@ -97,7 +98,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Note that this has to be our local index, not our in-file index.
-            offset = dom_ind[oct.domain_ind - moff] * 8
+            offset = dom_ind[oct.domain_ind - moff] * nz
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,

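With variable over-refinement, each oct contributes (1 << oref)**3 cells rather than the previously hard-coded 8, so the per-oct offset into the deposit buffer must scale by that count. The offset arithmetic in isolation:

    def oct_offset(local_oct_index, oref):
        nz = (1 << oref) ** 3  # cells per oct; oref == 1 gives the old 8
        return local_oct_index * nz

    assert oct_offset(3, 1) == 24   # 8 cells per oct
    assert oct_offset(3, 2) == 192  # 64 cells per oct
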
diff -r e36b3ff3f5d59add3baad4cae87ea499ca57a3b8 -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -177,8 +177,6 @@
                 continue
             offset = dom_ind[oct.domain_ind - moff] * nz
             neighbors = octree.neighbors(&oi, &nneighbors)
-            for j in range(3):
-                dds[j] = oi.dds[j] / octree.oref
             # Now we have all our neighbors.  And, we should be set for what
             # else we need to do.
             if nneighbors > nsize:
@@ -193,7 +191,7 @@
                     break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
-            self.neighbor_process(dims, oi.left_edge, dds,
+            self.neighbor_process(dims, oi.left_edge, oi.dds,
                          ppos, field_pointers, nneighbors, nind, doffs,
                          pinds, pcounts, offset)
         if nind != NULL:


https://bitbucket.org/yt_analysis/yt/commits/68bb0d0b94e4/
Changeset:   68bb0d0b94e4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-28 17:16:07
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #84)

This enables find_max for RAMSES.
Affected #:  1 file

diff -r 3e72bfc43c5eaecdd5d496c2228ca6cc85da5434 -r 68bb0d0b94e4def4eadba5903e1184264851ca1c yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -215,6 +215,7 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
+        max_level = min_level
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -248,6 +249,8 @@
                     assert(pos.shape[0] == ng)
                     n = self.oct_handler.add(cpu + 1, level - min_level, pos)
                     assert(n == ng)
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
         self.oct_handler.finalize()
 
     def included(self, selector):
@@ -297,7 +300,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.max_level = pf.max_level
+        self.max_level = None
 
         self.float_type = np.float64
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
@@ -308,6 +311,7 @@
                         for i in range(self.parameter_file['ncpu'])]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
         self.num_grids = total_octs
 
     def _detect_fields(self):


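With max_level now derived from the octs actually added in each domain, operations that clamp to the deepest populated level, find_max among them per the pull request, get a correct bound. A hedged usage sketch (the output path is hypothetical; info_NNNNN.txt is the usual RAMSES naming):

    from yt.mods import load

    pf = load("output_00010/info_00010.txt")  # hypothetical dataset
    value, location = pf.h.find_max("Density")
    print pf.h.max_level  # deepest level that actually holds octs
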
https://bitbucket.org/yt_analysis/yt/commits/34ebb7b6fabd/
Changeset:   34ebb7b6fabd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-28 20:26:33
Summary:     Merging from tip of yt-3.0
Affected #:  26 files

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -832,8 +832,8 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o &>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
@@ -844,7 +844,7 @@
 	    echo "Building LAPACK"
 	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
@@ -943,10 +943,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,8 +1062,9 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
+        *dm_only* is True (default), only run it on the dark matter particles, 
+        otherwise on all particles.  Returns an iterable collection of 
+        *HopGroup* items.
         """
         self._data_source = data_source
         self.dm_only = dm_only

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_cutting_plane():
     for nprocs in [8, 1]:
@@ -23,7 +27,9 @@
         yield assert_equal, cut["Ones"].min(), 1.0
         yield assert_equal, cut["Ones"].max(), 1.0
         pw = cut.to_pw()
-        fns += pw.save()
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
             yield assert_equal, frb[cut_field].info['data_source'], \

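Several test modules get the same two fixes in this merge: plots are saved to a mkstemp-allocated name instead of whatever the current directory happens to be, and teardown tolerates files that were already removed. The shared pattern, extracted as a sketch (pw stands in for any PlotWindow-like object with a save(name=...) method):

    import os
    import tempfile

    def save_to_tempfile(pw):
        # Reserve a unique .png path, close our fd (the plotting code
        # reopens the path itself), and return the filenames written.
        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
        os.close(tmpfd)
        return pw.save(name=tmpname)

    def teardown_func(fns):
        for fn in fns:
            try:
                os.remove(fn)
            except OSError:
                pass  # already gone; nothing to clean up
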
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -1,130 +1,94 @@
-from yt.testing import *
-from yt.data_objects.image_array import ImageArray
 import numpy as np
 import os
 import tempfile
 import shutil
+import unittest
+from yt.data_objects.image_array import ImageArray
+from yt.testing import \
+    assert_equal
+
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
+    ytcfg["yt", "__withintesting"] = "True"
+    np.seterr(all='ignore')
+
+
+def dummy_image(kstep, nlayers):
+    im = np.zeros([64, 128, nlayers])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i, :, k] = np.linspace(0.0, kstep * k, im.shape[1])
+    return im
+
 
 def test_rgba_rescale():
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-    im_arr = ImageArray(im)
+    im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
-    yield assert_equal, im_arr[:,:,3].max(), 3*10.
-    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
-    yield assert_equal, new_im[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
+    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
+    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, new_im[:, :, 3].max(), 1.0
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:, :, 3].max(), 1.0
 
-def test_image_array_hdf5():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+class TestImageArray(unittest.TestCase):
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+    tmpdir = None
+    curdir = None
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+    def test_image_array_hdf5(self):
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+        im_arr = ImageArray(dummy_image(0.3, 3), info=myinfo)
+        im_arr.save('test_3d_ImageArray')
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
+        im = np.zeros([64, 128])
+        for i in xrange(im.shape[0]):
+            im[i, :] = np.linspace(0., 0.3 * 2, im.shape[1])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-def test_image_array_rgb_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+        im_arr = ImageArray(im, info=myinfo)
+        im_arr.save('test_2d_ImageArray')
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    def test_image_array_rgb_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 3))
+        im_arr.write_png('standard.png')
 
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
+    def test_image_array_rgba_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.write_png('standard.png')
+        im_arr.write_png('non-scaled.png', rescale=False)
+        im_arr.write_png('black_bg.png', background='black')
+        im_arr.write_png('white_bg.png', background='white')
+        im_arr.write_png('green_bg.png', background=[0., 1., 0., 1.])
+        im_arr.write_png('transparent_bg.png', background=None)
 
-def test_image_array_rgba_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    def test_image_array_background(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.rescale()
+        new_im = im_arr.add_background_color([1., 0., 0., 1.], inline=False)
+        new_im.write_png('red_bg.png')
+        im_arr.add_background_color('black')
+        im_arr.write_png('black_bg2.png')
 
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
-    im_arr.write_png('non-scaled.png', rescale=False)
-    im_arr.write_png('black_bg.png', background='black')
-    im_arr.write_png('white_bg.png', background='white')
-    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
-    im_arr.write_png('transparent_bg.png', background=None)
-
-
-def test_image_array_background():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
-
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.rescale()
-    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
-    new_im.write_png('red_bg.png')
-    im_arr.add_background_color('black')
-    im_arr.write_png('black_bg2.png')
- 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def tearDown(self):
+        os.chdir(self.curdir)
+        # clean up
+        shutil.rmtree(self.tmpdir)

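The dummy_image helper above replaces four copies of the same nested-loop fill; layer k ramps linearly from 0 to kstep * k along the second axis. The invariants the rescale test relies on, spelled out:

    import numpy as np

    im = np.zeros([64, 128, 4])
    for i in xrange(im.shape[0]):
        for k in xrange(im.shape[2]):
            im[i, :, k] = np.linspace(0.0, 10.0 * k, im.shape[1])

    assert im[:, :, 3].max() == 30.0               # alpha layer peaks at 3 * kstep
    assert im[:, :, :3].sum(axis=2).max() == 30.0  # RGB layers peak at 0 + 10 + 20
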
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_projection():
     for nprocs in [8, 1]:
@@ -37,7 +41,9 @@
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
                 pw = proj.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -27,6 +27,7 @@
 """
 import os
 import numpy as np
+import tempfile
 from nose.tools import raises
 from yt.testing import \
     fake_random_pf, assert_equal, assert_array_equal
@@ -42,7 +43,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 
 def test_slice():
@@ -72,7 +76,9 @@
                 yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
                 yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/extern/__init__.py
--- /dev/null
+++ b/yt/extern/__init__.py
@@ -0,0 +1,4 @@
+"""
+This package contains Python packages that are bundled with yt
+and are developed by third parties upstream.
+"""

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/extern/parameterized.py
--- /dev/null
+++ b/yt/extern/parameterized.py
@@ -0,0 +1,226 @@
+import re
+import inspect
+from functools import wraps
+from collections import namedtuple
+
+from nose.tools import nottest
+from unittest import TestCase
+
+from . import six
+
+if six.PY3:
+    def new_instancemethod(f, *args):
+        return f
+else:
+    import new
+    new_instancemethod = new.instancemethod
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+    """ Represents a single parameter to a test case.
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+        """
+
+    def __new__(cls, *args , **kwargs):
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3))
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{"foo": 42})
+            """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(args=(42, ), kwargs={})
+                >>> param.from_decorator("foo")
+                param(args=("foo", ), kwargs={})
+            """
+        if isinstance(args, param):
+            return args
+        if isinstance(args, six.string_types):
+            args = (args, )
+        return cls(*args)
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+class parameterized(object):
+    """ Parameterize a test case::
+
+            class TestInt(object):
+                @parameterized([
+                    ("A", 10),
+                    ("F", 15),
+                    param("10", 42, base=42)
+                ])
+                def test_int(self, input, expected, base=16):
+                    actual = int(input, base=base)
+                    assert_equal(actual, expected)
+
+            @parameterized([
+                (2, 3, 5)
+                (3, 5, 8),
+            ])
+            def test_add(a, b, expected):
+                assert_equal(a + b, expected)
+        """
+
+    def __init__(self, input):
+        self.get_input = self.input_as_callable(input)
+
+    def __call__(self, test_func):
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def parameterized_helper_method(test_self=None):
+            f = test_func
+            if test_self is not None:
+                # If we are a test method (which we suppose to be true if we
+                # are being passed a "self" argument), we first need to create
+                # an instance method, attach it to the instance of the test
+                # class, then pull it back off to turn it into a bound method.
+                # If we don't do this, Nose gets cranky.
+                f = self.make_bound_method(test_self, test_func)
+            # Note: because nose is so very picky, the more obvious
+            # ``return self.yield_nose_tuples(f)`` won't work here.
+            for nose_tuple in self.yield_nose_tuples(f):
+                yield nose_tuple
+
+        test_func.__name__ = "_helper_for_%s" %(test_func.__name__, )
+        parameterized_helper_method.parameterized_input = input
+        parameterized_helper_method.parameterized_func = test_func
+        return parameterized_helper_method
+
+    def yield_nose_tuples(self, func):
+        for args in self.get_input():
+            p = param.from_decorator(args)
+            # ... then yield that as a tuple. If those steps aren't
+            # followed precisely, Nose gets upset and doesn't run the test
+            # or doesn't run setup methods.
+            yield self.param_as_nose_tuple(p, func)
+
+    def param_as_nose_tuple(self, p, func):
+        nose_func = func
+        nose_args = p.args
+        if p.kwargs:
+            nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
+            nose_args = (p.args, p.kwargs)
+        return (nose_func, ) + nose_args
+
+    def make_bound_method(self, instance, func):
+        cls = type(instance)
+        im_f = new_instancemethod(func, None, cls)
+        setattr(cls, func.__name__, im_f)
+        return getattr(instance, func.__name__)
+
+    def assert_not_in_testcase_subclass(self):
+        parent_classes = self._terrible_magic_get_defining_classes()
+        if any(issubclass(cls, TestCase) for cls in parent_classes):
+            raise Exception("Warning: '@parameterized' tests won't work "
+                            "inside subclasses of 'TestCase' - use "
+                            "'@parameterized.expand' instead")
+
+    def _terrible_magic_get_defining_classes(self):
+        """ Returns the set of parent classes of the class currently being defined.
+            Will likely only work if called from the ``parameterized`` decorator.
+            This function is entirely @brandon_rhodes's fault, as he suggested
+            the implementation: http://stackoverflow.com/a/8793684/71522
+            """
+        stack = inspect.stack()
+        if len(stack) <= 4:
+            return []
+        frame = stack[4]
+        code_context = frame[4] and frame[4][0].strip()
+        if not (code_context and code_context.startswith("class ")):
+            return []
+        _, parents = code_context.split("(", 1)
+        parents, _ = parents.rsplit(")", 1)
+        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+    @classmethod
+    def input_as_callable(cls, input):
+        if callable(input):
+            return lambda: cls.check_input_values(input())
+        input_values = cls.check_input_values(input)
+        return lambda: input_values
+
+    @classmethod
+    def check_input_values(cls, input_values):
+        if not hasattr(input_values, "__iter__"):
+            raise ValueError("expected iterable input; got %r" % (input_values, ))
+        return input_values
+
+    @classmethod
+    def expand(cls, input):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of 'TestCase', where Nose test generators don't work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_foo_0': <function ...> ...
+            >>>
+            """
+
+        def parameterized_expand_wrapper(f):
+            stack = inspect.stack()
+            frame = stack[1]
+            frame_locals = frame[0].f_locals
+
+            base_name = f.__name__
+            get_input = cls.input_as_callable(input)
+            for num, args in enumerate(get_input()):
+                p = param.from_decorator(args)
+                name_suffix = "_%s" %(num, )
+                if len(p.args) > 0 and isinstance(p.args[0], six.string_types):
+                    name_suffix += "_" + cls.to_safe_name(p.args[0])
+                name = base_name + name_suffix
+                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+            return nottest(f)
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
+        return str(re.sub("[^a-zA-Z0-9_]", "", s))

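The vendored parameterized module supports both Nose-style generator tests (the @parameterized(...) decorator) and TestCase subclasses via @parameterized.expand, which synthesizes one named test method per parameter set at class-definition time; that is how the plot-window tests above avoid yield-based tests inside a TestCase. A minimal, hypothetical example:

    import unittest
    from yt.extern.parameterized import parameterized

    class TestAdd(unittest.TestCase):

        # Injects test_add_0_small and test_add_1_big into the class
        # namespace; the undecorated name is marked nottest so Nose
        # does not also collect the helper.
        @parameterized.expand([
            ("small", 1, 2, 3),
            ("big", 10, 20, 30),
        ])
        def test_add(self, name, a, b, expected):
            self.assertEqual(a + b, expected)
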
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/extern/six.py
--- /dev/null
+++ b/yt/extern/six.py
@@ -0,0 +1,404 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.3.0"
+
+
+# True if we are running on Python 3.
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+            del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)
+        # This is a bit ugly, but it avoids running this again.
+        delattr(tp, self.name)
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+
+class _MovedItems(types.ModuleType):
+    """Lazy loading of moved objects"""
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+del attr
+
+moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+
+    _iterkeys = "keys"
+    _itervalues = "values"
+    _iteritems = "items"
+    _iterlists = "lists"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+    _iterkeys = "iterkeys"
+    _itervalues = "itervalues"
+    _iteritems = "iteritems"
+    _iterlists = "iterlists"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+def iterkeys(d, **kw):
+    """Return an iterator over the keys of a dictionary."""
+    return iter(getattr(d, _iterkeys)(**kw))
+
+def itervalues(d, **kw):
+    """Return an iterator over the values of a dictionary."""
+    return iter(getattr(d, _itervalues)(**kw))
+
+def iteritems(d, **kw):
+    """Return an iterator over the (key, value) pairs of a dictionary."""
+    return iter(getattr(d, _iteritems)(**kw))
+
+def iterlists(d, **kw):
+    """Return an iterator over the (key, [values]) pairs of a dictionary."""
+    return iter(getattr(d, _iterlists)(**kw))
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+    def u(s):
+        return s
+    if sys.version_info[1] <= 1:
+        def int2byte(i):
+            return bytes((i,))
+    else:
+        # This is about 2x faster than the implementation above on 3.2+
+        int2byte = operator.methodcaller("to_bytes", 1, "big")
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+else:
+    def b(s):
+        return s
+    def u(s):
+        return unicode(s, "unicode_escape")
+    int2byte = chr
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+
+    print_ = getattr(builtins, "print")
+    del builtins
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+    def print_(*args, **kwargs):
+        """The new-style print function."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+
+def with_metaclass(meta, base=object):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", (base,), {})

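Bundling six gives the codebase a single spelling for names that differ between Python 2 and 3. Typical usage, as a brief sketch:

    from yt.extern import six
    from yt.extern.six import string_types, iteritems

    def is_name(obj):
        # (basestring,) on Python 2, (str,) on Python 3.
        return isinstance(obj, string_types)

    for key, value in iteritems({"a": 1, "b": 2}):
        # Dispatches to dict.iteritems or dict.items as appropriate.
        six.print_(key, value)
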
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -68,9 +68,9 @@
                 data = data[2::3].reshape(grid_dims,order='F').copy()
         f.close()
         if grid.pf.field_ordering == 1:
-            return data.T
+            return data.T.astype("float64")
         else:
-            return data
+            return data.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -224,7 +224,10 @@
             else:
                 self.units[field_name] = 1.0
             if 'field_units' in current_field.attrs:
-                current_fields_unit = just_one(current_field.attrs['field_units'])
+                if type(current_field.attrs['field_units']) == str:
+                    current_fields_unit = current_field.attrs['field_units']
+                else:
+                    current_fields_unit = just_one(current_field.attrs['field_units'])
             else:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,

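The GDF change above accepts field_units stored either as a plain string or as an array-like of strings; only the latter needs just_one. The same branch standalone, using isinstance where the commit compares type() directly:

    def field_units_of(attr_value):
        # h5py may return a string scalar or a length-1 array,
        # depending on how the file was written.
        if isinstance(attr_value, str):
            return attr_value
        return attr_value[0]  # stand-in for yt's just_one()

    assert field_units_of("g/cm**3") == "g/cm**3"
    assert field_units_of(["g/cm**3"]) == "g/cm**3"
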
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -84,8 +84,11 @@
           units=r"\rm{cm}/\rm{s}")
 
 for f,v in log_translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=True)
+    add_field(f, TranslationFunc(v), take_log=True,
+              units=KnownGDFFields[v].get_units(),
+              projected_units=KnownGDFFields[v].get_projected_units())
 
 for f,v in translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=False)
-
+    add_field(f, TranslationFunc(v), take_log=False,
+              units=KnownGDFFields[v].get_units(),
+              projected_units=KnownGDFFields[v].get_projected_units())

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -215,6 +215,7 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
+        max_level = min_level
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -248,6 +249,8 @@
                     assert(pos.shape[0] == ng)
                     n = self.oct_handler.add(cpu + 1, level - min_level, pos)
                     assert(n == ng)
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
         self.oct_handler.finalize()
 
     def included(self, selector):
@@ -297,7 +300,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.max_level = pf.max_level
+        self.max_level = None
 
         self.float_type = np.float64
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
@@ -308,6 +311,7 @@
                         for i in range(self.parameter_file['ncpu'])]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
         self.num_grids = total_octs
 
     def _detect_fields(self):

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -122,15 +122,17 @@
         self.max_level = getattr(dobj, "max_level", 99)
         self.overlap_cells = 0
 
-        if dobj is None:
-            for i in range(3):
-                self.periodicity[i] = False
-                self.domain_width[i] = 0.0
-        else:
-            for i in range(3) :
-                self.domain_width[i] = dobj.pf.domain_right_edge[i] - \
-                                       dobj.pf.domain_left_edge[i]
-                self.periodicity[i] = dobj.pf.periodicity[i]
+        for i in range(3) :
+            pf = getattr(dobj, 'pf', None)
+            if pf is None:
+                for i in range(3):
+                    self.domain_width[i] = 1.0
+                    self.periodicity[i] = False
+            else:
+                for i in range(3):
+                    self.domain_width[i] = pf.domain_right_edge[i] - \
+                                           pf.domain_left_edge[i]
+                    self.periodicity[i] = pf.periodicity[i]
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

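The selector change makes domain width and periodicity come from the data object's parameter file when one is attached, with safe defaults otherwise, so bare selectors (as in tests) no longer need a pf. In outline:

    def domain_geometry(dobj):
        # Unit, non-periodic domain when no parameter file is attached.
        pf = getattr(dobj, 'pf', None)
        if pf is None:
            return [1.0] * 3, [False] * 3
        width = [pf.domain_right_edge[i] - pf.domain_left_edge[i]
                 for i in range(3)]
        periodicity = [pf.periodicity[i] for i in range(3)]
        return width, periodicity
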
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -144,7 +144,8 @@
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
     apply_colormap, scale_image, write_projection, write_fits, \
-    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot
+    SlicePlot, OffAxisSlicePlot, ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -9,6 +9,7 @@
     config = Configuration('yt', parent_package, top_path)
     config.add_subpackage('analysis_modules')
     config.add_subpackage('data_objects')
+    config.add_subpackage('extern')
     config.add_subpackage('frontends')
     config.add_subpackage('geometry')
     config.add_subpackage('gui')

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -33,6 +33,7 @@
 import cPickle
 import shelve
 import zlib
+import tempfile
 
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
@@ -604,9 +605,11 @@
                                 self.plot_axis, self.plot_kwargs)
         attr = getattr(plot, self.attr_name)
         attr(*self.attr_args[0], **self.attr_args[1])
-        fn = plot.save()[0]
-        image = mpimg.imread(fn)
-        os.remove(fn)
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        plot.save(name=tmpname)
+        image = mpimg.imread(tmpname)
+        os.remove(tmpname)
         return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):

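A side note on the tempfile pattern introduced here: tempfile.mkstemp() returns an already-open OS-level file descriptor along with the path, and that descriptor has to be closed before other code reopens the path, or the test suite leaks one descriptor per image comparison. A minimal, self-contained sketch of the idiom (the write below merely stands in for plot.save):

import os
import tempfile

tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
os.close(tmpfd)                      # close the fd; only the path is needed
try:
    with open(tmpname, 'wb') as f:   # stands in for plot.save(name=tmpname)
        f.write("\x89PNG")
finally:
    os.remove(tmpname)               # always clean up the temporary file
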
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/utilities/grid_data_format/tests/test_writer.py
--- a/yt/utilities/grid_data_format/tests/test_writer.py
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -50,17 +50,18 @@
     tmpdir = tempfile.mkdtemp()
     tmpfile = os.path.join(tmpdir, 'test_gdf.h5')
 
-    test_pf = fake_random_pf(64)
-    write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
-                 data_comment=TEST_COMMENT)
-    del test_pf
+    try:
+        test_pf = fake_random_pf(64)
+        write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
+                     data_comment=TEST_COMMENT)
+        del test_pf
+        assert isinstance(load(tmpfile), GDFStaticOutput)
 
-    assert isinstance(load(tmpfile), GDFStaticOutput)
+        h5f = h5.File(tmpfile, 'r')
+        gdf = h5f['gridded_data_format'].attrs
+        assert_equal(gdf['data_author'], TEST_AUTHOR)
+        assert_equal(gdf['data_comment'], TEST_COMMENT)
+        h5f.close()
 
-    h5f = h5.File(tmpfile, 'r')
-    gdf = h5f['gridded_data_format'].attrs
-    assert_equal(gdf['data_author'], TEST_AUTHOR)
-    assert_equal(gdf['data_comment'], TEST_COMMENT)
-    h5f.close()
-
-    shutil.rmtree(tmpdir)
+    finally:
+        shutil.rmtree(tmpdir)

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -20,36 +20,37 @@
     # Create a temporary directory
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
-    os.chdir(tmpdir)
+    exit_code = 1
 
-    # Get compiler invocation
-    compiler = os.getenv('CC', 'cc')
+    try:
+        os.chdir(tmpdir)
 
-    # Attempt to compile a test script.
-    # See http://openmp.org/wp/openmp-compilers/
-    filename = r'test.c'
-    file = open(filename,'w', 0)
-    file.write(
-        "#include <omp.h>\n"
-        "#include <stdio.h>\n"
-        "int main() {\n"
-        "#pragma omp parallel\n"
-        "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
-        "}"
-        )
-    with open(os.devnull, 'w') as fnull:
-        exit_code = subprocess.call([compiler, '-fopenmp', filename],
-                                    stdout=fnull, stderr=fnull)
+        # Get compiler invocation
+        compiler = os.getenv('CC', 'cc')
 
-    # Clean up
-    file.close()
-    os.chdir(curdir)
-    shutil.rmtree(tmpdir)
+        # Attempt to compile a test script.
+        # See http://openmp.org/wp/openmp-compilers/
+        filename = r'test.c'
+        file = open(filename,'w', 0)
+        file.write(
+            "#include <omp.h>\n"
+            "#include <stdio.h>\n"
+            "int main() {\n"
+            "#pragma omp parallel\n"
+            "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
+            "}"
+            )
+        with open(os.devnull, 'w') as fnull:
+            exit_code = subprocess.call([compiler, '-fopenmp', filename],
+                                        stdout=fnull, stderr=fnull)
 
-    if exit_code == 0:
-        return True
-    else:
-        return False
+        # Clean up
+        file.close()
+    finally:
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+    return exit_code == 0
 
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration

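The point of check_for_openmp() returning a boolean is that the build can gate OpenMP flags on it. A hedged sketch of a consumer in the same numpy.distutils style; the extension name and sources are illustrative, not taken from this changeset:

from numpy.distutils.misc_util import Configuration

def configuration(parent_package='', top_path=None):
    config = Configuration('example', parent_package, top_path)
    # Gate the OpenMP flags on the compile probe defined above.
    omp_args = ['-fopenmp'] if check_for_openmp() else []
    config.add_extension("some_module",               # illustrative name
                         ["example/some_module.pyx"],
                         extra_compile_args=omp_args,
                         extra_link_args=omp_args)
    return config
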
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7798,3 +7798,11 @@
 color_map_luts['Rainbow18'] = color_map_luts['idl38']
 color_map_luts['Rainbow + white'] = color_map_luts['idl39']
 color_map_luts['Rainbow + black'] = color_map_luts['idl40']
+
+# Create a reversed LUT for each of the above defined LUTs and register it
+# under the original name plus a "_r" suffix (for "reversed", consistent
+# with the MPL convention).  So, for example, the reversal of "Waves" is "Waves_r".
+temp = {}
+for k,v in color_map_luts.iteritems():
+    temp[k+"_r"] = (v[0][::-1], v[1][::-1], v[2][::-1], v[3][::-1])
+color_map_luts.update(temp)

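The reversal above is a pure index flip on each of the four channel arrays, so the transform is easy to sanity-check on a toy LUT:

import numpy as np

# A toy 4-channel LUT (r, g, b, a), shaped like the color_map_luts entries.
lut = tuple(np.linspace(0.0, 1.0, 5) for _ in range(4))

# Reversing every channel yields the "_r" variant of the map.
lut_r = tuple(channel[::-1] for channel in lut)

assert all((a == b[::-1]).all() for a, b in zip(lut_r, lut))
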
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -29,7 +29,8 @@
 """
 
 from color_maps import \
-    add_cmap
+    add_cmap, \
+    show_colormaps
 
 from plot_collection import \
     PlotCollection, \

diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -145,3 +145,56 @@
     b = cmap._lut[:-3, 2]
     a = np.ones(b.shape)
     return [r, g, b, a]
+
+def show_colormaps(subset = "all", filename=None):
+    """
+    Displays the colormaps available to yt.  Note that most functions can
+    use both the matplotlib and the native yt colormaps; however, there are
+    some special functions in image_writer.py (e.g. write_image(),
+    write_fits(), write_bitmap(), etc.) which cannot access the matplotlib
+    colormaps.
+
+    In addition to the colormaps listed, one can access the reverse of each
+    colormap by appending "_r" to the end of its name.
+
+    Parameters
+    ----------
+
+    subset : string, optional
+
+        valid values : "all", "yt_native"
+        default : "all"
+
+        As mentioned above, a few functions can only access the yt-native
+        colormaps.  To display only those colormaps, set this
+        to "yt_native".
+
+    filename : string, optional
+
+        default: None
+
+        If filename is set, the colormap display is saved to that file;
+        if it is not set, the result is shown interactively.
+    """
+    import pylab as pl
+
+    a=np.outer(np.arange(0,1,0.01), np.ones(10))
+    if subset == "all":
+        maps = [ m for m in pl.cm.datad if (not m.startswith("idl")) and (not m.endswith("_r"))]
+    elif subset == "yt_native":
+        maps = [ m for m in _cm.color_map_luts if (not m.startswith("idl")) and (not m.endswith("_r"))]
+    maps = list(set(maps))
+    maps.sort()
+    # scale the image size by the number of cmaps
+    pl.figure(figsize=(2.*len(maps)/10.,6))
+    pl.subplots_adjust(top=0.7,bottom=0.05,left=0.01,right=0.99)
+    l = len(maps)+1
+    for i,m in enumerate(maps):
+        pl.subplot(1,l,i+1)
+        pl.axis("off")
+        pl.imshow(a, aspect='auto',cmap=pl.get_cmap(m),origin="lower")      
+        pl.title(m,rotation=90, fontsize=10, verticalalignment='bottom')
+    if filename is not None:
+        pl.savefig(filename, dpi=100, facecolor='gray') 
+    else:  
+        pl.show()

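Given the yt.mods and yt.visualization.api exports earlier in this changeset, typical usage of the new function would be:

from yt.mods import show_colormaps

show_colormaps()                      # every matplotlib + yt-native colormap
show_colormaps(subset="yt_native")    # only the yt-supplied LUTs
show_colormaps(filename="cmaps.png")  # save to disk instead of displaying
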
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -45,6 +45,7 @@
     sec_per_Gyr, sec_per_Myr, \
     sec_per_kyr, sec_per_year, \
     sec_per_day, sec_per_hr
+from yt.visualization.image_writer import apply_colormap
 
 import _MPL
 
@@ -176,7 +177,8 @@
 
 class QuiverCallback(PlotCallback):
     """
-    annotate_quiver(field_x, field_y, factor, scale=None, scale_units=None, normalize=False):
+    annotate_quiver(field_x, field_y, factor=16, scale=None, scale_units=None, 
+                    normalize=False, bv_x=0, bv_y=0):
 
     Adds a 'quiver' plot to any plot, using the *field_x* and *field_y*
     from the associated data, skipping every *factor* datapoints
@@ -230,8 +232,8 @@
 
 class ContourCallback(PlotCallback):
     """
-    annotate_contour(self, field, ncont=5, factor=4, take_log=None, clim=None,
-                     plot_args = None):
+    annotate_contour(field, ncont=5, factor=4, take_log=None, clim=None,
+                     plot_args=None, label=False, label_args=None):
 
     Add contours in *field* to the plot.  *ncont* governs the number of
     contours generated, *factor* governs the number of points used in the
@@ -338,18 +340,21 @@
 
 class GridBoundaryCallback(PlotCallback):
     """
-    annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
+    annotate_grids(alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
+                 min_level=None, max_level=None, cmap='B-W LINEAR_r'):
 
-    Adds grid boundaries to a plot, optionally with *alpha*-blending.
-    Cuttoff for display is at *min_pix* wide.
-    *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
-    Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.  If *min_level* 
-    is specified, only draw grids at or above min_level.  If *max_level* is specified, only 
-    draw grids at or below max_level.
+    Draws grids on an existing PlotWindow object.  Adds grid boundaries to a
+    plot, optionally with alpha-blending.  By default, levels are colored
+    going from white to black, but you can switch to an arbitrary colormap
+    with the cmap keyword (or draw all grids in black with cmap=None).
+    The cutoff for display is when a grid is at least min_pix wide.
+    draw_ids puts the grid id in the corner of the grid (not so great in
+    projections...).  One can also set the minimum and maximum levels of
+    grids to display.
     """
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
-                 min_level=None, max_level=None):
+    def __init__(self, alpha=0.7, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
+                 min_level=None, max_level=None, cmap='B-W LINEAR_r'):
         PlotCallback.__init__(self)
         self.alpha = alpha
         self.min_pix = min_pix
@@ -358,6 +363,7 @@
         self.periodic = periodic
         self.min_level = min_level
         self.max_level = max_level
+        self.cmap = cmap
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -375,15 +381,16 @@
             pxs, pys = np.mgrid[-1:1:3j,-1:1:3j]
         else:
             pxs, pys = np.mgrid[0:0:1j,0:0:1j]
-        GLE = plot.data.pf.h.grid_left_edge
-        GRE = plot.data.pf.h.grid_right_edge
-        grid_levels = plot.data.pf.h.grid_levels[:,0]
+        GLE = plot.data.grid_left_edge
+        GRE = plot.data.grid_right_edge
+        levels = plot.data.grid_levels[:,0]
         min_level = self.min_level
-        max_level = self.min_level
+        max_level = self.max_level
+        if max_level is None:
+            max_level = plot.data.pf.h.max_level
         if min_level is None:
             min_level = 0
-        if max_level is None:
-            max_level = plot.data.pf.h.max_level
+
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
@@ -393,19 +400,28 @@
             right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
                        ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix ) & \
-                       ( grid_levels >= min_level) & \
-                       ( grid_levels <= max_level)
+                       ( levels >= min_level) & \
+                       ( levels <= max_level)
+
+            if self.cmap is not None: 
+                edgecolors = apply_colormap(levels[(levels <= max_level) & (levels >= min_level)]*1.0,
+                                  color_bounds=[0,plot.data.pf.h.max_level],
+                                  cmap_name=self.cmap)[0,:,:]*1.0/255.
+                edgecolors[:,3] = self.alpha
+            else:
+                edgecolors = (0.0,0.0,0.0,self.alpha)
+
             if visible.nonzero()[0].size == 0: continue
             verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
                  (left_edge_y, right_edge_y, right_edge_y, left_edge_y)])
             verts=verts.transpose()[visible,:,:]
-            edgecolors = (0.0,0.0,0.0,self.alpha)
             grid_collection = matplotlib.collections.PolyCollection(
                 verts, facecolors="none",
                 edgecolors=edgecolors)
             plot._axes.hold(True)
             plot._axes.add_collection(grid_collection)
+
             if self.draw_ids:
                 visible_ids =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix_ids ) & \
                                ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix_ids )

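For reference, the reworked callback is reached through annotate_grids() on any PlotWindow (its _type_name is "grids"). A short usage sketch, reusing the dataset name from the tests later in this changeset:

from yt.mods import load, SlicePlot

pf = load("DD0010/moving7_0010")      # illustrative dataset
slc = SlicePlot(pf, 'z', 'Density')
slc.annotate_grids(alpha=0.5, min_level=2, cmap='jet')  # edges colored by level
slc.save()

Passing cmap=None instead falls back to plain black edges for every level.
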
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -148,15 +148,19 @@
         else:
             norm = mpl.matplotlib.colors.Normalize()
         if use_mesh:
-            pcm = axes.pcolormesh(x_bins, y_bins, self.image, norm=norm,
+            mappable = axes.pcolormesh(
+                                  x_bins, y_bins, self.image, norm=norm,
                                   shading='flat', cmap = self.cbar.cmap,
                                   rasterized=True)
             if self.x_spec.scale == 'log': axes.set_xscale("log")
             if self.y_spec.scale == 'log': axes.set_yscale("log")
         else:
-            axes.imshow(self.image, origin='lower', interpolation='nearest',
+            mappable = axes.imshow(
+                        self.image, origin='lower', interpolation='nearest',
                         cmap = self.cbar.cmap, extent = [xmi,xma,ymi,yma],
                         norm = norm)
+        cbar = figure.colorbar(mappable)
+        cbar.set_label(self.cbar.title)
         if self.x_spec.title is not None:
             axes.set_xlabel(self.x_spec.title)
         if self.y_spec.title is not None:

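The captured mappable matters because Figure.colorbar() must be built from the ScalarMappable that pcolormesh/imshow returns; the previous code discarded it, so no colorbar could be attached. The general matplotlib pattern, as a standalone sketch:

import numpy as np
import matplotlib
matplotlib.use("Agg")                 # headless backend for this sketch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
mappable = ax.imshow(np.random.rand(16, 16), origin='lower',
                     interpolation='nearest')
cbar = fig.colorbar(mappable)         # colorbar is derived from the mappable
cbar.set_label("Density")
fig.savefig("with_colorbar.png")
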
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -22,9 +22,12 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+import itertools
 import os
 import tempfile
 import shutil
+import unittest
+from yt.extern.parameterized import parameterized, param
 from yt.testing import \
     fake_random_pf, assert_equal, assert_rel_equal
 from yt.utilities.answer_testing.framework import \
@@ -65,132 +68,163 @@
 
     return image_type == os.path.splitext(fname)[1]
 
-attr_args ={ "pan"             : [( ((0.1, 0.1),), {} )],
-             "pan_rel"         : [( ((0.1, 0.1),), {} )],
-             "set_axes_unit"   : [( ("kpc",), {} ),
-                                  ( ("Mpc",), {} ),
-                                  ( (("kpc", "kpc"),), {} ),
-                                  ( (("kpc", "Mpc"),), {} )],
-             "set_buff_size"   : [( (1600,), {} ),
-                                  ( ((600, 800),), {} )],
-             "set_center"      : [( ((0.4, 0.3),), {} )],
-             "set_cmap"        : [( ('Density', 'RdBu'), {} ),
-                                  ( ('Density', 'kamae'), {} )],
-             "set_font"        : [( ({'family':'sans-serif', 'style':'italic',
-                                      'weight':'bold', 'size':24},), {} )],
-             "set_log"         : [( ('Density', False), {} )],
-             "set_window_size" : [( (7.0,), {} )],
-             "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
-                           ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
-             "zoom" : [( (10,), {} )] }
 
-m7 = "DD0010/moving7_0010"
-wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
-@requires_pf(m7)
-@requires_pf(wt)
+TEST_FLNMS = [None, 'test.png', 'test.eps',
+              'test.ps', 'test.pdf']
+M7 = "DD0010/moving7_0010"
+WT = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+
+ATTR_ARGS = {"pan": [(((0.1, 0.1), ), {})],
+             "pan_rel": [(((0.1, 0.1), ), {})],
+             "set_axes_unit": [(("kpc", ), {}),
+                               (("Mpc", ), {}),
+                               ((("kpc", "kpc"),), {}),
+                               ((("kpc", "Mpc"),), {})],
+             "set_buff_size": [((1600, ), {}),
+                               (((600, 800), ), {})],
+             "set_center": [(((0.4, 0.3), ), {})],
+             "set_cmap": [(('Density', 'RdBu'), {}),
+                          (('Density', 'kamae'), {})],
+             "set_font": [(({'family': 'sans-serif', 'style': 'italic',
+                             'weight': 'bold', 'size': 24}, ), {})],
+             "set_log": [(('Density', False), {})],
+             "set_window_size": [((7.0, ), {})],
+             "set_zlim": [(('Density', 1e-25, 1e-23), {}),
+                          (('Density', 1e-25, None), {'dynamic_range': 4})],
+             "zoom": [((10, ), {})]}
+
+
+@requires_pf(M7)
 def test_attributes():
     """Test plot member functions that aren't callbacks"""
     plot_field = 'Density'
     decimals = 3
 
-    pf = data_dir_load(m7)
+    pf = data_dir_load(M7)
     for ax in 'xyz':
-        for attr_name in attr_args.keys():
-            for args in attr_args[attr_name]:
+        for attr_name in ATTR_ARGS.keys():
+            for args in ATTR_ARGS[attr_name]:
                 yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
                                               args, decimals)
-    pf = data_dir_load(wt)
+
+
+@requires_pf(WT)
+def test_attributes_wt():
+    plot_field = 'Density'
+    decimals = 3
+
+    pf = data_dir_load(WT)
     ax = 'z'
-    for attr_name in attr_args.keys():
-        for args in attr_args[attr_name]:
+    for attr_name in ATTR_ARGS.keys():
+        for args in ATTR_ARGS[attr_name]:
             yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
                                           args, decimals)
 
-def test_setwidth():
-    pf = fake_random_pf(64)
 
-    slc = SlicePlot(pf, 0, 'Density')
+class TestSetWidth(unittest.TestCase):
 
-    yield assert_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)]
+    pf = None
 
-    slc.set_width((0.5,0.8))
+    def setUp(self):
+        if self.pf is None:
+            self.pf = fake_random_pf(64)
+            self.slc = SlicePlot(self.pf, 0, 'Density')
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15
+    def _assert_15kpc(self):
+        assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                         [(-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+                          (-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+                          (15.0 / self.pf['kpc'], 15. / self.pf['kpc'])], 15)
 
-    slc.set_width(15,'kpc')
+    def _assert_15_10kpc(self):
+        assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                         [(-7.5 / self.pf['kpc'], 7.5 / self.pf['kpc']),
+                          (-5.0 / self.pf['kpc'], 5.0 / self.pf['kpc']),
+                          (15.0 / self.pf['kpc'], 10. / self.pf['kpc'])], 15)
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (15/pf['kpc'], 15/pf['kpc'])], 15
+    def test_set_width_one(self):
+        assert_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                     [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)])
 
-    slc.set_width((15,'kpc'))
+    def test_set_width_nonequal(self):
+        self.slc.set_width((0.5, 0.8))
+        assert_rel_equal([self.slc.xlim, self.slc.ylim, self.slc.width],
+                         [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15)
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (15/pf['kpc'], 15/pf['kpc'])], 15
+    def test_twoargs_eq(self):
+        self.slc.set_width(15, 'kpc')
+        self._assert_15kpc()
 
-    slc.set_width(((15,'kpc'),(10,'kpc')))
+    def test_tuple_eq(self):
+        self.slc.set_width((15, 'kpc'))
+        self._assert_15kpc()
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-5/pf['kpc'], 5/pf['kpc']),
-         (15/pf['kpc'], 10/pf['kpc'])], 15
+    def test_tuple_of_tuples_neq(self):
+        self.slc.set_width(((15, 'kpc'), (10, 'kpc')))
+        self._assert_15_10kpc()
 
-    slc.set_width(((15,'kpc'),(10000,'pc')))
+    def test_tuple_of_tuples_neq2(self):
+        self.slc.set_width(((15, 'kpc'), (10000, 'pc')))
+        self._assert_15_10kpc()
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-5/pf['kpc'], 5/pf['kpc']),
-         (15/pf['kpc'], 10/pf['kpc'])], 15
+    def test_pair_of_tuples_neq(self):
+        self.slc.set_width((15, 'kpc'), (10000, 'pc'))
+        self._assert_15_10kpc()
 
-    slc.set_width((15,'kpc'),(10000,'pc'))
 
-    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
-        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
-         (-5/pf['kpc'], 5/pf['kpc']),
-         (15/pf['kpc'], 10/pf['kpc'])], 15
+class TestPlotWindowSave(unittest.TestCase):
 
-def test_save():
-    """Test plot window creation and saving to disk."""
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    @classmethod
+    def setUpClass(cls):
+        test_pf = fake_random_pf(64)
+        normal = [1, 1, 1]
+        ds_region = test_pf.h.region([0.5] * 3, [0.4] * 3, [0.6] * 3)
+        projections = []
+        projections_ds = []
+        for dim in range(3):
+            projections.append(ProjectionPlot(test_pf, dim, 'Density'))
+            projections_ds.append(ProjectionPlot(test_pf, dim, 'Density',
+                                                 data_source=ds_region))
 
-    normal = [1, 1, 1]
+        cls.slices = [SlicePlot(test_pf, dim, 'Density') for dim in range(3)]
+        cls.projections = projections
+        cls.projections_ds = projections_ds
+        cls.offaxis_slice = OffAxisSlicePlot(test_pf, normal, 'Density')
+        cls.offaxis_proj = OffAxisProjectionPlot(test_pf, normal, 'Density')
 
-    test_pf = fake_random_pf(64)
-    test_flnms = [None, 'test.png', 'test.eps',
-                  'test.ps', 'test.pdf']
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    ds_region = test_pf.h.region([0.5]*3,[0.4]*3,[0.6]*3)
+    def tearDown(self):
+        os.chdir(self.curdir)
+        shutil.rmtree(self.tmpdir)
 
-    for dim in [0, 1, 2]:
-        obj = SlicePlot(test_pf, dim, 'Density')
-        for fname in test_flnms:
-            yield assert_equal, assert_fname(obj.save(fname)[0]), True
+    @parameterized.expand(
+        param.explicit(item)
+        for item in itertools.product(range(3), TEST_FLNMS))
+    def test_slice_plot(self, dim, fname):
+        assert assert_fname(self.slices[dim].save(fname)[0])
 
-    for dim in [0, 1, 2]:
-        obj = ProjectionPlot(test_pf, dim, 'Density')
-        for fname in test_flnms:
-            yield assert_equal, assert_fname(obj.save(fname)[0]), True
-        # Test ProjectionPlot's data_source keyword
-        obj = ProjectionPlot(test_pf, dim, 'Density',
-                             data_source=ds_region)
-        obj.save()
+    @parameterized.expand(
+        param.explicit(item)
+        for item in itertools.product(range(3), TEST_FLNMS))
+    def test_projection_plot(self, dim, fname):
+        assert assert_fname(self.projections[dim].save(fname)[0])
 
-    obj = OffAxisSlicePlot(test_pf, normal, 'Density')
-    for fname in test_flnms:
-        yield assert_equal, assert_fname(obj.save(fname)[0]), True
+    @parameterized.expand([(0, ), (1, ), (2, )])
+    def test_projection_plot_ds(self, dim):
+        self.projections_ds[dim].save()
 
-    obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
-    for fname in test_flnms:
-        yield assert_equal, assert_fname(obj.save(fname)[0]), True
+    @parameterized.expand(
+        param.explicit((fname, ))
+        for fname in TEST_FLNMS)
+    def test_offaxis_slice_plot(self, fname):
+        assert assert_fname(self.offaxis_slice.save(fname)[0])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+    @parameterized.expand(
+        param.explicit((fname, ))
+        for fname in TEST_FLNMS)
+    def test_offaxis_projection_plot(self, fname):
+        assert assert_fname(self.offaxis_proj.save(fname)[0])

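The rewrite above leans on yt.extern.parameterized: @parameterized.expand generates one named test method per argument tuple at class-definition time, so each case is collected and reported independently. A minimal sketch of the pattern (names and values are illustrative):

import unittest
from yt.extern.parameterized import parameterized

class TestExpansion(unittest.TestCase):

    # Expands into test_dim_0, test_dim_1, test_dim_2, so each
    # dimension passes or fails as its own test.
    @parameterized.expand([(0, ), (1, ), (2, )])
    def test_dim(self, dim):
        self.assertTrue(0 <= dim < 3)

if __name__ == "__main__":
    unittest.main()
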
diff -r 7830ab86ae148fb024c46d4e2998e0e2f1af7bd7 -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -237,7 +237,7 @@
                    max_level=None):
         r"""Draws Grids on an existing volume rendering.
 
-        By mapping grid level to a color, drawes edges of grids on 
+        By mapping grid level to a color, draws edges of grids on 
         a volume rendering using the camera orientation.
 
         Parameters


https://bitbucket.org/yt_analysis/yt/commits/93dc5368d870/
Changeset:   93dc5368d870
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-28 20:47:04
Summary:     This file was missed when I did my last commit.
Affected #:  1 file

diff -r 34ebb7b6fabd1b3dcfaa22f378bbbcaf9296dbd1 -r 93dc5368d870288a369447061324a74201beccf3 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,6 +55,7 @@
     domain_id = 2
     _con_args = ("base_region", "sfc_start", "sfc_end", "pf")
     _type_name = 'octree_subset'
+    _num_zones = 2
 
     def __init__(self, base_region, sfc_start, sfc_end, pf):
         self.field_data = YTFieldData()


https://bitbucket.org/yt_analysis/yt/commits/3928be73ffdf/
Changeset:   3928be73ffdf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-28 20:54:38
Summary:     Merging from smoothing bookmark for ARTIO optimizations.
Affected #:  18 files

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,12 +36,12 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
-    _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
@@ -49,7 +49,8 @@
     _domain_offset = 0
     _num_octs = -1
 
-    def __init__(self, base_region, domain, pf):
+    def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.domain = domain
@@ -145,6 +146,28 @@
         if vals is None: return
         return np.asfortranarray(vals)
 
+    def smooth(self, positions, fields = None, method = None):
+        # Here we perform our particle smoothing.
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
+        if fields is None: fields = []
+        op = cls(nvals, len(fields), 64)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
@@ -206,8 +229,10 @@
     _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
-    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+                 over_refine_factor = 1):
         # The first attempt at this will not work in parallel.
+        self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files
         self.field_data = YTFieldData()
         self.field_parameters = {}

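smooth() above resolves its operator purely by naming convention: getattr(particle_smooth, "%s_smooth" % method). A self-contained sketch of that dispatch style, with stand-in names rather than yt's real operator classes:

class sum_smooth(object):
    # Stand-in for a smoothing operator class.
    def __init__(self, nvals, nfields, nbits):
        self.nvals, self.nfields, self.nbits = nvals, nfields, nbits

def get_operator(namespace, method):
    # "sum" -> "sum_smooth"; a miss means the method is not implemented.
    cls = namespace.get("%s_smooth" % method, None)
    if cls is None:
        raise NotImplementedError(method)
    return cls

op = get_operator(globals(), "sum")((2, 2, 2, 10), 1, 64)
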
diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,6 +55,7 @@
     domain_id = 2
     _con_args = ("base_region", "sfc_start", "sfc_end", "pf")
     _type_name = 'octree_subset'
+    _num_zones = 2
 
     def __init__(self, base_region, sfc_start, sfc_end, pf):
         self.field_data = YTFieldData()

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -215,6 +215,7 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
+        max_level = min_level
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -248,6 +249,8 @@
                     assert(pos.shape[0] == ng)
                     n = self.oct_handler.add(cpu + 1, level - min_level, pos)
                     assert(n == ng)
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
         self.oct_handler.finalize()
 
     def included(self, selector):
@@ -297,7 +300,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.max_level = pf.max_level
+        self.max_level = None
 
         self.float_type = np.float64
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
@@ -308,6 +311,7 @@
                         for i in range(self.parameter_file['ncpu'])]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
         self.num_grids = total_octs
 
     def _detect_fields(self):

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,6 +96,7 @@
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None
+    over_refine_factor = 1
 
     def _set_units(self):
         self.units = {}
@@ -154,8 +155,10 @@
 
     def __init__(self, filename, data_style="gadget_binary",
                  additional_fields = (),
-                 unit_base = None, n_ref = 64):
+                 unit_base = None, n_ref = 64,
+                 over_refine_factor = 1):
         self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -188,7 +191,8 @@
 
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -268,11 +272,13 @@
     _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", n_ref = 64):
+    def __init__(self, filename, data_style="OWLS", n_ref = 64,
+                 over_refine_factor = 1):
         self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               unit_base = None,
-                                               n_ref = n_ref)
+        super(OWLSStaticOutput, self).__init__(
+                               filename, data_style,
+                               unit_base = None, n_ref = n_ref,
+                               over_refine_factor = over_refine_factor)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]
@@ -292,7 +298,8 @@
         self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.cosmological_simulation = 1
         self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
@@ -438,7 +445,8 @@
                 self.parameters[param] = val
 
         self.current_time = hvals["time"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         if self.parameters.get('bPeriodic', True):
             self.periodicity = (True, True, True)
         else:

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -738,10 +738,11 @@
     file_count = 1
     filename_template = "stream_file"
     n_ref = 64
+    over_refine_factor = 1
 
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
-                      n_ref = 64):
+                      n_ref = 64, over_refine_factor = 1):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -828,6 +829,7 @@
 
     spf = StreamParticlesStaticOutput(handler)
     spf.n_ref = n_ref
+    spf.over_refine_factor = over_refine_factor
     spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0

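Taken together, n_ref sets how many particles an oct may hold before it refines, and over_refine_factor sets how many zones each oct spans: 1 << over_refine_factor along each edge. A hedged usage sketch of the new keyword; the field names and import path follow stream-frontend convention and are assumptions here, not taken from this diff:

import numpy as np
from yt.frontends.stream.api import load_particles   # import path assumed

n = 10000
data = {"particle_position_x": np.random.random(n),
        "particle_position_y": np.random.random(n),
        "particle_position_z": np.random.random(n),
        "particle_mass": np.ones(n)}

# n_ref: refine an oct once it holds more than 64 particles.
# over_refine_factor=2: each oct spans (1 << 2)**3 = 64 zones.
spf = load_particles(data, 3.08e24, n_ref=64, over_refine_factor=2)
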
diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -40,6 +40,8 @@
 cdef struct OctInfo:
     np.float64_t left_edge[3]
     np.float64_t dds[3]
+    np.int64_t ipos[3]
+    np.int32_t level
 
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
@@ -49,6 +51,16 @@
     OctAllocationContainer *next
     Oct *my_octs
 
+cdef struct OctList
+
+cdef struct OctList:
+    OctList *next
+    Oct *o
+
+cdef OctList *OctList_append(OctList *list, Oct *o)
+cdef int OctList_count(OctList *list)
+cdef void OctList_delete(OctList *list)
+
 cdef class OctreeContainer:
     cdef OctAllocationContainer *cont
     cdef OctAllocationContainer **domains
@@ -56,12 +68,13 @@
     cdef oct_visitor_function *fill_func
     cdef int partial_coverage
     cdef int nn[3]
+    cdef np.uint8_t oref
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int max_domain
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
-    cdef void neighbors(self, Oct *, Oct **)
+    cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.
@@ -71,6 +84,7 @@
                         OctVisitorData *data)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -96,8 +96,10 @@
 cdef class OctreeContainer:
 
     def __init__(self, oct_domain_dimensions, domain_left_edge,
-                 domain_right_edge, partial_coverage = 0):
+                 domain_right_edge, partial_coverage = 0,
+                 over_refine = 1):
         # This will just initialize the root mesh octs
+        self.oref = over_refine
         self.partial_coverage = partial_coverage
         cdef int i, j, k, p
         for i in range(3):
@@ -120,6 +122,21 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+        cdef int i
+        data.index = 0
+        data.last = -1
+        data.global_index = -1
+        for i in range(3):
+            data.pos[i] = -1
+            data.ind[i] = -1
+        data.array = NULL
+        data.dims = 0
+        data.domain = domain_id
+        data.level = -1
+        data.oref = self.oref
+        data.nz = (1 << (data.oref*3))
+
     def __dealloc__(self):
         free_octs(self.cont)
         if self.root_mesh == NULL: return
@@ -185,27 +202,39 @@
         return 0
 
     cdef int get_root(self, int ind[3], Oct **o):
+        cdef int i
+        for i in range(3):
+            if ind[i] < 0 or ind[i] >= self.nn[i]:
+                o[0] = NULL
+                return 1
         o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        return 1
+        return 0
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
+                  ):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3]
+        cdef int ind[3], level
+        cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
+        cdef int i
         cur = next = NULL
-        cdef int i
+        level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+            ipos[i] = 0
         self.get_root(ind, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
+            level += 1
+            for i in range(3):
+                ipos[i] = (ipos[i] << 1) + ind[i]
             cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
@@ -227,18 +256,22 @@
                 cp[i] -= dds[i]/2.0 # Now centered
             else:
                 cp[i] += dds[i]/2.0
-            # We don't need to change dds[i] as it has been halved from the
-            # oct width, thus making it already the cell width
-            oinfo.dds[i] = dds[i] # Cell width
+            # We don't normally need to change dds[i] as it has been halved
+            # from the oct width, thus making it already the cell width.
+            # But, for some cases where the oref != 1, this needs to be
+            # changed.
+            oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
+            oinfo.ipos[i] = ipos[i]
+        oinfo.level = level
         return cur
 
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.max_domain, dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data)
         data.array = domain_mask.data
-        data.domain = -1
         self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
         cdef int i
         domain_ids = []
@@ -250,99 +283,69 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
-        #Get 3x3x3 neighbors, although the 1,1,1 oct is the
-        #central one. 
-        #Return an array of Octs
-        cdef np.int64_t curopos[3]
-        cdef np.int64_t curnpos[3]
-        cdef np.int64_t npos[3]
-        cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
-        cdef np.float64_t dds[3], cp[3], pp[3]
+    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors):
         cdef Oct* candidate
-        for i in range(27): neighbors[i] = NULL
         nn = 0
-        raise RuntimeError
-        #for ni in range(3):
-        #    for nj in range(3):
-        #        for nk in range(3):
-        #            if ni == nj == nk == 1:
-        #                neighbors[nn] = o
-        #                nn += 1
-        #                continue
-        #            npos[0] = o.pos[0] + (ni - 1)
-        #            npos[1] = o.pos[1] + (nj - 1)
-        #            npos[2] = o.pos[2] + (nk - 1)
-        #            for i in range(3):
-        #                # Periodicity
-        #                if npos[i] == -1:
-        #                    npos[i] = (self.nn[i]  << o.level) - 1
-        #                elif npos[i] == (self.nn[i] << o.level):
-        #                    npos[i] = 0
-        #                curopos[i] = o.pos[i]
-        #                curnpos[i] = npos[i] 
-        #            # Now we have our neighbor position and a safe place to
-        #            # keep it.  curnpos will be the root index of the neighbor
-        #            # at a given level, and npos will be constant.  curopos is
-        #            # the candidate root at a level.
-        #            candidate = o
-        #            while candidate != NULL:
-        #                if ((curopos[0] == curnpos[0]) and 
-        #                    (curopos[1] == curnpos[1]) and
-        #                    (curopos[2] == curnpos[2])):
-        #                    break
-        #                # This one doesn't meet it, so we pop up a level.
-        #                # First we update our positions, then we update our
-        #                # candidate.
-        #                for i in range(3):
-        #                    # We strip a digit off the right
-        #                    curopos[i] = (curopos[i] >> 1)
-        #                    curnpos[i] = (curnpos[i] >> 1)
-        #                # Now we update to the candidate's parent, which should
-        #                # have a matching position to curopos[]
-        #                # TODO: This has not survived the transition to
-        #                # mostly-stateless Octs!
-        #                raise RuntimeError
-        #                candidate = candidate.parent
-        #            if candidate == NULL:
-        #                # Worst case scenario
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> (o.level))
-        #                candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        #            # Now we have the common root, which may be NULL
-        #            while candidate.level < o.level:
-        #                dl = o.level - (candidate.level + 1)
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> dl) & 1
-        #                if candidate.children[cind(ind[0],ind[1],ind[2])] \
-        #                        == NULL:
-        #                    break
-        #                candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
-        #            neighbors[nn] = candidate
-        #            nn += 1
+        # We are going to do a brute-force search here.
+        # This is not the most efficient -- in fact, it's relatively bad.  But
+        # we will attempt to improve it in a future iteration, where we will
+        # grow a stack of parent Octs.
+        # Note that in the first iteration, we will just find the up-to-27
+        # neighbors, including the main oct.
+        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef OctList *olist, *my_list
+        my_list = olist = NULL
+        cdef Oct *cand
+        cdef np.int64_t npos[3], ndim[3]
+        # Now we get our boundaries for this level, so that we can wrap around
+        # if need be.
+        # ndim is the oct dimensions of the level, not the cell dimensions.
+        for i in range(3):
+            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            ndim[i] = (ndim[i] >> self.oref)
+        for i in range(3):
+            npos[0] = (oi.ipos[0] + (1 - i))
+            if npos[0] < 0: npos[0] += ndim[0]
+            if npos[0] >= ndim[0]: npos[0] -= ndim[0]
+            for j in range(3):
+                npos[1] = (oi.ipos[1] + (1 - j))
+                if npos[1] < 0: npos[1] += ndim[1]
+                if npos[1] >= ndim[1]: npos[1] -= ndim[1]
+                for k in range(3):
+                    npos[2] = (oi.ipos[2] + (1 - k))
+                    if npos[2] < 0: npos[2] += ndim[2]
+                    if npos[2] >= ndim[2]: npos[2] -= ndim[2]
+                    # Now we have our npos, which we just need to find.
+                    # Level 0 gets bootstrapped
+                    for n in range(3):
+                        ind[n] = ((npos[n] >> (oi.level)) & 1)
+                    cand = NULL
+                    self.get_root(ind, &cand)
+                    # We should not get a NULL if we handle periodicity
+                    # correctly, but we might.
+                    if cand == NULL: continue
+                    for level in range(1, oi.level+1):
+                        if cand.children == NULL: break
+                        for n in range(3):
+                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                        ii = cind(ind[0],ind[1],ind[2])
+                        if cand.children[ii] == NULL: break
+                        cand = cand.children[ii]
+                    if cand != NULL:
+                        nfound += 1
+                        olist = OctList_append(olist, cand)
+                        if my_list == NULL: my_list = olist
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def get_neighbor_boundaries(self, oppos):
-        cdef int i, ii
-        cdef np.float64_t ppos[3]
-        for i in range(3):
-            ppos[i] = oppos[i]
-        cdef Oct *main = self.get(ppos)
-        cdef Oct* neighbors[27]
-        self.neighbors(main, neighbors)
-        cdef np.ndarray[np.float64_t, ndim=2] bounds
-        cdef np.float64_t corner[3], size[3]
-        bounds = np.zeros((27,6), dtype="float64")
-        tnp = 0
-        raise RuntimeError
-        for i in range(27):
-            self.oct_bounds(neighbors[i], corner, size)
-            for ii in range(3):
-                bounds[i, ii] = corner[ii]
-                bounds[i, 3+ii] = size[ii]
-        return bounds
+        olist = my_list
+        cdef int noct = OctList_count(olist)
+        cdef Oct **neighbors
+        neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+        for i in range(noct):
+            neighbors[i] = olist.o
+            olist = olist.next
+        OctList_delete(my_list)
+        nneighbors[0] = noct
+        return neighbors
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -352,11 +355,10 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
-        coords = np.zeros((num_octs * 8), dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
+        coords = np.zeros((num_octs * data.nz), dtype="uint8")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
 
@@ -367,12 +369,11 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs * 8, 3), dtype="int64")
-        cdef OctVisitorData data
+        coords = np.empty((num_octs * data.nz, 3), dtype="int64")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
 
@@ -383,13 +384,12 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_octs * 8, dtype="int64")
-        cdef OctVisitorData data
+        res = np.empty(num_octs * data.nz, dtype="int64")
         data.array = <void *> res.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
 
@@ -400,12 +400,11 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
+        fwidth = np.empty((num_octs * data.nz, 3), dtype="float64")
         data.array = <void *> fwidth.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
         for i in range(3):
@@ -420,13 +419,12 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
+        coords = np.empty((num_octs * data.nz, 3), dtype="float64")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
         cdef np.float64_t base_dx
@@ -456,8 +454,8 @@
             else:
                 dest = np.zeros(num_cells, dtype=source.dtype, order='C')
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.index = offset
-        data.domain = domain_id
         # We only need this so we can continue calculating the offset
         data.dims = dims
         cdef void *p[2]
@@ -474,14 +472,16 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
-        if (data.global_index + 1) * 8 * data.dims > source.size:
+        if (data.global_index + 1) * data.nz * data.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
-            print (data.global_index + 1) * 8 * data.dims - source.size
+            print (data.global_index + 1) * data.nz * data.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
         if data.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
             print data.index - dest.size
+            print (data.global_index + 1) * data.nz * data.dims, source.size
+            print num_cells
             raise RuntimeError
         if num_cells >= 0:
             return dest
@@ -492,10 +492,8 @@
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
         cdef OctVisitorData data
-        data.domain = domain_id
+        self.setup_data(&data, domain_id)
         data.array = ind.data
-        data.index = 0
-        data.last = -1
         self.visit_all_octs(selector, oct_visitors.index_octs, &data)
         return ind
 
@@ -578,6 +576,7 @@
         if parent.children != NULL:
             next = parent.children[cind(ind[0],ind[1],ind[2])]
         else:
+            # This *8 does NOT need to be made generic.
             parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 parent.children[i] = NULL
@@ -607,13 +606,12 @@
             file_inds[i] = -1
             cell_inds[i] = 9
         cdef OctVisitorData data
-        data.index = 0
+        self.setup_data(&data, domain_id)
         cdef void *p[3]
         p[0] = levels.data
         p[1] = file_inds.data
         p[2] = cell_inds.data
         data.array = p
-        data.domain = domain_id
         self.visit_all_octs(selector, self.fill_func, &data)
         return levels, cell_inds, file_inds
 
@@ -641,10 +639,9 @@
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = 1
+        self.setup_data(&data, 1)
         self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        assert ((data.global_index+1)*8 == data.index)
+        assert ((data.global_index+1)*data.nz == data.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
@@ -659,9 +656,11 @@
 
 cdef class SparseOctreeContainer(OctreeContainer):
 
-    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge,
+                 over_refine = 1):
         cdef int i, j, k, p
         self.partial_coverage = 1
+        self.oref = over_refine
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
         self.max_domain = -1
@@ -807,3 +806,33 @@
                             dest[local_filled + offset] = source[ox,oy,oz]
                             local_filled += 1
         return local_filled
+
+cdef OctList *OctList_append(OctList *olist, Oct *o):
+    cdef OctList *this = olist
+    if this == NULL:
+        this = <OctList *> malloc(sizeof(OctList))
+        this.next = NULL
+        this.o = o
+        return this
+    while this.next != NULL:
+        this = this.next
+    this.next = <OctList*> malloc(sizeof(OctList))
+    this = this.next
+    this.o = o
+    this.next = NULL
+    return this
+
+cdef int OctList_count(OctList *olist):
+    cdef OctList *this = olist
+    cdef int i = 0 # Count the list
+    while this != NULL:
+        i += 1
+        this = this.next
+    return i
+
+cdef void OctList_delete(OctList *olist):
+    cdef OctList *next, *this = olist
+    while this != NULL:
+        next = this.next
+        free(this)
+        this = next

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -43,6 +43,10 @@
     int dims
     np.int32_t domain
     np.int8_t level
+    np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
+                   # To calculate nzones, 1 << (oref * 3)
+    np.int32_t nz
+
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
@@ -64,10 +68,13 @@
 cdef oct_visitor_function fill_file_indices_rind
 
 cdef inline int cind(int i, int j, int k):
+    # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
     return (((i*2)+j)*2+k)
 
 cdef inline int oind(OctVisitorData *data):
-    return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
 
 cdef inline int rind(OctVisitorData *data):
-    return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
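
An aside on the over-refinement bookkeeping above (helper names here are
illustrative, not part of the changeset): with level oref, each oct holds
(2**oref)**3 zones, and oind() flattens a (d, d, d) zone index with
d = 1 << oref.

    def nzones(oref):
        # 1 => 8 zones, 2 => 64 zones, 3 => 512 zones, ...
        return 1 << (oref * 3)

    def flat_index(ind, oref):
        # Row-major flattening of a (d, d, d) zone index, d = 2**oref,
        # mirroring the Cython oind() above.
        d = 1 << oref
        return (ind[0] * d + ind[1]) * d + ind[2]

    assert nzones(1) == 8 and nzones(2) == 64
    assert flat_index((1, 1, 1), 1) == 7   # last zone of a plain oct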

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
     if selected == 0: return
     cdef int i
     # There are this many records between "octs"
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -50,7 +50,7 @@
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
     cdef int i
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -75,7 +75,7 @@
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         data.index += 1
-    cdef np.int64_t index = data.index * 8
+    cdef np.int64_t index = data.index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -83,7 +83,7 @@
     if selected == 0: return
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * 8
+    cdef np.int64_t index = data.global_index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -102,7 +102,7 @@
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
     for i in range(3):
-        coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
+        coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
     data.index += 1
 
 cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -120,9 +120,9 @@
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
-        c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i]) 
+        c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i]) 
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
@@ -135,7 +135,7 @@
     cdef np.float64_t *fwidth = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
         fwidth[data.index * 3 + i] = dx
     data.index += 1

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -5,7 +5,7 @@
 Affiliation: UC Santa Cruz
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -32,7 +32,7 @@
 from libc.math cimport sqrt
 
 from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 
 cdef extern from "alloca.h":
     void *alloca(int)
@@ -62,7 +62,6 @@
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
-    cdef public int bad_indices
     cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -54,7 +54,6 @@
                      fields = None, int domain_id = -1,
                      int domain_offset = 0):
         cdef int nf, i, j
-        self.bad_indices = 0
         if fields is None:
             fields = []
         nf = len(fields)
@@ -66,7 +65,8 @@
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
         cdef int dims[3]
-        dims[0] = dims[1] = dims[2] = 2
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
         cdef OctInfo oi
         cdef np.int64_t offset, moff
         cdef Oct *oct
@@ -98,7 +98,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Note that this has to be our local index, not our in-file index.
-            offset = dom_ind[oct.domain_ind - moff] * 8
+            offset = dom_ind[oct.domain_ind - moff] * nz
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -86,7 +86,8 @@
                 sum(d.total_particles.values()) for d in self.data_files)
         pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
-            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
+            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge,
+            over_refine = pf.over_refine_factor)
         self.oct_handler.n_ref = pf.n_ref
         mylog.info("Allocating for %0.3e particles", self.total_particles)
         # No more than 256^3 in the region finder.
@@ -147,8 +148,9 @@
                 data_files = [self.data_files[i] for i in
                               self.regions.identify_data_files(dobj.selector)]
             base_region = getattr(dobj, "base_region", dobj)
+            oref = self.parameter_file.over_refine_factor
             subset = [ParticleOctreeSubset(base_region, data_files, 
-                        self.parameter_file)]
+                        self.parameter_file, over_refine_factor = oref)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -205,6 +205,7 @@
         cdef int i, j, k, m, n, ind[3]
         cdef Oct *noct
         cdef np.uint64_t prefix1, prefix2
+        # TODO: This does not need to be changed.
         o.children = <Oct **> malloc(sizeof(Oct *)*8)
         for i in range(2):
             for j in range(2):

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/particle_smooth.pxd
--- /dev/null
+++ b/yt/geometry/particle_smooth.pxd
@@ -0,0 +1,92 @@
+"""
+Particle smoothing in cells
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, qsort
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .particle_deposit cimport sph_kernel, gind
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef struct NeighborList
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef inline np.float64_t r2dist(np.float64_t ppos[3],
+                                np.float64_t cpos[3],
+                                np.float64_t DW[3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+    return r2
+
+cdef class ParticleSmoothOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef public object nvals
+    cdef np.float64_t DW[3]
+    cdef int nfields
+    cdef int maxn
+    cdef int curn
+    cdef np.int64_t *doffs
+    cdef np.int64_t *pinds
+    cdef np.int64_t *pcounts
+    cdef np.float64_t *ppos
+    # Note that we are preallocating here, so this is *not* threadsafe.
+    cdef NeighborList *neighbors
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset)
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3])
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields)
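
An aside on r2dist() above, which is a minimum-image (periodic) squared
distance: a NumPy sketch of the fully periodic case, with illustrative
names.

    import numpy as np

    def r2dist_py(ppos, cpos, DW):
        # Wrap each displacement component into [-DW/2, DW/2], then
        # square and sum.
        DR = np.asarray(ppos, dtype="float64") - np.asarray(cpos, dtype="float64")
        DW = np.asarray(DW, dtype="float64")
        DR -= DW * np.rint(DR / DW)
        return float(np.dot(DR, DR))

    # In a unit box, points at x = 0.05 and x = 0.95 are 0.1 apart:
    assert abs(r2dist_py([0.05, 0, 0], [0.95, 0, 0], [1, 1, 1]) - 0.01) < 1e-12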

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/particle_smooth.pyx
--- /dev/null
+++ b/yt/geometry/particle_smooth.pyx
@@ -0,0 +1,359 @@
+"""
+Particle smoothing in cells
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, realloc
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, OctInfo
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1, *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
+cdef class ParticleSmoothOperation:
+    def __init__(self, nvals, nfields, max_neighbors):
+        # This is the set of cells, in grids, blocks or octs, we are handling.
+        cdef int i
+        self.nvals = nvals 
+        self.nfields = nfields
+        self.maxn = max_neighbors
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
+
+    def initialize(self, *args):
+        raise NotImplementedError
+
+    def finalize(self, *args):
+        raise NotImplementedError
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_octree(self, OctreeContainer octree,
+                     np.ndarray[np.int64_t, ndim=1] dom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0,
+                     int test_neighbors = 0):
+        # This will be a several-step operation.
+        #
+        # We first take all of our particles and assign them to Octs.  If they
+        # are not in an Oct, we will assume they are out of bounds.  Note that
+        # this means that if we have loaded neighbor particles for which an Oct
+        # does not exist, we are going to be discarding them -- so sparse
+        # octrees will need to ensure that neighbor octs *exist*.  Particles
+        # will be assigned in a new NumPy array.  Note that this incurs
+        # overhead, but reduces complexity as we will now be able to use
+        # argsort.
+        #
+        # After the particles have been assigned to Octs, we process each Oct
+        # individually.  We will do this by calling "get" for the *first*
+        # particle in each set of Octs in the sorted list.  After this, we get
+        # neighbors for each Oct.
+        #
+        # Now, with the set of neighbors (and thus their indices) we allocate
+        # an array of particles and their fields, fill these in, and call our
+        # process function.
+        #
+        # This is not terribly efficient -- for starters, the neighbor function
+        # is not the most efficient yet.  We will also need to handle some
+        # mechanism of an expandable array for holding pointers to Octs, so
+        # that we can deal with >27 neighbors.  As I write this comment,
+        # neighbors() only returns 27 neighbors.
+        cdef int nf, i, j, dims[3], n
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
+        cdef int nsize = 0
+        cdef np.int64_t *nind = NULL
+        cdef OctInfo oi
+        cdef Oct *oct, **neighbors = NULL
+        cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
+        cdef np.int64_t *doffs, *pinds, *pcounts, poff
+        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
+        numpart = positions.shape[0]
+        # pcount is the number of particles per oct.
+        pcount = np.zeros_like(dom_ind)
+        # doff is the offset to a given oct in the sorted particles.
+        doff = np.zeros_like(dom_ind) - 1
+        moff = octree.get_domain_offset(domain_id + domain_offset)
+        # pdoms points particles at their octs.  So the value in this array, for
+        # a given index, is the local oct index.
+        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        if fields is None:
+            fields = []
+        nf = len(fields)
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        for i in range(3):
+            self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = octree.get(pos)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            # Note that this has to be our local index, not our in-file index.
+            # This is the particle count, which we'll use once we have sorted
+            # the particles to calculate the offsets into each oct's particles.
+            offset = oct.domain_ind - moff
+            pcount[offset] += 1
+            pdoms[i] = offset # We store the *actual* offset.
+        # Now we have oct assignments.  Let's sort them.
+        # Note that what we will be providing to our processing functions will
+        # actually be indirectly-sorted fields.  This preserves memory at the
+        # expense of additional pointer lookups.
+        pind = np.argsort(pdoms)
+        pind = np.asarray(pind, dtype='int64', order='C')
+        # So what this means is that we now have all the oct-0 particle indices
+        # in order, then the oct-1, etc etc.
+        # This now gives us the indices to the particles for each domain.
+        for i in range(positions.shape[0]):
+            # This value, poff, is the index of the particle in the *unsorted*
+            # arrays.
+            poff = pind[i] 
+            offset = pdoms[poff] 
+            # If we have yet to assign the starting index to this oct, we do so
+            # now.
+            if doff[offset] < 0: doff[offset] = i
+        # Now doff is full of offsets to the first entry in the pind that
+        # refers to that oct's particles.
+        ppos = <np.float64_t *> positions.data
+        doffs = <np.int64_t*> doff.data
+        pinds = <np.int64_t*> pind.data
+        pcounts = <np.int64_t*> pcount.data
+        nsize = 27
+        nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
+        for i in range(doff.shape[0]):
+            # Nothing assigned.
+            if doff[i] < 0: continue
+            # The first particle assigned to this oct should be the one we
+            # want.
+            poff = pind[doff[i]]
+            for j in range(3):
+                pos[j] = positions[poff, j]
+            oct = octree.get(pos, &oi)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            offset = dom_ind[oct.domain_ind - moff] * nz
+            neighbors = octree.neighbors(&oi, &nneighbors)
+            # Now we have all our neighbors.  And, we should be set for what
+            # else we need to do.
+            if nneighbors > nsize:
+                nind = <np.int64_t *> realloc(
+                    nind, sizeof(np.int64_t)*nneighbors)
+                nsize = nneighbors
+            for j in range(nneighbors):
+                nind[j] = neighbors[j].domain_ind - moff
+                for n in range(j):
+                    if nind[j] == nind[n]:
+                        nind[j] = -1
+                        break
+            # This is allocated by the neighbors function, so we deallocate it.
+            free(neighbors)
+            self.neighbor_process(dims, oi.left_edge, oi.dds,
+                         ppos, field_pointers, nneighbors, nind, doffs,
+                         pinds, pcounts, offset)
+        if nind != NULL:
+            free(nind)
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_grid(self, gobj,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None):
+        raise NotImplementedError
+
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        raise NotImplementedError
+
+    cdef void neighbor_reset(self):
+        self.curn = 0
+        for i in range(self.maxn):
+            self.neighbors[i].pn = -1
+            self.neighbors[i].r2 = 1e300
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        cdef NeighborList *cur
+        cdef int i
+        # _c means candidate (what we're evaluating)
+        # _o means other (the item in the list)
+        cdef np.float64_t r2_c, r2_o
+        cdef np.int64_t pn_c, pn_o
+        # If we're less than the maximum number of neighbors, we simply append.
+        # After that, we will sort, and then only compare against the rightmost
+        # entries.
+        if self.curn < self.maxn:
+            cur = &self.neighbors[self.curn]
+            cur.pn = pn
+            cur.r2 = r2dist(ppos, cpos, self.DW)
+            self.curn += 1
+            if self.curn == self.maxn:
+                # This time we sort it, so that future insertions will be able
+                # to be done in order.
+                qsort(self.neighbors, self.curn, sizeof(NeighborList), 
+                      Neighbor_compare)
+            return
+        # This will go (curn - 1) through 0.
+        r2_c = r2dist(ppos, cpos, self.DW)
+        pn_c = pn
+        for i in range((self.curn - 1), -1, -1):
+            # First we evaluate against i.  If our candidate radius is greater
+            # than the one we're inspecting, we quit.
+            cur = &self.neighbors[i]
+            r2_o = cur.r2
+            pn_o = cur.pn
+            if r2_c >= r2_o:
+                break
+            # Now we know we need to swap them.  First we assign our candidate
+            # values to cur.
+            cur.r2 = r2_c
+            cur.pn = pn_c
+            if i + 1 >= self.maxn:
+                continue # No swapping
+            cur = &self.neighbors[i + 1]
+            cur.r2 = r2_o
+            cur.pn = pn_o
+        # At this point, we've evaluated all the particles and we should have a
+        # sorted set of values.  So, we're done.
+
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3]
+                            ):
+        # We are now given the number of neighbors, the indices into the
+        # domains for them, and the number of particles for each.
+        cdef int ni, i, j
+        cdef np.int64_t offset, pn, pc
+        cdef np.float64_t pos[3]
+        self.neighbor_reset()
+        for ni in range(nneighbors):
+            if nind[ni] == -1: continue
+            offset = doffs[nind[ni]]
+            pc = pcounts[nind[ni]]
+            for i in range(pc):
+                pn = pinds[offset + i]
+                for j in range(3):
+                    pos[j] = ppos[pn * 3 + j]
+                self.neighbor_eval(pn, pos, cpos)
+
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset):
+        # Note that we assume that fields[0] == smoothing length in the native
+        # units supplied.  We can now iterate over every cell in the block and
+        # every particle to find the nearest.  We will use a priority heap.
+        cdef int i, j, k
+        cdef np.float64_t cpos[3]
+        cpos[0] = left_edge[0] + 0.5*dds[0]
+        for i in range(dim[0]):
+            cpos[1] = left_edge[1] + 0.5*dds[1]
+            for j in range(dim[1]):
+                cpos[2] = left_edge[2] + 0.5*dds[2]
+                for k in range(dim[2]):
+                    self.neighbor_find(nneighbors, nind, doffs, pcounts,
+                        pinds, ppos, cpos)
+                    # Now we have all our neighbors in our neighbor list.
+                    self.process(offset, i, j, k, dim, cpos, fields)
+                    cpos[2] += dds[2]
+                cpos[1] += dds[1]
+            cpos[0] += dds[0]
+
+
+cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
+    cdef np.float64_t **fp
+    cdef public object vals
+    def initialize(self):
+        cdef int i
+        if self.nfields < 4:
+            # We need at least four fields: the smoothing length, the
+            # particle mass, the density, and the field to smooth.
+            raise RuntimeError
+        cdef np.ndarray tarr
+        self.fp = <np.float64_t **> malloc(
+            sizeof(np.float64_t *) * self.nfields)
+        self.vals = []
+        for i in range(self.nfields):
+            tarr = np.zeros(self.nvals, dtype="float64", order="F")
+            self.vals.append(tarr)
+            self.fp[i] = <np.float64_t *> tarr.data
+
+    def finalize(self):
+        free(self.fp)
+        return self.vals
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef int n, fi
+        cdef np.float64_t weight, r2, val
+        cdef np.int64_t pn
+        for n in range(self.curn):
+            # No normalization for the moment.
+            # fields[0] is the smoothing length.
+            r2 = self.neighbors[n].r2
+            pn = self.neighbors[n].pn
+            # Smoothing kernel weight function
+            weight = sph_kernel(sqrt(r2) / fields[0][pn])
+            # Mass of the particle times the value divided by the Density
+            for fi in range(self.nfields - 3):
+                val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
+        return
+
+simple_neighbor_smooth = SimpleNeighborSmooth
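
An aside on the assign/argsort/offset pattern in process_octree() above,
shown with plain NumPy on toy data (names are illustrative):

    import numpy as np

    pdoms = np.array([2, 0, 2, 1, 0, 2])      # local oct index per particle
    pind = np.argsort(pdoms)                  # particle indices grouped by oct
    pcount = np.bincount(pdoms, minlength=3)  # particles per oct
    doff = np.zeros(3, dtype="int64") - 1     # first sorted slot per oct
    for i in range(pind.shape[0]):
        off = pdoms[pind[i]]
        if doff[off] < 0:
            doff[off] = i
    # Oct k's particles are pind[doff[k]:doff[k] + pcount[k]]:
    assert np.array_equal(np.sort(pind[doff[2]:doff[2] + pcount[2]]), [0, 2, 5])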

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,6 +40,9 @@
                         oct_visitor_function *func,
                         OctVisitorData *data,
                         int visit_covered = ?)
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -157,16 +157,13 @@
 
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.last = -1
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
         return data.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
 
@@ -230,6 +227,10 @@
                         if root.children != NULL:
                             ch = root.children[cind(i, j, k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
+                            # Note that data.pos is always going to be the
+                            # position of the Oct -- it is *not* always going
+                            # to be the same as the position of the cell under
+                            # investigation.
                             data.pos[0] = (data.pos[0] << 1) + i
                             data.pos[1] = (data.pos[1] << 1) + j
                             data.pos[2] = (data.pos[2] << 1) + k
@@ -242,21 +243,60 @@
                             data.pos[2] = (data.pos[2] >> 1)
                             data.level -= 1
                         elif this_level == 1:
-                            selected = self.select_cell(spos, sdds)
-                            if ch != NULL:
-                                selected *= self.overlap_cells
                             data.global_index += increment
                             increment = 0
-                            data.ind[0] = i
-                            data.ind[1] = j
-                            data.ind[2] = k
-                            func(root, data, selected)
+                            self.visit_oct_cells(data, root, ch, spos, sdds,
+                                                 func, i, j, k)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
             this_level = 0 # We turn this off for the second pass.
             iter += 1
 
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k):
+        # We can short-circuit the whole process if data.oref == 1.
+        # This saves us some funny-business.
+        cdef int selected
+        if data.oref == 1:
+            selected = self.select_cell(spos, sdds)
+            if ch != NULL:
+                selected *= self.overlap_cells
+            # data.ind refers to the cell, not to the oct.
+            data.ind[0] = i
+            data.ind[1] = j
+            data.ind[2] = k
+            func(root, data, selected)
+            return
+        # Okay, now that we've got that out of the way, we have to do some
+        # other checks here.  In this case, spos[] is the position of the
+        # center of a *possible* oct child, which means it is the center of a
+        # cluster of cells.  That cluster might have 1, 8, 64, ... cells in it.
+        # But, we can figure it out by calculating the cell dds.
+        cdef np.float64_t dds[3], pos[3]
+        cdef int ci, cj, ck
+        cdef int nr = (1 << (data.oref - 1))
+        for ci in range(3):
+            dds[ci] = sdds[ci] / nr
+        # Bootstrap at the first index.
+        pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
+        for ci in range(nr):
+            pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
+            for cj in range(nr):
+                pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
+                for ck in range(nr):
+                    selected = self.select_cell(pos, dds)
+                    if ch != NULL:
+                        selected *= self.overlap_cells
+                    data.ind[0] = ci + i * nr
+                    data.ind[1] = cj + j * nr
+                    data.ind[2] = ck + k * nr
+                    func(root, data, selected)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
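
An aside on the loop in visit_oct_cells() above, reduced to one axis with
toy numbers (not from the changeset): spos/sdds describe a *child*
position, and each child covers nr cells per axis when oref > 1.

    oref = 2
    nr = 1 << (oref - 1)              # cells per child along each axis
    spos, sdds = 0.25, 0.5            # one child of a unit root oct
    dds = sdds / nr                   # cell width
    centers = [(spos - sdds / 2.0) + dds * (ci + 0.5) for ci in range(nr)]
    assert centers == [0.125, 0.375]  # cell centers inside that child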

diff -r 36d4de9d67827150246ecb1f5a52e583457988f7 -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -43,6 +43,15 @@
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd",
                          "yt/geometry/particle_deposit.pxd"])
+    config.add_extension("particle_smooth", 
+                ["yt/geometry/particle_smooth.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd",
+                         "yt/geometry/particle_deposit.pxd",
+                         "yt/geometry/particle_smooth.pxd"])
     config.add_extension("fake_octree", 
                 ["yt/geometry/fake_octree.pyx"],
                 include_dirs=["yt/utilities/lib/"],


https://bitbucket.org/yt_analysis/yt/commits/61bab0aea184/
Changeset:   61bab0aea184
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-28 21:17:54
Summary:     Removing unnecessary selector_fill calls.
Affected #:  3 files

diff -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d -r 61bab0aea1843046034e3de17e7a1dc5e195bbf7 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -39,6 +39,16 @@
 import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
+def cell_count_cache(func):
+    def cc_cache_func(self, dobj):
+        if hash(dobj.selector) != self._last_selector_id:
+            self._cell_count = -1
+        rv = func(self, dobj)
+        self._cell_count = rv.shape[0]
+        self._last_selector_id = hash(dobj.selector)
+        return rv
+    return cc_cache_func
+
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
@@ -47,7 +57,7 @@
     _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
     _domain_offset = 0
-    _num_octs = -1
+    _cell_count = -1
 
     def __init__(self, base_region, domain, pf, over_refine_factor = 1):
         self._num_zones = 1 << (over_refine_factor)
@@ -168,37 +178,25 @@
             vals = np.asfortranarray(vals)
         return vals
 
+    @cell_count_cache
     def select_icoords(self, dobj):
-        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
-                                     num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.icoords(dobj.selector, domain_id = self.domain_id,
+                                     num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_fcoords(self, dobj):
-        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
-                                     num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.fcoords(dobj.selector, domain_id = self.domain_id,
+                                        num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_fwidth(self, dobj):
-        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
-                                  num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.fwidth(dobj.selector, domain_id = self.domain_id,
+                                       num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_ires(self, dobj):
-        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
-                                  num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.ires(dobj.selector, domain_id = self.domain_id,
+                                     num_cells = self._cell_count)
 
     def select(self, selector, source, dest, offset):
         n = self.oct_handler.selector_fill(selector, source, dest, offset,
@@ -206,11 +204,7 @@
         return n
 
     def count(self, selector):
-        if hash(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+        return -1
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
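
An aside on what cell_count_cache buys: the first select_* call for a
given selector passes num_cells == -1, which forces the oct handler to
run a counting pass; later calls with the same selector reuse the cached
count.  A toy sketch with hypothetical names:

    import numpy as np

    class ToyHandler:
        counting_passes = 0
        def icoords(self, selector, num_cells=-1):
            if num_cells == -1:
                ToyHandler.counting_passes += 1  # stands in for count_oct_cells
                num_cells = 8
            return np.empty((num_cells, 3), dtype="int64")

    h = ToyHandler()
    first = h.icoords("sel")                            # counting pass runs
    again = h.icoords("sel", num_cells=first.shape[0])  # cached count reused
    assert ToyHandler.counting_passes == 1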

diff -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d -r 61bab0aea1843046034e3de17e7a1dc5e195bbf7 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1022,17 +1022,16 @@
         cdef int i
         return self.mask(selector).sum()
 
-    def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
+        # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
         cdef np.int64_t sfc
         cdef int acoords[3], i
-        # We call it num_octs, but it's really num_cells.
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="int64")
+        coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
@@ -1045,18 +1044,17 @@
             filled += 1
         return coords
 
-    def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
+        # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
         cdef np.int64_t sfc
         cdef np.float64_t pos[3]
         cdef int acoords[3], i
-        # We call it num_octs, but it's really num_cells.
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="float64")
+        coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
@@ -1069,25 +1067,25 @@
             filled += 1
         return coords
 
-    def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef int i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.float64_t, ndim=2] width
-        width = np.zeros((num_octs, 3), dtype="float64")
+        width = np.zeros((num_cells, 3), dtype="float64")
         for i in range(3):
             width[:,i] = self.dds[i]
         return width
 
-    def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.zeros(num_octs, dtype="int64")
+        res = np.zeros(num_cells, dtype="int64")
         return res
 
     @cython.boundscheck(False)
@@ -1140,24 +1138,23 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_octs = -1):
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
         cdef int i, status
         cdef double dpos[3]
         cdef np.float64_t pos[3]
         cdef np.int64_t sfc
         if self._last_selector_id == hash(selector):
             return self._last_mask
-        if num_octs == -1:
+        if num_cells == -1:
             # We need to count, but this process will only occur one time,
-            # since num_octs will later be cached.
-            num_octs = self.sfc_end - self.sfc_start + 1
-        #assert(num_octs == (self.sfc_end - self.sfc_start + 1))
+            # since num_cells will later be cached.
+            num_cells = self.sfc_end - self.sfc_start + 1
         cdef np.ndarray[np.uint8_t, ndim=1] mask
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
-        mask = np.zeros((num_octs), dtype="uint8")
+        mask = np.zeros((num_cells), dtype="uint8")
         status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
                                             self.sfc_end)
         check_artio_status(status) 

diff -r 3928be73ffdf87d0d8fb71f531fc9add3ce3b02d -r 61bab0aea1843046034e3de17e7a1dc5e195bbf7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -350,14 +350,14 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
              int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
-        coords = np.zeros((num_octs * data.nz), dtype="uint8")
+        coords = np.zeros((num_cells), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
@@ -365,14 +365,14 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs * data.nz, 3), dtype="int64")
+        coords = np.empty((num_cells, 3), dtype="int64")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
@@ -380,15 +380,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_octs * data.nz, dtype="int64")
+        res = np.empty(num_cells, dtype="int64")
         data.array = <void *> res.data
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
@@ -396,14 +396,14 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_octs * data.nz, 3), dtype="float64")
+        fwidth = np.empty((num_cells, 3), dtype="float64")
         data.array = <void *> fwidth.data
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
@@ -415,15 +415,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs * data.nz, 3), dtype="float64")
+        coords = np.empty((num_cells, 3), dtype="float64")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i


https://bitbucket.org/yt_analysis/yt/commits/805b5cbdf075/
Changeset:   805b5cbdf075
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-28 22:30:02
Summary:     Adding tests for over_refine.
Affected #:  1 file

diff -r 93dc5368d870288a369447061324a74201beccf3 -r 805b5cbdf075af44a4e6580de2d706984f68ebaa yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -59,6 +59,35 @@
         v = np.bincount(bi.astype("int64"))
         yield assert_equal, v.max() <= n_ref, True
 
+def test_particle_overrefine():
+    np.random.seed(int(0x4d3d3d3))
+    pos = []
+    data = {}
+    bbox = []
+    for i, ax in enumerate('xyz'):
+        DW = DRE[i] - DLE[i]
+        LE = DLE[i]
+        data["particle_position_%s" % ax] = \
+            np.random.normal(0.5, scale=0.05, size=(NPART)) * DW + LE
+        bbox.append( [DLE[i], DRE[i]] )
+    bbox = np.array(bbox)
+    _attrs = ('icoords', 'fcoords', 'fwidth', 'ires')
+    for n_ref in [16, 32, 64, 512, 1024]:
+        pf1 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
+        dd1 = pf1.h.all_data()
+        v1 = dict((a, getattr(dd1, a)) for a in _attrs)
+        cv1 = dd1["CellVolumeCode"].sum(dtype="float64")
+        for over_refine in [1, 2, 3]:
+            f = 1 << (3*(over_refine-1))
+            pf2 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref,
+                                over_refine_factor = over_refine)
+            dd2 = pf2.h.all_data()
+            v2 = dict((a, getattr(dd2, a)) for a in _attrs)
+            for a in sorted(v1):
+                yield assert_equal, v1[a].size * f, v2[a].size
+            cv2 = dd2["CellVolumeCode"].sum(dtype="float64")
+            yield assert_equal, cv1, cv2
+
 if __name__=="__main__":
     for i in test_add_particles_random():
         i[0](*i[1:])
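
An aside on the factor f in the test above: with over_refine_factor o,
each oct holds (2**o)**3 zones, so relative to the o == 1 baseline of 8
zones per oct the cell counts scale by 1 << (3*(o - 1)).

    for o in (1, 2, 3):
        f = 1 << (3 * (o - 1))
        assert (1 << o) ** 3 == 8 * f   # 8, 64, and 512 zones per oct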


https://bitbucket.org/yt_analysis/yt/commits/46f1d93ecd71/
Changeset:   46f1d93ecd71
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-29 14:06:02
Summary:     Adding over_refine_factor to Tipsy.
Affected #:  1 file

diff -r 805b5cbdf075af44a4e6580de2d706984f68ebaa -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -371,8 +371,9 @@
                  unit_base = None,
                  cosmology_parameters = None,
                  parameter_file = None,
-                 n_ref = 64):
+                 n_ref = 64, over_refine_factor = 1):
         self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
         self.endian = endian
         self.storage_filename = None
         if domain_left_edge is None:


https://bitbucket.org/yt_analysis/yt/commits/f7704b577541/
Changeset:   f7704b577541
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-29 14:30:49
Summary:     Adding periodicity to r2dist and process_octree for smoothing.
Affected #:  3 files

diff -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f -r f7704b57754127235bbc5dca4b39449b50ce5feb yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -159,7 +159,7 @@
         mylog.debug("Smoothing %s particles into %s Octs",
             positions.shape[0], nvals[-1])
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
-            self.domain_id, self._domain_offset)
+            self.domain_id, self._domain_offset, self.pf.periodicity)
         vals = op.finalize()
         if vals is None: return
         if isinstance(vals, list):

diff -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f -r f7704b57754127235bbc5dca4b39449b50ce5feb yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -45,7 +45,8 @@
 
 cdef inline np.float64_t r2dist(np.float64_t ppos[3],
                                 np.float64_t cpos[3],
-                                np.float64_t DW[3]):
+                                np.float64_t DW[3],
+                                bint periodicity[3]):
     cdef int i
     cdef np.float64_t r2, DR
     r2 = 0.0
@@ -65,6 +66,7 @@
     cdef int nfields
     cdef int maxn
     cdef int curn
+    cdef bint periodicity[3]
     cdef np.int64_t *doffs
     cdef np.int64_t *pinds
     cdef np.int64_t *pcounts

diff -r 46f1d93ecd71f9d4e8364431a0a46d1818a2158f -r f7704b57754127235bbc5dca4b39449b50ce5feb yt/geometry/particle_smooth.pyx
--- a/yt/geometry/particle_smooth.pyx
+++ b/yt/geometry/particle_smooth.pyx
@@ -71,7 +71,7 @@
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None, int domain_id = -1,
                      int domain_offset = 0,
-                     int test_neighbors = 0):
+                     periodicity = (True, True, True)):
         # This will be a several-step operation.
         #
         # We first take all of our particles and assign them to Octs.  If they
@@ -127,6 +127,7 @@
             field_pointers[i] = <np.float64_t *> tarr.data
         for i in range(3):
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+            self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
             for j in range(3):
                 pos[j] = positions[i, j]
@@ -229,7 +230,7 @@
         if self.curn < self.maxn:
             cur = &self.neighbors[self.curn]
             cur.pn = pn
-            cur.r2 = r2dist(ppos, cpos, self.DW)
+            cur.r2 = r2dist(ppos, cpos, self.DW, self.periodicity)
             self.curn += 1
             if self.curn == self.maxn:
                 # This time we sort it, so that future insertions will be able
@@ -238,7 +239,7 @@
                       Neighbor_compare)
             return
         # This will go (curn - 1) through 0.
-        r2_c = r2dist(ppos, cpos, self.DW)
+        r2_c = r2dist(ppos, cpos, self.DW, self.periodicity)
         pn_c = pn
         for i in range((self.curn - 1), -1, -1):
             # First we evaluate against i.  If our candidate radius is greater


https://bitbucket.org/yt_analysis/yt/commits/cff348ad3097/
Changeset:   cff348ad3097
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-30 03:56:20
Summary:     Adding try/except block for ID offsets, in case grids don't exist.
Affected #:  1 file

diff -r 68bb0d0b94e4def4eadba5903e1184264851ca1c -r cff348ad309725c0296b9b8f04ddcf9aa022f779 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -57,7 +57,10 @@
         min_level=None, max_level=None, data_source=None):
 
         self.pf = pf
-        self._id_offset = self.pf.h.grids[0]._id_offset
+        try:
+            self._id_offset = pf.h.grids[0]._id_offset
+        except AttributeError:
+            self._id_offset = 0
 
         if data_source is None:
             data_source = pf.h.all_data()


https://bitbucket.org/yt_analysis/yt/commits/3aba7ca1ff0b/
Changeset:   3aba7ca1ff0b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-08-30 04:18:22
Summary:     Convert alloca's to malloc's and free's.

For cases where ngrids is very large (octrees), alloca can completely blow
out the stack, since it allocates on the stack rather than the heap.  This
changes those allocations to malloc/free pairs.
Affected #:  1 file

diff -r cff348ad309725c0296b9b8f04ddcf9aa022f779 -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -267,12 +267,12 @@
     The entire purpose of this function is to move everything from ndarrays
     to internal C pointers. 
     """
-    pgles = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
-    pgres = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
-    pgids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    pgles = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
     for i in range(ngrids):
-        pgles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        pgres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
         pgids[i] = gids[i]
         for j in range(3):
             pgles[i][j] = gles[i, j]
@@ -280,6 +280,11 @@
 
     add_grids(node, ngrids, pgles, pgres, pgids, rank, size)
 
+    for i in range(ngrids):
+        free(pgles[i])
+        free(pgres[i])
+    free(pgles)
+    free(pgres)
 
  
 @cython.boundscheck(False)
@@ -553,22 +558,30 @@
     # Find a Split
     cdef int i, j, k
 
-    data = <np.float64_t ***> alloca(ngrids * sizeof(np.float64_t**))
+    data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
     for i in range(ngrids):
-        data[i] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+        data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
         for j in range(2):
-            data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+            data[i][j] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
         for j in range(3):
             data[i][0][j] = gles[i][j]
             data[i][1][j] = gres[i][j]
 
-    less_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
-    greater_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+    less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
 
     best_dim, split_pos, nless, ngreater = \
         kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
                           less_ids, greater_ids)
  
+    for i in range(ngrids):
+        for j in range(2):
+            free(data[i][j])
+        free(data[i])
+    free(data)
+    free(less_ids)
+    free(greater_ids)
+
     # If best_dim is -1, then we have found a place where there are no choices.
     # Exit out and set the node to None.
     if best_dim == -1:
@@ -579,8 +592,6 @@
     split.dim = best_dim
     split.pos = split_pos
 
-    #del data
-
     # Create a Split
     divide(node, split)
 

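The trade-off above is automatic stack cleanup for explicit frees.  A
minimal sketch of the pattern in C, with illustrative names (build_edges
and gles are not yt's API): heap allocation handles arbitrarily large
ngrids, at the cost of unwinding on failure and freeing on exit.

    #include <stdlib.h>

    /* Sketch only: heap-allocate per-grid edge arrays instead of using
     * alloca, so a very large ngrids cannot exhaust the stack. */
    static int build_edges(size_t ngrids)
    {
        double **gles = malloc(ngrids * sizeof(double *));
        if (gles == NULL) return -1;
        for (size_t i = 0; i < ngrids; i++) {
            gles[i] = malloc(3 * sizeof(double));
            if (gles[i] == NULL) {
                /* Unwind whatever was allocated before the failure. */
                while (i > 0) free(gles[--i]);
                free(gles);
                return -1;
            }
        }
        /* ... fill and use gles ... */
        for (size_t i = 0; i < ngrids; i++) free(gles[i]);
        free(gles);
        return 0;
    }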

https://bitbucket.org/yt_analysis/yt/commits/4c2653d013ab/
Changeset:   4c2653d013ab
Branch:      yt-3.0
User:        drudd
Date:        2013-08-27 22:36:26
Summary:     Updated to the latest version of artio, including the LGPL license.  Added support for grid-only ARTIO files.
Affected #:  16 files

diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -8,7 +8,6 @@
 
 from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
 from yt.geometry.oct_container cimport \
-    OctreeContainer, OctAllocationContainer, \
     SparseOctreeContainer
 from yt.geometry.oct_visitors cimport \
     OctVisitorData, oct_visitor_function, Oct
@@ -66,6 +65,9 @@
     int artio_fileset_open_grid(artio_fileset_handle *handle) 
     int artio_fileset_close_grid(artio_fileset_handle *handle) 
 
+    int artio_fileset_has_grid( artio_fileset_handle *handle )
+    int artio_fileset_has_particles( artio_fileset_handle *handle )
+
     # selection functions
     artio_selection *artio_selection_allocate( artio_fileset_handle *handle )
     artio_selection *artio_select_all( artio_fileset_handle *handle )
@@ -136,12 +138,14 @@
     cdef int64_t sfc_min, sfc_max
 
     # grid attributes
+    cdef public int has_grid
     cdef public int min_level, max_level
     cdef public int num_grid_variables
     cdef int *num_octs_per_level
     cdef float *grid_variables
 
     # particle attributes
+    cdef public int has_particles
     cdef public int num_species
     cdef int *particle_position_index
     cdef int *num_particles_per_species
@@ -178,32 +182,48 @@
         if (not self.num_octs_per_level) or (not self.grid_variables) :
             raise MemoryError
 
-        status = artio_fileset_open_grid( self.handle )
-        check_artio_status(status)
+        if artio_fileset_has_grid(self.handle):
+            status = artio_fileset_open_grid(self.handle)
+            check_artio_status(status)
+            self.has_grid = 1
+        else:
+            self.has_grid = 0
 
         # particle detection
-        self.num_species = self.parameters['num_particle_species'][0]
-        self.particle_position_index = <int *>malloc(3*sizeof(int)*self.num_species)
-        if not self.particle_position_index :
-            raise MemoryError
-        for ispec in range(self.num_species) :
-            labels = self.parameters["species_%02d_primary_variable_labels"% (ispec,)]
-            try :
-                self.particle_position_index[3*ispec+0] = labels.index('POSITION_X')
-                self.particle_position_index[3*ispec+1] = labels.index('POSITION_Y')
-                self.particle_position_index[3*ispec+2] = labels.index('POSITION_Z')
-            except ValueError :
-                raise RuntimeError("Unable to locate position information for particle species", ispec )
+        if ( artio_fileset_has_particles(self.handle) ):
+            status = artio_fileset_open_particles(self.handle)
+            check_artio_status(status)
+            self.has_particles = 1
 
-        self.num_particles_per_species =  <int *>malloc(sizeof(int)*self.num_species) 
-        self.primary_variables = <double *>malloc(sizeof(double)*max(self.parameters['num_primary_variables']))  
-        self.secondary_variables = <float *>malloc(sizeof(float)*max(self.parameters['num_secondary_variables']))  
-        if (not self.num_particles_per_species) or (not self.primary_variables) or (not self.secondary_variables) :
-            raise MemoryError
+            for v in ["num_particle_species","num_primary_variables","num_secondary_variables"]:
+                if not self.parameters.has_key(v):
+                    raise RuntimeError("Unable to locate particle header information in artio header: key=", v)
 
-        status = artio_fileset_open_particles( self.handle )
-        check_artio_status(status)
-   
+            self.num_species = self.parameters['num_particle_species'][0]
+            self.particle_position_index = <int *>malloc(3*sizeof(int)*self.num_species)
+            if not self.particle_position_index :
+                raise MemoryError
+            for ispec in range(self.num_species) :
+                species_labels = "species_%02d_primary_variable_labels"% (ispec,)
+                if not self.parameters.has_key(species_labels):
+                    raise RuntimeError("Unable to locate variable labels for species",ispec)
+
+                labels = self.parameters[species_labels]
+                try :
+                    self.particle_position_index[3*ispec+0] = labels.index('POSITION_X')
+                    self.particle_position_index[3*ispec+1] = labels.index('POSITION_Y')
+                    self.particle_position_index[3*ispec+2] = labels.index('POSITION_Z')
+                except ValueError :
+                    raise RuntimeError("Unable to locate position information for particle species", ispec)
+    
+            self.num_particles_per_species =  <int *>malloc(sizeof(int)*self.num_species) 
+            self.primary_variables = <double *>malloc(sizeof(double)*max(self.parameters['num_primary_variables']))  
+            self.secondary_variables = <float *>malloc(sizeof(float)*max(self.parameters['num_secondary_variables']))  
+            if (not self.num_particles_per_species) or (not self.primary_variables) or (not self.secondary_variables) :
+                raise MemoryError
+        else:
+            self.has_particles = 0
+
     def __dealloc__(self) :
         if self.num_octs_per_level : free(self.num_octs_per_level)
         if self.grid_variables : free(self.grid_variables)
@@ -390,7 +410,6 @@
                 raise RuntimeError("Field",f,"is not known to ARTIO")
             field_order[i] = var_labels.index(f)
 
-        # dhr - cache the entire domain (replace later)
         status = artio_grid_cache_sfc_range( self.handle, self.sfc_min, self.sfc_max )
         check_artio_status(status) 
 
@@ -702,7 +721,8 @@
                 raise RuntimeError
             si = ei
             domain = 2
-        artio_grid_clear_sfc_cache(handle)
+        #status = artio_grid_clear_sfc_cache(handle)
+        #check_artio_status(status)
         free(mask)
         free(num_octs_per_level)
         free(tot_octs_per_level)
@@ -951,11 +971,11 @@
         status = artio_particle_read_root_cell_end( handle )
         check_artio_status(status)
 
-    status = artio_particle_clear_sfc_cache(handle)
-    check_artio_status(status)
+    #status = artio_particle_clear_sfc_cache(handle)
+    #check_artio_status(status)
 
-    status = artio_grid_clear_sfc_cache(handle)
-    check_artio_status(status)
+    #status = artio_grid_clear_sfc_cache(handle)
+    #check_artio_status(status)
 
     free(num_octs_per_level)
     free(num_particles_per_species)
@@ -1010,6 +1030,8 @@
     cdef np.int64_t count_cells(self, SelectorObject selector):
         # We visit each cell if it is not refined and determine whether it is
         # included or not.
+        # DHR - selector was used to construct the initial sfc list, so 
+        #  this should always equal the number of root cells
         cdef np.int64_t sfc
         cdef np.float64_t pos[3], right_edge[3]
         cdef int num_cells = 0
@@ -1161,7 +1183,8 @@
             # If refined, we skip
             if num_oct_levels > 0: continue
             mask[sfc - self.sfc_start] = 1
-        artio_grid_clear_sfc_cache(self.handle)
+        #status = artio_grid_clear_sfc_cache(self.handle)
+        #check_artio_status(status)
         free(num_octs_per_level)
         return mask.astype("bool")
 
@@ -1220,8 +1243,8 @@
             status = artio_grid_read_root_cell_end( handle )
             check_artio_status(status)
         # Now we have all our sources.
-        status = artio_grid_clear_sfc_cache(handle)
-        check_artio_status(status)
+        #status = artio_grid_clear_sfc_cache(handle)
+        #check_artio_status(status)
         free(field_ind)
         free(field_vals)
         free(grid_variables)

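A short sketch of how a caller might use the new capability queries,
assuming an already-open artio_fileset *handle and the
check_artio_status() helper used throughout _artio_caller.pyx; only
components that actually exist in the fileset get opened, which is what
makes grid-only (or particle-only) filesets work.

    int status;
    if (artio_fileset_has_grid(handle)) {
        status = artio_fileset_open_grid(handle);
        check_artio_status(status);
    }
    if (artio_fileset_has_particles(handle)) {
        status = artio_fileset_open_particles(handle);
        check_artio_status(status);
    }
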
diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/LICENSE
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/LICENSE
@@ -0,0 +1,850 @@
+ARTIO is licensed under the GNU Lesser General Public License (LGPL) version 3,
+which is an extension of the GNU General Public License (GPL).  The text of both
+licenses is included here.
+
+===============================================================================
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
+
+===============================================================================
+
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year><name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year><name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+===============================================================================

diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio.c
--- a/yt/frontends/artio/artio_headers/artio.c
+++ b/yt/frontends/artio/artio_headers/artio.c
@@ -1,10 +1,24 @@
-/*
- * artio.c
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: Feb 21, 2010
- *  Author: Yongen Yu
- */
-
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see 
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
 #include "artio.h"
 #include "artio_internal.h"
 
@@ -20,7 +34,7 @@
 
 int artio_fh_buffer_size = ARTIO_DEFAULT_BUFFER_SIZE;
 
-int artio_set_buffer_size( int buffer_size ) {
+int artio_fileset_set_buffer_size( int buffer_size ) {
 	if ( buffer_size < 0 ) {
 		return ARTIO_ERR_INVALID_BUFFER_SIZE;
 	}
@@ -199,16 +213,18 @@
 	if ( handle != NULL ) {
 		handle->parameters = artio_parameter_list_init();
 
+#ifdef ARTIO_MPI
 		handle->context = (artio_context *)malloc(sizeof(artio_context));
 		if ( handle->context == NULL ) {
 			return NULL;
 		}
 		memcpy( handle->context, context, sizeof(artio_context) );
 
-#ifdef ARTIO_MPI
 		MPI_Comm_size(handle->context->comm, &num_procs);
 		MPI_Comm_rank(handle->context->comm, &my_rank);
 #else
+		handle->context = NULL;
+
 		num_procs = 1;
 		my_rank = 0;
 #endif /* MPI */

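For the renamed entry point, a one-line usage sketch: per the check in
artio_fileset_set_buffer_size above, negative sizes are rejected with
ARTIO_ERR_INVALID_BUFFER_SIZE.  The 1 MB value is purely illustrative,
and treating 0 as success is an assumption about the error-code
convention.

    int status = artio_fileset_set_buffer_size(1024 * 1024);  /* 1 MB */
    if (status != 0) {  /* assumed: 0 means ARTIO_SUCCESS */
        /* fall back to the default buffer size */
    }
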
diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio.h
--- a/yt/frontends/artio/artio_headers/artio.h
+++ b/yt/frontends/artio/artio_headers/artio.h
@@ -1,20 +1,30 @@
-/*
- * artio.h
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: Feb 21, 2010
- *      Author: Yongen Yu
- *  Modified: Jun 6, 2010 - Doug Rudd
- *            Nov 18, 2010 - Doug Rudd
- *            Nov 14, 2012 - Doug Rudd
- *            Feb 7, 2013 - Doug Rudd - Version 1.0
- *            March 3, 2013 - Doug Rudd - Version 1.1 (inc. selectors)
- */
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
 
 #ifndef __ARTIO_H__
 #define __ARTIO_H__
 
 #define ARTIO_MAJOR_VERSION     1
-#define ARTIO_MINOR_VERSION     1
+#define ARTIO_MINOR_VERSION     2
 
 #ifdef ARTIO_MPI
 #include <mpi.h>
@@ -146,7 +156,7 @@
  * Description	Close the file
  */
 int artio_fileset_close(artio_fileset *handle);
-
+int artio_fileset_set_buffer_size( int buffer_size );
 int artio_fileset_has_grid( artio_fileset *handle );
 int artio_fileset_has_particles( artio_fileset *handle );
 
@@ -228,8 +238,6 @@
 
 int artio_fileset_open_grid(artio_fileset *handle);
 int artio_fileset_close_grid(artio_fileset *handle);
-int artio_fileset_open_particle(artio_fileset *handle);
-int artio_fileset_close_particle(artio_fileset *handle);
 
 /*
  * Description:	Output the variables of the root level cell and the hierarchy of the Oct tree correlated with this root level cell
@@ -305,7 +313,7 @@
 int artio_grid_clear_sfc_cache(artio_fileset *handle );
 
 int artio_grid_count_octs_in_sfc_range(artio_fileset *handle,
-        int64_t start, int64_t end, int64_t *num_octs);
+        int64_t start, int64_t end, int64_t *num_octs_in_range );
 
 /*
  * Description:	Read a segment of oct nodes

diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_endian.c
--- a/yt/frontends/artio/artio_headers/artio_endian.c
+++ b/yt/frontends/artio/artio_headers/artio_endian.c
@@ -1,3 +1,25 @@
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
+ *
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
+
 #include "artio_endian.h"
 
 #include <stdint.h>

diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_endian.h
--- a/yt/frontends/artio/artio_headers/artio_endian.h
+++ b/yt/frontends/artio/artio_headers/artio_endian.h
@@ -1,3 +1,25 @@
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
+ *
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
+
 #ifndef __ARTIO_EDIAN_H__
 #define __ARTIO_EDIAN_H__
 

diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_file.c
--- /dev/null
+++ b/yt/frontends/artio/artio_headers/artio_file.c
@@ -0,0 +1,161 @@
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
+ *
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
+
+#include "artio.h"
+#include "artio_internal.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+artio_fh *artio_file_fopen( char * filename, int mode, const artio_context *context) {
+	artio_fh *fh;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_fopen( filename=%s, mode=%d, context=%p )\n", 
+			filename, mode, context ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	fh = artio_file_fopen_i(filename,mode,context);
+#ifdef ARTIO_DEBUG
+	printf(" artio_file_fopen = %p\n", fh ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	return fh;
+}
+
+int artio_file_attach_buffer( artio_fh *handle, void *buf, int buf_size ) {
+    int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_attach_buffer( handle=%p, buf=%p, buf_size = %d )\n",
+			handle, buf, buf_size ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+    status = artio_file_attach_buffer_i(handle,buf,buf_size);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf(" artio_file_attach_buffer(%p) = %d\n", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+    return status;
+}
+
+int artio_file_detach_buffer( artio_fh *handle ) {
+	int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_detach_buffer( handle=%p )\n", handle ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_detach_buffer_i(handle);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf( "artio_file_detach_buffer(%p) = %d\n", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+int artio_file_fwrite( artio_fh *handle, const void *buf, int64_t count, int type ) {
+	int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_fwrite( handle=%p, buf=%p, count=%ld, type=%d )\n",
+			handle, buf, count, type ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_fwrite_i(handle,buf,count,type);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf( "artio_file_fwrite(%p) = %d", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+int artio_file_fflush(artio_fh *handle) {
+	int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_fflush( handle=%p )\n", handle ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_fflush_i(handle);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf( "artio_file_fflush(%p) = %d\n", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+int artio_file_fread(artio_fh *handle, void *buf, int64_t count, int type ) {
+	int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_fread( handle=%p, buf=%p, count=%ld, type=%d )\n",
+			handle, buf, count, type ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_fread_i(handle,buf,count,type);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf( "artio_file_fread(%p) = %d", handle, status );
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+int artio_file_ftell(artio_fh *handle, int64_t *offset) {
+	int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_ftell( handle=%p, offset=%p )\n",
+		handle, offset ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_ftell_i(handle,offset);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf("artio_file_ftell(%p) = %d\n", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+int artio_file_fseek(artio_fh *handle, int64_t offset, int whence ) {
+	int status;
+#ifdef ARTIO_DEBUG
+    printf( "artio_file_fseek( handle=%p, offset=%ld, whence=%d )\n",
+        handle, offset, whence ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_fseek_i(handle,offset,whence);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf( "artio_file_fseek(%p) = %d\n", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+int artio_file_fclose(artio_fh *handle) {
+	int status;
+#ifdef ARTIO_DEBUG
+	printf( "artio_file_fclose( handle=%p )\n", handle ); fflush(stdout);
+#endif /* ARTIO_DEBUG */
+	status = artio_file_fclose_i(handle);
+#ifdef ARTIO_DEBUG
+	if ( status != ARTIO_SUCCESS ) {
+		printf( "artio_file_fclose(%p) = %d\n", handle, status ); fflush(stdout);
+	}
+#endif /* ARTIO_DEBUG */
+	return status;
+}
+
+void artio_file_set_endian_swap_tag(artio_fh *handle) {
+	artio_file_set_endian_swap_tag_i(handle);
+}

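artio_file.c is new in this changeset: every public artio_file_* entry point is now a thin wrapper that, when compiled with ARTIO_DEBUG, traces its arguments on entry and its status on failure, then delegates to an _i internal implementation provided by the MPI backend (artio_mpi.c, below) or its serial counterpart. A minimal sketch of the wrapper pattern, using a hypothetical artio_demo_op in place of a real API call:

    #include <stdio.h>

    #define ARTIO_SUCCESS 0

    /* Internal implementation; in artio this lives in the I/O backend.
     * Stubbed here so the sketch stands alone. */
    static int artio_demo_op_i(int arg) {
        return (arg >= 0) ? ARTIO_SUCCESS : -1;
    }

    /* Public entry point: trace on entry, delegate, report only errors. */
    int artio_demo_op(int arg) {
        int status;
    #ifdef ARTIO_DEBUG
        printf("artio_demo_op( arg=%d )\n", arg); fflush(stdout);
    #endif
        status = artio_demo_op_i(arg);
    #ifdef ARTIO_DEBUG
        if (status != ARTIO_SUCCESS) {
            printf("artio_demo_op(%d) = %d\n", arg, status); fflush(stdout);
        }
    #endif
        return status;
    }

Keeping the tracing in one place means a single copy of the debug code covers every backend implementation.
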
diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_grid.c
--- a/yt/frontends/artio/artio_headers/artio_grid.c
+++ b/yt/frontends/artio/artio_headers/artio_grid.c
@@ -1,9 +1,25 @@
-/*
- * artio_grid.c
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: May 10, 2011
- *      Author: Yongen Yu
- */
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
+
 #include "artio.h"
 #include "artio_internal.h"
 
@@ -310,7 +326,7 @@
 		}
 	}
 
-		handle->grid = ghandle;
+	handle->grid = ghandle;
 
 	artio_parameter_set_long_array(handle, "grid_file_sfc_index",
 			ghandle->num_grid_files + 1, ghandle->file_sfc_index);
@@ -396,7 +412,7 @@
 }
 
 int artio_grid_count_octs_in_sfc_range(artio_fileset *handle, 
-		int64_t start, int64_t end, int64_t *num_octs) {
+		int64_t start, int64_t end, int64_t *num_octs_in_range ) {
     int i;
 	int ret;
 	int file, first;
@@ -428,7 +444,7 @@
 		return ARTIO_ERR_INVALID_STATE;
 	}
 
-	*num_octs = 0;
+	*num_octs_in_range = 0;
 
 	if ( 8*ghandle->num_grid_variables <= ghandle->file_max_level ) {
 		/* we can't compute the number of octs through the offset table */
@@ -446,7 +462,7 @@
 			if ( ret != ARTIO_SUCCESS ) return ret;
 
 			for ( i = 0; i < num_oct_levels; i++ ) {
-				*num_octs += num_octs_per_level[i];
+				*num_octs_in_range += num_octs_per_level[i];
 			}	
 
 			ret = artio_grid_read_root_cell_end( handle );
@@ -490,7 +506,7 @@
 			/* this assumes (num_levels_per_root_tree)*sizeof(int) <
 			 *   size of an oct, or 8*num_variables > max_level so the 
 			 *   number of levels drops off in rounding to int */
-			*num_octs += (size_offset - offset - 
+			*num_octs_in_range += (size_offset - offset - 
 				sizeof(float)*ghandle->num_grid_variables - sizeof(int) ) /
 				(8*(sizeof(float)*ghandle->num_grid_variables + sizeof(int) ));
 			offset = next_offset;

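The rename from num_octs to num_octs_in_range also highlights the trick in artio_grid_count_octs_in_sfc_range: when the per-level header is small relative to an oct record (the 8*num_grid_variables > file_max_level case), the oct count for a root tree can be inferred from the offset table alone, by dividing the byte span between consecutive offsets, minus the root cell's payload, by the size of one oct record; otherwise the code falls back to walking every root cell. A sketch of that arithmetic, with the record layout assumed from the hunk above rather than from the artio file specification:

    #include <stdint.h>
    #include <stdio.h>

    /* Estimate octs in one root-tree record from its byte extent.
     * Assumes: a root record holds num_vars floats for the root cell
     * plus a small level header (absorbed by integer division), and
     * each oct adds 8 cells, each carrying num_vars floats and one int. */
    static int64_t octs_from_extent(int64_t start_offset, int64_t end_offset,
                                    int num_vars) {
        int64_t payload = end_offset - start_offset
                        - (int64_t)(sizeof(float) * num_vars) /* root cell   */
                        - (int64_t)sizeof(int);               /* level count */
        int64_t per_oct = 8 * (int64_t)(sizeof(float) * num_vars + sizeof(int));
        return payload / per_oct;
    }

    int main(void) {
        /* e.g. 6 grid variables, a record spanning 2000 bytes -> 8 octs */
        printf("%lld octs\n", (long long)octs_from_extent(0, 2000, 6));
        return 0;
    }
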
diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_internal.h
--- a/yt/frontends/artio/artio_headers/artio_internal.h
+++ b/yt/frontends/artio/artio_headers/artio_internal.h
@@ -1,10 +1,24 @@
-/*
- * artio_internal.h
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: Apr 9, 2010
- *      Author: Yongen Yu
- *  Renamed/Modified: Nov 18, 2010 - Douglas Rudd
- */
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
 
 #ifndef __ARTIO_INTERNAL_H__
 #define __ARTIO_INTERNAL_H__
@@ -160,6 +174,7 @@
 #define ARTIO_SEEK_CUR          1
 #define ARTIO_SEEK_END			2
 
+/* wrapper functions for profiling and debugging */
 artio_fh *artio_file_fopen( char * filename, int amode, const artio_context *context );
 int artio_file_attach_buffer( artio_fh *handle, void *buf, int buf_size );
 int artio_file_detach_buffer( artio_fh *handle );
@@ -169,7 +184,19 @@
 int artio_file_fseek(artio_fh *ffh, int64_t offset, int whence);
 int artio_file_fread(artio_fh *handle, void *buf, int64_t count, int type );
 int artio_file_fclose(artio_fh *handle);
-void artio_set_endian_swap_tag(artio_fh *handle);
+void artio_file_set_endian_swap_tag(artio_fh *handle);
+
+/* internal versions */
+artio_fh *artio_file_fopen_i( char * filename, int amode, const artio_context *context );
+int artio_file_attach_buffer_i( artio_fh *handle, void *buf, int buf_size );
+int artio_file_detach_buffer_i( artio_fh *handle );
+int artio_file_fwrite_i(artio_fh *handle, const void *buf, int64_t count, int type );
+int artio_file_ftell_i( artio_fh *handle, int64_t *offset );
+int artio_file_fflush_i(artio_fh *handle);
+int artio_file_fseek_i(artio_fh *ffh, int64_t offset, int whence);
+int artio_file_fread_i(artio_fh *handle, void *buf, int64_t count, int type );
+int artio_file_fclose_i(artio_fh *handle);
+void artio_file_set_endian_swap_tag_i(artio_fh *handle);
 
 #define ARTIO_ENDIAN_MAGIC	0x1234
 

diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_mpi.c
--- a/yt/frontends/artio/artio_headers/artio_mpi.c
+++ b/yt/frontends/artio/artio_headers/artio_mpi.c
@@ -1,10 +1,24 @@
-/*
- * artio_mpi.c
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: Apr 9, 2010
- *      Author: Yongen Yu
- *  Modified: Nov 18, 2010 - Douglas Rudd
- */
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
 
 #include "artio.h"
 #include "artio_internal.h"
@@ -29,7 +43,7 @@
 	int bfend;
 };
 
-artio_fh *artio_file_fopen( char * filename, int mode, const artio_context *context) {
+artio_fh *artio_file_fopen_i( char * filename, int mode, const artio_context *context) {
 	int status;
 	int flag;
 	int rank;
@@ -53,7 +67,9 @@
 
 	ffh->mode = mode;
 	ffh->data = NULL;
-	ffh->bfsize = 0;
+	ffh->bfsize = -1;
+	ffh->bfend = -1;
+	ffh->bfptr = -1;
 
 	flag = mode & ARTIO_MODE_ACCESS;
 
@@ -77,7 +93,7 @@
 	return ffh;
 }
 
-int artio_file_attach_buffer( artio_fh *handle, void *buf, int buf_size ) {
+int artio_file_attach_buffer_i( artio_fh *handle, void *buf, int buf_size ) {
 	if ( !(handle->mode & ARTIO_MODE_ACCESS ) ) {
 		return ARTIO_ERR_INVALID_FILE_MODE;
 	}
@@ -94,7 +110,7 @@
 	return ARTIO_SUCCESS;                                                                                           
 }
 
-int artio_file_detach_buffer( artio_fh *handle ) {
+int artio_file_detach_buffer_i( artio_fh *handle ) {
 	int ret;
 	ret = artio_file_fflush(handle);
 	if ( ret != ARTIO_SUCCESS ) return ret;
@@ -107,7 +123,7 @@
 	return ARTIO_SUCCESS;   
 }
 
-int artio_file_fwrite( artio_fh *handle, const void *buf, int64_t count, int type ) {
+int artio_file_fwrite_i( artio_fh *handle, const void *buf, int64_t count, int type ) {
 	size_t size;
 	int64_t remain;
 	int size32;
@@ -119,9 +135,9 @@
 	}
 
 	size = artio_type_size( type );
-    if ( size == (size_t)-1 ) {
-        return ARTIO_ERR_INVALID_DATATYPE;                                                                          
-    }
+	if ( size == (size_t)-1 ) {
+		return ARTIO_ERR_INVALID_DATATYPE;
+	}
 
 	if ( count > ARTIO_INT64_MAX / size ) {
 		return ARTIO_ERR_IO_OVERFLOW;
@@ -151,6 +167,8 @@
 					MPI_BYTE, MPI_STATUS_IGNORE ) != MPI_SUCCESS ) {
 			return ARTIO_ERR_IO_WRITE;
 		}
+		p += size32;
+		remain -= size32;
 
 		while ( remain > handle->bfsize ) {
 			if ( MPI_File_write(handle->fh, p, handle->bfsize, 
@@ -168,7 +186,7 @@
 	return ARTIO_SUCCESS;
 }
 
-int artio_file_fflush(artio_fh *handle) {
+int artio_file_fflush_i(artio_fh *handle) {
 	if ( !(handle->mode & ARTIO_MODE_ACCESS) ) {
 		return ARTIO_ERR_INVALID_FILE_MODE;
 	}
@@ -182,8 +200,8 @@
 			handle->bfptr = 0;
 		}
 	} else if ( handle->mode & ARTIO_MODE_READ ) {
+		handle->bfend = -1;
 		handle->bfptr = 0;
-		handle->bfend = -1;
 	} else {
 		return ARTIO_ERR_INVALID_FILE_MODE;
 	}
@@ -191,13 +209,12 @@
 	return ARTIO_SUCCESS;
 }
 
-int artio_file_fread(artio_fh *handle, void *buf, int64_t count, int type ) {
+int artio_file_fread_i(artio_fh *handle, void *buf, int64_t count, int type ) {
 	MPI_Status status;
 	size_t size, avail, remain;
 	int size_read, size32;
 	char *p;
 	
-
 	if ( !(handle->mode & ARTIO_MODE_READ) ) {
 		return ARTIO_ERR_INVALID_FILE_MODE;
 	}
@@ -290,18 +307,22 @@
 	return ARTIO_SUCCESS;
 }
 
-int artio_file_ftell(artio_fh *handle, int64_t *offset) {
+int artio_file_ftell_i(artio_fh *handle, int64_t *offset) {
 	MPI_Offset current;
 	MPI_File_get_position( handle->fh, &current );
-	if ( handle->bfend == 0 ) {
-		*offset = current;
-	} else {
-		*offset = current - handle->bfend + handle->bfptr;
+
+	if ( handle->bfend > 0 ) {
+		current -= handle->bfend;
+	} 
+	if ( handle->bfptr > 0 ) {
+		current += handle->bfptr;
 	}
+	*offset = (int64_t)current;
+
 	return ARTIO_SUCCESS;
 }
 
-int artio_file_fseek(artio_fh *handle, int64_t offset, int whence ) {
+int artio_file_fseek_i(artio_fh *handle, int64_t offset, int whence ) {
 	MPI_Offset current;
 
 	if ( handle->mode & ARTIO_MODE_ACCESS ) {
@@ -309,6 +330,7 @@
 			if ( offset == 0 ) {
 				return ARTIO_SUCCESS;
 			} else if ( handle->mode & ARTIO_MODE_READ &&
+					handle->bfend > 0 &&
                 	handle->bfptr + offset >= 0 && 
 					handle->bfptr + offset < handle->bfend ) {
 				handle->bfptr += offset;
@@ -326,13 +348,14 @@
 		} else if ( whence == ARTIO_SEEK_SET ) {
 			MPI_File_get_position( handle->fh, &current );
 			if (handle->mode & ARTIO_MODE_WRITE &&
-					current<=offset && offset<(current + handle->bfsize) && 
-					handle->bfptr==(offset - current)) {
+					current <= offset && 
+					offset < current + handle->bfsize && 
+					handle->bfptr == offset - current ) {
 				return ARTIO_SUCCESS;
 			} else if ( handle->mode & ARTIO_MODE_READ &&
 					handle->bfptr > 0 &&
+					handle->bfend > 0 &&
 					handle->bfptr < handle->bfend &&
-					handle->bfend > 0 &&
 					offset >= current - handle->bfend &&
 					offset < current ) {
 				handle->bfptr = offset - current + handle->bfend;
@@ -355,7 +378,7 @@
 	return ARTIO_SUCCESS;
 }
 
-int artio_file_fclose(artio_fh *handle) {
+int artio_file_fclose_i(artio_fh *handle) {
 	if ( handle->mode & ARTIO_MODE_ACCESS ) {
 		artio_file_fflush(handle);
 		MPI_File_close(&handle->fh);
@@ -365,7 +388,7 @@
 	return ARTIO_SUCCESS;
 }
 
-void artio_set_endian_swap_tag(artio_fh *handle) {
+void artio_file_set_endian_swap_tag_i(artio_fh *handle) {
 	handle->mode |= ARTIO_MODE_ENDIAN_SWAP;
 }
 

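Besides the _i renames, artio_mpi.c picks up a real bug fix in artio_file_fwrite_i: after topping off and flushing the staging buffer, the source pointer and remaining byte count were never advanced, so a write larger than the buffer would re-emit its first chunk. The added p += size32; remain -= size32; restores the invariant that p always points at unwritten data (the bfptr/bfend initialization to -1 and the extra bfend > 0 checks in fseek/ftell are the matching bookkeeping on the read side). A minimal sketch of the corrected draining loop, with plain fwrite standing in for MPI_File_write:

    #include <stdio.h>
    #include <string.h>

    /* Push 'remain' bytes at 'p' through a fixed staging buffer.
     * buf/bfsize/bfptr mimic the artio_fh fields; 'out' stands in
     * for the MPI file handle. */
    static int buffered_write(FILE *out, char *buf, size_t bfsize,
                              size_t *bfptr, const char *p, size_t remain) {
        if (*bfptr + remain < bfsize) {       /* fits: just stage it */
            memcpy(buf + *bfptr, p, remain);
            *bfptr += remain;
            return 0;
        }
        size_t size32 = bfsize - *bfptr;      /* top off and flush */
        memcpy(buf + *bfptr, p, size32);
        if (fwrite(buf, 1, bfsize, out) != bfsize) return -1;
        p += size32;       /* the fix: step past what was just staged */
        remain -= size32;
        while (remain > bfsize) {             /* whole buffers, unstaged */
            if (fwrite(p, 1, bfsize, out) != bfsize) return -1;
            p += bfsize;
            remain -= bfsize;
        }
        memcpy(buf, p, remain);               /* stash the tail */
        *bfptr = remain;
        return 0;
    }
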
diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_parameter.c
--- a/yt/frontends/artio/artio_headers/artio_parameter.c
+++ b/yt/frontends/artio/artio_headers/artio_parameter.c
@@ -1,9 +1,24 @@
-/*
- * artio_parameter.c
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: Jun 8, 2010
- *      Author: Yongen Yu
- */
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
 
 #include "artio.h"
 #include "artio_internal.h"
@@ -68,7 +83,7 @@
 	if ( endian_tag != ARTIO_ENDIAN_MAGIC ) {
 		artio_int_swap( &endian_tag, 1 );
 		if ( endian_tag == ARTIO_ENDIAN_MAGIC ) {
-			artio_set_endian_swap_tag(handle);
+			artio_file_set_endian_swap_tag(handle);
 		} else {
 			return ARTIO_ERR_PARAM_CORRUPTED_MAGIC;
 		}

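The one-line change in artio_parameter.c is just the rename, but it sits on artio's endianness handshake: the first int of the parameter block must equal ARTIO_ENDIAN_MAGIC (0x1234); if it only matches after a byte swap, the file was written on an opposite-endian machine and artio_file_set_endian_swap_tag marks the handle so every subsequent read is swapped. A self-contained sketch of the detection, with swap32 as an assumed 4-byte swap helper:

    #include <stdint.h>
    #include <stdio.h>

    #define ENDIAN_MAGIC 0x1234u

    static uint32_t swap32(uint32_t v) {
        return (v >> 24) | ((v >> 8) & 0xff00u) |
               ((v << 8) & 0xff0000u) | (v << 24);
    }

    /* 1 = swap all reads, 0 = native order, -1 = corrupted header. */
    static int detect_endian(uint32_t tag_from_file) {
        if (tag_from_file == ENDIAN_MAGIC) return 0;
        if (swap32(tag_from_file) == ENDIAN_MAGIC) return 1;
        return -1;
    }

    int main(void) {
        printf("%d %d %d\n",
               detect_endian(0x1234u),      /* native:   0 */
               detect_endian(0x34120000u),  /* swapped:  1 */
               detect_endian(0xdeadbeefu)); /* corrupt: -1 */
        return 0;
    }
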
diff -r f6c18705bf76fa980051bd4cb8e1fd4b60d94563 -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f yt/frontends/artio/artio_headers/artio_particle.c
--- a/yt/frontends/artio/artio_headers/artio_particle.c
+++ b/yt/frontends/artio/artio_headers/artio_particle.c
@@ -1,9 +1,25 @@
-/*
- * artio_particle.c
+/**********************************************************************
+ * Copyright (c) 2012-2013, Douglas H. Rudd
+ * All rights reserved.
  *
- *  Created on: May 10, 2011
- *      Author: eric
- */
+ * This file is part of the artio library.
+ *
+ * artio is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * artio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * Copies of the GNU Lesser General Public License and the GNU General
+ * Public License are available in the file LICENSE, included with this
+ * distribution.  If you failed to receive a copy of this file, see
+ * <http://www.gnu.org/licenses/>
+ **********************************************************************/
+
 #include "artio.h"
 #include "artio_internal.h"
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/59edd14640f7/
Changeset:   59edd14640f7
Branch:      yt-3.0
User:        drudd
Date:        2013-08-29 17:24:16
Summary:     Updated with upstream; performance fixes in the artio front end
Affected #:  39 files

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5158,3 +5158,4 @@
 0000000000000000000000000000000000000000 hop callback
 a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
+f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -473,11 +473,18 @@
 function do_setup_py
 {
     [ -e $1/done ] && return
-    echo "Installing $1 (arguments: '$*')"
-    [ ! -e $1/extracted ] && tar xfz $1.tar.gz
-    touch $1/extracted
-    cd $1
-    if [ ! -z `echo $1 | grep h5py` ]
+    LIB=$1
+    shift
+    if [ -z "$@" ]
+    then
+        echo "Installing $LIB"
+    else
+        echo "Installing $LIB (arguments: '$@')"
+    fi
+    [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+    touch $LIB/extracted
+    cd $LIB
+    if [ ! -z `echo $LIB | grep h5py` ]
     then
         shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
 
 function get_ytproject
 {
+    [ -e $1 ] && return
     echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
@@ -551,67 +558,93 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
 # Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.6/done ]
+    if [ ! -e $BZLIB/done ]
     then
-        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+        [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.6
+        cd $BZLIB
         if [ `uname` = "Darwin" ]
         then
             if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.7/done ]
+    if [ ! -e $ZLIB/done ]
     then
-        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+        [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.7
+        cd $ZLIB
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.6.1/done ]
+    if [ ! -e $PNG/done ]
     then
-        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+        [ ! -e $PNG ] && tar xfz $PNG.tar.gz
         echo "Installing PNG"
-        cd libpng-1.6.1
+        cd $PNG
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.11/done ]
+    if [ ! -e $FREETYPE_VER/done ]
     then
-        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+        [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.11
+        cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -688,11 +722,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.9/done ]
+    if [ ! -e $HDF5/done ]
     then
-        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+        [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.9
+        cd $HDF5
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3071601/done ]
+    if [ ! -e $SQLITE/done ]
     then
-        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+        [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3071601
+        cd $SQLITE
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
-    cd Python-2.7.4
+    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+    cd $PYTHON
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    do_setup_py mercurial-2.5.4
+    do_setup_py $MERCURIAL
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    do_setup_py $NUMPY ${NUMPY_ARGS}
 else
-    if [ ! -e scipy-0.11.0/done ]
+    if [ ! -e $SCIPY/done ]
     then
 	if [ ! -e BLAS/done ]
 	then
@@ -798,27 +832,27 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o 1>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
 	fi
-	if [ ! -e lapack-3.4.2/done ]
+	if [ ! -e $LAPACK/done ]
 	then
-	    tar xfz lapack-3.4.2.tar.gz
+	    tar xfz $LAPACK.tar.gz
 	    echo "Building LAPACK"
-	    cd lapack-3.4.2/
+	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
-    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+    export LAPACK=$PWD/$LAPACK/liblapack.a
+    do_setup_py $NUMPY ${NUMPY_ARGS}
+    do_setup_py $SCIPY ${NUMPY_ARGS}
 fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +889,36 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-3.2.2/done ]
+    if [ ! -e $ZEROMQ/done ]
     then
-        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+        [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-3.2.2
+        cd $ZEROMQ
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
-    do_setup_py tornado-3.0
+    do_setup_py $PYZMQ --zmq=${DEST_DIR}
+    do_setup_py $TORNADO
 fi
 
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -909,10 +943,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,8 +1062,9 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
+        *dm_only* is True (default), only run it on the dark matter particles, 
+        otherwise on all particles.  Returns an iterable collection of 
+        *HopGroup* items.
         """
         self._data_source = data_source
         self.dm_only = dm_only

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold009',
+    gold_standard_filename = 'gold010',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -89,3 +89,6 @@
 
 from particle_trajectories import \
     ParticleTrajectoryCollection
+
+from particle_filters import \
+    particle_filter

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -45,6 +45,7 @@
     def _AllFields(field, data):
         v = []
         for ptype in data.pf.particle_types:
+            data.pf._last_freq = (ptype, None)
             if ptype == "all" or \
                 ptype in data.pf.known_filters:
                   continue
@@ -57,6 +58,7 @@
     def _AllFields(field, data):
         v = []
         for ptype in data.pf.particle_types:
+            data.pf._last_freq = (ptype, None)
             if ptype == "all" or \
                 ptype in data.pf.known_filters:
                   continue

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_cutting_plane():
     for nprocs in [8, 1]:
@@ -23,7 +27,9 @@
         yield assert_equal, cut["Ones"].min(), 1.0
         yield assert_equal, cut["Ones"].max(), 1.0
         pw = cut.to_pw()
-        fns += pw.save()
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
             yield assert_equal, frb[cut_field].info['data_source'], \

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -1,130 +1,94 @@
-from yt.testing import *
-from yt.data_objects.image_array import ImageArray
 import numpy as np
 import os
 import tempfile
 import shutil
+import unittest
+from yt.data_objects.image_array import ImageArray
+from yt.testing import \
+    assert_equal
+
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
+    ytcfg["yt", "__withintesting"] = "True"
+    np.seterr(all='ignore')
+
+
+def dummy_image(kstep, nlayers):
+    im = np.zeros([64, 128, nlayers])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i, :, k] = np.linspace(0.0, kstep * k, im.shape[1])
+    return im
+
 
 def test_rgba_rescale():
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-    im_arr = ImageArray(im)
+    im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
-    yield assert_equal, im_arr[:,:,3].max(), 3*10.
-    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
-    yield assert_equal, new_im[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
+    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
+    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, new_im[:, :, 3].max(), 1.0
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:, :, 3].max(), 1.0
 
-def test_image_array_hdf5():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+class TestImageArray(unittest.TestCase):
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+    tmpdir = None
+    curdir = None
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+    def test_image_array_hdf5(self):
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+        im_arr = ImageArray(dummy_image(0.3, 3), info=myinfo)
+        im_arr.save('test_3d_ImageArray')
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
+        im = np.zeros([64, 128])
+        for i in xrange(im.shape[0]):
+            im[i, :] = np.linspace(0., 0.3 * 2, im.shape[1])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-def test_image_array_rgb_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+        im_arr = ImageArray(im, info=myinfo)
+        im_arr.save('test_2d_ImageArray')
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    def test_image_array_rgb_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 3))
+        im_arr.write_png('standard.png')
 
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
+    def test_image_array_rgba_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.write_png('standard.png')
+        im_arr.write_png('non-scaled.png', rescale=False)
+        im_arr.write_png('black_bg.png', background='black')
+        im_arr.write_png('white_bg.png', background='white')
+        im_arr.write_png('green_bg.png', background=[0., 1., 0., 1.])
+        im_arr.write_png('transparent_bg.png', background=None)
 
-def test_image_array_rgba_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    def test_image_array_background(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.rescale()
+        new_im = im_arr.add_background_color([1., 0., 0., 1.], inline=False)
+        new_im.write_png('red_bg.png')
+        im_arr.add_background_color('black')
+        im_arr.write_png('black_bg2.png')
 
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
-    im_arr.write_png('non-scaled.png', rescale=False)
-    im_arr.write_png('black_bg.png', background='black')
-    im_arr.write_png('white_bg.png', background='white')
-    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
-    im_arr.write_png('transparent_bg.png', background=None)
-
-
-def test_image_array_background():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
-
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.rescale()
-    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
-    new_im.write_png('red_bg.png')
-    im_arr.add_background_color('black')
-    im_arr.write_png('black_bg2.png')
- 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def tearDown(self):
+        os.chdir(self.curdir)
+        # clean up
+        shutil.rmtree(self.tmpdir)
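
The dummy_image() helper these tests call is defined earlier in this
file, outside the excerpt shown; a minimal sketch consistent with the
loops being replaced (the 0.3-step RGB case above and the 10.0-step
RGB/RGBA cases below) would be:

    import numpy as np

    def dummy_image(kstep, nlayers):
        # (64, 128, nlayers) array; layer k ramps linearly from 0 to
        # kstep * k across the second axis, as in the deleted loops.
        im = np.zeros([64, 128, nlayers])
        for i in xrange(im.shape[0]):
            for k in xrange(im.shape[2]):
                im[i, :, k] = np.linspace(0., kstep * k, im.shape[1])
        return im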

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_projection():
     for nprocs in [8, 1]:
@@ -37,7 +41,9 @@
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
                 pw = proj.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -27,6 +27,7 @@
 """
 import os
 import numpy as np
+import tempfile
 from nose.tools import raises
 from yt.testing import \
     fake_random_pf, assert_equal, assert_array_equal
@@ -42,7 +43,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 
 def test_slice():
@@ -72,7 +76,9 @@
                 yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
                 yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \

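The temporary-file pattern used in both test_projection and test_slice
above: tempfile.mkstemp() returns an already-open OS-level descriptor
plus a unique path, so the descriptor is closed immediately and only
the name is handed to pw.save(); the relaxed teardown_func then
tolerates names that were never actually written. In isolation:

    import os
    import tempfile

    tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
    os.close(tmpfd)  # only the unique name is needed
    # ... hand tmpname to the writer, e.g. fns += pw.save(name=tmpname)
    try:
        os.remove(tmpname)
    except OSError:
        pass  # already removed, or never written
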
diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/extern/__init__.py
--- /dev/null
+++ b/yt/extern/__init__.py
@@ -0,0 +1,4 @@
+"""
+This package contains Python packages that are bundled with yt
+but are developed upstream by third parties.
+"""

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/extern/parameterized.py
--- /dev/null
+++ b/yt/extern/parameterized.py
@@ -0,0 +1,226 @@
+import re
+import inspect
+from functools import wraps
+from collections import namedtuple
+
+from nose.tools import nottest
+from unittest import TestCase
+
+from . import six
+
+if six.PY3:
+    def new_instancemethod(f, *args):
+        return f
+else:
+    import new
+    new_instancemethod = new.instancemethod
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+    """ Represents a single parameter to a test case.
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+        """
+
+    def __new__(cls, *args , **kwargs):
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3))
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{"foo": 42})
+            """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(args=(42, ), kwargs={})
+                >>> param.from_decorator("foo")
+                param(args=("foo", ), kwargs={})
+            """
+        if isinstance(args, param):
+            return args
+        if isinstance(args, six.string_types):
+            args = (args, )
+        return cls(*args)
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+class parameterized(object):
+    """ Parameterize a test case::
+
+            class TestInt(object):
+                @parameterized([
+                    ("A", 10),
+                    ("F", 15),
+                    param("10", 42, base=42)
+                ])
+                def test_int(self, input, expected, base=16):
+                    actual = int(input, base=base)
+                    assert_equal(actual, expected)
+
+            @parameterized([
+                (2, 3, 5),
+                (3, 5, 8),
+            ])
+            def test_add(a, b, expected):
+                assert_equal(a + b, expected)
+        """
+
+    def __init__(self, input):
+        self.get_input = self.input_as_callable(input)
+
+    def __call__(self, test_func):
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def parameterized_helper_method(test_self=None):
+            f = test_func
+            if test_self is not None:
+                # If we are a test method (which we suppose to be true if we
+                # are being passed a "self" argument), we first need to create
+                # an instance method, attach it to the instance of the test
+                # class, then pull it back off to turn it into a bound method.
+                # If we don't do this, Nose gets cranky.
+                f = self.make_bound_method(test_self, test_func)
+            # Note: because nose is so very picky, the more obvious
+            # ``return self.yield_nose_tuples(f)`` won't work here.
+            for nose_tuple in self.yield_nose_tuples(f):
+                yield nose_tuple
+
+        test_func.__name__ = "_helper_for_%s" %(test_func.__name__, )
+        parameterized_helper_method.parameterized_input = input
+        parameterized_helper_method.parameterized_func = test_func
+        return parameterized_helper_method
+
+    def yield_nose_tuples(self, func):
+        for args in self.get_input():
+            p = param.from_decorator(args)
+            # ... then yield that as a tuple. If those steps aren't
+            # followed precisely, Nose gets upset and doesn't run the test
+            # or doesn't run setup methods.
+            yield self.param_as_nose_tuple(p, func)
+
+    def param_as_nose_tuple(self, p, func):
+        nose_func = func
+        nose_args = p.args
+        if p.kwargs:
+            nose_func = wraps(func)(lambda args, kwargs: func(*args, **kwargs))
+            nose_args = (p.args, p.kwargs)
+        return (nose_func, ) + nose_args
+
+    def make_bound_method(self, instance, func):
+        cls = type(instance)
+        im_f = new_instancemethod(func, None, cls)
+        setattr(cls, func.__name__, im_f)
+        return getattr(instance, func.__name__)
+
+    def assert_not_in_testcase_subclass(self):
+        parent_classes = self._terrible_magic_get_defining_classes()
+        if any(issubclass(cls, TestCase) for cls in parent_classes):
+            raise Exception("Warning: '@parameterized' tests won't work "
+                            "inside subclasses of 'TestCase' - use "
+                            "'@parameterized.expand' instead")
+
+    def _terrible_magic_get_defining_classes(self):
+        """ Returns the set of parent classes of the class currently being defined.
+            Will likely only work if called from the ``parameterized`` decorator.
+            This function is entirely @brandon_rhodes's fault, as he suggested
+            the implementation: http://stackoverflow.com/a/8793684/71522
+            """
+        stack = inspect.stack()
+        if len(stack) <= 4:
+            return []
+        frame = stack[4]
+        code_context = frame[4] and frame[4][0].strip()
+        if not (code_context and code_context.startswith("class ")):
+            return []
+        _, parents = code_context.split("(", 1)
+        parents, _ = parents.rsplit(")", 1)
+        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+    @classmethod
+    def input_as_callable(cls, input):
+        if callable(input):
+            return lambda: cls.check_input_values(input())
+        input_values = cls.check_input_values(input)
+        return lambda: input_values
+
+    @classmethod
+    def check_input_values(cls, input_values):
+        if not hasattr(input_values, "__iter__"):
+            raise ValueError("expected iterable input; got %r" %(input_values, ))
+        return input_values
+
+    @classmethod
+    def expand(cls, input):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of 'unittest.TestCase', where Nose test generators don't work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_foo_0': <function ...> ...
+            >>>
+            """
+
+        def parameterized_expand_wrapper(f):
+            stack = inspect.stack()
+            frame = stack[1]
+            frame_locals = frame[0].f_locals
+
+            base_name = f.__name__
+            get_input = cls.input_as_callable(input)
+            for num, args in enumerate(get_input()):
+                p = param.from_decorator(args)
+                name_suffix = "_%s" %(num, )
+                if len(p.args) > 0 and isinstance(p.args[0], six.string_types):
+                    name_suffix += "_" + cls.to_safe_name(p.args[0])
+                name = base_name + name_suffix
+                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+            return nottest(f)
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        standalone_func = lambda *a: func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
+        return str(re.sub("[^a-zA-Z0-9_]", "", s))
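
A hypothetical usage sketch, consistent with the docstrings above: the
plain @parameterized form drives nose test generators, while
parameterized.expand injects standalone tests into a TestCase subclass
(where generators do not work):

    import unittest
    from yt.extern.parameterized import parameterized, param

    class TestAdd(unittest.TestCase):
        @parameterized.expand([
            ("small", 2, 3, 5),                # name, a, b, expected
            param("kwargs", 3, 5, expected=8),
        ])
        def test_add(self, name, a, b, expected=None):
            self.assertEqual(a + b, expected)

    # expand() injects test_add_0_small and test_add_1_kwargs into the
    # class body and marks the original with nottest so nose skips it.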

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/extern/six.py
--- /dev/null
+++ b/yt/extern/six.py
@@ -0,0 +1,404 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2013 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin at python.org>"
+__version__ = "1.3.0"
+
+
+# True if we are running on Python 3.
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+            del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)
+        # This is a bit ugly, but it avoids running this again.
+        delattr(tp, self.name)
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+
+class _MovedItems(types.ModuleType):
+    """Lazy loading of moved objects"""
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("winreg", "_winreg"),
+]
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+del attr
+
+moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+
+    _iterkeys = "keys"
+    _itervalues = "values"
+    _iteritems = "items"
+    _iterlists = "lists"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+    _iterkeys = "iterkeys"
+    _itervalues = "itervalues"
+    _iteritems = "iteritems"
+    _iterlists = "iterlists"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+def iterkeys(d, **kw):
+    """Return an iterator over the keys of a dictionary."""
+    return iter(getattr(d, _iterkeys)(**kw))
+
+def itervalues(d, **kw):
+    """Return an iterator over the values of a dictionary."""
+    return iter(getattr(d, _itervalues)(**kw))
+
+def iteritems(d, **kw):
+    """Return an iterator over the (key, value) pairs of a dictionary."""
+    return iter(getattr(d, _iteritems)(**kw))
+
+def iterlists(d, **kw):
+    """Return an iterator over the (key, [values]) pairs of a dictionary."""
+    return iter(getattr(d, _iterlists)(**kw))
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+    def u(s):
+        return s
+    if sys.version_info[1] <= 1:
+        def int2byte(i):
+            return bytes((i,))
+    else:
+        # This is about 2x faster than the implementation above on 3.2+
+        int2byte = operator.methodcaller("to_bytes", 1, "big")
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+else:
+    def b(s):
+        return s
+    def u(s):
+        return unicode(s, "unicode_escape")
+    int2byte = chr
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+if PY3:
+    import builtins
+    exec_ = getattr(builtins, "exec")
+
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+
+    print_ = getattr(builtins, "print")
+    del builtins
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+    def print_(*args, **kwargs):
+        """The new-style print function."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+
+_add_doc(reraise, """Reraise an exception.""")
+
+
+def with_metaclass(meta, base=object):
+    """Create a base class with a metaclass."""
+    return meta("NewBase", (base,), {})
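
For orientation, the bundled copy is used exactly like upstream six; a
small 2/3-compatible sketch using only names defined above (module path
as added by this commit):

    from yt.extern import six
    from yt.extern.six.moves import xrange  # maps to range on Python 3

    def classify(obj):
        # string_types is (basestring,) on Python 2 and (str,) on 3
        if isinstance(obj, six.string_types):
            return six.u("string")
        if isinstance(obj, six.integer_types):
            return six.u("integer")
        return six.text_type(type(obj).__name__)

    for i in xrange(3):
        six.print_(i, classify(i))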

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1,3 +1,4 @@
+#cython: profile=True
 """
 
 """
@@ -134,7 +135,7 @@
 
     # common attributes
     cdef public int num_grid
-    cdef int64_t num_root_cells
+    cdef public int64_t num_root_cells
     cdef int64_t sfc_min, sfc_max
 
     # grid attributes
@@ -606,126 +607,118 @@
     def _initialize_root_mesh(self):
         # We actually will not be initializing the root mesh here, we will be
         # initializing the entire mesh between sfc_start and sfc_end.
-        cdef np.int64_t oct_ind, sfc, nadded, tot_octs, ipos
-        cdef np.uint8_t bits
-        cdef int status
+        cdef np.int64_t oct_ind, sfc, tot_octs, ipos
+        cdef int i, status, level, num_oct_levels, num_root, num_octs
+        cdef int num_level_octs
         cdef artio_fileset_handle *handle = self.artio_handle.handle
-        cdef double dpos[3]
         cdef int coords[3]
-        cdef int num_oct_levels, level, i, j
         cdef int max_level = self.artio_handle.max_level
+        cdef double *dpos
+        cdef double rpos[3]
+        cdef int *olevel
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
-        cdef np.int64_t *tot_octs_per_level = <np.int64_t *>malloc(
-            (max_level + 1)*sizeof(np.int64_t))
         self.level_indices = <np.int64_t *>malloc(
             (max_level + 1)*sizeof(np.int64_t))
-        for level in range(max_level + 1):
-            tot_octs_per_level[level] = 0
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status) 
-        # Now we iterate and create them, level by level.
-        # Note that we are doing a bit of magic to figure out how many root
-        # nodes we will need at most
-        cdef int nmask = self.nn[0] * self.nn[1] * self.nn[2] / 8
-        cdef np.uint8_t *mask = <np.uint8_t *> malloc(
-            self.nn[0] * self.nn[1] * self.nn[2]) # one bit for each one
-        for i in range(nmask): mask[i] = 0
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, NULL, &num_oct_levels, num_octs_per_level )
-            check_artio_status(status) 
-            artio_sfc_coords(handle, sfc, coords)
-            # Now we mask that bit
-            for i in range(3):
-                coords[i] = <int> (coords[i]/2)
-            ipos = ((coords[0]*self.nn[1])+coords[1])*self.nn[2]+coords[2]
-            bits = ipos % 8
-            mask[ <int> (ipos/8) ] |= (1 << bits)
-            for level in range(1, num_oct_levels+1):
-                # Now we are simply counting so we can pre-allocate arrays.
-                # Because the grids have all been cached this should be fine.
-                tot_octs_per_level[level] += num_octs_per_level[level-1]
-            status = artio_grid_read_root_cell_end( handle )
-            check_artio_status(status)
-        cdef np.int64_t num_root = 0
-        for i in range(nmask):
-            for j in range(8):
-                num_root += ((mask[i] >> j) & 1)
-        tot_octs_per_level[0] = num_root
-        cdef np.int64_t tot = 0
-        for i in range(max_level + 1):
-            self.level_indices[i] = tot
-            tot += tot_octs_per_level[i]
-        self.allocate_domains([num_root, tot - num_root], num_root)
-        # Now we have everything counted, and we need to create the appropriate
-        # number of arrays.
-        cdef np.ndarray[np.float64_t, ndim=2] pos
-        pos = np.empty((tot, 3), dtype="float64")
-        # We do a special case for level 0
+        for i in range(max_level+1):
+            self.level_indices[i] = 0
         cdef np.float64_t dds[3]
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+
+        status = artio_grid_cache_sfc_range(handle, 0, self.artio_handle.num_root_cells-1)
+        check_artio_status(status)
+        #status = artio_grid_cache_sfc_range(handle,
+        #    self.sfc_start, self.sfc_end )
+        #check_artio_status(status)
+
+        # compute total octs in sfc range (not including root level)
+        status = artio_grid_count_octs_in_sfc_range(handle,self.sfc_start,self.sfc_end,&tot_octs)
+        check_artio_status(status)
+
+        # now determine the number of root octs we touch
+        root_octs = {}
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, NULL, &num_oct_levels, num_octs_per_level)
-            check_artio_status(status) 
             artio_sfc_coords(handle, sfc, coords)
-            # Now we check if we have added yet or not
             for i in range(3):
                 coords[i] = <int> (coords[i]/2)
-            ipos = ((coords[0]*self.nn[1])+coords[1])*self.nn[2]+coords[2]
-            bits = ipos % 8
-            if ((mask[<int>(ipos/8)] >> bits) & 1) == 1:
-                # We add it here
+            ipos = (coords[0]*self.nn[1]+coords[1])*self.nn[2]+coords[2]
+            root_octs[ipos] = 1
+        num_root = len(root_octs)
+
+        self.allocate_domains([num_root, tot_octs], num_root)
+        pos = np.empty((num_root, 3), dtype="float64")
+
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            artio_sfc_coords(handle, sfc, coords)
+            for i in range(3):
+                coords[i] = <int> (coords[i]/2)
+            ipos = (coords[0]*self.nn[1]+coords[1])*self.nn[2]+coords[2]
+            if root_octs[ipos] == 1:
                 for i in range(3):
-                    dpos[i] = self.DLE[i] + (coords[i]+0.5)*dds[i]
-                    pos[self.level_indices[0], i] = dpos[i]
-                mask[<int>(ipos/8)] -= (1 << bits)
+                    pos[self.level_indices[0], i] = \
+                            self.DLE[i] + (coords[i]+0.5)*dds[i]
                 self.level_indices[0] += 1
-            # Now we iterate over all the children
-            for level in range(1, num_oct_levels+1):
-                status = artio_grid_read_level_begin(handle, level)
-                check_artio_status(status) 
-                for oct_ind in range(num_octs_per_level[level - 1]):
-                    status = artio_grid_read_oct(handle, dpos, NULL, NULL)
+                root_octs[ipos] = 0
+        del root_octs
+
+        # add all root octs
+        self.add(1, 0, pos)
+        del pos
+
+        # now scan through grid file to load oct positions
+        if tot_octs > 0:
+            dpos = <double *>malloc(3*tot_octs*sizeof(double))
+            olevel = <int *>malloc(tot_octs*sizeof(int))
+
+            num_octs = 0
+            for sfc in range(self.sfc_start, self.sfc_end + 1):
+                status = artio_grid_read_root_cell_begin( handle, sfc,
+                    rpos, NULL, &num_oct_levels, num_octs_per_level)
+                check_artio_status(status)
+                for level in range(1, num_oct_levels+1):
+                    self.level_indices[level] += num_octs_per_level[level - 1]
+                    status = artio_grid_read_level_begin(handle, level)
                     check_artio_status(status)
-                    for i in range(3):
-                        pos[self.level_indices[level], i] = dpos[i]
-                    self.level_indices[level] += 1
-                status = artio_grid_read_level_end(handle)
+                    for oct_ind in range(num_octs_per_level[level - 1]):
+                        status = artio_grid_read_oct(handle, &dpos[3*num_octs], NULL, NULL)
+                        check_artio_status(status)
+                        olevel[num_octs] = level
+                        num_octs += 1
+                    status = artio_grid_read_level_end(handle)
+                    check_artio_status(status)
+                status = artio_grid_read_root_cell_end(handle)
                 check_artio_status(status)
-            status = artio_grid_read_root_cell_end( handle )
-            check_artio_status(status)
-        nadded = 0
-        cdef np.int64_t si, ei
-        si = 0
-        # We initialize domain to 1 so that all root mesh octs are viewed as
-        # not belonging to this domain.  This way we don't get confused with
-        # how the different meshes are interfaced, and the root mesh container
-        # will own all the root mesh octs.  Then, for all octs at higher
-        # levels, we use domain == 2.
-        cdef int domain = 1
+
+            num_level_octs = 0
+            for level in range(1, max_level+1):
+                if self.level_indices[level] > num_level_octs: 
+                    num_level_octs = self.level_indices[level]
+            pos = np.empty((num_level_octs, 3), dtype="float64")
+            for level in range(1, max_level+1):
+                if self.level_indices[level] == 0: continue
+                num_level_octs = 0
+                for oct_ind in range(num_octs):
+                    if olevel[oct_ind] == level:
+                        for i in range(3):
+                            pos[num_level_octs,i] = dpos[3*oct_ind+i]
+                        num_level_octs += 1
+                assert(num_level_octs == self.level_indices[level])
+                num_level_octs = self.add( 2, level, pos[:num_level_octs, :])
+                if num_level_octs != self.level_indices[level]:
+                    print self.sfc_start, self.sfc_end
+                    print level, self.level_indices[level], num_level_octs
+                    raise RuntimeError
+     
+            free(olevel)
+            free(dpos)
+        free(num_octs_per_level)
+ 
+        num_octs = 0
         for level in range(max_level + 1):
-            self.level_indices[level] = si
-            ei = si + tot_octs_per_level[level]
-            if tot_octs_per_level[level] == 0: break
-            nadded = self.add(domain, level, pos[si:ei, :])
-            if level > 0 and nadded != (ei - si):
-                print domain, self.sfc_start, self.sfc_end
-                print level, nadded, ei, si, self.max_root,
-                print self.level_indices[level]
-                print pos[si:ei,:]
-                print nadded, (ei - si), tot_octs_per_level[0]
-                raise RuntimeError
-            si = ei
-            domain = 2
-        #status = artio_grid_clear_sfc_cache(handle)
-        #check_artio_status(status)
-        free(mask)
-        free(num_octs_per_level)
-        free(tot_octs_per_level)
+            num_level_octs = self.level_indices[level]
+            self.level_indices[level] = num_octs
+            num_octs += num_level_octs
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
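
The rewritten _initialize_root_mesh above replaces the bit-mask root
count with a dictionary keyed on the parent oct's flattened position:
every SFC root cell is mapped to its parent oct, and the number of
distinct keys is the number of root octs touched. The same idea in
plain Python (a sketch; artio_sfc_coords is the real coordinate
lookup in the Cython version):

    def count_root_octs(cell_coords, nn):
        # cell_coords: iterable of (i, j, k) root-cell indices;
        # nn: grid dimensions. Counts distinct parent octs.
        root_octs = {}
        for (i, j, k) in cell_coords:
            ci, cj, ck = i // 2, j // 2, k // 2  # parent oct indices
            ipos = (ci * nn[1] + cj) * nn[2] + ck
            root_octs[ipos] = 1
        return len(root_octs)
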
@@ -770,8 +763,9 @@
             field_vals[i] = <np.float32_t*> source.data
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
+        #status = artio_grid_cache_sfc_range(handle,
+        #    self.sfc_start, self.sfc_end )
+        status = artio_grid_cache_sfc_range(handle,0,self.artio_handle.num_root_cells-1)
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             status = artio_grid_read_root_cell_begin( handle, sfc, 
@@ -995,12 +989,16 @@
     cdef artio_fileset_handle *handle
     cdef np.uint64_t sfc_start
     cdef np.uint64_t sfc_end
+    cdef public object _last_mask
+    cdef public object _last_selector_id
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
                  domain_right_edge,
                  artio_fileset artio_handle,
                  sfc_start, sfc_end):
+        self._last_selector_id = None
+        self._last_mask = None
         self.artio_handle = artio_handle
         self.handle = artio_handle.handle
         cdef int i
@@ -1012,6 +1010,7 @@
         self.sfc_start = sfc_start
         self.sfc_end = sfc_end
 
+    @cython.cdivision(True)
     cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
         # Calculate the index
         cdef int coords[3], i
@@ -1021,6 +1020,7 @@
         sfc = artio_sfc_index(self.handle, coords)
         return sfc
 
+    @cython.cdivision(True)
     cdef void sfc_to_pos(self, np.int64_t sfc, np.float64_t pos[3]) nogil:
         cdef int coords[3], i
         artio_sfc_coords(self.handle, sfc, coords)
@@ -1030,8 +1030,6 @@
     cdef np.int64_t count_cells(self, SelectorObject selector):
         # We visit each cell if it is not refined and determine whether it is
         # included or not.
-        # DHR - selector was used to construct the initial sfc list, so 
-        #  this should always equal the number of root cells
         cdef np.int64_t sfc
         cdef np.float64_t pos[3], right_edge[3]
         cdef int num_cells = 0
@@ -1106,6 +1104,9 @@
         res = np.zeros(num_octs, dtype="int64")
         return res
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -1150,30 +1151,46 @@
             return dest
         return filled
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def mask(self, SelectorObject selector, np.int64_t num_octs = -1):
+        if self._last_selector_id == hash(selector):
+            return self._last_mask
+        else:
+            return self.mask2(selector,num_octs)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def mask2(self, SelectorObject selector, np.int64_t num_octs = -1):
         cdef int i, status
         cdef double dpos[3]
         cdef np.float64_t pos[3]
+        cdef np.int64_t sfc
+        if self._last_selector_id == hash(selector):
+            return self._last_mask
         if num_octs == -1:
             # We need to count, but this process will only occur one time,
             # since num_octs will later be cached.
             num_octs = self.sfc_end - self.sfc_start + 1
-        assert(num_octs == (self.sfc_end - self.sfc_start + 1))
+        #assert(num_octs == (self.sfc_end - self.sfc_start + 1))
         cdef np.ndarray[np.uint8_t, ndim=1] mask
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
         mask = np.zeros((num_octs), dtype="uint8")
-        status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
-                                            self.sfc_end)
+        #status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
+        #                                    self.sfc_end)
+        status = artio_grid_cache_sfc_range(self.handle,0,self.artio_handle.num_root_cells-1)
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             # We check if the SFC is in our selector, and if so, we copy
             # Note that because we initialize to zeros, we can just continue if
             # it's not included.
-            self.sfc_to_pos(sfc, pos)
-            if selector.select_cell(pos, self.dds) == 0: continue
+            #self.sfc_to_pos(sfc, pos)
+            #if selector.select_cell(pos, self.dds) == 0: continue
             # Now we just need to check if the cells are refined.
             status = artio_grid_read_root_cell_begin( self.handle,
                 sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
@@ -1182,11 +1199,17 @@
             check_artio_status(status)
             # If refined, we skip
             if num_oct_levels > 0: continue
+            # check selector
+            for i in range(3):
+                pos[i] = dpos[i]
+            if selector.select_cell(pos, self.dds) == 0: continue
             mask[sfc - self.sfc_start] = 1
         #status = artio_grid_clear_sfc_cache(self.handle)
         #check_artio_status(status)
         free(num_octs_per_level)
-        return mask.astype("bool")
+        self._last_mask = mask.astype("bool")
+        self._last_selector_id = hash(selector)
+        return self._last_mask
 
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
@@ -1228,8 +1251,9 @@
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
         cdef int filled = 0
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
+        #status = artio_grid_cache_sfc_range(handle,
+        #    self.sfc_start, self.sfc_end )
+        status = artio_grid_cache_sfc_range(handle,0,self.artio_handle.num_root_cells-1)
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
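
The caching introduced in mask()/mask2() above keys on hash(selector),
so repeated queries with the same selector skip the grid traversal
entirely. Reduced to its essentials in plain Python (a sketch, not the
Cython class itself):

    class _MaskCache(object):
        def __init__(self, compute):
            self._compute = compute      # the expensive mask builder
            self._last_mask = None
            self._last_selector_id = None

        def mask(self, selector):
            if self._last_selector_id == hash(selector):
                return self._last_mask   # cache hit: no traversal
            self._last_mask = self._compute(selector)
            self._last_selector_id = hash(selector)
            return self._last_mask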

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -374,7 +374,7 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, None)
+        yield YTDataChunk(dobj, "all", oobjs, None, cache = True)
 
     def _chunk_spatial(self, dobj, ngz):
         if ngz > 0:
@@ -386,7 +386,7 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            yield YTDataChunk(dobj, "spatial", [g], None)
+            yield YTDataChunk(dobj, "spatial", [g], None, cache = True)
 
     def _chunk_io(self, dobj, cache = True):
         # _current_chunk is made from identify_base_chunk

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -68,9 +68,9 @@
                 data = data[2::3].reshape(grid_dims,order='F').copy()
         f.close()
         if grid.pf.field_ordering == 1:
-            return data.T
+            return data.T.astype("float64")
         else:
-            return data
+            return data.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -444,6 +444,12 @@
                     mylog.warning('Identical domain left edge and right edges '
                                   'along dummy dimension (%i), attempting to read anyway' % d)
                     self.domain_right_edge[d] = self.domain_left_edge[d]+1.0
+        if self.dimensionality < 3 and self.geometry == "cylindrical":
+            mylog.warning("Extending theta dimension to 2PI + left edge.")
+            self.domain_right_edge[2] = self.domain_left_edge[2] + 2*np.pi
+        elif self.dimensionality < 3 and self.geometry == "polar":
+            mylog.warning("Extending theta dimension to 2PI + left edge.")
+            self.domain_right_edge[1] = self.domain_left_edge[1] + 2*np.pi
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
 

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -82,7 +82,9 @@
         rv = {}
         for field in fields:
             ftype, fname = field
-            rv[field] = np.empty(size, dtype=f["/%s" % fname].dtype)
+            dt = f["/%s" % fname].dtype
+            if dt == "float32": dt = "float64"
+            rv[field] = np.empty(size, dtype=dt)
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -224,7 +224,10 @@
             else:
                 self.units[field_name] = 1.0
             if 'field_units' in current_field.attrs:
-                current_fields_unit = just_one(current_field.attrs['field_units'])
+                if type(current_field.attrs['field_units']) == str:
+                    current_fields_unit = current_field.attrs['field_units']
+                else:
+                    current_fields_unit = just_one(current_field.attrs['field_units'])
             else:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -84,8 +84,11 @@
           units=r"\rm{cm}/\rm{s}")
 
 for f,v in log_translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=True)
+    add_field(f, TranslationFunc(v), take_log=True,
+              units=KnownGDFFields[v].get_units(),
+              projected_units=KnownGDFFields[v].get_projected_units())
 
 for f,v in translation_dict.items():
-    add_field(f, TranslationFunc(v), take_log=False)
-
+    add_field(f, TranslationFunc(v), take_log=False,
+              units=KnownGDFFields[v].get_units(),
+              projected_units=KnownGDFFields[v].get_projected_units())

diff -r 4c2653d013ab9a47ea280c51fa13d16d4c77145f -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -112,9 +112,9 @@
         if self.cosmological_simulation:
             for unit in mpc_conversion:
                 mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
-                mpch['%shcm' % unit] = (mpch["%sh" % unit] /
+                mpch['%shcm' % unit] = (mpch["%sh" % unit] *
                                 (1 + self.current_redshift))
-                mpch['%scm' % unit] = mpch[unit] / (1 + self.current_redshift)
+                mpch['%scm' % unit] = mpch[unit] * (1 + self.current_redshift)
         # ud == unit destination
         # ur == unit registry
         for ud, ur in [(self.units, mpch), (self.time_units, sec_conversion)]:
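
The direction of this fix follows from the comoving/proper relation
d_comoving = d_proper * (1 + z): expressing a proper length in comoving
units multiplies by (1 + z) rather than dividing. A quick numeric check
(h and z values hypothetical):

    h, z = 0.7, 1.0
    d_mpc    = 1.0                  # proper Mpc
    d_mpch   = d_mpc * h            # 0.7  ('mpch')
    d_mpchcm = d_mpch * (1.0 + z)   # 1.4  ('mpchcm', comoving)
    d_mpccm  = d_mpc * (1.0 + z)    # 2.0  ('mpccm', comoving)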

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/829c7ab5fcfa/
Changeset:   829c7ab5fcfa
Branch:      yt-3.0
User:        drudd
Date:        2013-08-30 21:13:03
Summary:     Updated with Matt's selector_fill refactoring
Affected #:  19 files

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,20 +36,31 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
+def cell_count_cache(func):
+    def cc_cache_func(self, dobj):
+        if hash(dobj.selector) != self._last_selector_id:
+            self._cell_count = -1
+        rv = func(self, dobj)
+        self._cell_count = rv.shape[0]
+        self._last_selector_id = hash(dobj.selector)
+        return rv
+    return cc_cache_func
+
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
-    _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
     _domain_offset = 0
-    _num_octs = -1
+    _cell_count = -1
 
-    def __init__(self, base_region, domain, pf):
+    def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.domain = domain
@@ -145,37 +156,47 @@
         if vals is None: return
         return np.asfortranarray(vals)
 
+    def smooth(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
+        if fields is None: fields = []
+        op = cls(nvals, len(fields), 64)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
+    @cell_count_cache
     def select_icoords(self, dobj):
-        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
-                                     num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.icoords(dobj.selector, domain_id = self.domain_id,
+                                     num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_fcoords(self, dobj):
-        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
-                                     num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.fcoords(dobj.selector, domain_id = self.domain_id,
+                                        num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_fwidth(self, dobj):
-        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
-                                  num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.fwidth(dobj.selector, domain_id = self.domain_id,
+                                       num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_ires(self, dobj):
-        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
-                                  num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.ires(dobj.selector, domain_id = self.domain_id,
+                                     num_cells = self._cell_count)
 
     def select(self, selector, source, dest, offset):
         n = self.oct_handler.selector_fill(selector, source, dest, offset,
@@ -183,11 +204,7 @@
         return n
 
     def count(self, selector):
-        if hash(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+        return -1
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
@@ -206,8 +223,10 @@
     _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
-    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+                 over_refine_factor = 1):
         # The first attempt at this will not work in parallel.
+        self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files
         self.field_data = YTFieldData()
         self.field_parameters = {}
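
The cell_count_cache decorator above memoizes the per-selector cell
count: the cached count is invalidated whenever the selector hash
changes and re-recorded from the shape of whatever the wrapped selector
method returns. A toy illustration (both stand-in classes are
hypothetical):

    import numpy as np

    def cell_count_cache(func):
        def cc_cache_func(self, dobj):
            if hash(dobj.selector) != self._last_selector_id:
                self._cell_count = -1    # unknown: force a recount
            rv = func(self, dobj)
            self._cell_count = rv.shape[0]
            self._last_selector_id = hash(dobj.selector)
            return rv
        return cc_cache_func

    class _Dobj(object):
        selector = "sphere"              # hashable stand-in

    class _Subset(object):
        _cell_count = -1
        _last_selector_id = None

        @cell_count_cache
        def select_icoords(self, dobj):
            # -1 means "count from scratch"; afterwards the cached
            # count is reused, as with num_cells in the real methods.
            n = 8 if self._cell_count == -1 else self._cell_count
            return np.zeros((n, 3), dtype="int64")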

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -135,7 +135,7 @@
 
     # common attributes
     cdef public int num_grid
-    cdef public int64_t num_root_cells
+    cdef int64_t num_root_cells
     cdef int64_t sfc_min, sfc_max
 
     # grid attributes
@@ -575,7 +575,7 @@
     np.float64_t *pvars[16]
     np.float64_t *svars[16]
 
-cdef class ARTIOOctreeContainer(SparseOctreeContainer):
+    cdef class ARTIOOctreeContainer(SparseOctreeContainer):
     # This is a transitory, created-on-demand OctreeContainer.  It should not
     # be considered to be long-lasting, and during its creation it will read
     # the index file.  This means that when created it will then be able to
@@ -626,11 +626,9 @@
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
 
-        status = artio_grid_cache_sfc_range(handle, 0, self.artio_handle.num_root_cells-1)
+        status = artio_grid_cache_sfc_range(handle,
+            self.sfc_start, self.sfc_end )
         check_artio_status(status)
-        #status = artio_grid_cache_sfc_range(handle,
-        #    self.sfc_start, self.sfc_end )
-        #check_artio_status(status)
 
         # compute total octs in sfc range (not including root level)
         status = artio_grid_count_octs_in_sfc_range(handle,self.sfc_start,self.sfc_end,&tot_octs)
@@ -763,9 +761,8 @@
             field_vals[i] = <np.float32_t*> source.data
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
-        #status = artio_grid_cache_sfc_range(handle,
-        #    self.sfc_start, self.sfc_end )
-        status = artio_grid_cache_sfc_range(handle,0,self.artio_handle.num_root_cells-1)
+        status = artio_grid_cache_sfc_range(handle,
+            self.sfc_start, self.sfc_end )
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             status = artio_grid_read_root_cell_begin( handle, sfc, 
@@ -1036,17 +1033,16 @@
         cdef int i
         return self.mask(selector).sum()
 
-    def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
         cdef np.int64_t sfc
         cdef int acoords[3], i
-        # We call it num_octs, but it's really num_cells.
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="int64")
+        coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
@@ -1059,18 +1055,17 @@
             filled += 1
         return coords
 
-    def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
+        # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
         cdef np.int64_t sfc
         cdef np.float64_t pos[3]
         cdef int acoords[3], i
-        # We call it num_octs, but it's really num_cells.
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="float64")
+        coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
@@ -1083,25 +1078,25 @@
             filled += 1
         return coords
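The renamed fcoords/icoords size their output from the selector mask rather than from the SFC range: num_cells = mask.sum(), and only masked root cells are filled, in SFC order. The same pattern sketched in NumPy (the range and mask here are assumed example inputs):

    import numpy as np
    # Assumed SFC range and selector mask for illustration.
    sfc_start, sfc_end = 100, 107
    mask = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype="uint8")
    num_cells = mask.sum()
    coords = np.empty((num_cells, 3), dtype="int64")
    filled = 0
    for sfc in range(sfc_start, sfc_end + 1):
        if mask[sfc - sfc_start] == 0: continue
        coords[filled, :] = sfc        # stand-in for the decoded position
        filled += 1
    # filled == num_cells == 4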
 
-    def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef int i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.float64_t, ndim=2] width
-        width = np.zeros((num_octs, 3), dtype="float64")
+        width = np.zeros((num_cells, 3), dtype="float64")
         for i in range(3):
             width[:,i] = self.dds[i]
         return width
 
-    def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.zeros(num_octs, dtype="int64")
+        res = np.zeros(num_cells, dtype="int64")
         return res
 
     @cython.boundscheck(False)
@@ -1163,27 +1158,25 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask2(self, SelectorObject selector, np.int64_t num_octs = -1):
+    def mask2(self, SelectorObject selector, np.int64_t num_cells = -1):
         cdef int i, status
         cdef double dpos[3]
         cdef np.float64_t pos[3]
         cdef np.int64_t sfc
         if self._last_selector_id == hash(selector):
             return self._last_mask
-        if num_octs == -1:
+        if num_cells == -1:
             # We need to count, but this process will only occur one time,
-            # since num_octs will later be cached.
-            num_octs = self.sfc_end - self.sfc_start + 1
-        #assert(num_octs == (self.sfc_end - self.sfc_start + 1))
+            # since num_cells will later be cached.
+            num_cells = self.sfc_end - self.sfc_start + 1
         cdef np.ndarray[np.uint8_t, ndim=1] mask
         cdef int num_oct_levels
         cdef int max_level = self.artio_handle.max_level
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
-        mask = np.zeros((num_octs), dtype="uint8")
-        #status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
-        #                                    self.sfc_end)
-        status = artio_grid_cache_sfc_range(self.handle,0,self.artio_handle.num_root_cells-1)
+        mask = np.zeros((num_cells), dtype="uint8")
+        status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
+                                            self.sfc_end)
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             # We check if the SFC is in our selector, and if so, we copy
@@ -1251,9 +1244,8 @@
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
         cdef int filled = 0
-        #status = artio_grid_cache_sfc_range(handle,
-        #    self.sfc_start, self.sfc_end )
-        status = artio_grid_cache_sfc_range(handle,0,self.artio_handle.num_root_cells-1)
+        status = artio_grid_cache_sfc_range(handle,
+            self.sfc_start, self.sfc_end )
         check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,6 +55,7 @@
     domain_id = 2
     _con_args = ("base_region", "sfc_start", "sfc_end", "pf")
     _type_name = 'octree_subset'
+    _num_zones = 2
 
     def __init__(self, base_region, sfc_start, sfc_end, pf):
         self.field_data = YTFieldData()

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -215,6 +215,7 @@
                                 self.amr_header['nboundary']*l]
             return ng
         min_level = self.pf.min_level
+        max_level = min_level
         nx, ny, nz = (((i-1.0)/2.0) for i in self.amr_header['nx'])
         for level in range(self.amr_header['nlevelmax']):
             # Easier if do this 1-indexed
@@ -248,6 +249,8 @@
                     assert(pos.shape[0] == ng)
                     n = self.oct_handler.add(cpu + 1, level - min_level, pos)
                     assert(n == ng)
+                    if n > 0: max_level = max(level - min_level, max_level)
+        self.max_level = max_level
         self.oct_handler.finalize()
 
     def included(self, selector):
@@ -297,7 +300,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self.max_level = pf.max_level
+        self.max_level = None
 
         self.float_type = np.float64
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
@@ -308,6 +311,7 @@
                         for i in range(self.parameter_file['ncpu'])]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
+        self.max_level = max(dom.max_level for dom in self.domains)
         self.num_grids = total_octs
 
     def _detect_fields(self):

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,6 +96,7 @@
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None
+    over_refine_factor = 1
 
     def _set_units(self):
         self.units = {}
@@ -154,8 +155,10 @@
 
     def __init__(self, filename, data_style="gadget_binary",
                  additional_fields = (),
-                 unit_base = None, n_ref = 64):
+                 unit_base = None, n_ref = 64,
+                 over_refine_factor = 1):
         self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -188,7 +191,8 @@
 
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -268,11 +272,13 @@
     _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", n_ref = 64):
+    def __init__(self, filename, data_style="OWLS", n_ref = 64,
+                 over_refine_factor = 1):
         self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               unit_base = None,
-                                               n_ref = n_ref)
+        super(OWLSStaticOutput, self).__init__(
+                               filename, data_style,
+                               unit_base = None, n_ref = n_ref,
+                               over_refine_factor = over_refine_factor)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]
@@ -292,7 +298,8 @@
         self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.cosmological_simulation = 1
         self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
@@ -438,7 +445,8 @@
                 self.parameters[param] = val
 
         self.current_time = hvals["time"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         if self.parameters.get('bPeriodic', True):
             self.periodicity = (True, True, True)
         else:

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -738,10 +738,11 @@
     file_count = 1
     filename_template = "stream_file"
     n_ref = 64
+    over_refine_factor = 1
 
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
-                      n_ref = 64):
+                      n_ref = 64, over_refine_factor = 1):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -828,6 +829,7 @@
 
     spf = StreamParticlesStaticOutput(handler)
     spf.n_ref = n_ref
+    spf.over_refine_factor = over_refine_factor
     spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -40,6 +40,8 @@
 cdef struct OctInfo:
     np.float64_t left_edge[3]
     np.float64_t dds[3]
+    np.int64_t ipos[3]
+    np.int32_t level
 
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
@@ -49,6 +51,16 @@
     OctAllocationContainer *next
     Oct *my_octs
 
+cdef struct OctList
+
+cdef struct OctList:
+    OctList *next
+    Oct *o
+
+cdef OctList *OctList_append(OctList *list, Oct *o)
+cdef int OctList_count(OctList *list)
+cdef void OctList_delete(OctList *list)
+
 cdef class OctreeContainer:
     cdef OctAllocationContainer *cont
     cdef OctAllocationContainer **domains
@@ -56,12 +68,13 @@
     cdef oct_visitor_function *fill_func
     cdef int partial_coverage
     cdef int nn[3]
+    cdef np.uint8_t oref
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int max_domain
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
-    cdef void neighbors(self, Oct *, Oct **)
+    cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.
@@ -71,6 +84,7 @@
                         OctVisitorData *data)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -96,8 +96,10 @@
 cdef class OctreeContainer:
 
     def __init__(self, oct_domain_dimensions, domain_left_edge,
-                 domain_right_edge, partial_coverage = 0):
+                 domain_right_edge, partial_coverage = 0,
+                 over_refine = 1):
         # This will just initialize the root mesh octs
+        self.oref = over_refine
         self.partial_coverage = partial_coverage
         cdef int i, j, k, p
         for i in range(3):
@@ -120,6 +122,21 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+        cdef int i
+        data.index = 0
+        data.last = -1
+        data.global_index = -1
+        for i in range(3):
+            data.pos[i] = -1
+            data.ind[i] = -1
+        data.array = NULL
+        data.dims = 0
+        data.domain = domain_id
+        data.level = -1
+        data.oref = self.oref
+        data.nz = (1 << (data.oref*3))
+
     def __dealloc__(self):
         free_octs(self.cont)
         if self.root_mesh == NULL: return
@@ -185,27 +202,39 @@
         return 0
 
     cdef int get_root(self, int ind[3], Oct **o):
+        cdef int i
+        for i in range(3):
+            if ind[i] < 0 or ind[i] >= self.nn[i]:
+                o[0] = NULL
+                return 1
         o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        return 1
+        return 0
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
+                  ):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3]
+        cdef int ind[3], level
+        cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
+        cdef int i
         cur = next = NULL
-        cdef int i
+        level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+            ipos[i] = 0
         self.get_root(ind, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
+            level += 1
+            for i in range(3):
+                ipos[i] = (ipos[i] << 1) + ind[i]
             cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
@@ -227,18 +256,22 @@
                 cp[i] -= dds[i]/2.0 # Now centered
             else:
                 cp[i] += dds[i]/2.0
-            # We don't need to change dds[i] as it has been halved from the
-            # oct width, thus making it already the cell width
-            oinfo.dds[i] = dds[i] # Cell width
+            # We don't normally need to change dds[i] as it has been halved
+            # from the oct width, thus making it already the cell width.
+            # But, for some cases where the oref != 1, this needs to be
+            # changed.
+            oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
+            oinfo.ipos[i] = ipos[i]
+        oinfo.level = level
         return cur
 
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.max_domain, dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data)
         data.array = domain_mask.data
-        data.domain = -1
         self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
         cdef int i
         domain_ids = []
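The generalized cell width in get() divides the descended oct width by 1 << (oref - 1): for oref = 1 the divisor is 1, so dds is already the cell width, while larger oref values shrink it further. A quick arithmetic sketch (the starting width is an assumed value):

    # dds has already been halved down to the child-oct width by the
    # descent loop; scale it to the cell width for a given oref.
    oct_child_width = 0.25              # assumed value after descent
    for oref in (1, 2, 3):
        cell_width = oct_child_width / (1 << (oref - 1))
        print oref, cell_width          # 0.25, 0.125, 0.0625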
@@ -250,162 +283,128 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
-        #Get 3x3x3 neighbors, although the 1,1,1 oct is the
-        #central one. 
-        #Return an array of Octs
-        cdef np.int64_t curopos[3]
-        cdef np.int64_t curnpos[3]
-        cdef np.int64_t npos[3]
-        cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
-        cdef np.float64_t dds[3], cp[3], pp[3]
+    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors):
         cdef Oct* candidate
-        for i in range(27): neighbors[i] = NULL
         nn = 0
-        raise RuntimeError
-        #for ni in range(3):
-        #    for nj in range(3):
-        #        for nk in range(3):
-        #            if ni == nj == nk == 1:
-        #                neighbors[nn] = o
-        #                nn += 1
-        #                continue
-        #            npos[0] = o.pos[0] + (ni - 1)
-        #            npos[1] = o.pos[1] + (nj - 1)
-        #            npos[2] = o.pos[2] + (nk - 1)
-        #            for i in range(3):
-        #                # Periodicity
-        #                if npos[i] == -1:
-        #                    npos[i] = (self.nn[i]  << o.level) - 1
-        #                elif npos[i] == (self.nn[i] << o.level):
-        #                    npos[i] = 0
-        #                curopos[i] = o.pos[i]
-        #                curnpos[i] = npos[i] 
-        #            # Now we have our neighbor position and a safe place to
-        #            # keep it.  curnpos will be the root index of the neighbor
-        #            # at a given level, and npos will be constant.  curopos is
-        #            # the candidate root at a level.
-        #            candidate = o
-        #            while candidate != NULL:
-        #                if ((curopos[0] == curnpos[0]) and 
-        #                    (curopos[1] == curnpos[1]) and
-        #                    (curopos[2] == curnpos[2])):
-        #                    break
-        #                # This one doesn't meet it, so we pop up a level.
-        #                # First we update our positions, then we update our
-        #                # candidate.
-        #                for i in range(3):
-        #                    # We strip a digit off the right
-        #                    curopos[i] = (curopos[i] >> 1)
-        #                    curnpos[i] = (curnpos[i] >> 1)
-        #                # Now we update to the candidate's parent, which should
-        #                # have a matching position to curopos[]
-        #                # TODO: This has not survived the transition to
-        #                # mostly-stateless Octs!
-        #                raise RuntimeError
-        #                candidate = candidate.parent
-        #            if candidate == NULL:
-        #                # Worst case scenario
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> (o.level))
-        #                candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        #            # Now we have the common root, which may be NULL
-        #            while candidate.level < o.level:
-        #                dl = o.level - (candidate.level + 1)
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> dl) & 1
-        #                if candidate.children[cind(ind[0],ind[1],ind[2])] \
-        #                        == NULL:
-        #                    break
-        #                candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
-        #            neighbors[nn] = candidate
-        #            nn += 1
+        # We are going to do a brute-force search here.
+        # This is not the most efficient -- in fact, it's relatively bad.  But
+        # we will attempt to improve it in a future iteration, where we will
+        # grow a stack of parent Octs.
+        # Note that in the first iteration, we will just find the up-to-27
+        # neighbors, including the main oct.
+        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef OctList *olist, *my_list
+        my_list = olist = NULL
+        cdef Oct *cand
+        cdef np.int64_t npos[3], ndim[3]
+        # Now we get our boundaries for this level, so that we can wrap around
+        # if need be.
+        # ndim is the oct dimensions of the level, not the cell dimensions.
+        for i in range(3):
+            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            ndim[i] = (ndim[i] >> self.oref)
+        for i in range(3):
+            npos[0] = (oi.ipos[0] + (1 - i))
+            if npos[0] < 0: npos[0] += ndim[0]
+            if npos[0] >= ndim[0]: npos[0] -= ndim[0]
+            for j in range(3):
+                npos[1] = (oi.ipos[1] + (1 - j))
+                if npos[1] < 0: npos[1] += ndim[1]
+                if npos[1] >= ndim[1]: npos[1] -= ndim[1]
+                for k in range(3):
+                    npos[2] = (oi.ipos[2] + (1 - k))
+                    if npos[2] < 0: npos[2] += ndim[2]
+                    if npos[2] >= ndim[2]: npos[2] -= ndim[2]
+                    # Now we have our npos, which we just need to find.
+                    # Level 0 gets bootstrapped
+                    for n in range(3):
+                        ind[n] = ((npos[n] >> (oi.level)) & 1)
+                    cand = NULL
+                    self.get_root(ind, &cand)
+                    # We should not get a NULL if we handle periodicity
+                    # correctly, but we might.
+                    if cand == NULL: continue
+                    for level in range(1, oi.level+1):
+                        if cand.children == NULL: break
+                        for n in range(3):
+                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                        ii = cind(ind[0],ind[1],ind[2])
+                        if cand.children[ii] == NULL: break
+                        cand = cand.children[ii]
+                    if cand != NULL:
+                        nfound += 1
+                        olist = OctList_append(olist, cand)
+                        if my_list == NULL: my_list = olist
+
+        olist = my_list
+        cdef int noct = OctList_count(olist)
+        cdef Oct **neighbors
+        neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+        for i in range(noct):
+            neighbors[i] = olist.o
+            olist = olist.next
+        OctList_delete(my_list)
+        nneighbors[0] = noct
+        return neighbors
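The brute-force neighbor walk visits the 3x3x3 block of oct positions around oi.ipos, wrapping each coordinate into [0, ndim) for periodicity before descending from the root. A pure-Python sketch of just the position generation (ipos and ndim are assumed example values):

    # Enumerate the up-to-27 periodic neighbor positions around ipos.
    ipos = (3, 0, 7)
    ndim = (8, 8, 8)   # octs per side at this level
    positions = []
    for i in range(3):
        for j in range(3):
            for k in range(3):
                npos = [ipos[0] + (1 - i),
                        ipos[1] + (1 - j),
                        ipos[2] + (1 - k)]
                for n in range(3):
                    if npos[n] < 0: npos[n] += ndim[n]
                    if npos[n] >= ndim[n]: npos[n] -= ndim[n]
                positions.append(tuple(npos))
    # len(positions) == 27, including the central oct itself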
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def get_neighbor_boundaries(self, oppos):
-        cdef int i, ii
-        cdef np.float64_t ppos[3]
-        for i in range(3):
-            ppos[i] = oppos[i]
-        cdef Oct *main = self.get(ppos)
-        cdef Oct* neighbors[27]
-        self.neighbors(main, neighbors)
-        cdef np.ndarray[np.float64_t, ndim=2] bounds
-        cdef np.float64_t corner[3], size[3]
-        bounds = np.zeros((27,6), dtype="float64")
-        tnp = 0
-        raise RuntimeError
-        for i in range(27):
-            self.oct_bounds(neighbors[i], corner, size)
-            for ii in range(3):
-                bounds[i, ii] = corner[ii]
-                bounds[i, 3+ii] = size[ii]
-        return bounds
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1,
              int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
-        coords = np.zeros((num_octs * 8), dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
+        coords = np.zeros((num_cells), dtype="uint8")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs * 8, 3), dtype="int64")
-        cdef OctVisitorData data
+        coords = np.empty((num_cells, 3), dtype="int64")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_octs * 8, dtype="int64")
-        cdef OctVisitorData data
+        res = np.empty(num_cells, dtype="int64")
         data.array = <void *> res.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
+        fwidth = np.empty((num_cells, 3), dtype="float64")
         data.array = <void *> fwidth.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
         for i in range(3):
@@ -416,17 +415,16 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        if num_octs == -1:
-            num_octs = selector.count_octs(self, domain_id)
+        if num_cells == -1:
+            num_cells = selector.count_oct_cells(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
+        coords = np.empty((num_cells, 3), dtype="float64")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
         cdef np.float64_t base_dx
@@ -456,8 +454,8 @@
             else:
                 dest = np.zeros(num_cells, dtype=source.dtype, order='C')
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.index = offset
-        data.domain = domain_id
         # We only need this so we can continue calculating the offset
         data.dims = dims
         cdef void *p[2]
@@ -474,14 +472,16 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
-        if (data.global_index + 1) * 8 * data.dims > source.size:
+        if (data.global_index + 1) * data.nz * data.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
-            print (data.global_index + 1) * 8 * data.dims - source.size
+            print (data.global_index + 1) * data.nz * data.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
         if data.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
             print data.index - dest.size
+            print (data.global_index + 1) * data.nz * data.dims, source.size
+            print num_cells
             raise RuntimeError
         if num_cells >= 0:
             return dest
@@ -492,10 +492,8 @@
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
         cdef OctVisitorData data
-        data.domain = domain_id
+        self.setup_data(&data, domain_id)
         data.array = ind.data
-        data.index = 0
-        data.last = -1
         self.visit_all_octs(selector, oct_visitors.index_octs, &data)
         return ind
 
@@ -578,6 +576,7 @@
         if parent.children != NULL:
             next = parent.children[cind(ind[0],ind[1],ind[2])]
         else:
+            # This *8 does NOT need to be made generic.
             parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 parent.children[i] = NULL
@@ -607,13 +606,12 @@
             file_inds[i] = -1
             cell_inds[i] = 9
         cdef OctVisitorData data
-        data.index = 0
+        self.setup_data(&data, domain_id)
         cdef void *p[3]
         p[0] = levels.data
         p[1] = file_inds.data
         p[2] = cell_inds.data
         data.array = p
-        data.domain = domain_id
         self.visit_all_octs(selector, self.fill_func, &data)
         return levels, cell_inds, file_inds
 
@@ -641,10 +639,9 @@
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = 1
+        self.setup_data(&data, 1)
         self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        assert ((data.global_index+1)*8 == data.index)
+        assert ((data.global_index+1)*data.nz == data.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
@@ -659,9 +656,11 @@
 
 cdef class SparseOctreeContainer(OctreeContainer):
 
-    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge,
+                 over_refine = 1):
         cdef int i, j, k, p
         self.partial_coverage = 1
+        self.oref = over_refine
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
         self.max_domain = -1
@@ -807,3 +806,33 @@
                             dest[local_filled + offset] = source[ox,oy,oz]
                             local_filled += 1
         return local_filled
+
+cdef OctList *OctList_append(OctList *olist, Oct *o):
+    cdef OctList *this = olist
+    if this == NULL:
+        this = <OctList *> malloc(sizeof(OctList))
+        this.next = NULL
+        this.o = o
+        return this
+    while this.next != NULL:
+        this = this.next
+    this.next = <OctList*> malloc(sizeof(OctList))
+    this = this.next
+    this.o = o
+    this.next = NULL
+    return this
+
+cdef int OctList_count(OctList *olist):
+    cdef OctList *this = olist
+    cdef int i = 0 # Count the list
+    while this != NULL:
+        i += 1
+        this = this.next
+    return i
+
+cdef void OctList_delete(OctList *olist):
+    cdef OctList *next, *this = olist
+    while this != NULL:
+        next = this.next
+        free(this)
+        this = next
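OctList_append returns the newly created tail node, so callers keep a separate head pointer (my_list in neighbors() above) for counting and cleanup. The same pattern in Python, simplified for illustration only:

    # Python mirror of the OctList pattern: append returns the tail,
    # the caller retains the head for traversal.
    class OctList:
        def __init__(self, o):
            self.o, self.next = o, None

    def octlist_append(tail, o):
        node = OctList(o)
        if tail is not None:
            tail.next = node
        return node

    head = tail = octlist_append(None, "oct0")
    tail = octlist_append(tail, "oct1")
    count, node = 0, head
    while node is not None:
        count += 1
        node = node.next
    # count == 2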

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -43,6 +43,10 @@
     int dims
     np.int32_t domain
     np.int8_t level
+    np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
+                   # To calculate nzones, 1 << (oref * 3)
+    np.int32_t nz
+
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
@@ -64,10 +68,13 @@
 cdef oct_visitor_function fill_file_indices_rind
 
 cdef inline int cind(int i, int j, int k):
+    # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
     return (((i*2)+j)*2+k)
 
 cdef inline int oind(OctVisitorData *data):
-    return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
 
 cdef inline int rind(OctVisitorData *data):
-    return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
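With d = 1 << oref, oind flattens (ind[0], ind[1], ind[2]) in row-major order over a d**3 zone block and rind does the same with the axes reversed; cind stays hard-wired to 2 because children are always 2x2x2. A small check of the generalized flattening (the indices are assumed examples):

    def oind(ind, oref):
        d = 1 << oref
        return ((ind[0] * d) + ind[1]) * d + ind[2]

    def rind(ind, oref):
        d = 1 << oref
        return ((ind[2] * d) + ind[1]) * d + ind[0]

    print oind((1, 0, 1), 1)   # 5 in an 8-zone oct
    print oind((1, 0, 1), 2)   # 17 in a 64-zone oct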

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
     if selected == 0: return
     cdef int i
     # There are this many records between "octs"
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -50,7 +50,7 @@
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
     cdef int i
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -75,7 +75,7 @@
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         data.index += 1
-    cdef np.int64_t index = data.index * 8
+    cdef np.int64_t index = data.index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -83,7 +83,7 @@
     if selected == 0: return
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * 8
+    cdef np.int64_t index = data.global_index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -102,7 +102,7 @@
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
     for i in range(3):
-        coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
+        coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
     data.index += 1
 
 cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -120,9 +120,9 @@
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
-        c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i]) 
+        c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i]) 
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
@@ -135,7 +135,7 @@
     cdef np.float64_t *fwidth = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
         fwidth[data.index * 3 + i] = dx
     data.index += 1
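The cell width in fcoords/fwidth is now dx = 1.0 / ((1 << oref) << level) in unitary coordinates: the root oct contributes 1 << oref zones per side and each level halves the width. A worked sketch (assuming a unit domain, as these visitors do before the base_dx rescaling):

    for oref in (1, 2):
        for level in (0, 1, 2):
            dx = 1.0 / ((1 << oref) << level)
            print oref, level, dx
    # oref=1: 0.5, 0.25, 0.125; oref=2: 0.25, 0.125, 0.0625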

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -5,7 +5,7 @@
 Affiliation: UC Santa Cruz
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -32,7 +32,7 @@
 from libc.math cimport sqrt
 
 from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 
 cdef extern from "alloca.h":
     void *alloca(int)
@@ -62,7 +62,6 @@
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
-    cdef public int bad_indices
     cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -54,7 +54,6 @@
                      fields = None, int domain_id = -1,
                      int domain_offset = 0):
         cdef int nf, i, j
-        self.bad_indices = 0
         if fields is None:
             fields = []
         nf = len(fields)
@@ -66,7 +65,8 @@
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
         cdef int dims[3]
-        dims[0] = dims[1] = dims[2] = 2
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
         cdef OctInfo oi
         cdef np.int64_t offset, moff
         cdef Oct *oct
@@ -98,7 +98,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Note that this has to be our local index, not our in-file index.
-            offset = dom_ind[oct.domain_ind - moff] * 8
+            offset = dom_ind[oct.domain_ind - moff] * nz
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -86,7 +86,8 @@
                 sum(d.total_particles.values()) for d in self.data_files)
         pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
-            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
+            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge,
+            over_refine = pf.over_refine_factor)
         self.oct_handler.n_ref = pf.n_ref
         mylog.info("Allocating for %0.3e particles", self.total_particles)
         # No more than 256^3 in the region finder.
@@ -147,8 +148,9 @@
                 data_files = [self.data_files[i] for i in
                               self.regions.identify_data_files(dobj.selector)]
             base_region = getattr(dobj, "base_region", dobj)
+            oref = self.parameter_file.over_refine_factor
             subset = [ParticleOctreeSubset(base_region, data_files, 
-                        self.parameter_file)]
+                        self.parameter_file, over_refine_factor = oref)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -205,6 +205,7 @@
         cdef int i, j, k, m, n, ind[3]
         cdef Oct *noct
         cdef np.uint64_t prefix1, prefix2
+        # TODO: This does not need to be changed.
         o.children = <Oct **> malloc(sizeof(Oct *)*8)
         for i in range(2):
             for j in range(2):

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/particle_smooth.pxd
--- /dev/null
+++ b/yt/geometry/particle_smooth.pxd
@@ -0,0 +1,92 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, qsort
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .particle_deposit cimport sph_kernel, gind
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef struct NeighborList
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef inline np.float64_t r2dist(np.float64_t ppos[3],
+                                np.float64_t cpos[3],
+                                np.float64_t DW[3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if (DR > DW[i]/2.0):
+            DR -= DW[i]   # wrap by the full domain width (minimum image)
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+    return r2
+
+cdef class ParticleSmoothOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef public object nvals
+    cdef np.float64_t DW[3]
+    cdef int nfields
+    cdef int maxn
+    cdef int curn
+    cdef np.int64_t *doffs
+    cdef np.int64_t *pinds
+    cdef np.int64_t *pcounts
+    cdef np.float64_t *ppos
+    # Note that we are preallocating here, so this is *not* threadsafe.
+    cdef NeighborList *neighbors
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset)
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3])
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields)
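r2dist applies the minimum-image convention: any component separation larger than half the domain width wraps by the full width. A pure-Python sketch of that wrap (the positions and widths are assumed example inputs):

    def r2dist(ppos, cpos, DW):
        # Periodic squared distance under the minimum-image convention.
        r2 = 0.0
        for i in range(3):
            DR = ppos[i] - cpos[i]
            if DR > DW[i] / 2.0:
                DR -= DW[i]
            elif DR < -DW[i] / 2.0:
                DR += DW[i]
            r2 += DR * DR
        return r2

    print r2dist((0.9, 0.5, 0.5), (0.1, 0.5, 0.5), (1.0, 1.0, 1.0))
    # 0.04: the wrapped separation is -0.2, not 0.8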

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/particle_smooth.pyx
--- /dev/null
+++ b/yt/geometry/particle_smooth.pyx
@@ -0,0 +1,359 @@
+"""
+Particle smoothing in cells
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, realloc
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, OctInfo
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1, *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
+cdef class ParticleSmoothOperation:
+    def __init__(self, nvals, nfields, max_neighbors):
+        # This is the set of cells, in grids, blocks or octs, we are handling.
+        cdef int i
+        self.nvals = nvals 
+        self.nfields = nfields
+        self.maxn = max_neighbors
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
+
+    def initialize(self, *args):
+        raise NotImplementedError
+
+    def finalize(self, *args):
+        raise NotImplementedError
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_octree(self, OctreeContainer octree,
+                     np.ndarray[np.int64_t, ndim=1] dom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0,
+                     int test_neighbors = 0):
+        # This will be a several-step operation.
+        #
+        # We first take all of our particles and assign them to Octs.  If they
+        # are not in an Oct, we will assume they are out of bounds.  Note that
+        # this means that if we have loaded neighbor particles for which an Oct
+        # does not exist, we are going to be discarding them -- so sparse
+        # octrees will need to ensure that neighbor octs *exist*.  Particles
+        # will be assigned in a new NumPy array.  Note that this incurs
+        # overhead, but reduces complexity as we will now be able to use
+        # argsort.
+        #
+        # After the particles have been assigned to Octs, we process each Oct
+        # individually.  We will do this by calling "get" for the *first*
+        # particle in each set of Octs in the sorted list.  After this, we get
+        # neighbors for each Oct.
+        #
+        # Now, with the set of neighbors (and thus their indices) we allocate
+        # an array of particles and their fields, fill these in, and call our
+        # process function.
+        #
+        # This is not terribly efficient -- for starters, the neighbor function
+        # is not the most efficient yet.  We will also need to handle some
+        # mechanism of an expandable array for holding pointers to Octs, so
+        # that we can deal with >27 neighbors.  As I write this comment,
+        # neighbors() only returns 27 neighbors.
+        cdef int nf, i, j, dims[3], n
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
+        cdef int nsize = 0
+        cdef np.int64_t *nind = NULL
+        cdef OctInfo oi
+        cdef Oct *oct, **neighbors = NULL
+        cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
+        cdef np.int64_t *doffs, *pinds, *pcounts, poff
+        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
+        numpart = positions.shape[0]
+        # pcount is the number of particles per oct.
+        pcount = np.zeros_like(dom_ind)
+        # doff is the offset to a given oct in the sorted particles.
+        doff = np.zeros_like(dom_ind) - 1
+        moff = octree.get_domain_offset(domain_id + domain_offset)
+        # pdoms points particles at their octs.  So the value in this array, for
+        # a given index, is the local oct index.
+        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        if fields is None:
+            fields = []
+        nf = len(fields)
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        for i in range(3):
+            self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = octree.get(pos)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            # Note that this has to be our local index, not our in-file index.
+            # This is the particle count, which we'll use once we have sorted
+            # the particles to calculate the offsets into each oct's particles.
+            offset = oct.domain_ind - moff
+            pcount[offset] += 1
+            pdoms[i] = offset # We store the *actual* offset.
+        # Now we have oct assignments.  Let's sort them.
+        # Note that what we will be providing to our processing functions will
+        # actually be indirectly-sorted fields.  This preserves memory at the
+        # expense of additional pointer lookups.
+        pind = np.argsort(pdoms)
+        pind = np.asarray(pind, dtype='int64', order='C')
+        # So what this means is that we now have all the oct-0 particle indices
+        # in order, then the oct-1, etc etc.
+        # This now gives us the indices to the particles for each domain.
+        for i in range(positions.shape[0]):
+            # This value, poff, is the index of the particle in the *unsorted*
+            # arrays.
+            poff = pind[i] 
+            offset = pdoms[poff] 
+            # If we have yet to assign the starting index to this oct, we do so
+            # now.
+            if doff[offset] < 0: doff[offset] = i
+        # Now doff is full of offsets to the first entry in the pind that
+        # refers to that oct's particles.
+        ppos = <np.float64_t *> positions.data
+        doffs = <np.int64_t*> doff.data
+        pinds = <np.int64_t*> pind.data
+        pcounts = <np.int64_t*> pcount.data
+        nsize = 27
+        nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
+        for i in range(doff.shape[0]):
+            # Nothing assigned.
+            if doff[i] < 0: continue
+            # The first particle assigned to this oct should be the one we
+            # want.
+            poff = pind[doff[i]]
+            for j in range(3):
+                pos[j] = positions[poff, j]
+            oct = octree.get(pos, &oi)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            offset = dom_ind[oct.domain_ind - moff] * nz
+            neighbors = octree.neighbors(&oi, &nneighbors)
+            # Now we have all our neighbors.  And, we should be set for what
+            # else we need to do.
+            if nneighbors > nsize:
+                nind = <np.int64_t *> realloc(
+                    nind, sizeof(np.int64_t)*nneighbors)
+                nsize = nneighbors
+            for j in range(nneighbors):
+                nind[j] = neighbors[j].domain_ind - moff
+                for n in range(j):
+                    if nind[j] == nind[n]:
+                        nind[j] = -1
+                        break
+            # This is allocated by the neighbors function, so we deallocate it.
+            free(neighbors)
+            self.neighbor_process(dims, oi.left_edge, oi.dds,
+                         ppos, field_pointers, nneighbors, nind, doffs,
+                         pinds, pcounts, offset)
+        if nind != NULL:
+            free(nind)
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_grid(self, gobj,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None):
+        raise NotImplementedError
+
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        raise NotImplementedError
+
+    cdef void neighbor_reset(self):
+        self.curn = 0
+        for i in range(self.maxn):
+            self.neighbors[i].pn = -1
+            self.neighbors[i].r2 = 1e300
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        cdef NeighborList *cur
+        cdef int i
+        # _c means candidate (what we're evaluating)
+        # _o means other (the item in the list)
+        cdef np.float64_t r2_c, r2_o
+        cdef np.int64_t pn_c, pn_o
+        # If we're less than the maximum number of neighbors, we simply append.
+        # After that, we will sort, and then only compare against the rightmost
+        # entries.
+        if self.curn < self.maxn:
+            cur = &self.neighbors[self.curn]
+            cur.pn = pn
+            cur.r2 = r2dist(ppos, cpos, self.DW)
+            self.curn += 1
+            if self.curn == self.maxn:
+                # This time we sort it, so that future insertions will be able
+                # to be done in order.
+                qsort(self.neighbors, self.curn, sizeof(NeighborList), 
+                      Neighbor_compare)
+            return
+        # This will go (curn - 1) through 0.
+        r2_c = r2dist(ppos, cpos, self.DW)
+        pn_c = pn
+        for i in range((self.curn - 1), -1, -1):
+            # First we evaluate against i.  If our candidate radius is greater
+            # than the one we're inspecting, we quit.
+            cur = &self.neighbors[i]
+            r2_o = cur.r2
+            pn_o = cur.pn
+            if r2_c >= r2_o:
+                break
+            # Now we know we need to swap them.  First we assign our candidate
+            # values to cur.
+            cur.r2 = r2_c
+            cur.pn = pn_c
+            if i + 1 >= self.maxn:
+                continue # No swapping
+            cur = &self.neighbors[i + 1]
+            cur.r2 = r2_o
+            cur.pn = pn_o
+        # At this point, we've evaluated all the particles and we should have a
+        # sorted set of values.  So, we're done.
+
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3]
+                            ):
+        # We are now given the number of neighbors, the indices into the
+        # domains for them, and the number of particles for each.
+        cdef int ni, i, j
+        cdef np.int64_t offset, pn, pc
+        cdef np.float64_t pos[3]
+        self.neighbor_reset()
+        for ni in range(nneighbors):
+            if nind[ni] == -1: continue
+            offset = doffs[nind[ni]]
+            pc = pcounts[nind[ni]]
+            for i in range(pc):
+                pn = pinds[offset + i]
+                for j in range(3):
+                    pos[j] = ppos[pn * 3 + j]
+                self.neighbor_eval(pn, pos, cpos)
+
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset):
+        # Note that we assume that fields[0] == smoothing length in the native
+        # units supplied.  We can now iterate over every cell in the block and
+        # every particle to find the nearest, using the sorted neighbor list.
+        cdef int i, j, k
+        cdef np.float64_t cpos[3]
+        cpos[0] = left_edge[0] + 0.5*dds[0]
+        for i in range(dim[0]):
+            cpos[1] = left_edge[1] + 0.5*dds[1]
+            for j in range(dim[1]):
+                cpos[2] = left_edge[2] + 0.5*dds[2]
+                for k in range(dim[2]):
+                    self.neighbor_find(nneighbors, nind, doffs, pcounts,
+                        pinds, ppos, cpos)
+                    # Now we have all our neighbors in our neighbor list.
+                    self.process(offset, i, j, k, dim, cpos, fields)
+                    cpos[2] += dds[2]
+                cpos[1] += dds[1]
+            cpos[0] += dds[0]
+
+
+cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
+    cdef np.float64_t **fp
+    cdef public object vals
+    def initialize(self):
+        cdef int i
+        if self.nfields < 4:
+            # We need at least four fields (smoothing length, mass, density,
+            # and a field to smooth) to operate.
+            raise RuntimeError
+        cdef np.ndarray tarr
+        self.fp = <np.float64_t **> malloc(
+            sizeof(np.float64_t *) * self.nfields)
+        self.vals = []
+        for i in range(self.nfields):
+            tarr = np.zeros(self.nvals, dtype="float64", order="F")
+            self.vals.append(tarr)
+            self.fp[i] = <np.float64_t *> tarr.data
+
+    def finalize(self):
+        free(self.fp)
+        return self.vals
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef int n, fi
+        cdef np.float64_t weight, r2, val
+        cdef np.int64_t pn
+        for n in range(self.curn):
+            # No normalization for the moment.
+            # fields[0] is the smoothing length.
+            r2 = self.neighbors[n].r2
+            pn = self.neighbors[n].pn
+            # Smoothing kernel weight function
+            weight = sph_kernel(sqrt(r2) / fields[0][pn])
+            # Mass of the particle times the value divided by the Density
+            for fi in range(self.nfields - 3):
+                val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
+        return
+
+simple_neighbor_smooth = SimpleNeighborSmooth
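
The neighbor list maintained by neighbor_reset and neighbor_eval above behaves
like a bounded array of (r2, particle) pairs that is kept sorted once full.
A minimal pure-Python sketch of that behavior, assuming a non-periodic r2dist
(the real one also wraps through the domain width, self.DW); NeighborList here
is an illustrative stand-in, not the Cython struct:

    def r2dist(ppos, cpos):
        # Squared Euclidean distance; periodicity is omitted in this sketch.
        return sum((p - c) ** 2 for p, c in zip(ppos, cpos))

    class NeighborList:
        def __init__(self, maxn):
            self.maxn = maxn
            self.neighbors = []            # (r2, particle_number) pairs

        def evaluate(self, pn, ppos, cpos):
            r2_c = r2dist(ppos, cpos)
            if len(self.neighbors) < self.maxn:
                self.neighbors.append((r2_c, pn))
                if len(self.neighbors) == self.maxn:
                    # One-time sort, as in neighbor_eval's qsort call.
                    self.neighbors.sort()
                return
            if r2_c >= self.neighbors[-1][0]:
                return                     # farther than every kept neighbor
            # Shift entries right from the tail until the slot is found.
            i = len(self.neighbors) - 1
            while i > 0 and self.neighbors[i - 1][0] > r2_c:
                self.neighbors[i] = self.neighbors[i - 1]
                i -= 1
            self.neighbors[i] = (r2_c, pn)

Keeping the list sorted means each new candidate only shifts the tail entries,
which stays cheap once curn == maxn.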

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,6 +40,9 @@
                         oct_visitor_function *func,
                         OctVisitorData *data,
                         int visit_covered = ?)
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -157,16 +157,13 @@
 
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.last = -1
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
         return data.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
 
@@ -230,6 +227,10 @@
                         if root.children != NULL:
                             ch = root.children[cind(i, j, k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
+                            # Note that data.pos is always going to be the
+                            # position of the Oct -- it is *not* always going
+                            # to be the same as the position of the cell under
+                            # investigation.
                             data.pos[0] = (data.pos[0] << 1) + i
                             data.pos[1] = (data.pos[1] << 1) + j
                             data.pos[2] = (data.pos[2] << 1) + k
@@ -242,21 +243,60 @@
                             data.pos[2] = (data.pos[2] >> 1)
                             data.level -= 1
                         elif this_level == 1:
-                            selected = self.select_cell(spos, sdds)
-                            if ch != NULL:
-                                selected *= self.overlap_cells
                             data.global_index += increment
                             increment = 0
-                            data.ind[0] = i
-                            data.ind[1] = j
-                            data.ind[2] = k
-                            func(root, data, selected)
+                            self.visit_oct_cells(data, root, ch, spos, sdds,
+                                                 func, i, j, k)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
             this_level = 0 # We turn this off for the second pass.
             iter += 1
 
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k):
+        # We can short-circuit the whole process if data.oref == 1.
+        # This saves us some funny-business.
+        cdef int selected
+        if data.oref == 1:
+            selected = self.select_cell(spos, sdds)
+            if ch != NULL:
+                selected *= self.overlap_cells
+            # data.ind refers to the cell, not to the oct.
+            data.ind[0] = i
+            data.ind[1] = j
+            data.ind[2] = k
+            func(root, data, selected)
+            return
+        # Okay, now that we've got that out of the way, we have to do some
+        # other checks here.  In this case, spos[] is the position of the
+        # center of a *possible* oct child, which means it is the center of a
+        # cluster of cells.  That cluster might have 1, 8, 64, ... cells in it.
+        # But, we can figure it out by calculating the cell dds.
+        cdef np.float64_t dds[3], pos[3]
+        cdef int ci, cj, ck
+        cdef int nr = (1 << (data.oref - 1))
+        for ci in range(3):
+            dds[ci] = sdds[ci] / nr
+        # Bootstrap the position at the first index.
+        pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
+        for ci in range(nr):
+            pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
+            for cj in range(nr):
+                pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
+                for ck in range(nr):
+                    selected = self.select_cell(pos, dds)
+                    if ch != NULL:
+                        selected *= self.overlap_cells
+                    data.ind[0] = ci + i * nr
+                    data.ind[1] = cj + j * nr
+                    data.ind[2] = ck + k * nr
+                    func(root, data, selected)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
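
In the visit_oct_cells addition above, each child-sized position (spos, sdds)
covers an nr-by-nr-by-nr block of cells, with nr = 1 << (oref - 1).  A short
Python sketch of that enumeration; enumerate_cell_centers is an illustrative
helper, not part of yt:

    def enumerate_cell_centers(spos, sdds, oref):
        # Cells per side in the cluster; oref == 1 gives nr == 1, i.e. the
        # single-cell short-circuit case.
        nr = 1 << (oref - 1)
        dds = [s / nr for s in sdds]       # cell width
        for ci in range(nr):
            for cj in range(nr):
                for ck in range(nr):
                    yield [(spos[d] - sdds[d] / 2.0) + (idx + 0.5) * dds[d]
                           for d, idx in enumerate((ci, cj, ck))]

    # With oref == 1 the only center is spos itself:
    # list(enumerate_cell_centers([0.5] * 3, [1.0] * 3, 1)) == [[0.5, 0.5, 0.5]]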

diff -r 59edd14640f7b4ebffb36bd3a1bd9804e63e7b51 -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -43,6 +43,15 @@
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd",
                          "yt/geometry/particle_deposit.pxd"])
+    config.add_extension("particle_smooth", 
+                ["yt/geometry/particle_smooth.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd",
+                         "yt/geometry/particle_deposit.pxd",
+                         "yt/geometry/particle_smooth.pxd"])
     config.add_extension("fake_octree", 
                 ["yt/geometry/fake_octree.pyx"],
                 include_dirs=["yt/utilities/lib/"],


https://bitbucket.org/yt_analysis/yt/commits/ec8858ae1d54/
Changeset:   ec8858ae1d54
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-03 23:21:16
Summary:     Attempting to simplify SFC range construction, phase 1.
Affected #:  1 file

diff -r 829c7ab5fcfa65ae4eb017060074c6131c7f8b51 -r ec8858ae1d545e27ca5269728c69794781d4f6aa yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -8,6 +8,7 @@
 import sys 
 
 from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
+from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
 from yt.geometry.oct_visitors cimport \
@@ -550,6 +551,75 @@
         artio_fileset_close(handle) 
     return True
 
+cdef class ARTIOSFCRangeHandler:
+    cdef public np.int64_t sfc_start
+    cdef public np.int64_t sfc_end
+    cdef public artio_fileset artio_handle
+    cdef public object root_mesh_handler
+    cdef public object octree_handlers
+    cdef public object oct_count
+    cdef artio_fileset_handle *handle
+    cdef np.float64_t DLE[3]
+    cdef np.float64_t DRE[3]
+    cdef np.float64_t dds[3]
+    cdef np.int64_t dims[3]
+
+    def __init__(self, domain_dimensions, # cells
+                 domain_left_edge,
+                 domain_right_edge,
+                 artio_fileset artio_handle,
+                 sfc_start, sfc_end):
+        cdef int i
+        self.sfc_start = sfc_start
+        self.sfc_end = sfc_end
+        self.artio_handle = artio_handle
+        self.root_mesh_handler = None
+        self.octree_handlers = {}
+        self.handle = artio_handle.handle
+        self.oct_count = None
+        for i in range(3):
+            self.dims[i] = domain_dimensions[i]
+            self.DLE[i] = domain_left_edge[i]
+            self.DRE[i] = domain_right_edge[i]
+            self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
+
+    def construct_mesh(self):
+        cdef int status, level
+        cdef np.int64_t sfc, oc
+        cdef double dpos[3]
+        cdef int num_oct_levels
+        cdef int max_level = self.artio_handle.max_level
+        cdef int *num_octs_per_level = <int *>malloc(
+            (max_level + 1)*sizeof(int))
+        cdef ARTIOOctreeContainer octree
+        cdef np.ndarray[np.int64_t, ndim=1] oct_count
+        oct_count = np.zeros(self.sfc_end - self.sfc_start + 1, dtype="int64")
+        status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
+                                            self.sfc_end)
+        check_artio_status(status) 
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            status = artio_grid_read_root_cell_begin( self.handle,
+                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+            check_artio_status(status)
+            if num_oct_levels > 0:
+                oc = 0
+                for level in range(num_oct_levels):
+                    oc += num_octs_per_level[level]
+                oct_count[sfc - self.sfc_start] = oc
+                octree = ARTIOOctreeContainer(self.artio_handle, sfc, oc)
+                octree.initialize_mesh(oc, num_oct_levels, num_octs_per_level)
+                self.octree_handlers[sfc] = octree
+            status = artio_grid_read_root_cell_end( self.handle )
+            check_artio_status(status)
+        free(num_octs_per_level)
+        self.root_mesh_handler = ARTIORootMeshContainer(self)
+        self.oct_count = oct_count
+
+    def free_mesh(self):
+        self.octree_handlers.clear()
+        self.root_mesh_handler = None
+        self.oct_count = None
+
 def get_coords(artio_fileset handle, np.int64_t s):
     cdef int coords[3]
     artio_sfc_coords(handle.handle, s, coords)
@@ -575,7 +645,7 @@
     np.float64_t *pvars[16]
     np.float64_t *svars[16]
 
-    cdef class ARTIOOctreeContainer(SparseOctreeContainer):
+cdef class ARTIOOctreeContainer(SparseOctreeContainer):
     # This is a transitory, created-on-demand OctreeContainer.  It should not
     # be considered to be long-lasting, and during its creation it will read
     # the index file.  This means that when created it will then be able to
@@ -583,141 +653,80 @@
     # the file again, despite knowing the indexing system already.  Because of
     # this, we will avoid creating it as long as possible.
 
-    cdef public np.int64_t sfc_start
-    cdef public np.int64_t sfc_end
+    cdef public np.int64_t sfc
+    cdef public np.int64_t sfc_offset
     cdef public artio_fileset artio_handle
     cdef Oct **root_octs
     cdef np.int64_t *level_indices
 
-    def __init__(self, oct_dimensions, domain_left_edge, domain_right_edge,
-                 int64_t sfc_start, int64_t sfc_end, artio_fileset artio_handle):
-        self.artio_handle = artio_handle
-        self.sfc_start = sfc_start
-        self.sfc_end = sfc_end
+    def __init__(self, ARTIOSFCRangeHandler range_handler, np.int64_t sfc):
+        self.artio_handle = range_handler.artio_handle
+        self.sfc = sfc
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
-        super(ARTIOOctreeContainer, self).__init__(oct_dimensions,
-            domain_left_edge, domain_right_edge)
+        dims, DLE, DRE = [], [], []
+        for i in range(3):
+            dims.append(range_handler.dims[i])
+            DLE.append(range_handler.DLE[i])
+            DRE.append(range_handler.DRE[i])
+        super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
+        self.artio_handle = range_handler.artio_handle
+        self.sfc_offset = range_handler.sfc_start
         self.level_indices = NULL
-        self._initialize_root_mesh()
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def _initialize_root_mesh(self):
+    cdef void initialize_mesh(self, np.int64_t oct_count,
+                              int num_oct_levels, int *num_octs_per_level):
         # We actually will not be initializing the root mesh here, we will be
         # initializing the entire mesh between sfc_start and sfc_end.
         cdef np.int64_t oct_ind, sfc, tot_octs, ipos
-        cdef int i, status, level, num_oct_levels, num_root, num_octs
+        cdef int i, status, level, num_root, num_octs
         cdef int num_level_octs
         cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef int coords[3]
         cdef int max_level = self.artio_handle.max_level
-        cdef double *dpos
-        cdef double rpos[3]
-        cdef int *olevel
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
+        cdef double dpos[3]
+        cdef np.float64_t f64pos[3]
         self.level_indices = <np.int64_t *>malloc(
-            (max_level + 1)*sizeof(np.int64_t))
-        for i in range(max_level+1):
-            self.level_indices[i] = 0
-        cdef np.float64_t dds[3]
+            num_oct_levels*sizeof(np.int64_t))
+        # NOTE: We do not cache any SFC ranges here, as we should only ever be
+        # called from within a pre-cached operation in the SFC handler.
+
+        # We only allow one root oct.
+        self.allocate_domains([1, oct_count], 1)
+        pos = np.empty((1, 3), dtype="float64")
+        self.range_handler.sfc
+        artio_sfc_coords(self.artio_handle.handle, self.sfc, coords)
         for i in range(3):
-            dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            pos[0, i] = self.DLE[i] + (coords[i] + 0.5) * self.dds[i]
+        self.add(1, 0, pos)
 
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status)
+        # Now we set up our position array and our level_indices.
+        ipos = 0
+        oct_ind = -1
+        for level in range(num_oct_levels):
+            self.level_indices[level] = ipos
+            ipos += num_octs_per_level[level]
+            oct_ind = imax(oct_ind, num_octs_per_level[level])
+        pos = np.empty((oct_ind, 3), dtype="float64")
 
-        # compute total octs in sfc range (not including root level)
-        status = artio_grid_count_octs_in_sfc_range(handle,self.sfc_start,self.sfc_end,&tot_octs)
-        check_artio_status(status)
-
-        # now determine the number of root octs we touch
-        root_octs = {}
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            artio_sfc_coords(handle, sfc, coords)
-            for i in range(3):
-                coords[i] = <int> (coords[i]/2)
-            ipos = (coords[0]*self.nn[1]+coords[1])*self.nn[2]+coords[2]
-            root_octs[ipos] = 1
-        num_root = len(root_octs)
-
-        self.allocate_domains([num_root, tot_octs], num_root)
-        pos = np.empty((num_root, 3), dtype="float64")
-
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            artio_sfc_coords(handle, sfc, coords)
-            for i in range(3):
-                coords[i] = <int> (coords[i]/2)
-            ipos = (coords[0]*self.nn[1]+coords[1])*self.nn[2]+coords[2]
-            if root_octs[ipos] == 1:
+        # Now we initialize
+        num_octs = 0
+        # Note that we also assume we have already started reading the level.
+        for level in range(1, num_oct_levels+1):
+            status = artio_grid_read_level_begin(handle, level)
+            check_artio_status(status)
+            for oct_ind in range(num_octs_per_level[level - 1]):
+                status = artio_grid_read_oct(handle, dpos, NULL, NULL)
                 for i in range(3):
-                    pos[self.level_indices[0], i] = \
-                            self.DLE[i] + (coords[i]+0.5)*dds[i]
-                self.level_indices[0] += 1
-                root_octs[ipos] = 0
-        del root_octs
-
-        # add all root octs
-        self.add(1, 0, pos)
-        del pos
-
-        # now scan through grid file to load oct positions
-        if tot_octs > 0:
-            dpos = <double *>malloc(3*tot_octs*sizeof(double))
-            olevel = <int *>malloc(tot_octs*sizeof(int))
-
-            num_octs = 0
-            for sfc in range(self.sfc_start, self.sfc_end + 1):
-                status = artio_grid_read_root_cell_begin( handle, sfc,
-                    rpos, NULL, &num_oct_levels, num_octs_per_level)
+                    pos[oct_ind, i] = dpos[i]
                 check_artio_status(status)
-                for level in range(1, num_oct_levels+1):
-                    self.level_indices[level] += num_octs_per_level[level - 1]
-                    status = artio_grid_read_level_begin(handle, level)
-                    check_artio_status(status)
-                    for oct_ind in range(num_octs_per_level[level - 1]):
-                        status = artio_grid_read_oct(handle, &dpos[3*num_octs], NULL, NULL)
-                        check_artio_status(status)
-                        olevel[num_octs] = level
-                        num_octs += 1
-                    status = artio_grid_read_level_end(handle)
-                    check_artio_status(status)
-                status = artio_grid_read_root_cell_end(handle)
-                check_artio_status(status)
-
-            num_level_octs = 0
-            for level in range(1, max_level+1):
-                if self.level_indices[level] > num_level_octs: 
-                    num_level_octs = self.level_indices[level]
-            pos = np.empty((num_level_octs, 3), dtype="float64")
-            for level in range(1, max_level+1):
-                if self.level_indices[level] == 0: continue
-                num_level_octs = 0
-                for oct_ind in range(num_octs):
-                    if olevel[oct_ind] == level:
-                        for i in range(3):
-                            pos[num_level_octs,i] = dpos[3*oct_ind+i]
-                        num_level_octs += 1
-                assert(num_level_octs == self.level_indices[level])
-                num_level_octs = self.add( 2, level, pos[:num_level_octs, :])
-                if num_level_octs != self.level_indices[level]:
-                    print self.sfc_start, self.sfc_end
-                    print level, self.level_indices[level], num_level_octs
-                    raise RuntimeError
-     
-            free(olevel)
-            free(dpos)
-        free(num_octs_per_level)
+            status = artio_grid_read_level_end(handle)
+            check_artio_status(status)
+            self.add(2, level, pos[:num_octs_per_level[level - 1]])
  
-        num_octs = 0
-        for level in range(max_level + 1):
-            num_level_octs = self.level_indices[level]
-            self.level_indices[level] = num_octs
-            num_octs += num_level_octs
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -988,24 +997,21 @@
     cdef np.uint64_t sfc_end
     cdef public object _last_mask
     cdef public object _last_selector_id
+    cdef ARTIOSFCRangeHandler range_handler
 
-    def __init__(self, domain_dimensions, # cells
-                 domain_left_edge,
-                 domain_right_edge,
-                 artio_fileset artio_handle,
-                 sfc_start, sfc_end):
-        self._last_selector_id = None
-        self._last_mask = None
-        self.artio_handle = artio_handle
-        self.handle = artio_handle.handle
+    def __init__(self, ARTIOSFCRangeHandler range_handler):
         cdef int i
         for i in range(3):
-            self.dims[i] = domain_dimensions[i]
-            self.DLE[i] = domain_left_edge[i]
-            self.DRE[i] = domain_right_edge[i]
-            self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
-        self.sfc_start = sfc_start
-        self.sfc_end = sfc_end
+            self.DLE[i] = range_handler.DLE[i]
+            self.DRE[i] = range_handler.DRE[i]
+            self.dims[i] = range_handler.dims[i]
+            self.dds[i] = range_handler.dds[i]
+        self.handle = range_handler.handle
+        self.artio_handle = range_handler.artio_handle
+        self._last_mask = self._last_selector_id = None
+        self.sfc_start = range_handler.sfc_start
+        self.sfc_end = range_handler.sfc_end
+        self.range_handler = range_handler
 
     @cython.cdivision(True)
     cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
@@ -1149,18 +1155,8 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def mask(self, SelectorObject selector, np.int64_t num_octs = -1):
-        if self._last_selector_id == hash(selector):
-            return self._last_mask
-        else:
-            return self.mask2(selector,num_octs)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def mask2(self, SelectorObject selector, np.int64_t num_cells = -1):
-        cdef int i, status
-        cdef double dpos[3]
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
+        cdef int i
         cdef np.float64_t pos[3]
         cdef np.int64_t sfc
         if self._last_selector_id == hash(selector):
@@ -1169,37 +1165,12 @@
             # We need to count, but this process will only occur one time,
             # since num_cells will later be cached.
             num_cells = self.sfc_end - self.sfc_start + 1
-        cdef np.ndarray[np.uint8_t, ndim=1] mask
-        cdef int num_oct_levels
-        cdef int max_level = self.artio_handle.max_level
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
         mask = np.zeros((num_cells), dtype="uint8")
-        status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
-                                            self.sfc_end)
-        check_artio_status(status) 
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            # We check if the SFC is in our selector, and if so, we copy
-            # Note that because we initialize to zeros, we can just continue if
-            # it's not included.
-            #self.sfc_to_pos(sfc, pos)
-            #if selector.select_cell(pos, self.dds) == 0: continue
-            # Now we just need to check if the cells are refined.
-            status = artio_grid_read_root_cell_begin( self.handle,
-                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
-            check_artio_status(status)
-            status = artio_grid_read_root_cell_end( self.handle )
-            check_artio_status(status)
-            # If refined, we skip
-            if num_oct_levels > 0: continue
-            # check selector
-            for i in range(3):
-                pos[i] = dpos[i]
+            if self.range_handler.oct_count[sfc - self.sfc_start] > 0: continue
+            self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
             mask[sfc - self.sfc_start] = 1
-        #status = artio_grid_clear_sfc_cache(self.handle)
-        #check_artio_status(status)
-        free(num_octs_per_level)
         self._last_mask = mask.astype("bool")
         self._last_selector_id = hash(selector)
         return self._last_mask
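
Conceptually, construct_mesh above makes one pass over the SFC range,
recording a total oct count per root cell and building one octree handler per
refined cell.  A hedged sketch with the artio_grid_read_* calls abstracted
behind a hypothetical iter_root_cells callable:

    def construct_mesh(sfc_start, sfc_end, iter_root_cells, make_octree):
        # iter_root_cells yields (sfc, num_octs_per_level) per root cell.
        oct_count = [0] * (sfc_end - sfc_start + 1)
        octree_handlers = {}
        for sfc, num_octs_per_level in iter_root_cells(sfc_start, sfc_end):
            oc = sum(num_octs_per_level)   # total octs below this root cell
            if oc > 0:
                oct_count[sfc - sfc_start] = oc
                # Mirrors ARTIOOctreeContainer(self, sfc) + initialize_mesh.
                octree_handlers[sfc] = make_octree(sfc, oc,
                                                   num_octs_per_level)
        return oct_count, octree_handlers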


https://bitbucket.org/yt_analysis/yt/commits/5a8ce45e8827/
Changeset:   5a8ce45e8827
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 16:05:39
Summary:     Continuing refactor.  Moving toward fill_sfc working.
Affected #:  2 files

diff -r ec8858ae1d545e27ca5269728c69794781d4f6aa -r 5a8ce45e8827967b3b6e63bb33bdec3d7ed9cc75 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -606,7 +606,7 @@
                 for level in range(num_oct_levels):
                     oc += num_octs_per_level[level]
                 oct_count[sfc - self.sfc_start] = oc
-                octree = ARTIOOctreeContainer(self.artio_handle, sfc, oc)
+                octree = ARTIOOctreeContainer(self, sfc)
                 octree.initialize_mesh(oc, num_oct_levels, num_octs_per_level)
                 self.octree_handlers[sfc] = octree
             status = artio_grid_read_root_cell_end( self.handle )
@@ -657,7 +657,6 @@
     cdef public np.int64_t sfc_offset
     cdef public artio_fileset artio_handle
     cdef Oct **root_octs
-    cdef np.int64_t *level_indices
 
     def __init__(self, ARTIOSFCRangeHandler range_handler, np.int64_t sfc):
         self.artio_handle = range_handler.artio_handle
@@ -666,17 +665,16 @@
         # or not an Oct can be partially refined.
         dims, DLE, DRE = [], [], []
         for i in range(3):
-            dims.append(range_handler.dims[i])
+            dims.append(range_handler.dims[i]/2)
             DLE.append(range_handler.DLE[i])
             DRE.append(range_handler.DRE[i])
         super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
         self.artio_handle = range_handler.artio_handle
         self.sfc_offset = range_handler.sfc_start
-        self.level_indices = NULL
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
+    #@cython.boundscheck(False)
+    #@cython.wraparound(False)
+    #@cython.cdivision(True)
     cdef void initialize_mesh(self, np.int64_t oct_count,
                               int num_oct_levels, int *num_octs_per_level):
         # We actually will not be initializing the root mesh here, we will be
@@ -688,28 +686,22 @@
         cdef int coords[3]
         cdef int max_level = self.artio_handle.max_level
         cdef double dpos[3]
-        cdef np.float64_t f64pos[3]
-        self.level_indices = <np.int64_t *>malloc(
-            num_oct_levels*sizeof(np.int64_t))
+        cdef np.float64_t f64pos[3], dds[3]
         # NOTE: We do not cache any SFC ranges here, as we should only ever be
         # called from within a pre-cached operation in the SFC handler.
 
         # We only allow one root oct.
         self.allocate_domains([1, oct_count], 1)
         pos = np.empty((1, 3), dtype="float64")
-        self.range_handler.sfc
         artio_sfc_coords(self.artio_handle.handle, self.sfc, coords)
         for i in range(3):
-            pos[0, i] = self.DLE[i] + (coords[i] + 0.5) * self.dds[i]
+            dds[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i]*2)
+            pos[0, i] = self.DLE[i] + (coords[i] + 0.5) * dds[i]
         self.add(1, 0, pos)
 
-        # Now we set up our position array and our level_indices.
-        ipos = 0
         oct_ind = -1
-        for level in range(num_oct_levels):
-            self.level_indices[level] = ipos
-            ipos += num_octs_per_level[level]
-            oct_ind = imax(oct_ind, num_octs_per_level[level])
+        for level in range(1, num_oct_levels+1):
+            oct_ind = imax(oct_ind, num_octs_per_level[level - 1])
         pos = np.empty((oct_ind, 3), dtype="float64")
 
         # Now we initialize
@@ -725,7 +717,7 @@
                 check_artio_status(status)
             status = artio_grid_read_level_end(handle)
             check_artio_status(status)
-            self.add(2, level, pos[:num_octs_per_level[level - 1]])
+            self.add(2, level, pos[:num_octs_per_level[level - 1],:])
  
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -755,45 +747,32 @@
             nf * sizeof(int))
         cdef np.float32_t **field_vals = <np.float32_t**> malloc(
             nf * sizeof(np.float32_t*))
-        cdef np.int64_t *local_ind = <np.int64_t *> malloc(
-            (max_level + 1) * sizeof(np.int64_t))
-        for i in range(max_level + 1):
-            # This will help us keep track of where we are in the flattened
-            # array, which will be indexed by file_ind.
-            local_ind[i] = self.level_indices[i]
         source_arrays = []
         for i in range(nf):
             field_ind[i] = field_indices[i]
-            # This zeros should be an empty once we handle the root grid
-            source = np.zeros((self.nocts, 8), dtype="float32")
+            # Note that we subtract one, because we're not using the root mesh.
+            source = np.zeros((self.nocts - 1, 8), dtype="float32")
             source_arrays.append(source)
             field_vals[i] = <np.float32_t*> source.data
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
+        status = artio_grid_read_root_cell_begin( handle, self.sfc, 
+                dpos, NULL, &num_oct_levels, num_octs_per_level)
         check_artio_status(status) 
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, NULL, &num_oct_levels, num_octs_per_level)
+        for level in range(1, num_oct_levels+1):
+            status = artio_grid_read_level_begin(handle, level)
             check_artio_status(status) 
-            for level in range(1, num_oct_levels+1):
-                status = artio_grid_read_level_begin(handle, level)
-                check_artio_status(status) 
-                for oct_ind in range(num_octs_per_level[level - 1]):
-                    status = artio_grid_read_oct(handle, dpos, grid_variables, NULL)
-                    check_artio_status(status)
-                    for j in range(8):
-                        for i in range(nf):
-                            field_vals[i][local_ind[level] * 8 + j] = \
-                                grid_variables[ngv * j + i]
-                    local_ind[level] += 1
-                status = artio_grid_read_level_end(handle)
+            for oct_ind in range(num_octs_per_level[level - 1]):
+                status = artio_grid_read_oct(handle, dpos, grid_variables, NULL)
                 check_artio_status(status)
-            status = artio_grid_read_root_cell_end( handle )
+                for j in range(8):
+                    for i in range(nf):
+                        field_vals[i][oct_ind*8+j] = grid_variables[ngv*j+i]
+            status = artio_grid_read_level_end(handle)
             check_artio_status(status)
+        status = artio_grid_read_root_cell_end( handle )
+        check_artio_status(status)
         # Now we have all our sources.
-        artio_grid_clear_sfc_cache(handle)
         for j in range(nf):
             dest = dest_fields[j]
             source = source_arrays[j]
@@ -803,7 +782,6 @@
                 dest[i] = source[file_inds[i] + oct_ind, cell_inds[i]]
         free(field_ind)
         free(field_vals)
-        free(local_ind)
         free(grid_variables)
         free(num_octs_per_level)
 

diff -r ec8858ae1d545e27ca5269728c69794781d4f6aa -r 5a8ce45e8827967b3b6e63bb33bdec3d7ed9cc75 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -30,7 +30,7 @@
 from .definitions import yt_to_art, art_to_yt, ARTIOconstants
 from _artio_caller import \
     artio_is_valid, artio_fileset, ARTIOOctreeContainer, \
-    ARTIORootMeshContainer
+    ARTIORootMeshContainer, ARTIOSFCRangeHandler
 import _artio_caller
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
@@ -50,6 +50,8 @@
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 
+
+
 class ARTIOOctreeSubset(OctreeSubset):
     _domain_offset = 0
     domain_id = 2
@@ -57,11 +59,11 @@
     _type_name = 'octree_subset'
     _num_zones = 2
 
-    def __init__(self, base_region, sfc_start, sfc_end, pf):
+    def __init__(self, base_region, sfc, root_mesh, pf):
         self.field_data = YTFieldData()
         self.field_parameters = {}
-        self.sfc_start = sfc_start
-        self.sfc_end = sfc_end
+        self.sfc = sfc
+        self.root_mesh = root_mesh
         self.pf = pf
         self.hierarchy = self.pf.hierarchy
         self._last_mask = None
@@ -70,17 +72,7 @@
         self._current_fluid_type = self.pf.default_fluid_type
         self.base_region = base_region
         self.base_selector = base_region.selector
-
-    _oct_handler = None
-
-    @property
-    def oct_handler(self):
-        if self._oct_handler is None: 
-            self._oct_handler = ARTIOOctreeContainer(
-                self.pf.domain_dimensions/2, # Octs, not cells
-                self.pf.domain_left_edge, self.pf.domain_right_edge,
-                self.sfc_start, self.sfc_end, self.pf._handle)
-        return self._oct_handler
+        self.oct_handler = root_mesh.octree_handlers[sfc]
 
     @property
     def min_ind(self):
@@ -142,15 +134,20 @@
     _selector_module = _artio_caller
     domain_id = 1
 
-    @property
-    def oct_handler(self):
-        if self._oct_handler is None: 
-            self._oct_handler = ARTIORootMeshContainer(
-                self.pf.domain_dimensions, # Cells, not octs
-                self.pf.domain_left_edge, self.pf.domain_right_edge,
-                self.pf._handle,
-                self.sfc_start, self.sfc_end)
-        return self._oct_handler
+    def __init__(self, base_region, sfc_start, sfc_end, oct_handler, pf):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.sfc_start = sfc_start
+        self.sfc_end = sfc_end
+        self.oct_handler = oct_handler
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
 
     def fill(self, fields, selector):
         # We know how big these will be.
@@ -356,12 +353,17 @@
                 list_sfc_ranges = self.pf._handle.root_sfc_ranges(
                     dobj.selector)
             ci = []
-            if domain != 2:
-                ci += [ARTIORootMeshSubset(base_region, start, end, self.pf)
-                        for (start, end) in list_sfc_ranges]
-            if domain != 1:
-                ci += [ARTIOOctreeSubset(base_region, start, end, self.pf)
-                       for (start, end) in list_sfc_ranges]
+            for (start, end) in list_sfc_ranges:
+                range_handler = ARTIOSFCRangeHandler(
+                    self.pf.domain_dimensions,
+                    self.pf.domain_left_edge, self.pf.domain_right_edge,
+                    self.pf._handle, start, end)
+                range_handler.construct_mesh()
+                ci.append(ARTIORootMeshSubset(base_region, start, end,
+                            range_handler.root_mesh_handler, self.pf))
+                for sfc in sorted(range_handler.octree_handlers):
+                    ci.append(ARTIOOctreeSubset(base_region, sfc,
+                      range_handler, self.pf))
             dobj._chunk_info = ci
             if len(list_sfc_ranges) > 1:
                 mylog.info("Created %d chunks for ARTIO" % len(list_sfc_ranges))


https://bitbucket.org/yt_analysis/yt/commits/cbb3151ddfa6/
Changeset:   cbb3151ddfa6
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 19:54:12
Summary:     Removing some Cython items and fixing the forest of octs info.
Affected #:  2 files

diff -r 5a8ce45e8827967b3b6e63bb33bdec3d7ed9cc75 -r cbb3151ddfa636f0e8c1c199cef9c566b01b64ba yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -657,6 +657,8 @@
     cdef public np.int64_t sfc_offset
     cdef public artio_fileset artio_handle
     cdef Oct **root_octs
+    cdef np.int64_t level_indices[32]
+    cdef np.int64_t oct_count[32]
 
     def __init__(self, ARTIOSFCRangeHandler range_handler, np.int64_t sfc):
         self.artio_handle = range_handler.artio_handle
@@ -664,8 +666,13 @@
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
         dims, DLE, DRE = [], [], []
+        for i in range(32):
+            self.level_indices[i] = 0
+            self.oct_count[i] = 0
         for i in range(3):
-            dims.append(range_handler.dims[i]/2)
+            # range_handler has dims in cells, which is the same as the number
+            # of possible octs.  This is because we have a forest of octrees.
+            dims.append(range_handler.dims[i])
             DLE.append(range_handler.DLE[i])
             DRE.append(range_handler.DRE[i])
         super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
@@ -691,37 +698,34 @@
         # called from within a pre-cached operation in the SFC handler.
 
         # We only allow one root oct.
-        self.allocate_domains([1, oct_count], 1)
+        self.allocate_domains([oct_count], 1)
         pos = np.empty((1, 3), dtype="float64")
-        artio_sfc_coords(self.artio_handle.handle, self.sfc, coords)
-        for i in range(3):
-            dds[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i]*2)
-            pos[0, i] = self.DLE[i] + (coords[i] + 0.5) * dds[i]
-        self.add(1, 0, pos)
 
         oct_ind = -1
+        ipos = 0
         for level in range(1, num_oct_levels+1):
             oct_ind = imax(oct_ind, num_octs_per_level[level - 1])
+            self.level_indices[level] = ipos
+            ipos += num_octs_per_level[level - 1]
         pos = np.empty((oct_ind, 3), dtype="float64")
 
         # Now we initialize
-        num_octs = 0
         # Note that we also assume we have already started reading the level.
-        for level in range(1, num_oct_levels+1):
-            status = artio_grid_read_level_begin(handle, level)
+        for level in range(num_oct_levels):
+            status = artio_grid_read_level_begin(handle, level + 1)
             check_artio_status(status)
-            for oct_ind in range(num_octs_per_level[level - 1]):
+            for oct_ind in range(num_octs_per_level[level]):
                 status = artio_grid_read_oct(handle, dpos, NULL, NULL)
                 for i in range(3):
                     pos[oct_ind, i] = dpos[i]
                 check_artio_status(status)
             status = artio_grid_read_level_end(handle)
             check_artio_status(status)
-            self.add(2, level, pos[:num_octs_per_level[level - 1],:])
+            nadded = self.add(1, level, pos[:num_octs_per_level[level],:])
  
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
+    #@cython.boundscheck(False)
+    #@cython.wraparound(False)
+    #@cython.cdivision(True)
     def fill_sfc(self, 
                  np.ndarray[np.uint8_t, ndim=1] levels,
                  np.ndarray[np.uint8_t, ndim=1] cell_inds,
@@ -730,7 +734,7 @@
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
-        cdef np.int64_t sfc
+        cdef np.int64_t sfc, ipos
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef double dpos[3]
@@ -751,7 +755,7 @@
         for i in range(nf):
             field_ind[i] = field_indices[i]
             # Note that we subtract one, because we're not using the root mesh.
-            source = np.zeros((self.nocts - 1, 8), dtype="float32")
+            source = np.zeros((self.nocts, 8), dtype="float32")
             source_arrays.append(source)
             field_vals[i] = <np.float32_t*> source.data
         # First we need to walk the mesh in the file.  Then we fill in the dest
@@ -759,6 +763,7 @@
         status = artio_grid_read_root_cell_begin( handle, self.sfc, 
                 dpos, NULL, &num_oct_levels, num_octs_per_level)
         check_artio_status(status) 
+        ipos = 0
         for level in range(1, num_oct_levels+1):
             status = artio_grid_read_level_begin(handle, level)
             check_artio_status(status) 
@@ -767,7 +772,8 @@
                 check_artio_status(status)
                 for j in range(8):
                     for i in range(nf):
-                        field_vals[i][oct_ind*8+j] = grid_variables[ngv*j+i]
+                        field_vals[i][ipos*8+j] = grid_variables[ngv*j+i]
+                ipos += 1
             status = artio_grid_read_level_end(handle)
             check_artio_status(status)
         status = artio_grid_read_root_cell_end( handle )
@@ -778,8 +784,8 @@
             source = source_arrays[j]
             for i in range(levels.shape[0]):
                 if levels[i] == 0: continue
-                oct_ind = self.level_indices[levels[i]]
-                dest[i] = source[file_inds[i] + oct_ind, cell_inds[i]]
+                oct_ind = self.level_indices[levels[i] - 1]
+                dest[i] = source[file_inds[i], cell_inds[i]]
         free(field_ind)
         free(field_vals)
         free(grid_variables)

diff -r 5a8ce45e8827967b3b6e63bb33bdec3d7ed9cc75 -r cbb3151ddfa636f0e8c1c199cef9c566b01b64ba yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -363,7 +363,7 @@
                             range_handler.root_mesh_handler, self.pf))
                 for sfc in sorted(range_handler.octree_handlers):
                     ci.append(ARTIOOctreeSubset(base_region, sfc,
-                      range_handler, self.pf))
+                    range_handler, self.pf))
             dobj._chunk_info = ci
             if len(list_sfc_ranges) > 1:
                 mylog.info("Created %d chunks for ARTIO" % len(list_sfc_ranges))


https://bitbucket.org/yt_analysis/yt/commits/51c59380d126/
Changeset:   51c59380d126
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 20:00:22
Summary:     Volume metrics are now correct.
Affected #:  1 file

diff -r cbb3151ddfa636f0e8c1c199cef9c566b01b64ba -r 51c59380d1261fb59ca69069eb08421791f57ea9 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -54,7 +54,7 @@
 
 class ARTIOOctreeSubset(OctreeSubset):
     _domain_offset = 0
-    domain_id = 2
+    domain_id = 1
     _con_args = ("base_region", "sfc_start", "sfc_end", "pf")
     _type_name = 'octree_subset'
     _num_zones = 2


https://bitbucket.org/yt_analysis/yt/commits/f8d1c6dedbc4/
Changeset:   f8d1c6dedbc4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 21:12:45
Summary:     A few more ordering fixes; not quite there yet.
Affected #:  1 file

diff -r 51c59380d1261fb59ca69069eb08421791f57ea9 -r f8d1c6dedbc46aba22077086878d7aeb4681112c yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -763,17 +763,17 @@
         status = artio_grid_read_root_cell_begin( handle, self.sfc, 
                 dpos, NULL, &num_oct_levels, num_octs_per_level)
         check_artio_status(status) 
-        ipos = 0
-        for level in range(1, num_oct_levels+1):
-            status = artio_grid_read_level_begin(handle, level)
+        for level in range(num_oct_levels):
+            status = artio_grid_read_level_begin(handle, level + 1)
             check_artio_status(status) 
-            for oct_ind in range(num_octs_per_level[level - 1]):
+            ipos = self.level_indices[level]
+            for oct_ind in range(num_octs_per_level[level]):
                 status = artio_grid_read_oct(handle, dpos, grid_variables, NULL)
                 check_artio_status(status)
                 for j in range(8):
                     for i in range(nf):
-                        field_vals[i][ipos*8+j] = grid_variables[ngv*j+i]
-                ipos += 1
+                        field_vals[i][(ipos+oct_ind)*8+j] = \
+                            grid_variables[ngv*j+field_ind[i]]
             status = artio_grid_read_level_end(handle)
             check_artio_status(status)
         status = artio_grid_read_root_cell_end( handle )
@@ -783,9 +783,9 @@
             dest = dest_fields[j]
             source = source_arrays[j]
             for i in range(levels.shape[0]):
-                if levels[i] == 0: continue
-                oct_ind = self.level_indices[levels[i] - 1]
-                dest[i] = source[file_inds[i], cell_inds[i]]
+                level = levels[i]
+                oct_ind = self.level_indices[level]
+                dest[i] = source[file_inds[i] + oct_ind, cell_inds[i]]
         free(field_ind)
         free(field_vals)
         free(grid_variables)


https://bitbucket.org/yt_analysis/yt/commits/a1539bc332ac/
Changeset:   a1539bc332ac
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 21:22:15
Summary:     Fix off-by-one, and correct ires.

Note that the change to ires may need to be reverted, once the ForestOfOctrees
system has been created and unified.
Affected #:  3 files

diff -r f8d1c6dedbc46aba22077086878d7aeb4681112c -r a1539bc332acd405f038e43b2af124495001c0ca yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -678,6 +678,7 @@
         super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
         self.artio_handle = range_handler.artio_handle
         self.sfc_offset = range_handler.sfc_start
+        self.level_offset = 1
 
     #@cython.boundscheck(False)
     #@cython.wraparound(False)
@@ -703,10 +704,10 @@
 
         oct_ind = -1
         ipos = 0
-        for level in range(1, num_oct_levels+1):
-            oct_ind = imax(oct_ind, num_octs_per_level[level - 1])
+        for level in range(num_oct_levels):
+            oct_ind = imax(oct_ind, num_octs_per_level[level])
             self.level_indices[level] = ipos
-            ipos += num_octs_per_level[level - 1]
+            ipos += num_octs_per_level[level]
         pos = np.empty((oct_ind, 3), dtype="float64")
 
         # Now we initialize

diff -r f8d1c6dedbc46aba22077086878d7aeb4681112c -r a1539bc332acd405f038e43b2af124495001c0ca yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -67,6 +67,7 @@
     cdef Oct ****root_mesh
     cdef oct_visitor_function *fill_func
     cdef int partial_coverage
+    cdef int level_offset
     cdef int nn[3]
     cdef np.uint8_t oref
     cdef np.float64_t DLE[3], DRE[3]

diff -r f8d1c6dedbc46aba22077086878d7aeb4681112c -r a1539bc332acd405f038e43b2af124495001c0ca yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -105,6 +105,7 @@
         for i in range(3):
             self.nn[i] = oct_domain_dimensions[i]
         self.max_domain = -1
+        self.level_offset = 0
         p = 0
         self.nocts = 0 # Increment when initialized
         for i in range(3):
@@ -382,6 +383,7 @@
     @cython.cdivision(True)
     def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
+        cdef int i
         if num_cells == -1:
             num_cells = selector.count_oct_cells(self, domain_id)
         cdef OctVisitorData data
@@ -391,6 +393,9 @@
         res = np.empty(num_cells, dtype="int64")
         data.array = <void *> res.data
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
+        if self.level_offset > 0:
+            for i in range(num_cells):
+                res[i] += self.level_offset
         return res
 
     @cython.boundscheck(False)
@@ -664,6 +669,7 @@
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
         self.max_domain = -1
+        self.level_offset = 0
         self.nocts = 0 # Increment when initialized
         self.root_mesh = NULL
         self.root_nodes = NULL
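
The new level_offset field lets a transitory per-SFC octree report refinement
levels relative to the full hierarchy: ires simply adds the offset to every
value it returns, with the ARTIO octrees setting level_offset = 1.  A sketch,
assuming plain NumPy in place of the oct-visitor machinery:

    import numpy as np

    def ires(levels, level_offset=0):
        # levels as gathered by ires_octs; a positive offset shifts local
        # octree levels to absolute simulation levels.
        res = np.asarray(levels, dtype="int64")
        if level_offset > 0:
            res = res + level_offset
        return res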


https://bitbucket.org/yt_analysis/yt/commits/8b5cb33c1e3e/
Changeset:   8b5cb33c1e3e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 22:57:33
Summary:     Fixing _con_args and fixing spatial data selection.
Affected #:  1 file

diff -r a1539bc332acd405f038e43b2af124495001c0ca -r 8b5cb33c1e3e0303dc816e305f48aa02308afca5 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,14 +55,14 @@
 class ARTIOOctreeSubset(OctreeSubset):
     _domain_offset = 0
     domain_id = 1
-    _con_args = ("base_region", "sfc_start", "sfc_end", "pf")
+    _con_args = ("base_region", "sfc", "root_mesh", "pf")
     _type_name = 'octree_subset'
     _num_zones = 2
 
     def __init__(self, base_region, sfc, root_mesh, pf):
         self.field_data = YTFieldData()
         self.field_parameters = {}
-        self.sfc = sfc
+        self.sfc = self.sfc_start = self.sfc_end = sfc
         self.root_mesh = root_mesh
         self.pf = pf
         self.hierarchy = self.pf.hierarchy
@@ -130,6 +130,7 @@
 # only manage the root mesh.
 class ARTIORootMeshSubset(ARTIOOctreeSubset):
     _num_zones = 1
+    _con_args = ("base_region", "sfc_start", "sfc_end", "oct_handler", "pf")
     _type_name = 'sfc_subset'
     _selector_module = _artio_caller
     domain_id = 1
@@ -341,7 +342,7 @@
             base_region = getattr(dobj, "base_region", dobj)
             sfc_start = getattr(dobj, "sfc_start", None)
             sfc_end = getattr(dobj, "sfc_end", None)
-            domain = getattr(dobj, "domain_id", 0)
+            nz = getattr(dobj, "_num_zones", 0)
             if all_data:
                 mylog.debug("Selecting entire artio domain")
                 list_sfc_ranges = self.pf._handle.root_sfc_ranges_all()
@@ -359,11 +360,13 @@
                     self.pf.domain_left_edge, self.pf.domain_right_edge,
                     self.pf._handle, start, end)
                 range_handler.construct_mesh()
-                ci.append(ARTIORootMeshSubset(base_region, start, end,
-                            range_handler.root_mesh_handler, self.pf))
-                for sfc in sorted(range_handler.octree_handlers):
-                    ci.append(ARTIOOctreeSubset(base_region, sfc,
-                    range_handler, self.pf))
+                if nz != 2:
+                    ci.append(ARTIORootMeshSubset(base_region, start, end,
+                                range_handler.root_mesh_handler, self.pf))
+                if nz != 1:
+                    for sfc in sorted(range_handler.octree_handlers):
+                        ci.append(ARTIOOctreeSubset(base_region, sfc,
+                        range_handler, self.pf))
             dobj._chunk_info = ci
             if len(list_sfc_ranges) > 1:
                 mylog.info("Created %d chunks for ARTIO" % len(list_sfc_ranges))


https://bitbucket.org/yt_analysis/yt/commits/916e871601ba/
Changeset:   916e871601ba
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 23:10:02
Summary:     Enabling some Cython optimizations.
Affected #:  1 file

diff -r 8b5cb33c1e3e0303dc816e305f48aa02308afca5 -r 916e871601ba2ce1db6bb52df20faec0859b6075 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1,7 +1,3 @@
-#cython: profile=True
-"""
-
-"""
 cimport cython
 import numpy as np
 cimport numpy as np
@@ -680,9 +676,9 @@
         self.sfc_offset = range_handler.sfc_start
         self.level_offset = 1
 
-    #@cython.boundscheck(False)
-    #@cython.wraparound(False)
-    #@cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef void initialize_mesh(self, np.int64_t oct_count,
                               int num_oct_levels, int *num_octs_per_level):
         # We actually will not be initializing the root mesh here, we will be
@@ -724,9 +720,9 @@
             check_artio_status(status)
             nadded = self.add(1, level, pos[:num_octs_per_level[level],:])
  
-    #@cython.boundscheck(False)
-    #@cython.wraparound(False)
-    #@cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fill_sfc(self, 
                  np.ndarray[np.uint8_t, ndim=1] levels,
                  np.ndarray[np.uint8_t, ndim=1] cell_inds,
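
Re-enabling these decorators removes per-access bounds checks, negative-index handling, and Python division semantics from the compiled loops. A minimal sketch using Cython's pure-Python mode (assuming Cython is installed; the decorators are inert under plain CPython and take effect once the module is compiled):

    import cython
    import numpy as np

    @cython.boundscheck(False)  # skip bounds checks on buffer access
    @cython.wraparound(False)   # disallow negative indexing
    @cython.cdivision(True)     # C division: no zero-check, C modulo sign
    def rescale(arr, factor):
        for i in range(arr.shape[0]):
            arr[i] = arr[i] / factor

    rescale(np.ones(4), 2.0)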


https://bitbucket.org/yt_analysis/yt/commits/9a886dfddffb/
Changeset:   9a886dfddffb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-04 23:19:14
Summary:     A few speed optimizations for the mask() function.
Affected #:  1 file

diff -r 916e871601ba2ce1db6bb52df20faec0859b6075 -r 9a886dfddffbd92bfbdf427d22922172cb56f809 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1140,6 +1140,7 @@
         cdef int i
         cdef np.float64_t pos[3]
         cdef np.int64_t sfc
+        cdef np.ndarray[np.int64_t, ndim=1] oct_count
         if self._last_selector_id == hash(selector):
             return self._last_mask
         if num_cells == -1:
@@ -1147,8 +1148,9 @@
             # since num_cells will later be cached.
             num_cells = self.sfc_end - self.sfc_start + 1
         mask = np.zeros((num_cells), dtype="uint8")
+        oct_count = self.range_handler.oct_count
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            if self.range_handler.oct_count[sfc - self.sfc_start] > 0: continue
+            if oct_count[sfc - self.sfc_start] > 0: continue
             self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
             mask[sfc - self.sfc_start] = 1
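
The speedup comes from binding self.range_handler.oct_count to a typed buffer once, outside the loop, instead of paying an attribute lookup (and untyped indexing) on every iteration. The same idea in plain Python, with hypothetical names:

    def count_unrefined(handler, sfc_start, sfc_end):
        oct_count = handler.oct_count  # bind once, before the hot loop
        n = 0
        for sfc in range(sfc_start, sfc_end + 1):
            if oct_count[sfc - sfc_start] > 0:
                continue
            n += 1
        return n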


https://bitbucket.org/yt_analysis/yt/commits/550cad9d01b9/
Changeset:   550cad9d01b9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-05 02:50:59
Summary:     Declare these variables in Cython for a speedup.
Affected #:  1 file

diff -r 9a886dfddffbd92bfbdf427d22922172cb56f809 -r 550cad9d01b9f2b9fb344b186207cd0fefd01257 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -731,6 +731,7 @@
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
+        cdef int level, j, oct_ind
         cdef np.int64_t sfc, ipos
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle
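
Undeclared loop variables in a cdef function are treated as Python objects, so each increment boxes and unboxes an int. In pure-Python mode the equivalent of these cdef declarations is @cython.locals; a minimal, hypothetical sketch, again assuming Cython is installed:

    import cython

    @cython.locals(i=cython.int, total=cython.long)
    def triangle(n):
        total = 0
        for i in range(n):
            total += i
        return total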


https://bitbucket.org/yt_analysis/yt/commits/0d9bfae7d95f/
Changeset:   0d9bfae7d95f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-05 14:53:32
Summary:     Initial refactor of key methods for SparseOctreeContainer.
Affected #:  2 files

diff -r 550cad9d01b9f2b9fb344b186207cd0fefd01257 -r 0d9bfae7d95f050ae4f799cad06fc1140bb93635 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -92,6 +92,8 @@
     cdef void *tree_root
     cdef int num_root
     cdef int max_root
+    cdef void key_to_ipos(self, np.int64_t key, np.int64_t pos[3])
+    cdef np.int64_t ipos_to_key(self, int pos[3])
 
 cdef class RAMSESOctreeContainer(SparseOctreeContainer):
     pass

diff -r 550cad9d01b9f2b9fb344b186207cd0fefd01257 -r 0d9bfae7d95f050ae4f799cad06fc1140bb93635 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -684,9 +684,7 @@
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
-        cdef np.int64_t key = 0
-        for i in range(3):
-            key |= ((<np.int64_t>ind[i]) << 20 * (2 - i))
+        cdef np.int64_t key = self.ipos_to_key(ind)
         cdef OctKey okey, **oresult
         okey.key = key
         okey.node = NULL
@@ -695,6 +693,25 @@
         if oresult != NULL:
             o[0] = oresult[0].node
 
+    cdef void key_to_ipos(self, np.int64_t key, np.int64_t pos[3]):
+        # Note: this is the result of doing
+        # ukey = 0
+        # for i in range(20):
+        #     ukey |= (1 << i)
+        cdef np.int64_t ukey = 1048575
+        cdef int j
+        for j in range(3):
+            pos[2 - j] = (key & ukey)
+            key = key >> 20
+
+    cdef np.int64_t ipos_to_key(self, int pos[3]):
+        # We (hope) that 20 bits is enough for each index.
+        cdef int i
+        cdef np.int64_t key = 0
+        for i in range(3):
+            key |= (pos[i] << 20 * (2 - i))
+        return key
+
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
@@ -710,15 +727,10 @@
             dds[i] = (self.DRE[i] - self.DLE[i]) / self.nn[i]
         # Pos is the center of the octs
         cdef Oct *o
-        ukey = 0
-        for i in range(20):
-            ukey |= (1 << i)
         for i in range(self.num_root):
             o = self.root_nodes[i].node
             key = self.root_nodes[i].key
-            for j in range(3):
-                data.pos[2 - j] = (key & ukey)
-                key = key >> 20
+            self.key_to_ipos(key, data.pos)
             for j in range(3):
                 pos[j] = self.DLE[j] + (data.pos[j] + 0.5) * dds[j]
             selector.recursively_visit_octs(
@@ -728,7 +740,6 @@
         return 0 # We no longer have a domain offset.
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
-        # We assume that 20 bits is enough for each index.
         cdef int i
         cdef Oct *next
         self.get_root(ind, &next)
@@ -744,8 +755,7 @@
         cont.n_assigned += 1
         cdef np.int64_t key = 0
         cdef OctKey *ikey = &self.root_nodes[self.num_root]
-        for i in range(3):
-            key |= ((<np.int64_t>ind[i]) << 20 * (2 - i))
+        key = self.ipos_to_key(ind)
         self.root_nodes[self.num_root].key = key
         self.root_nodes[self.num_root].node = next
         tsearch(<void*>ikey, &self.tree_root, root_node_compare)
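
ipos_to_key and key_to_ipos pack three root-grid indices into one 64-bit integer, 20 bits per axis; 1048575 is the 20-bit mask 2**20 - 1. A pure-Python round trip of the same encoding:

    MASK20 = (1 << 20) - 1  # 1048575: twenty 1-bits

    def ipos_to_key(pos):
        key = 0
        for i in range(3):
            key |= (pos[i] & MASK20) << (20 * (2 - i))
        return key

    def key_to_ipos(key):
        pos = [0, 0, 0]
        for j in range(3):
            pos[2 - j] = key & MASK20
            key >>= 20
        return pos

    assert key_to_ipos(ipos_to_key([5, 17, 123456])) == [5, 17, 123456]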


https://bitbucket.org/yt_analysis/yt/commits/aa497112dd6f/
Changeset:   aa497112dd6f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-05 15:42:16
Summary:     First refactor step for appending domains and oct allocations.
Affected #:  2 files

diff -r 0d9bfae7d95f050ae4f799cad06fc1140bb93635 -r aa497112dd6fdb882fe500f89de833d54c7243bc yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -72,7 +72,7 @@
     cdef np.uint8_t oref
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
-    cdef public int max_domain
+    cdef public int num_domains
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)

diff -r 0d9bfae7d95f050ae4f799cad06fc1140bb93635 -r aa497112dd6fdb882fe500f89de833d54c7243bc yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-from libc.stdlib cimport malloc, free, qsort
+from libc.stdlib cimport malloc, free, qsort, realloc
 from libc.math cimport floor
 cimport numpy as np
 import numpy as np
@@ -104,8 +104,9 @@
         cdef int i, j, k, p
         for i in range(3):
             self.nn[i] = oct_domain_dimensions[i]
-        self.max_domain = -1
+        self.num_domains = 0
         self.level_offset = 0
+        self.domains = NULL
         p = 0
         self.nocts = 0 # Increment when initialized
         for i in range(3):
@@ -269,14 +270,14 @@
 
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
-        domain_mask = np.zeros(self.max_domain, dtype="uint8")
+        domain_mask = np.zeros(self.num_domains, dtype="uint8")
         cdef OctVisitorData data
         self.setup_data(&data)
         data.array = domain_mask.data
         self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
         cdef int i
         domain_ids = []
-        for i in range(self.max_domain):
+        for i in range(self.num_domains):
             if domain_mask[i] == 1:
                 domain_ids.append(i+1)
         return domain_ids
@@ -512,7 +513,7 @@
         cdef Oct *cur, *next = NULL
         cdef np.float64_t pp[3], cp[3], dds[3]
         no = pos.shape[0] #number of octs
-        if curdom > self.max_domain: return 0
+        if curdom > self.num_domains: return 0
         cdef OctAllocationContainer *cont = self.domains[curdom - 1]
         cdef int initial = cont.n_assigned
         cdef int in_boundary = 0
@@ -556,7 +557,7 @@
         cdef int count, i
         cdef OctAllocationContainer *cur = self.cont
         assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
+        self.num_domains = len(domain_counts) # 1-indexed
         self.domains = <OctAllocationContainer **> malloc(
             sizeof(OctAllocationContainer *) * len(domain_counts))
         for i, count in enumerate(domain_counts):
@@ -564,6 +565,20 @@
             if self.cont == NULL: self.cont = cur
             self.domains[i] = cur
 
+    def append_domain(self, domain_count):
+        self.num_domains += 1
+        self.domains = <OctAllocationContainer **> realloc(self.domains, 
+                sizeof(OctAllocationContainer *) * self.num_domains)
+        self.domains[self.num_domains - 1] = NULL
+        cdef OctAllocationContainer *cur
+        if self.num_domains == 1:
+            cur = NULL
+        else:
+            cur = self.domains[self.num_domains - 2]
+        cur = allocate_octs(domain_count, cur)
+        if self.cont == NULL: self.cont = cur
+        self.domains[self.num_domainss - 1] = cur
+
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
         if next != NULL: return next
@@ -668,7 +683,7 @@
         self.oref = over_refine
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
-        self.max_domain = -1
+        self.num_domains = 0
         self.level_offset = 0
         self.nocts = 0 # Increment when initialized
         self.root_mesh = NULL
@@ -771,6 +786,15 @@
             self.root_nodes[i].key = -1
             self.root_nodes[i].node = NULL
 
+    def append_domain(self, domain_count, bint new_root = False):
+        OctreeContainer.append_domain(self, domain_count)
+        if not new_root: return
+        self.max_root += 1
+        self.root_nodes = <OctKey *> realloc(self.root_nodes,
+                                  sizeof(OctKey) * self.max_root)
+        self.root_nodes[self.max_root - 1].key = -1
+        self.root_nodes[self.max_root - 1].node = NULL
+
     def __dealloc__(self):
         # This gets called BEFORE the superclass deallocation.  But, both get
         # called.
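
append_domain grows the domains pointer array by one slot with realloc and chains the new OctAllocationContainer onto the previous one, keeping self.cont pointed at the head of the chain. (Note the self.num_domainss typo in the hunk above; it is corrected in the next changeset.) A rough pure-Python analog of the bookkeeping, with a dict standing in for the C struct:

    class Container(object):
        def __init__(self):
            self.domains = []    # stands in for the realloc'd pointer array
            self.cont = None     # head of the allocation chain
            self.num_domains = 0

        def append_domain(self, domain_count):
            self.num_domains += 1
            prev = self.domains[-1] if self.domains else None
            cur = {"n": domain_count, "n_assigned": 0, "prev": prev}
            self.domains.append(cur)
            if self.cont is None:
                self.cont = cur

    c = Container()
    c.append_domain(8)
    c.append_domain(64)
    assert c.num_domains == 2 and c.cont is c.domains[0]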


https://bitbucket.org/yt_analysis/yt/commits/65d312fded66/
Changeset:   65d312fded66
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-05 22:15:21
Summary:     Remove a few reallocs and fix the key generation.
Affected #:  3 files

diff -r aa497112dd6fdb882fe500f89de833d54c7243bc -r 65d312fded66e42d28ab454b3d6b4a66e1c93529 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -588,6 +588,12 @@
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
         cdef ARTIOOctreeContainer octree
+        octree = ARTIOOctreeContainer(self)
+        # We want to pre-allocate an array of root pointers.  In the future,
+        # this will be pre-determined by the ARTIO library.  However, because
+        # realloc plays havoc with our tree searching, we can't utilize an
+        # expanding array at the present time.
+        octree.allocate_domains([], self.sfc_end - self.sfc_start + 1)
         cdef np.ndarray[np.int64_t, ndim=1] oct_count
         oct_count = np.zeros(self.sfc_end - self.sfc_start + 1, dtype="int64")
         status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
@@ -602,9 +608,7 @@
                 for level in range(num_oct_levels):
                     oc += num_octs_per_level[level]
                 oct_count[sfc - self.sfc_start] = oc
-                octree = ARTIOOctreeContainer(self, sfc)
-                octree.initialize_mesh(oc, num_oct_levels, num_octs_per_level)
-                self.octree_handlers[sfc] = octree
+                octree.initialize_local_mesh(oc, num_oct_levels, num_octs_per_level)
             status = artio_grid_read_root_cell_end( self.handle )
             check_artio_status(status)
         free(num_octs_per_level)
@@ -649,22 +653,16 @@
     # the file again, despite knowing the indexing system already.  Because of
     # this, we will avoid creating it as long as possible.
 
-    cdef public np.int64_t sfc
-    cdef public np.int64_t sfc_offset
     cdef public artio_fileset artio_handle
-    cdef Oct **root_octs
     cdef np.int64_t level_indices[32]
-    cdef np.int64_t oct_count[32]
 
-    def __init__(self, ARTIOSFCRangeHandler range_handler, np.int64_t sfc):
+    def __init__(self, ARTIOSFCRangeHandler range_handler):
         self.artio_handle = range_handler.artio_handle
-        self.sfc = sfc
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
         dims, DLE, DRE = [], [], []
         for i in range(32):
             self.level_indices[i] = 0
-            self.oct_count[i] = 0
         for i in range(3):
             # range_handler has dims in cells, which is the same as the number
             # of possible octs.  This is because we have a forest of octrees.
@@ -673,13 +671,14 @@
             DRE.append(range_handler.DRE[i])
         super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
         self.artio_handle = range_handler.artio_handle
-        self.sfc_offset = range_handler.sfc_start
         self.level_offset = 1
+        self.domains = NULL
+        self.root_nodes = NULL
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void initialize_mesh(self, np.int64_t oct_count,
+    cdef void initialize_local_mesh(self, np.int64_t oct_count,
                               int num_oct_levels, int *num_octs_per_level):
         # We actually will not be initializing the root mesh here, we will be
         # initializing the entire mesh between sfc_start and sfc_end.
@@ -695,8 +694,7 @@
         # called from within a pre-cached operation in the SFC handler.
 
         # We only allow one root oct.
-        self.allocate_domains([oct_count], 1)
-        pos = np.empty((1, 3), dtype="float64")
+        self.append_domain(oct_count)
 
         oct_ind = -1
         ipos = 0
@@ -708,6 +706,7 @@
 
         # Now we initialize
         # Note that we also assume we have already started reading the level.
+        ipos = 0
         for level in range(num_oct_levels):
             status = artio_grid_read_level_begin(handle, level + 1)
             check_artio_status(status)
@@ -718,16 +717,17 @@
                 check_artio_status(status)
             status = artio_grid_read_level_end(handle)
             check_artio_status(status)
-            nadded = self.add(1, level, pos[:num_octs_per_level[level],:])
- 
+            nadded = self.add(self.num_domains, level, pos[:num_octs_per_level[level],:])
+            if nadded != num_octs_per_level[level]: raise RuntimeError
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def fill_sfc(self, 
-                 np.ndarray[np.uint8_t, ndim=1] levels,
-                 np.ndarray[np.uint8_t, ndim=1] cell_inds,
-                 np.ndarray[np.int64_t, ndim=1] file_inds,
-                 field_indices, dest_fields):
+             np.ndarray[np.uint8_t, ndim=1] levels,
+             np.ndarray[np.uint8_t, ndim=1] cell_inds,
+             np.ndarray[np.int64_t, ndim=1] file_inds,
+             field_indices, dest_fields):
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level

diff -r aa497112dd6fdb882fe500f89de833d54c7243bc -r 65d312fded66e42d28ab454b3d6b4a66e1c93529 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -50,11 +50,9 @@
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 
-
-
 class ARTIOOctreeSubset(OctreeSubset):
     _domain_offset = 0
-    domain_id = 1
+    domain_id = -1
     _con_args = ("base_region", "sfc", "root_mesh", "pf")
     _type_name = 'octree_subset'
     _num_zones = 2
@@ -133,7 +131,7 @@
     _con_args = ("base_region", "sfc_start", "sfc_end", "oct_handler", "pf")
     _type_name = 'sfc_subset'
     _selector_module = _artio_caller
-    domain_id = 1
+    domain_id = -1
 
     def __init__(self, base_region, sfc_start, sfc_end, oct_handler, pf):
         self.field_data = YTFieldData()

diff -r aa497112dd6fdb882fe500f89de833d54c7243bc -r 65d312fded66e42d28ab454b3d6b4a66e1c93529 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -101,6 +101,7 @@
         # This will just initialize the root mesh octs
         self.oref = over_refine
         self.partial_coverage = partial_coverage
+        self.cont = NULL
         cdef int i, j, k, p
         for i in range(3):
             self.nn[i] = oct_domain_dimensions[i]
@@ -569,15 +570,14 @@
         self.num_domains += 1
         self.domains = <OctAllocationContainer **> realloc(self.domains, 
                 sizeof(OctAllocationContainer *) * self.num_domains)
+        if self.domains == NULL: raise RuntimeError
         self.domains[self.num_domains - 1] = NULL
-        cdef OctAllocationContainer *cur
-        if self.num_domains == 1:
-            cur = NULL
-        else:
+        cdef OctAllocationContainer *cur = NULL
+        if self.num_domains > 1:
             cur = self.domains[self.num_domains - 2]
         cur = allocate_octs(domain_count, cur)
         if self.cont == NULL: self.cont = cur
-        self.domains[self.num_domainss - 1] = cur
+        self.domains[self.num_domains - 1] = cur
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
@@ -690,6 +690,7 @@
         self.root_nodes = NULL
         self.tree_root = NULL
         self.num_root = 0
+        self.max_root = 0
         # We don't initialize the octs yet
         for i in range(3):
             self.DLE[i] = domain_left_edge[i] #0
@@ -700,23 +701,24 @@
         o[0] = NULL
         cdef int i
         cdef np.int64_t key = self.ipos_to_key(ind)
-        cdef OctKey okey, **oresult
+        cdef OctKey okey, **oresult = NULL
         okey.key = key
         okey.node = NULL
         oresult = <OctKey **> tfind(<void*>&okey,
             &self.tree_root, root_node_compare)
         if oresult != NULL:
             o[0] = oresult[0].node
+            return 1
+        return 0
 
     cdef void key_to_ipos(self, np.int64_t key, np.int64_t pos[3]):
         # Note: this is the result of doing
-        # ukey = 0
         # for i in range(20):
         #     ukey |= (1 << i)
         cdef np.int64_t ukey = 1048575
         cdef int j
         for j in range(3):
-            pos[2 - j] = (key & ukey)
+            pos[2 - j] = (<np.int64_t>(key & ukey))
             key = key >> 20
 
     cdef np.int64_t ipos_to_key(self, int pos[3]):
@@ -724,7 +726,8 @@
         cdef int i
         cdef np.int64_t key = 0
         for i in range(3):
-            key |= (pos[i] << 20 * (2 - i))
+            # Note the casting here.  Bitshifting can cause issues otherwise.
+            key |= ((<np.int64_t>pos[i]) << 20 * (2 - i))
         return key
 
     @cython.cdivision(True)
@@ -756,7 +759,7 @@
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef int i
-        cdef Oct *next
+        cdef Oct *next = NULL
         self.get_root(ind, &next)
         if next != NULL: return next
         cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
@@ -770,6 +773,7 @@
         cont.n_assigned += 1
         cdef np.int64_t key = 0
         cdef OctKey *ikey = &self.root_nodes[self.num_root]
+        cdef np.int64_t okey = ikey.key
         key = self.ipos_to_key(ind)
         self.root_nodes[self.num_root].key = key
         self.root_nodes[self.num_root].node = next
@@ -786,15 +790,6 @@
             self.root_nodes[i].key = -1
             self.root_nodes[i].node = NULL
 
-    def append_domain(self, domain_count, bint new_root = False):
-        OctreeContainer.append_domain(self, domain_count)
-        if not new_root: return
-        self.max_root += 1
-        self.root_nodes = <OctKey *> realloc(self.root_nodes,
-                                  sizeof(OctKey) * self.max_root)
-        self.root_nodes[self.max_root - 1].key = -1
-        self.root_nodes[self.max_root - 1].node = NULL
-
     def __dealloc__(self):
         # This gets called BEFORE the superclass deallocation.  But, both get
         # called.
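
The key-generation fix is the explicit cast: in C, shifting a 32-bit int left by 40 bits overflows before the result is ever widened, so pos[i] must be promoted to 64 bits first. Python ints are arbitrary precision, but the safe pattern is easy to mirror with numpy scalars; a small illustrative sketch:

    import numpy as np

    pos = np.array([5, 17, 123456], dtype=np.int32)
    key = np.int64(0)
    for i in range(3):
        # Promote to 64 bits *before* shifting, mirroring the
        # <np.int64_t> cast added above.
        key |= np.int64(pos[i]) << (20 * (2 - i))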


https://bitbucket.org/yt_analysis/yt/commits/1fd20d49e35a/
Changeset:   1fd20d49e35a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-05 22:25:20
Summary:     A few minor speed optimizations.  Chunks now use the single OctreeHandler.
Affected #:  3 files

diff -r 65d312fded66e42d28ab454b3d6b4a66e1c93529 -r 1fd20d49e35a091d79e1226847cd38664cb3568c yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -552,8 +552,8 @@
     cdef public np.int64_t sfc_end
     cdef public artio_fileset artio_handle
     cdef public object root_mesh_handler
-    cdef public object octree_handlers
     cdef public object oct_count
+    cdef public object octree_handler
     cdef artio_fileset_handle *handle
     cdef np.float64_t DLE[3]
     cdef np.float64_t DRE[3]
@@ -570,7 +570,7 @@
         self.sfc_end = sfc_end
         self.artio_handle = artio_handle
         self.root_mesh_handler = None
-        self.octree_handlers = {}
+        self.octree_handler = None
         self.handle = artio_handle.handle
         self.oct_count = None
         for i in range(3):
@@ -579,6 +579,9 @@
             self.DRE[i] = domain_right_edge[i]
             self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def construct_mesh(self):
         cdef int status, level
         cdef np.int64_t sfc, oc
@@ -588,7 +591,7 @@
         cdef int *num_octs_per_level = <int *>malloc(
             (max_level + 1)*sizeof(int))
         cdef ARTIOOctreeContainer octree
-        octree = ARTIOOctreeContainer(self)
+        self.octree_handler = octree = ARTIOOctreeContainer(self)
         # We want to pre-allocate an array of root pointers.  In the future,
         # this will be pre-determined by the ARTIO library.  However, because
         # realloc plays havoc with our tree searching, we can't utilize an
@@ -616,7 +619,7 @@
         self.oct_count = oct_count
 
     def free_mesh(self):
-        self.octree_handlers.clear()
+        self.octree_handler = None
         self.root_mesh_handler = None
         self.oct_count = None
 
@@ -682,7 +685,7 @@
                               int num_oct_levels, int *num_octs_per_level):
         # We actually will not be initializing the root mesh here, we will be
         # initializing the entire mesh between sfc_start and sfc_end.
-        cdef np.int64_t oct_ind, sfc, tot_octs, ipos
+        cdef np.int64_t oct_ind, sfc, tot_octs, ipos, nadded
         cdef int i, status, level, num_root, num_octs
         cdef int num_level_octs
         cdef artio_fileset_handle *handle = self.artio_handle.handle
@@ -690,6 +693,7 @@
         cdef int max_level = self.artio_handle.max_level
         cdef double dpos[3]
         cdef np.float64_t f64pos[3], dds[3]
+        cdef np.ndarray[np.float64_t, ndim=2] pos
         # NOTE: We do not cache any SFC ranges here, as we should only ever be
         # called from within a pre-cached operation in the SFC handler.
 
@@ -718,7 +722,8 @@
             status = artio_grid_read_level_end(handle)
             check_artio_status(status)
             nadded = self.add(self.num_domains, level, pos[:num_octs_per_level[level],:])
-            if nadded != num_octs_per_level[level]: raise RuntimeError
+            if nadded != num_octs_per_level[level]:
+                raise RuntimeError
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 65d312fded66e42d28ab454b3d6b4a66e1c93529 -r 1fd20d49e35a091d79e1226847cd38664cb3568c yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -53,15 +53,16 @@
 class ARTIOOctreeSubset(OctreeSubset):
     _domain_offset = 0
     domain_id = -1
-    _con_args = ("base_region", "sfc", "root_mesh", "pf")
+    _con_args = ("base_region", "sfc_start", "sfc_end", "oct_handler", "pf")
     _type_name = 'octree_subset'
     _num_zones = 2
 
-    def __init__(self, base_region, sfc, root_mesh, pf):
+    def __init__(self, base_region, sfc_start, sfc_end, oct_handler, pf):
         self.field_data = YTFieldData()
         self.field_parameters = {}
-        self.sfc = self.sfc_start = self.sfc_end = sfc
-        self.root_mesh = root_mesh
+        self.sfc_start = sfc_start
+        self.sfc_end = sfc_end
+        self.oct_handler = oct_handler
         self.pf = pf
         self.hierarchy = self.pf.hierarchy
         self._last_mask = None
@@ -70,7 +71,6 @@
         self._current_fluid_type = self.pf.default_fluid_type
         self.base_region = base_region
         self.base_selector = base_region.selector
-        self.oct_handler = root_mesh.octree_handlers[sfc]
 
     @property
     def min_ind(self):
@@ -128,26 +128,10 @@
 # only manage the root mesh.
 class ARTIORootMeshSubset(ARTIOOctreeSubset):
     _num_zones = 1
-    _con_args = ("base_region", "sfc_start", "sfc_end", "oct_handler", "pf")
     _type_name = 'sfc_subset'
     _selector_module = _artio_caller
     domain_id = -1
 
-    def __init__(self, base_region, sfc_start, sfc_end, oct_handler, pf):
-        self.field_data = YTFieldData()
-        self.field_parameters = {}
-        self.sfc_start = sfc_start
-        self.sfc_end = sfc_end
-        self.oct_handler = oct_handler
-        self.pf = pf
-        self.hierarchy = self.pf.hierarchy
-        self._last_mask = None
-        self._last_selector_id = None
-        self._current_particle_type = 'all'
-        self._current_fluid_type = self.pf.default_fluid_type
-        self.base_region = base_region
-        self.base_selector = base_region.selector
-
     def fill(self, fields, selector):
         # We know how big these will be.
         handle = self.pf._handle
@@ -362,9 +346,8 @@
                     ci.append(ARTIORootMeshSubset(base_region, start, end,
                                 range_handler.root_mesh_handler, self.pf))
                 if nz != 1:
-                    for sfc in sorted(range_handler.octree_handlers):
-                        ci.append(ARTIOOctreeSubset(base_region, sfc,
-                        range_handler, self.pf))
+                    ci.append(ARTIOOctreeSubset(base_region, start, end,
+                      range_handler.octree_handler, self.pf))
             dobj._chunk_info = ci
             if len(list_sfc_ranges) > 1:
                 mylog.info("Created %d chunks for ARTIO" % len(list_sfc_ranges))

diff -r 65d312fded66e42d28ab454b3d6b4a66e1c93529 -r 1fd20d49e35a091d79e1226847cd38664cb3568c yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -566,7 +566,7 @@
             if self.cont == NULL: self.cont = cur
             self.domains[i] = cur
 
-    def append_domain(self, domain_count):
+    def append_domain(self, np.int64_t domain_count):
         self.num_domains += 1
         self.domains = <OctAllocationContainer **> realloc(self.domains, 
                 sizeof(OctAllocationContainer *) * self.num_domains)
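
With a single octree handler covering the whole SFC range, chunk construction reduces to at most two subsets per range: one for the root mesh and one for every oct. A schematic sketch of that dispatch, using namedtuples as hypothetical stand-ins for the yt subset classes:

    from collections import namedtuple

    RootMeshSubset = namedtuple("RootMeshSubset",
                                "base_region sfc_start sfc_end handler pf")
    OctreeSubset = namedtuple("OctreeSubset",
                              "base_region sfc_start sfc_end handler pf")

    def build_chunks(base_region, start, end, range_handler, pf, nz):
        ci = []
        if nz != 2:  # the requester is not itself an octree subset
            ci.append(RootMeshSubset(base_region, start, end,
                                     range_handler.root_mesh_handler, pf))
        if nz != 1:  # the requester is not itself a root-mesh subset
            ci.append(OctreeSubset(base_region, start, end,
                                   range_handler.octree_handler, pf))
        return ci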


https://bitbucket.org/yt_analysis/yt/commits/5db7741383a2/
Changeset:   5db7741383a2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-05 23:00:55
Summary:     Only add the Octrees if we have Octs to handle.
Affected #:  4 files

diff -r 1fd20d49e35a091d79e1226847cd38664cb3568c -r 5db7741383a257389b82edea1bc7b174ee0e90d7 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -559,6 +559,7 @@
     cdef np.float64_t DRE[3]
     cdef np.float64_t dds[3]
     cdef np.int64_t dims[3]
+    cdef public np.int64_t total_octs
 
     def __init__(self, domain_dimensions, # cells
                  domain_left_edge,
@@ -610,6 +611,7 @@
                 oc = 0
                 for level in range(num_oct_levels):
                     oc += num_octs_per_level[level]
+                self.total_octs += oc
                 oct_count[sfc - self.sfc_start] = oc
                 octree.initialize_local_mesh(oc, num_oct_levels, num_octs_per_level)
             status = artio_grid_read_root_cell_end( self.handle )

diff -r 1fd20d49e35a091d79e1226847cd38664cb3568c -r 5db7741383a257389b82edea1bc7b174ee0e90d7 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -345,7 +345,7 @@
                 if nz != 2:
                     ci.append(ARTIORootMeshSubset(base_region, start, end,
                                 range_handler.root_mesh_handler, self.pf))
-                if nz != 1:
+                if nz != 1 and range_handler.total_octs > 0:
                     ci.append(ARTIOOctreeSubset(base_region, start, end,
                       range_handler.octree_handler, self.pf))
             dobj._chunk_info = ci

diff -r 1fd20d49e35a091d79e1226847cd38664cb3568c -r 5db7741383a257389b82edea1bc7b174ee0e90d7 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -86,6 +86,7 @@
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
+    cdef void append_domain(self, np.int64_t domain_count)
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r 1fd20d49e35a091d79e1226847cd38664cb3568c -r 5db7741383a257389b82edea1bc7b174ee0e90d7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -566,7 +566,7 @@
             if self.cont == NULL: self.cont = cur
             self.domains[i] = cur
 
-    def append_domain(self, np.int64_t domain_count):
+    cdef void append_domain(self, np.int64_t domain_count):
         self.num_domains += 1
         self.domains = <OctAllocationContainer **> realloc(self.domains, 
                 sizeof(OctAllocationContainer *) * self.num_domains)
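
construct_mesh now accumulates total_octs while reading the per-SFC level counts, so the chunking code can skip the octree subset entirely when a range holds no octs. A toy sketch of the counting, with hypothetical inputs:

    def count_octs(octs_per_level_by_sfc):
        # octs_per_level_by_sfc: {sfc: [n_level1, n_level2, ...]}
        total_octs = 0
        oct_count = {}
        for sfc, per_level in octs_per_level_by_sfc.items():
            oc = sum(per_level)
            total_octs += oc
            oct_count[sfc] = oc
        return total_octs, oct_count

    total, per_sfc = count_octs({0: [8, 16], 1: [], 2: [4]})
    assert total == 28 and per_sfc[1] == 0  # sfc 1 is unrefined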


https://bitbucket.org/yt_analysis/yt/commits/557607fcc8dd/
Changeset:   557607fcc8dd
Branch:      yt-3.0
User:        samskillman
Date:        2013-09-03 23:46:15
Summary:     Fixing memory freeing.
Affected #:  1 file

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 557607fcc8dd2175d821a8adb51d663b284d2eff yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -208,14 +208,16 @@
     The entire purpose of this function is to move everything from ndarrays
     to internal C pointers. 
     """
-    pgles = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-    pgres = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    pgles = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
     cdef int j
     for j in range(3):
         pgles[j] = gle[j]
         pgres[j] = gre[j]
 
     add_grid(node, pgles, pgres, gid, rank, size)
+    free(pgles)
+    free(pgres)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -285,6 +287,8 @@
         free(pgres[i])
     free(pgles)
     free(pgres)
+    free(pgids)
+
 
  
 @cython.boundscheck(False)
@@ -363,17 +367,17 @@
     for i in range(nless):
         free(less_gles[i])
         free(less_gres[i])
-    free(l_ids)
-    free(less_ids)
     free(less_gles)
     free(less_gres)
+    free(less_ids)
+    free(l_ids)
     for i in range(ngreater):
         free(greater_gles[i])
         free(greater_gres[i])
-    free(g_ids)
-    free(greater_ids)
     free(greater_gles)
     free(greater_gres)
+    free(greater_ids)
+    free(g_ids)
 
     return
 
@@ -491,10 +495,10 @@
                        ):
     cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
     cdef np.float64_t **uniquedims, *uniques, split
-    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    uniquedims = <np.float64_t **> malloc(3 * sizeof(np.float64_t*))
     for i in range(3):
         uniquedims[i] = <np.float64_t *> \
-                alloca(2*n_grids * sizeof(np.float64_t))
+                malloc(2*n_grids * sizeof(np.float64_t))
     my_max = 0
     my_split = 0
     best_dim = -1
@@ -542,6 +546,11 @@
             ngreater += 1
         else:
             greater_ids[i] = 0
+
+    for i in range(3):
+        free(uniquedims[i])
+    free(uniquedims)
+
     # Return out unique values
     return best_dim, split, nless, ngreater
 
@@ -574,13 +583,6 @@
         kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
                           less_ids, greater_ids)
  
-    for i in range(ngrids):
-        for j in range(2):
-            free(data[i][j])
-        free(data[i])
-    free(data)
-    free(less_ids)
-    free(greater_ids)
 
     # If best_dim is -1, then we have found a place where there are no choices.
     # Exit out and set the node to None.
@@ -653,18 +655,25 @@
     for i in range(nless):
         free(less_gles[i])
         free(less_gres[i])
-    free(l_ids)
-    free(less_index)
     free(less_gles)
     free(less_gres)
+    free(less_ids)
+    free(less_index)
+    free(l_ids)
     for i in range(ngreater):
         free(greater_gles[i])
         free(greater_gres[i])
-    free(g_ids)
-    free(greater_index)
     free(greater_gles)
     free(greater_gres)
+    free(greater_ids)
+    free(greater_index)
+    free(g_ids)
 
+    for i in range(ngrids):
+        for j in range(2):
+            free(data[i][j])
+        free(data[i])
+    free(data)
 
     return 0
 


https://bitbucket.org/yt_analysis/yt/commits/9e43b18b736d/
Changeset:   9e43b18b736d
Branch:      yt-3.0
User:        sleitner
Date:        2013-09-06 04:03:27
Summary:     Correcting comoving ("cm") units in RAMSES and ARTIO.
Affected #:  2 files

diff -r 557607fcc8dd2175d821a8adb51d663b284d2eff -r 9e43b18b736d1b738249c93598041b15a7266914 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -452,10 +452,10 @@
                 self.units["%sh" % unit] = self.units[unit] * \
                     self.hubble_constant
                 self.units["%shcm" % unit] = \
-                    (self.units["%sh" % unit] /
+                    (self.units["%sh" % unit] *
                         (1 + self.current_redshift))
                 self.units["%scm" % unit] = \
-                    self.units[unit] / (1 + self.current_redshift)
+                    self.units[unit] * (1 + self.current_redshift)
 
         for unit in sec_conversion.keys():
             self.time_units[unit] = self.parameters['unit_t']\

diff -r 557607fcc8dd2175d821a8adb51d663b284d2eff -r 9e43b18b736d1b738249c93598041b15a7266914 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -411,9 +411,9 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
             self.units['%sh' % unit] = self.units[unit] * self.hubble_constant
-            self.units['%scm' % unit] = (self.units[unit] /
+            self.units['%scm' % unit] = (self.units[unit] *
                                           (1 + self.current_redshift))
-            self.units['%shcm' % unit] = (self.units['%sh' % unit] /
+            self.units['%shcm' % unit] = (self.units['%sh' % unit] *
                                           (1 + self.current_redshift))
         for unit in sec_conversion.keys():
             self.time_units[unit] = self.parameters['unit_t'] / sec_conversion[unit]
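
The direction of the fix: a proper (physical) length d corresponds to a comoving length d * (1 + z), so the "cm"-suffixed conversion factors must multiply, not divide, by (1 + current_redshift). A minimal sketch, assuming units[unit] converts code lengths to proper lengths:

    def add_comoving_units(units, current_redshift, hubble_constant):
        for unit in list(units):
            units["%sh" % unit] = units[unit] * hubble_constant
            units["%scm" % unit] = units[unit] * (1 + current_redshift)
            units["%shcm" % unit] = (units["%sh" % unit] *
                                     (1 + current_redshift))

    units = {"mpc": 1.0}
    add_comoving_units(units, current_redshift=1.0, hubble_constant=0.7)
    assert units["mpccm"] == 2.0  # twice as many comoving Mpc at z = 1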


https://bitbucket.org/yt_analysis/yt/commits/4b3bf8683aa2/
Changeset:   4b3bf8683aa2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-03 19:29:33
Summary:     Removing periodic_region, periodic_region_strict and region_strict.  All of
these are now just regular regions.
Affected #:  7 files

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2216,11 +2216,11 @@
                 self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
+            self._data_source = self.hierarchy.region([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+            self._data_source = self.hierarchy.region([0.5] * 3,
                 LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case where the full box is what we want.
@@ -2306,8 +2306,7 @@
                 np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
-                ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -2504,7 +2503,7 @@
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
         else:
             self._data_source = pf.h.all_data()
         self.padding = padding  # * pf["unitary"] # This should be clevererer
@@ -2600,7 +2599,7 @@
             linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+            self._data_source = pf.h.region([0.] * 3, ds_LE,
                 ds_RE)
         else:
             self._data_source = pf.h.all_data()

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -811,10 +811,10 @@
                     need_per = True
                     break
 
-            if need_per:
-                region = self.pf.h.periodic_region(halo['center'], leftEdge, rightEdge)
-            else:
-                region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
+            # We use the same type of region regardless.  The selection will be
+            # correct, but we need the need_per variable for projection
+            # shifting.
+            region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
 
             # Make projections.
             if not isinstance(axes, types.ListType): axes = list([axes])

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -159,8 +159,7 @@
             # This ds business below has to do with changes made for halo
             # finding on subvolumes and serves no purpose here except
             # compatibility. This is not the best policy, if I'm honest.
-            ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
-                self.right_edge)
+            ds = pf.h.region([0.]*3, self.left_edge, self.right_edge)
             padded, self.LE, self.RE, self.ds = \
             self.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -122,16 +122,6 @@
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
-class ParticleIOHandlerRegionStrict(ParticleIOHandlerRegion):
-    _source_type = "region_strict"
-
-class ParticleIOHandlerPeriodicRegion(ParticleIOHandlerRegion):
-    periodic = True
-    _source_type = "periodic_region"
-
-class ParticleIOHandlerPeriodicRegionStrict(ParticleIOHandlerPeriodicRegion):
-    _source_type = "periodic_region_strict"
-
 class ParticleIOHandlerSphere(ParticleIOHandlerImplemented):
     _source_type = "sphere"
 

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/utilities/answer_testing/boolean_region_tests.py
--- a/yt/utilities/answer_testing/boolean_region_tests.py
+++ b/yt/utilities/answer_testing/boolean_region_tests.py
@@ -13,8 +13,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "AND", re2])
         # re should look like re2.
         x2 = re2['x']
@@ -36,8 +36,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "OR", re2])
         # re should look like re1
         x1 = re1['x']
@@ -59,15 +59,15 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         # Bottom base
-        re3 = self.pf.h.region_strict(five, four, [six[0], six[1], five[2]])
+        re3 = self.pf.h.region(five, four, [six[0], six[1], five[2]])
         # Side
-        re4 = self.pf.h.region_strict(five, [four[0], four[1], five[2]],
+        re4 = self.pf.h.region(five, [four[0], four[1], five[2]],
             [five[0], six[1], six[2]])
         # Last small cube
-        re5 = self.pf.h.region_strict(five, [five[0], four[0], four[2]],
+        re5 = self.pf.h.region(five, [five[0], four[0], four[2]],
             [six[0], five[1], six[2]])
         # re1 NOT re2 should look like re3 OR re4 OR re5
         re = self.pf.h.boolean([re1, "NOT", re2])
@@ -92,8 +92,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "AND", re2])
         # re should look like re2.
         x2 = re2['particle_position_x']
@@ -115,8 +115,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "OR", re2])
         # re should look like re1
         x1 = re1['particle_position_x']
@@ -138,15 +138,15 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         # Bottom base
-        re3 = self.pf.h.region_strict(five, four, [six[0], six[1], five[2]])
+        re3 = self.pf.h.region(five, four, [six[0], six[1], five[2]])
         # Side
-        re4 = self.pf.h.region_strict(five, [four[0], four[1], five[2]],
+        re4 = self.pf.h.region(five, [four[0], four[1], five[2]],
             [five[0], six[1], six[2]])
         # Last small cube
-        re5 = self.pf.h.region_strict(five, [five[0], four[0], four[2]],
+        re5 = self.pf.h.region(five, [five[0], four[0], four[2]],
             [six[0], five[1], six[2]])
         # re1 NOT re2 should look like re3 OR re4 OR re5
         re = self.pf.h.boolean([re1, "NOT", re2])

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -1058,7 +1058,7 @@
         RE[yax] = y[1] * (DRE[yax]-DLE[yax]) + DLE[yax]
         mylog.debug("Dimensions: %s %s", LE, RE)
 
-        reg = self.hierarchy.region_strict(self.center, LE, RE)
+        reg = self.hierarchy.region(self.center, LE, RE)
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
@@ -1074,8 +1074,7 @@
             return False, LE, RE, ds
         if not self._distributed and subvol:
             return True, LE, RE, \
-            self.hierarchy.periodic_region_strict(self.center,
-                LE-padding, RE+padding)
+            self.hierarchy.region(self.center, LE-padding, RE+padding)
         elif ytcfg.getboolean("yt", "inline"):
             # At this point, we want to identify the root grid tile to which
             # this processor is assigned.
@@ -1102,10 +1101,10 @@
 
         if padding > 0:
             return True, \
-                LE, RE, self.hierarchy.periodic_region_strict(self.center,
+                LE, RE, self.hierarchy.region(self.center,
                 LE-padding, RE+padding)
 
-        return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
+        return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
     def partition_region_3d(self, left_edge, right_edge, padding=0.0,
             rank_ratio = 1):
@@ -1130,7 +1129,7 @@
 
         if padding > 0:
             return True, \
-                LE, RE, self.hierarchy.periodic_region(self.center, LE-padding,
+                LE, RE, self.hierarchy.region(self.center, LE-padding,
                     RE+padding)
 
         return False, LE, RE, self.hierarchy.region(self.center, LE, RE)

diff -r 3aba7ca1ff0bebcbbcd8d1e2a526dea084e0fbdb -r 4b3bf8683aa280ebf1e18b9bea06cb186815e95c yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1134,8 +1134,7 @@
             and np.all(self.region.left_edge <= LE) \
             and np.all(self.region.right_edge >= RE):
             return self.region
-        self.region = data.pf.h.periodic_region(
-            data.center, LE, RE)
+        self.region = data.pf.h.region(data.center, LE, RE)
         return self.region
 
 class TitleCallback(PlotCallback):
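
After this changeset every box selection goes through the one region object, and periodicity lives in the selection machinery rather than in the region type. A toy minimum-image containment test showing how a selector can honor periodic wrapping for a region that straddles the domain edge (hypothetical helper, assuming a unit box):

    import numpy as np

    def in_periodic_region(pos, left_edge, right_edge, domain_width=1.0):
        # Wrap the point into the region's frame, then do a plain box test.
        pos = np.asarray(pos, dtype="float64")
        le = np.asarray(left_edge, dtype="float64")
        re = np.asarray(right_edge, dtype="float64")
        d = (pos - le) % domain_width
        return bool(np.all(d <= (re - le)))

    # A region poking past the right edge of the unit box still picks up
    # points near the left edge:
    assert in_periodic_region([0.05, 0.5, 0.5],
                              [0.9, 0.4, 0.4], [1.1, 0.6, 0.6])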


https://bitbucket.org/yt_analysis/yt/commits/b31d6fa1d649/
Changeset:   b31d6fa1d649
Branch:      yt-3.0
User:        brittonsmith
Date:        2013-09-06 15:15:53
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #88)

Removing periodic_region, periodic_region_strict and region_strict.
Affected #:  7 files

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2216,11 +2216,11 @@
                 self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
+            self._data_source = self.hierarchy.region([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+            self._data_source = self.hierarchy.region([0.5] * 3,
                 LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case where the full box is what we want.
@@ -2306,8 +2306,7 @@
                 np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
-                ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -2504,7 +2503,7 @@
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
         else:
             self._data_source = pf.h.all_data()
         self.padding = padding  # * pf["unitary"] # This should be clevererer
@@ -2600,7 +2599,7 @@
             linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+            self._data_source = pf.h.region([0.] * 3, ds_LE,
                 ds_RE)
         else:
             self._data_source = pf.h.all_data()

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -811,10 +811,10 @@
                     need_per = True
                     break
 
-            if need_per:
-                region = self.pf.h.periodic_region(halo['center'], leftEdge, rightEdge)
-            else:
-                region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
+            # We use the same type of region regardless.  The selection will be
+            # correct, but we need the need_per variable for projection
+            # shifting.
+            region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
 
             # Make projections.
             if not isinstance(axes, types.ListType): axes = list([axes])

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -159,8 +159,7 @@
             # This ds business below has to do with changes made for halo
             # finding on subvolumes and serves no purpose here except
             # compatibility. This is not the best policy, if I'm honest.
-            ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
-                self.right_edge)
+            ds = pf.h.region([0.]*3, self.left_edge, self.right_edge)
             padded, self.LE, self.RE, self.ds = \
             self.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -122,16 +122,6 @@
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
-class ParticleIOHandlerRegionStrict(ParticleIOHandlerRegion):
-    _source_type = "region_strict"
-
-class ParticleIOHandlerPeriodicRegion(ParticleIOHandlerRegion):
-    periodic = True
-    _source_type = "periodic_region"
-
-class ParticleIOHandlerPeriodicRegionStrict(ParticleIOHandlerPeriodicRegion):
-    _source_type = "periodic_region_strict"
-
 class ParticleIOHandlerSphere(ParticleIOHandlerImplemented):
     _source_type = "sphere"
 

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/utilities/answer_testing/boolean_region_tests.py
--- a/yt/utilities/answer_testing/boolean_region_tests.py
+++ b/yt/utilities/answer_testing/boolean_region_tests.py
@@ -13,8 +13,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "AND", re2])
         # re should look like re2.
         x2 = re2['x']
@@ -36,8 +36,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "OR", re2])
         # re should look like re1
         x1 = re1['x']
@@ -59,15 +59,15 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         # Bottom base
-        re3 = self.pf.h.region_strict(five, four, [six[0], six[1], five[2]])
+        re3 = self.pf.h.region(five, four, [six[0], six[1], five[2]])
         # Side
-        re4 = self.pf.h.region_strict(five, [four[0], four[1], five[2]],
+        re4 = self.pf.h.region(five, [four[0], four[1], five[2]],
             [five[0], six[1], six[2]])
         # Last small cube
-        re5 = self.pf.h.region_strict(five, [five[0], four[0], four[2]],
+        re5 = self.pf.h.region(five, [five[0], four[0], four[2]],
             [six[0], five[1], six[2]])
         # re1 NOT re2 should look like re3 OR re4 OR re5
         re = self.pf.h.boolean([re1, "NOT", re2])
@@ -92,8 +92,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "AND", re2])
         # re should look like re2.
         x2 = re2['particle_position_x']
@@ -115,8 +115,8 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         re = self.pf.h.boolean([re1, "OR", re2])
         # re should look like re1
         x1 = re1['particle_position_x']
@@ -138,15 +138,15 @@
         four = 0.4 * domain + self.pf.domain_left_edge
         five = 0.5 * domain + self.pf.domain_left_edge
         six = 0.6 * domain + self.pf.domain_left_edge
-        re1 = self.pf.h.region_strict(five, four, six)
-        re2 = self.pf.h.region_strict(five, five, six)
+        re1 = self.pf.h.region(five, four, six)
+        re2 = self.pf.h.region(five, five, six)
         # Bottom base
-        re3 = self.pf.h.region_strict(five, four, [six[0], six[1], five[2]])
+        re3 = self.pf.h.region(five, four, [six[0], six[1], five[2]])
         # Side
-        re4 = self.pf.h.region_strict(five, [four[0], four[1], five[2]],
+        re4 = self.pf.h.region(five, [four[0], four[1], five[2]],
             [five[0], six[1], six[2]])
         # Last small cube
-        re5 = self.pf.h.region_strict(five, [five[0], four[0], four[2]],
+        re5 = self.pf.h.region(five, [five[0], four[0], four[2]],
             [six[0], five[1], six[2]])
         # re1 NOT re2 should look like re3 OR re4 OR re5
         re = self.pf.h.boolean([re1, "NOT", re2])
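
With four = 0.4, five = 0.5, six = 0.6 in domain units, re1 is the cube [0.4, 0.6)^3 and re2 is [0.5, 0.6)^3, so re1 NOT re2 should decompose exactly into the three boxes re3, re4, re5. A quick NumPy check of that geometry, sampling points rather than using yt objects:

    import numpy as np

    def in_box(p, lo, hi):
        return np.all((p >= lo) & (p < hi), axis=-1)

    p = np.random.RandomState(0).uniform(0.3, 0.7, size=(100000, 3))
    re1 = in_box(p, [0.4] * 3, [0.6] * 3)
    re2 = in_box(p, [0.5] * 3, [0.6] * 3)
    re3 = in_box(p, [0.4] * 3, [0.6, 0.6, 0.5])          # bottom base
    re4 = in_box(p, [0.4, 0.4, 0.5], [0.5, 0.6, 0.6])    # side
    re5 = in_box(p, [0.5, 0.4, 0.4], [0.6, 0.5, 0.6])    # last small cube
    assert np.array_equal(re1 & ~re2, re3 | re4 | re5)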

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -1058,7 +1058,7 @@
         RE[yax] = y[1] * (DRE[yax]-DLE[yax]) + DLE[yax]
         mylog.debug("Dimensions: %s %s", LE, RE)
 
-        reg = self.hierarchy.region_strict(self.center, LE, RE)
+        reg = self.hierarchy.region(self.center, LE, RE)
         return True, reg
 
     def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
@@ -1074,8 +1074,7 @@
             return False, LE, RE, ds
         if not self._distributed and subvol:
             return True, LE, RE, \
-            self.hierarchy.periodic_region_strict(self.center,
-                LE-padding, RE+padding)
+            self.hierarchy.region(self.center, LE-padding, RE+padding)
         elif ytcfg.getboolean("yt", "inline"):
             # At this point, we want to identify the root grid tile to which
             # this processor is assigned.
@@ -1102,10 +1101,10 @@
 
         if padding > 0:
             return True, \
-                LE, RE, self.hierarchy.periodic_region_strict(self.center,
+                LE, RE, self.hierarchy.region(self.center,
                 LE-padding, RE+padding)
 
-        return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
+        return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
     def partition_region_3d(self, left_edge, right_edge, padding=0.0,
             rank_ratio = 1):
@@ -1130,7 +1129,7 @@
 
         if padding > 0:
             return True, \
-                LE, RE, self.hierarchy.periodic_region(self.center, LE-padding,
+                LE, RE, self.hierarchy.region(self.center, LE-padding,
                     RE+padding)
 
         return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
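
All of the partitioning paths now return ordinary regions; only the padding arithmetic distinguishes them. A condensed sketch of that rule (hypothetical helper):

    import numpy as np

    def padded_tile(hierarchy, center, LE, RE, padding):
        LE, RE = np.asarray(LE), np.asarray(RE)
        if padding > 0:
            # Padded tiles overlap their neighbors, so callers must
            # deduplicate; hence the True flag.
            return True, LE, RE, hierarchy.region(center, LE - padding,
                                                  RE + padding)
        return False, LE, RE, hierarchy.region(center, LE, RE)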

diff -r 9e43b18b736d1b738249c93598041b15a7266914 -r b31d6fa1d6494f14d4b8f11308389c42620c0041 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1134,8 +1134,7 @@
             and np.all(self.region.left_edge <= LE) \
             and np.all(self.region.right_edge >= RE):
             return self.region
-        self.region = data.pf.h.periodic_region(
-            data.center, LE, RE)
+        self.region = data.pf.h.region(data.center, LE, RE)
         return self.region
 
 class TitleCallback(PlotCallback):
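
The callback caches its region and rebuilds only when the cached one no longer covers the requested bounds; in sketch form, assuming the same attribute layout as above:

    import numpy as np

    def _get_region(self, data, LE, RE):
        if self.region is not None \
                and np.all(self.region.left_edge <= LE) \
                and np.all(self.region.right_edge >= RE):
            return self.region
        self.region = data.pf.h.region(data.center, LE, RE)
        return self.region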


https://bitbucket.org/yt_analysis/yt/commits/0a57d1a9fb4c/
Changeset:   0a57d1a9fb4c
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-09-06 17:48:46
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #86)

Oct cell count generalization and initial particle smoothing operations
Affected #:  18 files

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,12 +36,12 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
-    _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
@@ -49,7 +49,8 @@
     _domain_offset = 0
     _num_octs = -1
 
-    def __init__(self, base_region, domain, pf):
+    def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.domain = domain
@@ -145,6 +146,28 @@
         if vals is None: return
         return np.asfortranarray(vals)
 
+    def smooth(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
+        if fields is None: fields = []
+        op = cls(nvals, len(fields), 64)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset, self.pf.periodicity)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
@@ -206,8 +229,10 @@
     _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
-    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+                 over_refine_factor = 1):
         # The first attempt at this will not work in parallel.
+        self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files
         self.field_data = YTFieldData()
         self.field_parameters = {}
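
The new over_refine_factor (oref) sets the zone count per oct edge as _num_zones = 1 << oref, so each oct holds (1 << oref)**3 cells; oref = 1 reproduces the old hard-coded 2**3 = 8. For instance:

    for oref in (1, 2, 3):
        nz = 1 << oref            # zones along each oct edge
        print oref, nz, nz ** 3   # -> 1 2 8, 2 4 64, 3 8 512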

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -55,6 +55,7 @@
     domain_id = 2
     _con_args = ("base_region", "sfc_start", "sfc_end", "pf")
     _type_name = 'octree_subset'
+    _num_zones = 2
 
     def __init__(self, base_region, sfc_start, sfc_end, pf):
         self.field_data = YTFieldData()

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,6 +96,7 @@
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None
+    over_refine_factor = 1
 
     def _set_units(self):
         self.units = {}
@@ -154,8 +155,10 @@
 
     def __init__(self, filename, data_style="gadget_binary",
                  additional_fields = (),
-                 unit_base = None, n_ref = 64):
+                 unit_base = None, n_ref = 64,
+                 over_refine_factor = 1):
         self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
         self.storage_filename = None
         if unit_base is not None and "UnitLength_in_cm" in unit_base:
             # We assume this is comoving, because in the absence of comoving
@@ -188,7 +191,8 @@
 
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
@@ -268,11 +272,13 @@
     _particle_coordinates_name = "Coordinates"
     _header_spec = None # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", n_ref = 64):
+    def __init__(self, filename, data_style="OWLS", n_ref = 64,
+                 over_refine_factor = 1):
         self.storage_filename = None
-        super(OWLSStaticOutput, self).__init__(filename, data_style,
-                                               unit_base = None,
-                                               n_ref = n_ref)
+        super(OWLSStaticOutput, self).__init__(
+                               filename, data_style,
+                               unit_base = None, n_ref = n_ref,
+                               over_refine_factor = over_refine_factor)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]
@@ -292,7 +298,8 @@
         self.current_time = hvals["Time_GYR"] * sec_conversion["Gyr"]
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         self.cosmological_simulation = 1
         self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
@@ -364,8 +371,9 @@
                  unit_base = None,
                  cosmology_parameters = None,
                  parameter_file = None,
-                 n_ref = 64):
+                 n_ref = 64, over_refine_factor = 1):
         self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
         self.endian = endian
         self.storage_filename = None
         if domain_left_edge is None:
@@ -438,7 +446,8 @@
                 self.parameters[param] = val
 
         self.current_time = hvals["time"]
-        self.domain_dimensions = np.ones(3, "int32") * 2
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
         if self.parameters.get('bPeriodic', True):
             self.periodicity = (True, True, True)
         else:

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -738,10 +738,11 @@
     file_count = 1
     filename_template = "stream_file"
     n_ref = 64
+    over_refine_factor = 1
 
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
-                      n_ref = 64):
+                      n_ref = 64, over_refine_factor = 1):
     r"""Load a set of particles into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
 
@@ -828,6 +829,7 @@
 
     spf = StreamParticlesStaticOutput(handler)
     spf.n_ref = n_ref
+    spf.over_refine_factor = over_refine_factor
     spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -40,6 +40,8 @@
 cdef struct OctInfo:
     np.float64_t left_edge[3]
     np.float64_t dds[3]
+    np.int64_t ipos[3]
+    np.int32_t level
 
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
@@ -49,6 +51,16 @@
     OctAllocationContainer *next
     Oct *my_octs
 
+cdef struct OctList
+
+cdef struct OctList:
+    OctList *next
+    Oct *o
+
+cdef OctList *OctList_append(OctList *list, Oct *o)
+cdef int OctList_count(OctList *list)
+cdef void OctList_delete(OctList *list)
+
 cdef class OctreeContainer:
     cdef OctAllocationContainer *cont
     cdef OctAllocationContainer **domains
@@ -56,12 +68,13 @@
     cdef oct_visitor_function *fill_func
     cdef int partial_coverage
     cdef int nn[3]
+    cdef np.uint8_t oref
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int max_domain
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef int get_root(self, int ind[3], Oct **o)
-    cdef void neighbors(self, Oct *, Oct **)
+    cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
     # This function must return the offset from global-to-local domains; i.e.,
     # OctAllocationContainer.offset if such a thing exists.
@@ -71,6 +84,7 @@
                         OctVisitorData *data)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)
 
 cdef class SparseOctreeContainer(OctreeContainer):
     cdef OctKey *root_nodes

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -96,8 +96,10 @@
 cdef class OctreeContainer:
 
     def __init__(self, oct_domain_dimensions, domain_left_edge,
-                 domain_right_edge, partial_coverage = 0):
+                 domain_right_edge, partial_coverage = 0,
+                 over_refine = 1):
         # This will just initialize the root mesh octs
+        self.oref = over_refine
         self.partial_coverage = partial_coverage
         cdef int i, j, k, p
         for i in range(3):
@@ -120,6 +122,21 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
+        cdef int i
+        data.index = 0
+        data.last = -1
+        data.global_index = -1
+        for i in range(3):
+            data.pos[i] = -1
+            data.ind[i] = -1
+        data.array = NULL
+        data.dims = 0
+        data.domain = domain_id
+        data.level = -1
+        data.oref = self.oref
+        data.nz = (1 << (data.oref*3))
+
     def __dealloc__(self):
         free_octs(self.cont)
         if self.root_mesh == NULL: return
@@ -185,27 +202,39 @@
         return 0
 
     cdef int get_root(self, int ind[3], Oct **o):
+        cdef int i
+        for i in range(3):
+            if ind[i] < 0 or ind[i] >= self.nn[i]:
+                o[0] = NULL
+                return 1
         o[0] = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        return 1
+        return 0
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
+                  ):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3]
+        cdef int ind[3], level
+        cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
+        cdef int i
         cur = next = NULL
-        cdef int i
+        level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+            ipos[i] = 0
         self.get_root(ind, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
+            level += 1
+            for i in range(3):
+                ipos[i] = (ipos[i] << 1) + ind[i]
             cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
@@ -227,18 +256,22 @@
                 cp[i] -= dds[i]/2.0 # Now centered
             else:
                 cp[i] += dds[i]/2.0
-            # We don't need to change dds[i] as it has been halved from the
-            # oct width, thus making it already the cell width
-            oinfo.dds[i] = dds[i] # Cell width
+            # We don't normally need to change dds[i] as it has been halved
+            # from the oct width, thus making it already the cell width.
+            # But, for some cases where the oref != 1, this needs to be
+            # changed.
+            oinfo.dds[i] = dds[i] / (1 << (self.oref-1)) # Cell width
             oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
+            oinfo.ipos[i] = ipos[i]
+        oinfo.level = level
         return cur
 
     def domain_identify(self, SelectorObject selector):
         cdef np.ndarray[np.uint8_t, ndim=1] domain_mask
         domain_mask = np.zeros(self.max_domain, dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data)
         data.array = domain_mask.data
-        data.domain = -1
         self.visit_all_octs(selector, oct_visitors.identify_octs, &data)
         cdef int i
         domain_ids = []
@@ -250,99 +283,69 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void neighbors(self, Oct* o, Oct* neighbors[27]):
-        #Get 3x3x3 neighbors, although the 1,1,1 oct is the
-        #central one. 
-        #Return an array of Octs
-        cdef np.int64_t curopos[3]
-        cdef np.int64_t curnpos[3]
-        cdef np.int64_t npos[3]
-        cdef int i, j, k, ni, nj, nk, ind[3], nn, dl, skip
-        cdef np.float64_t dds[3], cp[3], pp[3]
+    cdef Oct** neighbors(self, OctInfo *oi, np.int64_t *nneighbors):
         cdef Oct* candidate
-        for i in range(27): neighbors[i] = NULL
         nn = 0
-        raise RuntimeError
-        #for ni in range(3):
-        #    for nj in range(3):
-        #        for nk in range(3):
-        #            if ni == nj == nk == 1:
-        #                neighbors[nn] = o
-        #                nn += 1
-        #                continue
-        #            npos[0] = o.pos[0] + (ni - 1)
-        #            npos[1] = o.pos[1] + (nj - 1)
-        #            npos[2] = o.pos[2] + (nk - 1)
-        #            for i in range(3):
-        #                # Periodicity
-        #                if npos[i] == -1:
-        #                    npos[i] = (self.nn[i]  << o.level) - 1
-        #                elif npos[i] == (self.nn[i] << o.level):
-        #                    npos[i] = 0
-        #                curopos[i] = o.pos[i]
-        #                curnpos[i] = npos[i] 
-        #            # Now we have our neighbor position and a safe place to
-        #            # keep it.  curnpos will be the root index of the neighbor
-        #            # at a given level, and npos will be constant.  curopos is
-        #            # the candidate root at a level.
-        #            candidate = o
-        #            while candidate != NULL:
-        #                if ((curopos[0] == curnpos[0]) and 
-        #                    (curopos[1] == curnpos[1]) and
-        #                    (curopos[2] == curnpos[2])):
-        #                    break
-        #                # This one doesn't meet it, so we pop up a level.
-        #                # First we update our positions, then we update our
-        #                # candidate.
-        #                for i in range(3):
-        #                    # We strip a digit off the right
-        #                    curopos[i] = (curopos[i] >> 1)
-        #                    curnpos[i] = (curnpos[i] >> 1)
-        #                # Now we update to the candidate's parent, which should
-        #                # have a matching position to curopos[]
-        #                # TODO: This has not survived the transition to
-        #                # mostly-stateless Octs!
-        #                raise RuntimeError
-        #                candidate = candidate.parent
-        #            if candidate == NULL:
-        #                # Worst case scenario
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> (o.level))
-        #                candidate = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        #            # Now we have the common root, which may be NULL
-        #            while candidate.level < o.level:
-        #                dl = o.level - (candidate.level + 1)
-        #                for i in range(3):
-        #                    ind[i] = (npos[i] >> dl) & 1
-        #                if candidate.children[cind(ind[0],ind[1],ind[2])] \
-        #                        == NULL:
-        #                    break
-        #                candidate = candidate.children[cind(ind[0],ind[1],ind[2])]
-        #            neighbors[nn] = candidate
-        #            nn += 1
+        # We are going to do a brute-force search here.
+        # This is not the most efficient -- in fact, it's relatively bad.  But
+        # we will attempt to improve it in a future iteration, where we will
+        # grow a stack of parent Octs.
+        # Note that in the first iteration, we will just find the up-to-27
+        # neighbors, including the main oct.
+        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef OctList *olist, *my_list
+        my_list = olist = NULL
+        cdef Oct *cand
+        cdef np.int64_t npos[3], ndim[3]
+        # Now we get our boundaries for this level, so that we can wrap around
+        # if need be.
+        # ndim is the oct dimensions of the level, not the cell dimensions.
+        for i in range(3):
+            ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            ndim[i] = (ndim[i] >> self.oref)
+        for i in range(3):
+            npos[0] = (oi.ipos[0] + (1 - i))
+            if npos[0] < 0: npos[0] += ndim[0]
+            if npos[0] >= ndim[0]: npos[0] -= ndim[0]
+            for j in range(3):
+                npos[1] = (oi.ipos[1] + (1 - j))
+                if npos[1] < 0: npos[1] += ndim[1]
+                if npos[1] >= ndim[1]: npos[1] -= ndim[1]
+                for k in range(3):
+                    npos[2] = (oi.ipos[2] + (1 - k))
+                    if npos[2] < 0: npos[2] += ndim[2]
+                    if npos[2] >= ndim[2]: npos[2] -= ndim[2]
+                    # Now we have our npos, which we just need to find.
+                    # Level 0 gets bootstrapped
+                    for n in range(3):
+                        ind[n] = ((npos[n] >> (oi.level)) & 1)
+                    cand = NULL
+                    self.get_root(ind, &cand)
+                    # We should not get a NULL if we handle periodicity
+                    # correctly, but we might.
+                    if cand == NULL: continue
+                    for level in range(1, oi.level+1):
+                        if cand.children == NULL: break
+                        for n in range(3):
+                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                        ii = cind(ind[0],ind[1],ind[2])
+                        if cand.children[ii] == NULL: break
+                        cand = cand.children[ii]
+                    if cand != NULL:
+                        nfound += 1
+                        olist = OctList_append(olist, cand)
+                        if my_list == NULL: my_list = olist
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def get_neighbor_boundaries(self, oppos):
-        cdef int i, ii
-        cdef np.float64_t ppos[3]
-        for i in range(3):
-            ppos[i] = oppos[i]
-        cdef Oct *main = self.get(ppos)
-        cdef Oct* neighbors[27]
-        self.neighbors(main, neighbors)
-        cdef np.ndarray[np.float64_t, ndim=2] bounds
-        cdef np.float64_t corner[3], size[3]
-        bounds = np.zeros((27,6), dtype="float64")
-        tnp = 0
-        raise RuntimeError
-        for i in range(27):
-            self.oct_bounds(neighbors[i], corner, size)
-            for ii in range(3):
-                bounds[i, ii] = corner[ii]
-                bounds[i, 3+ii] = size[ii]
-        return bounds
+        olist = my_list
+        cdef int noct = OctList_count(olist)
+        cdef Oct **neighbors
+        neighbors = <Oct **> malloc(sizeof(Oct*)*noct)
+        for i in range(noct):
+            neighbors[i] = olist.o
+            olist = olist.next
+        OctList_delete(my_list)
+        nneighbors[0] = noct
+        return neighbors
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -352,11 +355,10 @@
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
         cdef np.ndarray[np.uint8_t, ndim=1] coords
-        coords = np.zeros((num_octs * 8), dtype="uint8")
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
+        coords = np.zeros((num_octs * data.nz), dtype="uint8")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")
 
@@ -367,12 +369,11 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs * 8, 3), dtype="int64")
-        cdef OctVisitorData data
+        coords = np.empty((num_octs * data.nz, 3), dtype="int64")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.icoords_octs, &data)
         return coords
 
@@ -383,13 +384,12 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.empty(num_octs * 8, dtype="int64")
-        cdef OctVisitorData data
+        res = np.empty(num_octs * data.nz, dtype="int64")
         data.array = <void *> res.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.ires_octs, &data)
         return res
 
@@ -400,12 +400,11 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         cdef np.ndarray[np.float64_t, ndim=2] fwidth
-        fwidth = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
+        fwidth = np.empty((num_octs * data.nz, 3), dtype="float64")
         data.array = <void *> fwidth.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fwidth_octs, &data)
         cdef np.float64_t base_dx
         for i in range(3):
@@ -420,13 +419,12 @@
                 int domain_id = -1):
         if num_octs == -1:
             num_octs = selector.count_octs(self, domain_id)
+        cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs * 8, 3), dtype="float64")
-        cdef OctVisitorData data
+        coords = np.empty((num_octs * data.nz, 3), dtype="float64")
         data.array = <void *> coords.data
-        data.index = 0
-        data.domain = domain_id
         self.visit_all_octs(selector, oct_visitors.fcoords_octs, &data)
         cdef int i
         cdef np.float64_t base_dx
@@ -456,8 +454,8 @@
             else:
                 dest = np.zeros(num_cells, dtype=source.dtype, order='C')
         cdef OctVisitorData data
+        self.setup_data(&data, domain_id)
         data.index = offset
-        data.domain = domain_id
         # We only need this so we can continue calculating the offset
         data.dims = dims
         cdef void *p[2]
@@ -474,14 +472,16 @@
         else:
             raise NotImplementedError
         self.visit_all_octs(selector, func, &data)
-        if (data.global_index + 1) * 8 * data.dims > source.size:
+        if (data.global_index + 1) * data.nz * data.dims > source.size:
             print "GLOBAL INDEX RAN AHEAD.",
-            print (data.global_index + 1) * 8 * data.dims - source.size
+            print (data.global_index + 1) * data.nz * data.dims - source.size
             print dest.size, source.size, num_cells
             raise RuntimeError
         if data.index > dest.size:
             print "DEST INDEX RAN AHEAD.",
             print data.index - dest.size
+            print (data.global_index + 1) * data.nz * data.dims, source.size
+            print num_cells
             raise RuntimeError
         if num_cells >= 0:
             return dest
@@ -492,10 +492,8 @@
         # Here's where we grab the masked items.
         ind = np.zeros(self.nocts, 'int64') - 1
         cdef OctVisitorData data
-        data.domain = domain_id
+        self.setup_data(&data, domain_id)
         data.array = ind.data
-        data.index = 0
-        data.last = -1
         self.visit_all_octs(selector, oct_visitors.index_octs, &data)
         return ind
 
@@ -578,6 +576,7 @@
         if parent.children != NULL:
             next = parent.children[cind(ind[0],ind[1],ind[2])]
         else:
+            # This *8 does NOT need to be made generic.
             parent.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 parent.children[i] = NULL
@@ -607,13 +606,12 @@
             file_inds[i] = -1
             cell_inds[i] = 9
         cdef OctVisitorData data
-        data.index = 0
+        self.setup_data(&data, domain_id)
         cdef void *p[3]
         p[0] = levels.data
         p[1] = file_inds.data
         p[2] = cell_inds.data
         data.array = p
-        data.domain = domain_id
         self.visit_all_octs(selector, self.fill_func, &data)
         return levels, cell_inds, file_inds
 
@@ -641,10 +639,9 @@
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = 1
+        self.setup_data(&data, 1)
         self.visit_all_octs(selector, oct_visitors.assign_domain_ind, &data)
-        assert ((data.global_index+1)*8 == data.index)
+        assert ((data.global_index+1)*data.nz == data.index)
 
 cdef int root_node_compare(void *a, void *b) nogil:
     cdef OctKey *ao, *bo
@@ -659,9 +656,11 @@
 
 cdef class SparseOctreeContainer(OctreeContainer):
 
-    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge):
+    def __init__(self, domain_dimensions, domain_left_edge, domain_right_edge,
+                 over_refine = 1):
         cdef int i, j, k, p
         self.partial_coverage = 1
+        self.oref = over_refine
         for i in range(3):
             self.nn[i] = domain_dimensions[i]
         self.max_domain = -1
@@ -807,3 +806,33 @@
                             dest[local_filled + offset] = source[ox,oy,oz]
                             local_filled += 1
         return local_filled
+
+cdef OctList *OctList_append(OctList *olist, Oct *o):
+    cdef OctList *this = olist
+    if this == NULL:
+        this = <OctList *> malloc(sizeof(OctList))
+        this.next = NULL
+        this.o = o
+        return this
+    while this.next != NULL:
+        this = this.next
+    this.next = <OctList*> malloc(sizeof(OctList))
+    this = this.next
+    this.o = o
+    this.next = NULL
+    return this
+
+cdef int OctList_count(OctList *olist):
+    cdef OctList *this = olist
+    cdef int i = 0 # Count the list
+    while this != NULL:
+        i += 1
+        this = this.next
+    return i
+
+cdef void OctList_delete(OctList *olist):
+    cdef OctList *next, *this = olist
+    while this != NULL:
+        next = this.next
+        free(this)
+        this = next
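
The rewritten neighbors() brute-forces the 3x3x3 block of oct positions around (ipos, level), wrapping each coordinate at that level's oct dimensions before walking back down from the root. The position logic, in plain Python:

    def neighbor_positions(ipos, ndim):
        # ipos: the oct's integer position at some level; ndim: octs per
        # side at that level.  Offsets (1, 0, -1) match the (1 - i) loop
        # in the Cython above; modulo handles the periodic wrap.
        found = []
        for di in (1, 0, -1):
            for dj in (1, 0, -1):
                for dk in (1, 0, -1):
                    found.append(((ipos[0] + di) % ndim[0],
                                  (ipos[1] + dj) % ndim[1],
                                  (ipos[2] + dk) % ndim[2]))
        return found  # up to 27 candidates, the central oct included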

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -43,6 +43,10 @@
     int dims
     np.int32_t domain
     np.int8_t level
+    np.int8_t oref # This is the level of overref.  1 => 8 zones, 2 => 64, etc.
+                   # To calculate nzones, 1 << (oref * 3)
+    np.int32_t nz
+                            
 
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
@@ -64,10 +68,13 @@
 cdef oct_visitor_function fill_file_indices_rind
 
 cdef inline int cind(int i, int j, int k):
+    # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.
     return (((i*2)+j)*2+k)
 
 cdef inline int oind(OctVisitorData *data):
-    return (((data.ind[0]*2)+data.ind[1])*2+data.ind[2])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[0]*d)+data.ind[1])*d+data.ind[2])
 
 cdef inline int rind(OctVisitorData *data):
-    return (((data.ind[2]*2)+data.ind[1])*2+data.ind[0])
+    cdef int d = (1 << data.oref)
+    return (((data.ind[2]*d)+data.ind[1])*d+data.ind[0])
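
oind() and rind() generalize the flat zone index to d = 1 << oref zones per edge, in C and Fortran order respectively; Python equivalents:

    def oind(ind, oref=1):           # C (row-major) order
        d = 1 << oref
        return (ind[0] * d + ind[1]) * d + ind[2]

    def rind(ind, oref=1):           # Fortran (column-major) order
        d = 1 << oref
        return (ind[2] * d + ind[1]) * d + ind[0]

    assert oind((1, 1, 1)) == 7      # last of the 8 zones when oref == 1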

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -38,7 +38,7 @@
     if selected == 0: return
     cdef int i
     # There are this many records between "octs"
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.float64_t **p = <np.float64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -50,7 +50,7 @@
     # "last" here tells us the dimensionality of the array.
     if selected == 0: return
     cdef int i
-    cdef np.int64_t index = (data.global_index * 8)*data.dims
+    cdef np.int64_t index = (data.global_index * data.nz)*data.dims
     cdef np.int64_t **p = <np.int64_t**> data.array
     index += oind(data)*data.dims
     for i in range(data.dims):
@@ -75,7 +75,7 @@
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         data.index += 1
-    cdef np.int64_t index = data.index * 8
+    cdef np.int64_t index = data.index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -83,7 +83,7 @@
     if selected == 0: return
     cdef int i
     cdef np.uint8_t *arr = <np.uint8_t *> data.array
-    cdef np.int64_t index = data.global_index * 8
+    cdef np.int64_t index = data.global_index * data.nz
     index += oind(data)
     arr[index] = 1
 
@@ -102,7 +102,7 @@
     cdef np.int64_t *coords = <np.int64_t*> data.array
     cdef int i
     for i in range(3):
-        coords[data.index * 3 + i] = (data.pos[i] << 1) + data.ind[i]
+        coords[data.index * 3 + i] = (data.pos[i] << data.oref) + data.ind[i]
     data.index += 1
 
 cdef void ires_octs(Oct *o, OctVisitorData *data, np.uint8_t selected):
@@ -120,9 +120,9 @@
     cdef np.float64_t *fcoords = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t c, dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
-        c = <np.float64_t> ((data.pos[i] << 1 ) + data.ind[i]) 
+        c = <np.float64_t> ((data.pos[i] << data.oref ) + data.ind[i]) 
         fcoords[data.index * 3 + i] = (c + 0.5) * dx
     data.index += 1
 
@@ -135,7 +135,7 @@
     cdef np.float64_t *fwidth = <np.float64_t*> data.array
     cdef int i
     cdef np.float64_t dx 
-    dx = 1.0 / (2 << data.level)
+    dx = 1.0 / ((1 << data.oref) << data.level)
     for i in range(3):
         fwidth[data.index * 3 + i] = dx
     data.index += 1
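
Cell geometry now folds oref into the width: at oct level L there are (1 << oref) << L cells per unit length along each axis. The same arithmetic in Python:

    def cell_width(level, oref=1):
        return 1.0 / ((1 << oref) << level)

    def cell_center(pos, ind, level, oref=1):
        # pos: the oct's integer position at `level`; ind: the zone index
        # inside the oct, exactly as in fcoords_octs above.
        dx = cell_width(level, oref)
        return [((p << oref) + i + 0.5) * dx for p, i in zip(pos, ind)]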

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -5,7 +5,7 @@
 Affiliation: UC Santa Cruz
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
+Homepage: http://yt-project.org/
 License:
   Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
 
@@ -32,7 +32,7 @@
 from libc.math cimport sqrt
 
 from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 
 cdef extern from "alloca.h":
     void *alloca(int)
@@ -62,7 +62,6 @@
 cdef class ParticleDepositOperation:
     # We assume each will allocate and define their own temporary storage
     cdef public object nvals
-    cdef public int bad_indices
     cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -54,7 +54,6 @@
                      fields = None, int domain_id = -1,
                      int domain_offset = 0):
         cdef int nf, i, j
-        self.bad_indices = 0
         if fields is None:
             fields = []
         nf = len(fields)
@@ -66,7 +65,8 @@
             tarr = fields[i]
             field_pointers[i] = <np.float64_t *> tarr.data
         cdef int dims[3]
-        dims[0] = dims[1] = dims[2] = 2
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
         cdef OctInfo oi
         cdef np.int64_t offset, moff
         cdef Oct *oct
@@ -98,7 +98,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Note that this has to be our local index, not our in-file index.
-            offset = dom_ind[oct.domain_ind - moff] * 8
+            offset = dom_ind[oct.domain_ind - moff] * nz
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -86,7 +86,8 @@
                 sum(d.total_particles.values()) for d in self.data_files)
         pf = self.parameter_file
         self.oct_handler = ParticleOctreeContainer(
-            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
+            [1, 1, 1], pf.domain_left_edge, pf.domain_right_edge,
+            over_refine = pf.over_refine_factor)
         self.oct_handler.n_ref = pf.n_ref
         mylog.info("Allocating for %0.3e particles", self.total_particles)
         # No more than 256^3 in the region finder.
@@ -147,8 +148,9 @@
                 data_files = [self.data_files[i] for i in
                               self.regions.identify_data_files(dobj.selector)]
             base_region = getattr(dobj, "base_region", dobj)
+            oref = self.parameter_file.over_refine_factor
             subset = [ParticleOctreeSubset(base_region, data_files, 
-                        self.parameter_file)]
+                        self.parameter_file, over_refine_factor = oref)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -205,6 +205,7 @@
         cdef int i, j, k, m, n, ind[3]
         cdef Oct *noct
         cdef np.uint64_t prefix1, prefix2
+        # TODO: This does not need to be changed.
         o.children = <Oct **> malloc(sizeof(Oct *)*8)
         for i in range(2):
             for j in range(2):

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_smooth.pxd
--- /dev/null
+++ b/yt/geometry/particle_smooth.pxd
@@ -0,0 +1,94 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, qsort
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from .particle_deposit cimport sph_kernel, gind
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef struct NeighborList
+cdef struct NeighborList:
+    np.int64_t pn       # Particle number
+    np.float64_t r2     # radius**2
+
+cdef inline np.float64_t r2dist(np.float64_t ppos[3],
+                                np.float64_t cpos[3],
+                                np.float64_t DW[3],
+                                bint periodicity[3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if (DR > DW[i]/2.0):
+            DR -= DW[i]/2.0
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]/2.0
+        r2 += DR * DR
+    return r2
+
+cdef class ParticleSmoothOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef public object nvals
+    cdef np.float64_t DW[3]
+    cdef int nfields
+    cdef int maxn
+    cdef int curn
+    cdef bint periodicity[3]
+    cdef np.int64_t *doffs
+    cdef np.int64_t *pinds
+    cdef np.int64_t *pcounts
+    cdef np.float64_t *ppos
+    # Note that we are preallocating here, so this is *not* threadsafe.
+    cdef NeighborList *neighbors
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset)
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3])
+    cdef void neighbor_reset(self)
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3])
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields)
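
For reference, the standard minimum-image convention wraps each separation into [-DW/2, DW/2] by shifting by the full domain width; a NumPy sketch that also honors the per-axis periodicity flags (the Cython inline above shifts by DW/2 and wraps unconditionally):

    import numpy as np

    def r2dist(ppos, cpos, DW, periodicity):
        DR = np.asarray(ppos, dtype="float64") - np.asarray(cpos, dtype="float64")
        for i in range(3):
            if not periodicity[i]:
                continue
            if DR[i] > DW[i] / 2.0:
                DR[i] -= DW[i]       # shift by the full domain width
            elif DR[i] < -DW[i] / 2.0:
                DR[i] += DW[i]
        return float(np.dot(DR, DR))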

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/particle_smooth.pyx
--- /dev/null
+++ b/yt/geometry/particle_smooth.pyx
@@ -0,0 +1,360 @@
+"""
+Particle smoothing in cells
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free, realloc
+cimport cython
+from libc.math cimport sqrt
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, OctInfo
+
+cdef int Neighbor_compare(void *on1, void *on2) nogil:
+    cdef NeighborList *n1, *n2
+    n1 = <NeighborList *> on1
+    n2 = <NeighborList *> on2
+    # Note that we set this up so that "greatest" evaluates to the *end* of the
+    # list, so we can do standard radius comparisons.
+    if n1.r2 < n2.r2:
+        return -1
+    elif n1.r2 == n2.r2:
+        return 0
+    else:
+        return 1
+
+cdef class ParticleSmoothOperation:
+    def __init__(self, nvals, nfields, max_neighbors):
+        # This is the set of cells, in grids, blocks or octs, we are handling.
+        cdef int i
+        self.nvals = nvals 
+        self.nfields = nfields
+        self.maxn = max_neighbors
+        self.neighbors = <NeighborList *> malloc(
+            sizeof(NeighborList) * self.maxn)
+        self.neighbor_reset()
+
+    def initialize(self, *args):
+        raise NotImplementedError
+
+    def finalize(self, *args):
+        raise NotImplementedError
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_octree(self, OctreeContainer octree,
+                     np.ndarray[np.int64_t, ndim=1] dom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None, int domain_id = -1,
+                     int domain_offset = 0,
+                     periodicity = (True, True, True)):
+        # This will be a several-step operation.
+        #
+        # We first take all of our particles and assign them to Octs.  If they
+        # are not in an Oct, we will assume they are out of bounds.  Note that
+        # this means that if we have loaded neighbor particles for which an Oct
+        # does not exist, we are going to be discarding them -- so sparse
+        # octrees will need to ensure that neighbor octs *exist*.  Particles
+        # will be assigned in a new NumPy array.  Note that this incurs
+        # overhead, but reduces complexity as we will now be able to use
+        # argsort.
+        #
+        # After the particles have been assigned to Octs, we process each Oct
+        # individually.  We will do this by calling "get" for the *first*
+        # particle in each set of Octs in the sorted list.  After this, we get
+        # neighbors for each Oct.
+        #
+        # Now, with the set of neighbors (and thus their indices) we allocate
+        # an array of particles and their fields, fill these in, and call our
+        # process function.
+        #
+        # This is not terribly efficient -- for starters, the neighbor function
+        # is not the most efficient yet.  We will also need to handle some
+        # mechanism of an expandable array for holding pointers to Octs, so
+        # that we can deal with >27 neighbors.  As I write this comment,
+        # neighbors() only returns 27 neighbors.
+        cdef int nf, i, j, dims[3], n
+        cdef np.float64_t **field_pointers, *field_vals, pos[3], *ppos, dds[3]
+        cdef int nsize = 0
+        cdef np.int64_t *nind = NULL
+        cdef OctInfo oi
+        cdef Oct *oct, **neighbors = NULL
+        cdef np.int64_t nneighbors, numpart, offset, moff, local_ind
+        cdef np.int64_t *doffs, *pinds, *pcounts, poff
+        cdef np.ndarray[np.int64_t, ndim=1] pind, doff, pdoms, pcount
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        dims[0] = dims[1] = dims[2] = (1 << octree.oref)
+        cdef int nz = dims[0] * dims[1] * dims[2]
+        numpart = positions.shape[0]
+        # pcount is the number of particles per oct.
+        pcount = np.zeros_like(dom_ind)
+        # doff is the offset to a given oct in the sorted particles.
+        doff = np.zeros_like(dom_ind) - 1
+        moff = octree.get_domain_offset(domain_id + domain_offset)
+        # pdoms points particles at their octs.  So the value in this array, for
+        # a given index, is the local oct index.
+        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        nf = len(fields)
+        if fields is None:
+            fields = []
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        for i in range(3):
+            self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+            self.periodicity[i] = periodicity[i]
+        for i in range(positions.shape[0]):
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = octree.get(pos)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            # Note that this has to be our local index, not our in-file index.
+            # This is the particle count, which we'll use once we have sorted
+            # the particles to calculate the offsets into each oct's particles.
+            offset = oct.domain_ind - moff
+            pcount[offset] += 1
+            pdoms[i] = offset # We store the *actual* offset.
+        # Now we have oct assignments.  Let's sort them.
+        # Note that what we will be providing to our processing functions will
+        # actually be indirectly-sorted fields.  This preserves memory at the
+        # expense of additional pointer lookups.
+        pind = np.argsort(pdoms)
+        pind = np.asarray(pind, dtype='int64', order='C')
+        # So what this means is that we now have all the oct-0 particle indices
+        # in order, then the oct-1, etc etc.
+        # This now gives us the indices to the particles for each domain.
+        for i in range(positions.shape[0]):
+            # This value, poff, is the index of the particle in the *unsorted*
+            # arrays.
+            poff = pind[i] 
+            offset = pdoms[poff] 
+            # If we have yet to assign the starting index to this oct, we do so
+            # now.
+            if doff[offset] < 0: doff[offset] = i
+        # Now doff is full of offsets to the first entry in the pind that
+        # refers to that oct's particles.
+        ppos = <np.float64_t *> positions.data
+        doffs = <np.int64_t*> doff.data
+        pinds = <np.int64_t*> pind.data
+        pcounts = <np.int64_t*> pcount.data
+        nsize = 27
+        nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
+        for i in range(doff.shape[0]):
+            # Nothing assigned.
+            if doff[i] < 0: continue
+            # The first particle assigned to this oct should be the one we
+            # want.
+            poff = pind[doff[i]]
+            for j in range(3):
+                pos[j] = positions[poff, j]
+            oct = octree.get(pos, &oi)
+            if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
+                continue
+            offset = dom_ind[oct.domain_ind - moff] * nz
+            neighbors = octree.neighbors(&oi, &nneighbors)
+            # Now we have all our neighbors.  And, we should be set for what
+            # else we need to do.
+            if nneighbors > nsize:
+                nind = <np.int64_t *> realloc(
+                    nind, sizeof(np.int64_t)*nneighbors)
+                nsize = nneighbors
+            for j in range(nneighbors):
+                nind[j] = neighbors[j].domain_ind - moff
+                for n in range(j):
+                    if nind[j] == nind[n]:
+                        nind[j] = -1
+                    break
+            # This is allocated by the neighbors function, so we deallocate it.
+            free(neighbors)
+            self.neighbor_process(dims, oi.left_edge, oi.dds,
+                         ppos, field_pointers, nneighbors, nind, doffs,
+                         pinds, pcounts, offset)
+        if nind != NULL:
+            free(nind)
+        
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def process_grid(self, gobj,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None):
+        raise NotImplementedError
+
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        raise NotImplementedError
+
+    cdef void neighbor_reset(self):
+        self.curn = 0
+        for i in range(self.maxn):
+            self.neighbors[i].pn = -1
+            self.neighbors[i].r2 = 1e300
+
+    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
+                            np.float64_t cpos[3]):
+        cdef NeighborList *cur
+        cdef int i
+        # _c means candidate (what we're evaluating)
+        # _o means other (the item in the list)
+        cdef np.float64_t r2_c, r2_o
+        cdef np.int64_t pn_c, pn_o
+        # If we're less than the maximum number of neighbors, we simply append.
+        # After that, we will sort, and then only compare against the rightmost
+        # entries.
+        if self.curn < self.maxn:
+            cur = &self.neighbors[self.curn]
+            cur.pn = pn
+            cur.r2 = r2dist(ppos, cpos, self.DW, self.periodicity)
+            self.curn += 1
+            if self.curn == self.maxn:
+                # This time we sort it, so that future insertions will be able
+                # to be done in order.
+                qsort(self.neighbors, self.curn, sizeof(NeighborList), 
+                      Neighbor_compare)
+            return
+        # This will go (curn - 1) through 0.
+        r2_c = r2dist(ppos, cpos, self.DW, self.periodicity)
+        pn_c = pn
+        for i in range((self.curn - 1), -1, -1):
+            # First we evaluate against i.  If our candidate radius is greater
+            # than the one we're inspecting, we quit.
+            cur = &self.neighbors[i]
+            r2_o = cur.r2
+            pn_o = cur.pn
+            if r2_c >= r2_o:
+                break
+            # Now we know we need to swap them.  First we assign our candidate
+            # values to cur.
+            cur.r2 = r2_c
+            cur.pn = pn_c
+            if i + 1 >= self.maxn:
+                continue # No swapping
+            cur = &self.neighbors[i + 1]
+            cur.r2 = r2_o
+            cur.pn = pn_o
+        # At this point, we've evaluated all the particles and we should have a
+        # sorted set of values.  So, we're done.
+
+    cdef void neighbor_find(self,
+                            np.int64_t nneighbors,
+                            np.int64_t *nind,
+                            np.int64_t *doffs,
+                            np.int64_t *pcounts,
+                            np.int64_t *pinds,
+                            np.float64_t *ppos,
+                            np.float64_t cpos[3]
+                            ):
+        # We are now given the number of neighbors, the indices into the
+        # domains for them, and the number of particles for each.
+        cdef int ni, i, j
+        cdef np.int64_t offset, pn, pc
+        cdef np.float64_t pos[3]
+        self.neighbor_reset()
+        for ni in range(nneighbors):
+            if nind[ni] == -1: continue
+            offset = doffs[nind[ni]]
+            pc = pcounts[nind[ni]]
+            for i in range(pc):
+                pn = pinds[offset + i]
+                for j in range(3):
+                    pos[j] = ppos[pn * 3 + j]
+                self.neighbor_eval(pn, pos, cpos)
+
+    cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
+                               np.float64_t dds[3], np.float64_t *ppos,
+                               np.float64_t **fields, np.int64_t nneighbors,
+                               np.int64_t *nind, np.int64_t *doffs,
+                               np.int64_t *pinds, np.int64_t *pcounts,
+                               np.int64_t offset):
+        # Note that we assume that fields[0] == smoothing length in the native
+        # units supplied.  We can now iterate over every cell in the block and
+        # every particle to find the nearest, keeping a sorted neighbor list
+        # that acts as a simple priority queue.
+        cdef int i, j, k
+        cdef np.float64_t cpos[3]
+        cpos[0] = left_edge[0] + 0.5*dds[0]
+        for i in range(dim[0]):
+            cpos[1] = left_edge[1] + 0.5*dds[1]
+            for j in range(dim[1]):
+                cpos[2] = left_edge[2] + 0.5*dds[2]
+                for k in range(dim[2]):
+                    self.neighbor_find(nneighbors, nind, doffs, pcounts,
+                        pinds, ppos, cpos)
+                    # Now we have all our neighbors in our neighbor list.
+                    self.process(offset, i, j, k, dim, cpos, fields)
+                    cpos[2] += dds[2]
+                cpos[1] += dds[1]
+            cpos[0] += dds[0]
+
+
+cdef class SimpleNeighborSmooth(ParticleSmoothOperation):
+    cdef np.float64_t **fp
+    cdef public object vals
+    def initialize(self):
+        cdef int i
+        if self.nfields < 4:
+            # We need at least four fields -- the smoothing length, the
+            # particle mass, the density, and one field to smooth -- to
+            # operate.
+            raise RuntimeError
+        cdef np.ndarray tarr
+        self.fp = <np.float64_t **> malloc(
+            sizeof(np.float64_t *) * self.nfields)
+        self.vals = []
+        for i in range(self.nfields):
+            tarr = np.zeros(self.nvals, dtype="float64", order="F")
+            self.vals.append(tarr)
+            self.fp[i] = <np.float64_t *> tarr.data
+
+    def finalize(self):
+        free(self.fp)
+        return self.vals
+
+    @cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef void process(self, np.int64_t offset, int i, int j, int k,
+                      int dim[3], np.float64_t cpos[3], np.float64_t **fields):
+        # We have our i, j, k for our cell, as well as the cell position.
+        # We also have a list of neighboring particles with particle numbers.
+        cdef int n, fi
+        cdef np.float64_t weight, r2, val
+        cdef np.int64_t pn
+        for n in range(self.curn):
+            # No normalization for the moment.
+            # fields[0] is the smoothing length.
+            r2 = self.neighbors[n].r2
+            pn = self.neighbors[n].pn
+            # Smoothing kernel weight function
+            weight = sph_kernel(sqrt(r2) / fields[0][pn])
+            # Mass of the particle times the value divided by the Density
+            for fi in range(self.nfields - 3):
+                val = fields[1][pn] * fields[fi + 3][pn]/fields[2][pn]
+                self.fp[fi + 3][gind(i,j,k,dim) + offset] = val * weight
+        return
+
+simple_neighbor_smooth = SimpleNeighborSmooth

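The indirect-sort bookkeeping above can be sketched in pure NumPy; the names
pdoms, pind, doff and pcount mirror the Cython arrays, while the
particle-to-oct assignments here are made up for illustration:

    import numpy as np

    # Each particle is assigned to an oct; this mirrors pdoms.
    pdoms = np.array([2, 0, 1, 0, 2, 1, 0], dtype="int64")

    # Indirect sort: pind[i] is the index, in the *unsorted* arrays, of the
    # i-th particle once particles are grouped by oct.
    pind = np.argsort(pdoms).astype("int64")

    # doff[o] is the offset into pind of the first particle belonging to
    # oct o, and pcount[o] is how many particles that oct owns.
    n_octs = pdoms.max() + 1
    doff = np.full(n_octs, -1, dtype="int64")
    pcount = np.bincount(pdoms, minlength=n_octs).astype("int64")
    for i, poff in enumerate(pind):
        if doff[pdoms[poff]] < 0:
            doff[pdoms[poff]] = i

    # All particles of oct o are then pind[doff[o]:doff[o] + pcount[o]].
    for o in range(n_octs):
        print(o, pind[doff[o]:doff[o] + pcount[o]])

Nothing is copied or re-ordered in memory; only the index array is sorted,
which is the memory-for-pointer-lookups trade-off described in the comments.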
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -40,6 +40,9 @@
                         oct_visitor_function *func,
                         OctVisitorData *data,
                         int visit_covered = ?)
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k)
     cdef int select_grid(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3],
                                np.int32_t level, Oct *o = ?) nogil

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -157,16 +157,13 @@
 
     def count_octs(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.last = -1
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_octs, &data)
         return data.index
 
     def count_oct_cells(self, OctreeContainer octree, int domain_id = -1):
         cdef OctVisitorData data
-        data.index = 0
-        data.domain = domain_id
+        octree.setup_data(&data, domain_id)
         octree.visit_all_octs(self, oct_visitors.count_total_cells, &data)
         return data.index
 
@@ -230,6 +227,10 @@
                         if root.children != NULL:
                             ch = root.children[cind(i, j, k)]
                         if iter == 1 and next_level == 1 and ch != NULL:
+                            # Note that data.pos is always going to be the
+                            # position of the Oct -- it is *not* always going
+                            # to be the same as the position of the cell under
+                            # investigation.
                             data.pos[0] = (data.pos[0] << 1) + i
                             data.pos[1] = (data.pos[1] << 1) + j
                             data.pos[2] = (data.pos[2] << 1) + k
@@ -242,21 +243,60 @@
                             data.pos[2] = (data.pos[2] >> 1)
                             data.level -= 1
                         elif this_level == 1:
-                            selected = self.select_cell(spos, sdds)
-                            if ch != NULL:
-                                selected *= self.overlap_cells
                             data.global_index += increment
                             increment = 0
-                            data.ind[0] = i
-                            data.ind[1] = j
-                            data.ind[2] = k
-                            func(root, data, selected)
+                            self.visit_oct_cells(data, root, ch, spos, sdds,
+                                                 func, i, j, k)
                         spos[2] += sdds[2]
                     spos[1] += sdds[1]
                 spos[0] += sdds[0]
             this_level = 0 # We turn this off for the second pass.
             iter += 1
 
+    cdef void visit_oct_cells(self, OctVisitorData *data, Oct *root, Oct *ch,
+                              np.float64_t spos[3], np.float64_t sdds[3],
+                              oct_visitor_function *func, int i, int j, int k):
+        # We can short-circuit the whole process if data.oref == 1.
+        # This saves us some funny-business.
+        cdef int selected
+        if data.oref == 1:
+            selected = self.select_cell(spos, sdds)
+            if ch != NULL:
+                selected *= self.overlap_cells
+            # data.ind refers to the cell, not to the oct.
+            data.ind[0] = i
+            data.ind[1] = j
+            data.ind[2] = k
+            func(root, data, selected)
+            return
+        # Okay, now that we've got that out of the way, we have to do some
+        # other checks here.  In this case, spos[] is the position of the
+        # center of a *possible* oct child, which means it is the center of a
+        # cluster of cells.  That cluster might have 1, 8, 64, ... cells in it.
+        # But, we can figure it out by calculating the cell dds.
+        cdef np.float64_t dds[3], pos[3]
+        cdef int ci, cj, ck
+        cdef int nr = (1 << (data.oref - 1))
+        for ci in range(3):
+            dds[ci] = sdds[ci] / nr
+        # Bootstrap at the first index.
+        pos[0] = (spos[0] - sdds[0]/2.0) + dds[0] * 0.5
+        for ci in range(nr):
+            pos[1] = (spos[1] - sdds[1]/2.0) + dds[1] * 0.5
+            for cj in range(nr):
+                pos[2] = (spos[2] - sdds[2]/2.0) + dds[2] * 0.5
+                for ck in range(nr):
+                    selected = self.select_cell(pos, dds)
+                    if ch != NULL:
+                        selected *= self.overlap_cells
+                    data.ind[0] = ci + i * nr
+                    data.ind[1] = cj + j * nr
+                    data.ind[2] = ck + k * nr
+                    func(root, data, selected)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)

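The sub-cell iteration in visit_oct_cells amounts to the following pure-Python
sketch, assuming spos and sdds are the center and width of the candidate child
and oref is the over-refinement factor (the helper name is illustrative):

    def sub_cell_centers(spos, sdds, oref):
        # nr cells along each axis inside this child; oref == 1 gives one cell.
        nr = 1 << (oref - 1)
        dds = [s / nr for s in sdds]
        centers = []
        # Start half a cell in from the low edge of the child.
        pos0 = [spos[ax] - sdds[ax] / 2.0 + dds[ax] * 0.5 for ax in range(3)]
        for ci in range(nr):
            for cj in range(nr):
                for ck in range(nr):
                    centers.append((pos0[0] + ci * dds[0],
                                    pos0[1] + cj * dds[1],
                                    pos0[2] + ck * dds[2]))
        return centers

    # For oref == 2, an oct child of width 1.0 centered at the origin holds
    # 2**3 == 8 cells of width 0.5 each.
    print(sub_cell_centers((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), 2))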
diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -43,6 +43,15 @@
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd",
                          "yt/geometry/particle_deposit.pxd"])
+    config.add_extension("particle_smooth", 
+                ["yt/geometry/particle_smooth.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd",
+                         "yt/geometry/particle_deposit.pxd",
+                         "yt/geometry/particle_smooth.pxd"])
     config.add_extension("fake_octree", 
                 ["yt/geometry/fake_octree.pyx"],
                 include_dirs=["yt/utilities/lib/"],

diff -r b31d6fa1d6494f14d4b8f11308389c42620c0041 -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -59,6 +59,35 @@
         v = np.bincount(bi.astype("int64"))
         yield assert_equal, v.max() <= n_ref, True
 
+def test_particle_overrefine():
+    np.random.seed(int(0x4d3d3d3))
+    pos = []
+    data = {}
+    bbox = []
+    for i, ax in enumerate('xyz'):
+        DW = DRE[i] - DLE[i]
+        LE = DLE[i]
+        data["particle_position_%s" % ax] = \
+            np.random.normal(0.5, scale=0.05, size=(NPART)) * DW + LE
+        bbox.append( [DLE[i], DRE[i]] )
+    bbox = np.array(bbox)
+    _attrs = ('icoords', 'fcoords', 'fwidth', 'ires')
+    for n_ref in [16, 32, 64, 512, 1024]:
+        pf1 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
+        dd1 = pf1.h.all_data()
+        v1 = dict((a, getattr(dd1, a)) for a in _attrs)
+        cv1 = dd1["CellVolumeCode"].sum(dtype="float64")
+        for over_refine in [1, 2, 3]:
+            f = 1 << (3*(over_refine-1))
+            pf2 = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref,
+                                over_refine_factor = over_refine)
+            dd2 = pf2.h.all_data()
+            v2 = dict((a, getattr(dd2, a)) for a in _attrs)
+            for a in sorted(v1):
+                yield assert_equal, v1[a].size * f, v2[a].size
+            cv2 = dd2["CellVolumeCode"].sum(dtype="float64")
+            yield assert_equal, cv1, cv2
+
 if __name__=="__main__":
     for i in test_add_particles_random():
         i[0](*i[1:])

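The factor f = 1 << (3*(over_refine-1)) in this test encodes the expectation
that each oct carries (2**over_refine)**3 cells rather than the default 8, so
the per-cell arrays grow by f while the summed cell volume stays fixed. A
quick check of that arithmetic:

    base_cells = 8  # cells per oct at over_refine == 1
    for over_refine in (1, 2, 3):
        f = 1 << (3 * (over_refine - 1))
        cells = (2 ** over_refine) ** 3
        assert cells == base_cells * f
        # f times as many cells, each with 1/f the volume: total is unchanged.
        print(over_refine, f, cells)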

https://bitbucket.org/yt_analysis/yt/commits/a5b7d8a34375/
Changeset:   a5b7d8a34375
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-06 23:28:33
Summary:     For deposition fields (in *any* frontend) that do not supply a fields argument,
we default to an empty list.  This fixes the three places where deposit()
makes an appearance.

Closes #654.
Affected #:  2 files

diff -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 -r a5b7d8a34375cddfe6337614947dd3f605adecf0 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -127,6 +127,7 @@
 
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
+        if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
@@ -148,6 +149,7 @@
 
     def smooth(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
+        if fields is None: fields = []
         cls = getattr(particle_smooth, "%s_smooth" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)

diff -r 0a57d1a9fb4c3f0c7c8bf93d277f33f81c7c3e98 -r a5b7d8a34375cddfe6337614947dd3f605adecf0 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -166,6 +166,7 @@
 
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
+        if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)

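The fields = None guard is the usual Python idiom for defaulting to a fresh
list; a mutable default such as fields=[] would be shared across calls. A
minimal illustration with hypothetical function names:

    def deposit_bad(positions, fields=[]):
        # The same list object is reused on every call, so state leaks
        # between calls.
        fields.append("density")
        return fields

    def deposit_good(positions, fields=None):
        if fields is None:
            fields = []  # a fresh list per call
        fields.append("density")
        return fields

    assert deposit_bad(None) == ["density"]
    assert deposit_bad(None) == ["density", "density"]  # surprise!
    assert deposit_good(None) == ["density"]
    assert deposit_good(None) == ["density"]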

https://bitbucket.org/yt_analysis/yt/commits/27232b239b23/
Changeset:   27232b239b23
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 20:29:00
Summary:     Merging from yt-2.x
Affected #:  47 files

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -17,3 +17,4 @@
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -419,7 +419,7 @@
 echo "be installing ZeroMQ"
 
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_0MQ}
+get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
 
 echo
@@ -877,6 +877,11 @@
 mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
 echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
 echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
 do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -54,8 +54,8 @@
 import glob
 
 from yt.funcs import *
-from yt.utilities.pykdtree import KDTree
-import yt.utilities.pydot as pydot
+from yt.extern.pykdtree import KDTree
+import yt.extern.pydot as pydot
 
 # We don't currently use this, but we may again find a use for it in the
 # future.

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -36,7 +36,7 @@
     HaloProfiler
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
-import yt.utilities.pydot as pydot
+import yt.extern.pydot as pydot
 from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -1254,7 +1254,7 @@
                 mylog.error("Output directory exists, but is not a directory: %s." % my_output_dir)
                 raise IOError(my_output_dir)
         else:
-            os.mkdir(my_output_dir)
+            os.makedirs(my_output_dir)
 
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -58,8 +58,19 @@
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
-    valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
-            else False for arg in args]
+    valid_file = []
+    for argno, arg in enumerate(args):
+        if isinstance(arg, types.StringTypes):
+            if os.path.exists(arg):
+                valid_file.append(True)
+            else:
+                if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
+                    valid_file.append(True)
+                    args[argno] = os.path.join(ytcfg.get("yt", "test_data_dir"), arg)
+                else:
+                    valid_file.append(False)
+        else:
+            valid_file.append(False)
     if not any(valid_file):
         try:
             from yt.data_objects.time_series import TimeSeriesData

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r 27232b239b23b4cc2469ea151c495ea93238fb01 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -347,6 +347,7 @@
         # Figure out the starting and stopping times and redshift.
         self._calculate_simulation_bounds()
         # Get all possible datasets.
+        self.all_time_outputs = []
         self._get_all_outputs(find_outputs=find_outputs)
         
         self.print_key_parameters()

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/03c04136cbc0/
Changeset:   03c04136cbc0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 20:30:39
Summary:     Check that the supplied path is actually a file in ART.
Affected #:  1 file

diff -r 27232b239b23b4cc2469ea151c495ea93238fb01 -r 03c04136cbc0c38251dce8c371c5ef06c98425e7 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -433,6 +433,7 @@
         """
         f = ("%s" % args[0])
         prefix, suffix = filename_pattern['amr']
+        if not os.path.isfile(f): return False
         with open(f, 'rb') as fh:
             try:
                 amr_header_vals = read_attrs(fh, amr_header_struct, '>')


https://bitbucket.org/yt_analysis/yt/commits/0db2b82b59f4/
Changeset:   0db2b82b59f4
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-09-09 20:44:08
Summary:     Merging and resolving a conflict.
Affected #:  1 file

diff -r 03c04136cbc0c38251dce8c371c5ef06c98425e7 -r 0db2b82b59f454b78845d24585fc723dd4db130b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1112,16 +1112,18 @@
         type = self._plot_type
         if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
+            if weight is not None:
+                weight = weight.replace(' ', '_')
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
         for k, v in self.plots.iteritems():
             if isinstance(k, types.TupleType):
                 k = k[1]
             if axis:
-                n = "%s_%s_%s_%s" % (name, type, axis, k)
+                n = "%s_%s_%s_%s" % (name, type, axis, k.replace(' ', '_'))
             else:
                 # for cutting planes
-                n = "%s_%s_%s" % (name, type, k)
+                n = "%s_%s_%s" % (name, type, k.replace(' ', '_'))
             if weight:
                 if isinstance(weight, tuple):
                     weight = weight[1]
@@ -1848,8 +1850,17 @@
 class WindowPlotMPL(ImagePlotMPL):
     def __init__(
             self, data, cbname, cmap, extent, aspect, zlim, size, fontsize,
-                figure, axes, cax):
-        fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
+            figure, axes, cax):
+        self._draw_colorbar = True
+        self._draw_axes = True
+        self._cache_layout(size, fontsize)
+
+        # Make room for a colorbar
+        self.input_size = size
+        self.fsize = [size[0] + self._cbar_inches[self._draw_colorbar], size[1]]
+
+        # Compute layout
+        axrect, caxrect = self._get_best_layout(fontsize)
         if np.any(np.array(axrect) < 0):
             msg = 'The axis ratio of the requested plot is very narrow. ' \
                   'There is a good chance the plot will not look very good, ' \
@@ -1859,7 +1870,7 @@
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(
-            self, fsize, axrect, caxrect, zlim, figure, axes, cax)
+            self, self.fsize, axrect, caxrect, zlim, figure, axes, cax)
         self._init_image(data, cbname, cmap, extent, aspect)
         self.image.axes.ticklabel_format(scilimits=(-2,3))
         if cbname == 'linear':
@@ -1867,31 +1878,74 @@
             self.cb.formatter.set_powerlimits((-2,3))
             self.cb.update_ticks()
 
-    def _get_best_layout(self, size, fontsize=18):
-        aspect = 1.0*size[0]/size[1]
-        fontscale = fontsize / 18.0
+    def _toggle_axes(self, choice):
+        self._draw_axes = choice
+        self.axes.get_xaxis().set_visible(choice)
+        self.axes.get_yaxis().set_visible(choice)
+        axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
 
-        # add room for a colorbar
-        cbar_inches = fontscale*0.7
-        newsize = [size[0] + cbar_inches, size[1]]
+    def _toggle_colorbar(self, choice):
+        self._draw_colorbar = choice
+        self.cax.set_visible(choice)
+        self.fsize = [self.input_size[0] + self._cbar_inches[choice], self.input_size[1]]
+        axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
+
+    def hide_axes(self):
+        self._toggle_axes(False)
+        return self
+
+    def show_axes(self):
+        self._toggle_axes(True)
+        return self
+
+    def hide_colorbar(self):
+        self._toggle_colorbar(False)
+        return self
+
+    def show_colorbar(self):
+        self._toggle_colorbar(True)
+        return self
+
+    def _cache_layout(self, size, fontsize):
+        self._cbar_inches = {}
+        self._text_buffx = {}
+        self._text_bottomy = {}
+        self._text_topy = {}
+
+        self._aspect = 1.0*size[0]/size[1]
+        self._fontscale = fontsize / 18.0
+
+        # Leave room for a colorbar, if we are drawing it.
+        self._cbar_inches[True] = self._fontscale*0.7
+        self._cbar_inches[False] = 0
 
         # add buffers for text, and a bit of whitespace on top
-        text_buffx = fontscale * 1.0/(newsize[0])
-        text_bottomy = fontscale * 0.7/size[1]
-        text_topy = fontscale * 0.3/size[1]
+        self._text_buffx[True] = self._fontscale * 1.0/(size[0] + self._cbar_inches[True])
+        self._text_bottomy[True] = self._fontscale * 0.7/size[1]
+        self._text_topy[True] = self._fontscale * 0.3/size[1]
 
+        # No buffer for text if we're not drawing axes
+        self._text_buffx[False] = 0
+        self._text_bottomy[False] = 0
+        self._text_topy[False] = 0
+
+    def _get_best_layout(self, fontsize=18):
         # calculate how much room the colorbar takes
-        cbar_frac = cbar_inches/newsize[0]
+        cbar_frac = self._cbar_inches[self._draw_colorbar]/self.fsize[0]
 
         # Calculate y fraction, then use to make x fraction.
-        yfrac = 1.0-text_bottomy-text_topy
-        ysize = yfrac*size[1]
-        xsize = aspect*ysize
-        xfrac = xsize/newsize[0]
+        yfrac = 1.0-self._text_bottomy[self._draw_axes]-self._text_topy[self._draw_axes]
+        ysize = yfrac*self.fsize[1]
+        xsize = self._aspect*ysize
+        xfrac = xsize/self.fsize[0]
 
         # Now make sure it all fits!
-        xbig = xfrac + text_buffx + 2.0*cbar_frac
-        ybig = yfrac + text_bottomy + text_topy
+        xbig = xfrac + self._text_buffx[self._draw_axes] + 2.0*cbar_frac
+        ybig = yfrac + self._text_bottomy[self._draw_axes] + self._text_topy[self._draw_axes]
 
         if xbig > 1:
             xsize /= xbig
@@ -1899,9 +1953,20 @@
         if ybig > 1:
             xsize /= ybig
             ysize /= ybig
-        xfrac = xsize/newsize[0]
-        yfrac = ysize/newsize[1]
+        xfrac = xsize/self.fsize[0]
+        yfrac = ysize/self.fsize[1]
 
-        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
-        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
-        return newsize, axrect, caxrect
+        axrect = (
+            self._text_buffx[self._draw_axes],
+            self._text_bottomy[self._draw_axes],
+            xfrac,
+            yfrac
+        )
+
+        caxrect = (
+            self._text_buffx[self._draw_axes]+xfrac,
+            self._text_bottomy[self._draw_axes],
+            cbar_frac/4.,
+            yfrac
+        )
+        return axrect, caxrect

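The refactor caches both layout variants (with and without a colorbar, and
likewise for the axes) in dicts keyed by a boolean, so hide_colorbar() and
show_colorbar() become dict lookups rather than recomputation. A stripped-down
sketch of the pattern, with placeholder values and only the colorbar half
shown:

    class LayoutCache:
        def __init__(self, size, fontscale=1.0):
            self.input_size = size
            # Precompute both variants once; toggling is a dict lookup.
            self._cbar_inches = {True: fontscale * 0.7, False: 0.0}
            self._draw_colorbar = True
            self.fsize = self._figure_size()

        def _figure_size(self):
            return [self.input_size[0] + self._cbar_inches[self._draw_colorbar],
                    self.input_size[1]]

        def _toggle_colorbar(self, choice):
            self._draw_colorbar = choice
            self.fsize = self._figure_size()

        def hide_colorbar(self):
            self._toggle_colorbar(False)
            return self  # returning self lets calls chain, as in the diff

    layout = LayoutCache((8.0, 8.0))
    print(layout.fsize)                  # [8.7, 8.0]
    print(layout.hide_colorbar().fsize)  # [8.0, 8.0]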

https://bitbucket.org/yt_analysis/yt/commits/bc3295e67941/
Changeset:   bc3295e67941
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 16:22:04
Summary:     Fixing format string error in logging for ARTIO.
Affected #:  1 file

diff -r a5b7d8a34375cddfe6337614947dd3f605adecf0 -r bc3295e679413f5c05473a7380bfaa88b5ec9f80 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -315,7 +315,7 @@
         self.fluid_field_list = self._detect_fluid_fields()
         self.particle_field_list = self._detect_particle_fields()
         self.field_list = self.fluid_field_list + self.particle_field_list
-        mylog.debug("Detected fields:", (self.field_list,))
+        mylog.debug("Detected fields: %s", (self.field_list,))
 
     def _detect_fluid_fields(self):
         return [art_to_yt[f] for f in yt_to_art.values() if f in


https://bitbucket.org/yt_analysis/yt/commits/cb14bdc63b94/
Changeset:   cb14bdc63b94
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 16:24:08
Summary:     We have to correctly set our field indices for ARTIO fill_sfc.
Affected #:  1 file

diff -r bc3295e679413f5c05473a7380bfaa88b5ec9f80 -r cb14bdc63b94677c6ce012be6db112ef08b0bb8b yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -766,7 +766,7 @@
                     for j in range(8):
                         for i in range(nf):
                             field_vals[i][local_ind[level] * 8 + j] = \
-                                grid_variables[ngv * j + i]
+                                grid_variables[ngv * j + field_ind[i]]
                     local_ind[level] += 1
                 status = artio_grid_read_level_end(handle)
                 check_artio_status(status)

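The fix matters because grid_variables stores every on-disk field for each
cell, interleaved, while field_vals holds only the requested subset; field_ind
maps the i-th requested field to its slot in the full per-cell record. A NumPy
sketch of the indexing, with illustrative values for ngv, field_ind and the
data:

    import numpy as np

    ngv = 4             # total fields the file stores per cell
    field_ind = [1, 3]  # the two fields actually requested
    n_cells = 8         # one oct's worth of cells

    # grid_variables is flat: cell-major, then field within the cell.
    grid_variables = np.arange(ngv * n_cells, dtype="float64")

    field_vals = np.empty((len(field_ind), n_cells))
    for j in range(n_cells):
        for i in range(len(field_ind)):
            # The buggy version read grid_variables[ngv * j + i], i.e.
            # fields 0 and 1 regardless of which fields were requested.
            field_vals[i][j] = grid_variables[ngv * j + field_ind[i]]

    print(field_vals)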

https://bitbucket.org/yt_analysis/yt/commits/2c3f399e75d3/
Changeset:   2c3f399e75d3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 16:30:20
Summary:     Updating ARTIO tests with sample data.
Affected #:  1 file

diff -r cb14bdc63b94677c6ce012be6db112ef08b0bb8b -r 2c3f399e75d390c5b2f574e5667770639e7613ed yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -31,21 +31,21 @@
     FieldValuesTest
 from yt.frontends.artio.api import ARTIOStaticOutput
 
-_fields = ("Temperature", "Density", "VelocityMagnitude") 
+_fields = ("Temperature", "Density", "VelocityMagnitude",
+           ("deposit", "all_density"), ("deposit", "all_count")) 
 
-aiso5 = "artio/aiso_a0.9005.art"
-@requires_pf(aiso5)
-def test_aiso5():
-    pf = data_dir_load(aiso5)
-    yield assert_equal, str(pf), "aiso_a0.9005.art"
+sizmbhloz = "sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art"
+@requires_pf(sizmbhloz)
+def test_sizmbhloz():
+    pf = data_dir_load(sizmbhloz)
+    yield assert_equal, str(pf), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
-                        aiso5, axis, field, weight_field,
+                        sizmbhloz, axis, field, weight_field,
                         ds)
                 yield FieldValuesTest(
-                        aiso5, field, ds)
-
+                        sizmbhloz, field, ds)


https://bitbucket.org/yt_analysis/yt/commits/b8435fe1eb92/
Changeset:   b8435fe1eb92
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 16:30:54
Summary:     Since this is yt-3.0, I have changed the gold standard to be gold310.
Affected #:  1 file

diff -r 2c3f399e75d390c5b2f574e5667770639e7613ed -r b8435fe1eb92169b5dec518e2f096d4dd7a45715 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold010',
+    gold_standard_filename = 'gold310',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'


https://bitbucket.org/yt_analysis/yt/commits/cf6a60c47a16/
Changeset:   cf6a60c47a16
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 16:59:22
Summary:     Fixing an issue with string formatting and __getitem__.
Affected #:  1 file

diff -r b8435fe1eb92169b5dec518e2f096d4dd7a45715 -r cf6a60c47a16af7538a5d6fdba8275787ec9cf05 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -492,8 +492,9 @@
         frb[self.field]
         frb[self.weight_field]
         d = frb.data
-        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
-                         for f in proj.field_data.keys()) ) )
+        for f in proj.field_data:
+            # Sometimes f will be a tuple.
+            d["%s_sum" % (f,)] = proj.field_data[f].sum(dtype="float64")
         return d
 
     def compare(self, new_result, old_result):
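The rewritten loop uses "%s" % (f,) rather than "%s" % f because the %
operator unpacks a bare tuple into multiple format arguments; wrapping f in a
one-element tuple always formats the single object. For example:

    f = ("deposit", "all_density")

    try:
        "%s_sum" % f       # the tuple is unpacked: two args for one %s
    except TypeError as err:
        print(err)         # not all arguments converted ...

    print("%s_sum" % (f,))  # ('deposit', 'all_density')_sum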


https://bitbucket.org/yt_analysis/yt/commits/b28b5fecc160/
Changeset:   b28b5fecc160
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 17:06:37
Summary:     When loading from another directory, make sure we have an absolute path for
RAMSES files.
Affected #:  1 file

diff -r cf6a60c47a16af7538a5d6fdba8275787ec9cf05 -r b28b5fecc1600d198216b61057d51b96ad6528d3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -63,7 +63,8 @@
         num = os.path.basename(pf.parameter_filename).split("."
                 )[0].split("_")[1]
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.dirname(pf.parameter_filename),
+            os.path.abspath(
+              os.path.dirname(pf.parameter_filename)),
             num, domain_id)
         for t in ['grav', 'hydro', 'part', 'amr']:
             setattr(self, "%s_fn" % t, basename % t)


https://bitbucket.org/yt_analysis/yt/commits/a9d6608d03ac/
Changeset:   a9d6608d03ac
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 17:09:12
Summary:     Adding RAMSES frontend tests.
Affected #:  2 files

diff -r b28b5fecc1600d198216b61057d51b96ad6528d3 -r a9d6608d03ac9c18dfa2b970c705526e047ce50a yt/frontends/ramses/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -0,0 +1,52 @@
+"""
+RAMSES frontend tests 
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest
+from yt.frontends.artio.api import ARTIOStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude",
+           ("deposit", "all_density"), ("deposit", "all_count")) 
+
+output_00080 = "output_00080/info_00080.txt"
+@requires_pf(output_00080)

+def test_output_00080():
+    pf = data_dir_load(output_00080)
+    yield assert_equal, str(pf), "info_00080"
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        output_00080, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        output_00080, field, ds)
+


https://bitbucket.org/yt_analysis/yt/commits/f79eeee6ad10/
Changeset:   f79eeee6ad10
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 17:18:42
Summary:     Adding filename paths to the template.
Affected #:  1 file

diff -r a9d6608d03ac9c18dfa2b970c705526e047ce50a -r f79eeee6ad10cf5e622a580a4e45089b8b12fc7c yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -308,7 +308,7 @@
         self.hubble_constant = hvals["HubbleParam"]
         self.parameters = hvals
 
-        prefix = self.parameter_filename.split(".", 1)[0]
+        prefix = os.path.abspath(self.parameter_filename.split(".", 1)[0])
         suffix = self.parameter_filename.rsplit(".", 1)[-1]
         self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
         self.file_count = hvals["NumFilesPerSnapshot"]
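The doubled %% in the template survives the first interpolation, leaving a
%(num)i placeholder to be filled per file. Assuming an illustrative snapshot
name, the flow is:

    import os

    parameter_filename = "/data/snapshot_033/snap_033.0.hdf5"
    prefix = os.path.abspath(parameter_filename.split(".", 1)[0])
    suffix = parameter_filename.rsplit(".", 1)[-1]

    # "%%(num)i" survives the first interpolation as "%(num)i".
    filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
    print(filename_template)               # .../snap_033.%(num)i.hdf5
    print(filename_template % {"num": 2})  # .../snap_033.2.hdf5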


https://bitbucket.org/yt_analysis/yt/commits/0c3a35d32850/
Changeset:   0c3a35d32850
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 17:19:44
Summary:     Adding OWLS tests.
Affected #:  2 files

diff -r f79eeee6ad10cf5e622a580a4e45089b8b12fc7c -r 0c3a35d328507f6634bac7613f30d6caf409f49e yt/frontends/sph/tests/test_owls.py
--- /dev/null
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -0,0 +1,61 @@
+"""
+Tipsy tests using the OWLS HDF5-Gadget dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest
+from yt.frontends.sph.api import OWLSStaticOutput
+
+_fields = (("deposit", "all_density"), ("deposit", "all_count"),
+           ("deposit", "PartType0_density"),
+           ("deposit", "PartType4_density"))
+
+os33 = "snapshot_033/snap_033.0.hdf5"
+@requires_pf(os33)
+def test_snapshot_033():
+    pf = data_dir_load(os33)
+    yield assert_equal, str(pf), "snap_033"
+    dso = [ None, ("sphere", ("c", (0.1, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape[0], 2*(128*128*128)
+    yield assert_equal, dd["Coordinates"].shape[1], 3
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, (2*128*128*128)
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        os33, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        os33, field, ds)
+


https://bitbucket.org/yt_analysis/yt/commits/8e06fdd4faae/
Changeset:   8e06fdd4faae
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 20:39:47
Summary:     Merging
Affected #:  48 files

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -17,3 +17,4 @@
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -419,7 +419,7 @@
 echo "be installing ZeroMQ"
 
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_0MQ}
+get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
 
 echo
@@ -877,6 +877,11 @@
 mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
 echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
 echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
 do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -54,8 +54,8 @@
 import glob
 
 from yt.funcs import *
-from yt.utilities.pykdtree import KDTree
-import yt.utilities.pydot as pydot
+from yt.extern.pykdtree import KDTree
+import yt.extern.pydot as pydot
 
 # We don't currently use this, but we may again find a use for it in the
 # future.

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -36,7 +36,7 @@
     HaloProfiler
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
-import yt.utilities.pydot as pydot
+import yt.extern.pydot as pydot
 from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -1254,7 +1254,7 @@
                 mylog.error("Output directory exists, but is not a directory: %s." % my_output_dir)
                 raise IOError(my_output_dir)
         else:
-            os.mkdir(my_output_dir)
+            os.makedirs(my_output_dir)
 
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -58,8 +58,19 @@
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
-    valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
-            else False for arg in args]
+    valid_file = []
+    for argno, arg in enumerate(args):
+        if isinstance(arg, types.StringTypes):
+            if os.path.exists(arg):
+                valid_file.append(True)
+            else:
+                if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
+                    valid_file.append(True)
+                    args[argno] = os.path.join(ytcfg.get("yt", "test_data_dir"), arg)
+                else:
+                    valid_file.append(False)
+        else:
+            valid_file.append(False)
     if not any(valid_file):
         try:
             from yt.data_objects.time_series import TimeSeriesData

diff -r 0c3a35d328507f6634bac7613f30d6caf409f49e -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -347,6 +347,7 @@
         # Figure out the starting and stopping times and redshift.
         self._calculate_simulation_bounds()
         # Get all possible datasets.
+        self.all_time_outputs = []
         self._get_all_outputs(find_outputs=find_outputs)
         
         self.print_key_parameters()

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/4f048d43cf7c/
Changeset:   4f048d43cf7c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 20:46:37
Summary:     Problems with field detection mean we should disable this for the time being.
Affected #:  1 file

diff -r 8e06fdd4faaee9bf5901e20114fa7f147d4a3760 -r 4f048d43cf7c871f1eabe6b76ebe9c47e95697e8 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -31,8 +31,7 @@
     data_dir_load
 from yt.frontends.enzo.api import EnzoStaticOutput
 
-_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
-           "particle_density")
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
 
 m7 = "DD0010/moving7_0010"
 @requires_pf(m7)


https://bitbucket.org/yt_analysis/yt/commits/f462e06e6a77/
Changeset:   f462e06e6a77
Branch:      yt-3.0
User:        xarthisius
Date:        2013-09-09 21:31:45
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #92)

Adding frontend tests
Affected #:  12 files

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold010',
+    gold_standard_filename = 'gold310',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -766,7 +766,7 @@
                     for j in range(8):
                         for i in range(nf):
                             field_vals[i][local_ind[level] * 8 + j] = \
-                                grid_variables[ngv * j + i]
+                                grid_variables[ngv * j + field_ind[i]]
                     local_ind[level] += 1
                 status = artio_grid_read_level_end(handle)
                 check_artio_status(status)

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -315,7 +315,7 @@
         self.fluid_field_list = self._detect_fluid_fields()
         self.particle_field_list = self._detect_particle_fields()
         self.field_list = self.fluid_field_list + self.particle_field_list
-        mylog.debug("Detected fields:", (self.field_list,))
+        mylog.debug("Detected fields: %s", (self.field_list,))
 
     def _detect_fluid_fields(self):
         return [art_to_yt[f] for f in yt_to_art.values() if f in

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -31,21 +31,21 @@
     FieldValuesTest
 from yt.frontends.artio.api import ARTIOStaticOutput
 
-_fields = ("Temperature", "Density", "VelocityMagnitude") 
+_fields = ("Temperature", "Density", "VelocityMagnitude",
+           ("deposit", "all_density"), ("deposit", "all_count")) 
 
-aiso5 = "artio/aiso_a0.9005.art"
-@requires_pf(aiso5)
-def test_aiso5():
-    pf = data_dir_load(aiso5)
-    yield assert_equal, str(pf), "aiso_a0.9005.art"
+sizmbhloz = "sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art"
+@requires_pf(sizmbhloz)
+def test_sizmbhloz():
+    pf = data_dir_load(sizmbhloz)
+    yield assert_equal, str(pf), "sizmbhloz-clref04SNth-rs9_a0.9011.art"
     dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
-                        aiso5, axis, field, weight_field,
+                        sizmbhloz, axis, field, weight_field,
                         ds)
                 yield FieldValuesTest(
-                        aiso5, field, ds)
-
+                        sizmbhloz, field, ds)

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -31,8 +31,7 @@
     data_dir_load
 from yt.frontends.enzo.api import EnzoStaticOutput
 
-_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV",
-           "particle_density")
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
 
 m7 = "DD0010/moving7_0010"
 @requires_pf(m7)

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -63,7 +63,8 @@
         num = os.path.basename(pf.parameter_filename).split("."
                 )[0].split("_")[1]
         basename = "%s/%%s_%s.out%05i" % (
-            os.path.dirname(pf.parameter_filename),
+            os.path.abspath(
+              os.path.dirname(pf.parameter_filename)),
             num, domain_id)
         for t in ['grav', 'hydro', 'part', 'amr']:
             setattr(self, "%s_fn" % t, basename % t)

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/ramses/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -0,0 +1,52 @@
+"""
+RAMSES frontend tests 
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest
+from yt.frontends.artio.api import ARTIOStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude",
+           ("deposit", "all_density"), ("deposit", "all_count")) 
+
+output_00080 = "output_00080/info_00080.txt"
+@requires_pf(output_00080)
+def test_output_00080():
+    pf = data_dir_load(output_00080)
+    yield assert_equal, str(pf), "info_00080"
+    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        output_00080, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        output_00080, field, ds)
+

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -308,7 +308,7 @@
         self.hubble_constant = hvals["HubbleParam"]
         self.parameters = hvals
 
-        prefix = self.parameter_filename.split(".", 1)[0]
+        prefix = os.path.abspath(self.parameter_filename.split(".", 1)[0])
         suffix = self.parameter_filename.rsplit(".", 1)[-1]
         self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix)
         self.file_count = hvals["NumFilesPerSnapshot"]

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/frontends/sph/tests/test_owls.py
--- /dev/null
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -0,0 +1,61 @@
+"""
+Tipsy tests using the OWLS HDF5-Gadget dataset
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest
+from yt.frontends.sph.api import OWLSStaticOutput
+
+_fields = (("deposit", "all_density"), ("deposit", "all_count"),
+           ("deposit", "PartType0_density"),
+           ("deposit", "PartType4_density"))
+
+os33 = "snapshot_033/snap_033.0.hdf5"
+@requires_pf(os33)
+def test_snapshot_033():
+    pf = data_dir_load(os33)
+    yield assert_equal, str(pf), "snap_033"
+    dso = [ None, ("sphere", ("c", (0.1, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_equal, dd["Coordinates"].shape[0], 2*(128*128*128)
+    yield assert_equal, dd["Coordinates"].shape[1], 3
+    tot = sum(dd[ptype,"Coordinates"].shape[0]
+              for ptype in pf.particle_types if ptype != "all")
+    yield assert_equal, tot, (2*128*128*128)
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield PixelizedProjectionValuesTest(
+                        os33, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        os33, field, ds)
+

diff -r 0db2b82b59f454b78845d24585fc723dd4db130b -r f462e06e6a772cd5277184469803c52404d1ea12 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -492,8 +492,9 @@
         frb[self.field]
         frb[self.weight_field]
         d = frb.data
-        d.update( dict( (("%s_sum" % f, proj[f].sum(dtype="float64"))
-                         for f in proj.field_data.keys()) ) )
+        for f in proj.field_data:
+            # Sometimes f will be a tuple.
+            d["%s_sum" % (f,)] = proj.field_data[f].sum(dtype="float64")
         return d
 
     def compare(self, new_result, old_result):


https://bitbucket.org/yt_analysis/yt/commits/b311ba8d5d5d/
Changeset:   b311ba8d5d5d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 20:52:41
Summary:     Merging from the frontends test
Affected #:  101 files

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -17,3 +17,4 @@
 tabel = tabel@slac.stanford.edu
 sername=kayleanelson = kaylea.nelson@yale.edu
 kayleanelson = kaylea.nelson@yale.edu
+jcforbes@ucsc.edu = jforbes@ucolick.org

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -419,7 +419,7 @@
 echo "be installing ZeroMQ"
 
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_0MQ}
+get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
 
 echo
@@ -832,8 +832,8 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o &>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
@@ -844,7 +844,7 @@
 	    echo "Building LAPACK"
 	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
@@ -877,6 +877,11 @@
 mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
 echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
 echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
 do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
@@ -943,10 +948,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1062,8 +1062,9 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
+        *dm_only* is True (default), only run it on the dark matter particles, 
+        otherwise on all particles.  Returns an iterable collection of 
+        *HopGroup* items.
         """
         self._data_source = data_source
         self.dm_only = dm_only
@@ -2215,11 +2216,11 @@
                 self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
+            self._data_source = self.hierarchy.region([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+            self._data_source = self.hierarchy.region([0.5] * 3,
                 LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case where the full box is what we want.
@@ -2305,8 +2306,7 @@
                 np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
-                ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -2503,7 +2503,7 @@
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
         else:
             self._data_source = pf.h.all_data()
         self.padding = padding  # * pf["unitary"] # This should be clevererer
@@ -2599,7 +2599,7 @@
             linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+            self._data_source = pf.h.region([0.] * 3, ds_LE,
                 ds_RE)
         else:
             self._data_source = pf.h.all_data()

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -54,8 +54,8 @@
 import glob
 
 from yt.funcs import *
-from yt.utilities.pykdtree import KDTree
-import yt.utilities.pydot as pydot
+from yt.extern.pykdtree import KDTree
+import yt.extern.pydot as pydot
 
 # We don't currently use this, but we may again find a use for it in the
 # future.

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -36,7 +36,7 @@
     HaloProfiler
 from yt.convenience import load
 from yt.utilities.logger import ytLogger as mylog
-import yt.utilities.pydot as pydot
+import yt.extern.pydot as pydot
 from yt.utilities.spatial import cKDTree
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelDummy, \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -811,10 +811,10 @@
                     need_per = True
                     break
 
-            if need_per:
-                region = self.pf.h.periodic_region(halo['center'], leftEdge, rightEdge)
-            else:
-                region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
+            # We use the same type of region regardless.  The selection will be
+            # correct, but we need the need_per variable for projection
+            # shifting.
+            region = self.pf.h.region(halo['center'], leftEdge, rightEdge)
 
             # Make projections.
             if not isinstance(axes, types.ListType): axes = list([axes])
@@ -1254,7 +1254,7 @@
                 mylog.error("Output directory exists, but is not a directory: %s." % my_output_dir)
                 raise IOError(my_output_dir)
         else:
-            os.mkdir(my_output_dir)
+            os.makedirs(my_output_dir)
 
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -159,8 +159,7 @@
             # This ds business below has to do with changes made for halo
             # finding on subvolumes and serves no purpose here except
             # compatibility. This is not the best policy, if I'm honest.
-            ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
-                self.right_edge)
+            ds = pf.h.region([0.]*3, self.left_edge, self.right_edge)
             padded, self.LE, self.RE, self.ds = \
             self.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold010',
+    gold_standard_filename = 'gold310',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -58,8 +58,19 @@
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
-    valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
-            else False for arg in args]
+    valid_file = []
+    for argno, arg in enumerate(args):
+        if isinstance(arg, types.StringTypes):
+            if os.path.exists(arg):
+                valid_file.append(True)
+            else:
+                if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
+                    valid_file.append(True)
+                    args[argno] = os.path.join(ytcfg.get("yt", "test_data_dir"), arg)
+                else:
+                    valid_file.append(False)
+        else:
+            valid_file.append(False)
     if not any(valid_file):
         try:
             from yt.data_objects.time_series import TimeSeriesData

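With this change, load() resolves an argument that does not exist as given against the configured test_data_dir before giving up, which lets answer tests refer to datasets by bare relative paths such as "snapshot_033/snap_033.0.hdf5". A sketch of the lookup in isolation (resolve is a hypothetical helper, not part of yt's API):

    import os

    def resolve(arg, test_data_dir):
        # Prefer the path exactly as given.
        if os.path.exists(arg):
            return arg, True
        # Otherwise, try it relative to the test data directory.
        candidate = os.path.join(test_data_dir, arg)
        if os.path.exists(candidate):
            return candidate, True
        return arg, False
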
diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -36,12 +36,12 @@
     NeedsProperty, \
     NeedsParameter
 import yt.geometry.particle_deposit as particle_deposit
+import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
-    _num_zones = 2
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
@@ -49,7 +49,8 @@
     _domain_offset = 0
     _num_octs = -1
 
-    def __init__(self, base_region, domain, pf):
+    def __init__(self, base_region, domain, pf, over_refine_factor = 1):
+        self._num_zones = 1 << (over_refine_factor)
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.domain = domain
@@ -126,6 +127,7 @@
 
     def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
+        if fields is None: fields = []
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
@@ -145,6 +147,29 @@
         if vals is None: return
         return np.asfortranarray(vals)
 
+    def smooth(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        if fields is None: fields = []
+        cls = getattr(particle_smooth, "%s_smooth" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
+        if fields is None: fields = []
+        op = cls(nvals, len(fields), 64)
+        op.initialize()
+        mylog.debug("Smoothing %s particles into %s Octs",
+            positions.shape[0], nvals[-1])
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+            self.domain_id, self._domain_offset, self.pf.periodicity)
+        vals = op.finalize()
+        if vals is None: return
+        if isinstance(vals, list):
+            vals = [np.asfortranarray(v) for v in vals]
+        else:
+            vals = np.asfortranarray(vals)
+        return vals
+
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
@@ -206,8 +231,10 @@
     _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
-    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
+    def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0,
+                 over_refine_factor = 1):
         # The first attempt at this will not work in parallel.
+        self._num_zones = 1 << (over_refine_factor)
         self.data_files = data_files
         self.field_data = YTFieldData()
         self.field_parameters = {}

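The new over_refine_factor keyword sets the number of zones along each oct edge as a power of two, via _num_zones = 1 << over_refine_factor; the new smooth() method then mirrors deposit(), dispatching to a "<method>_smooth" class in particle_smooth. A quick sketch of the zone counts implied by the shift:

    for over_refine_factor in (1, 2, 3):
        num_zones = 1 << over_refine_factor   # zones per oct edge
        print over_refine_factor, num_zones, num_zones ** 3
    # 1 2 8    (the default 2x2x2 oct)
    # 2 4 64
    # 3 8 512
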
diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -122,16 +122,6 @@
                 int(self.periodic), DLE, DRE)
         return (0, args)
 
-class ParticleIOHandlerRegionStrict(ParticleIOHandlerRegion):
-    _source_type = "region_strict"
-
-class ParticleIOHandlerPeriodicRegion(ParticleIOHandlerRegion):
-    periodic = True
-    _source_type = "periodic_region"
-
-class ParticleIOHandlerPeriodicRegionStrict(ParticleIOHandlerPeriodicRegion):
-    _source_type = "periodic_region_strict"
-
 class ParticleIOHandlerSphere(ParticleIOHandlerImplemented):
     _source_type = "sphere"
 

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_cutting_plane():
     for nprocs in [8, 1]:
@@ -23,7 +27,9 @@
         yield assert_equal, cut["Ones"].min(), 1.0
         yield assert_equal, cut["Ones"].max(), 1.0
         pw = cut.to_pw()
-        fns += pw.save()
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
             yield assert_equal, frb[cut_field].info['data_source'], \

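The same tempfile pattern recurs in the projection and slice tests below: mkstemp returns an open OS-level descriptor plus a name, the descriptor is closed immediately since only the name is needed, and teardown tolerates files that were never written. A standalone sketch of the pattern:

    import os
    import tempfile

    tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
    os.close(tmpfd)      # only the name is needed, e.g. pw.save(name=tmpname)
    try:
        os.remove(tmpname)
    except OSError:
        pass             # already removed; nothing to clean up
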
diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_image_array.py
--- a/yt/data_objects/tests/test_image_array.py
+++ b/yt/data_objects/tests/test_image_array.py
@@ -1,130 +1,94 @@
-from yt.testing import *
-from yt.data_objects.image_array import ImageArray
 import numpy as np
 import os
 import tempfile
 import shutil
+import unittest
+from yt.data_objects.image_array import ImageArray
+from yt.testing import \
+    assert_equal
+
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
-    np.seterr(all = 'ignore')
+    ytcfg["yt", "__withintesting"] = "True"
+    np.seterr(all='ignore')
+
+
+def dummy_image(kstep, nlayers):
+    im = np.zeros([64, 128, nlayers])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i, :, k] = np.linspace(0.0, kstep * k, im.shape[1])
+    return im
+
 
 def test_rgba_rescale():
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-    im_arr = ImageArray(im)
+    im_arr = ImageArray(dummy_image(10.0, 4))
 
     new_im = im_arr.rescale(inline=False)
-    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
-    yield assert_equal, im_arr[:,:,3].max(), 3*10.
-    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
-    yield assert_equal, new_im[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].max(), 2 * 10.
+    yield assert_equal, im_arr[:, :, 3].max(), 3 * 10.
+    yield assert_equal, new_im[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, new_im[:, :, 3].max(), 1.0
 
     im_arr.rescale()
-    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
-    yield assert_equal, im_arr[:,:,3].max(), 1.0
+    yield assert_equal, im_arr[:, :, :3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:, :, 3].max(), 1.0
 
-def test_image_array_hdf5():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+class TestImageArray(unittest.TestCase):
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+    tmpdir = None
+    curdir = None
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp()
+        self.curdir = os.getcwd()
+        os.chdir(self.tmpdir)
 
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+    def test_image_array_hdf5(self):
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
+        im_arr = ImageArray(dummy_image(0.3, 3), info=myinfo)
+        im_arr.save('test_3d_ImageArray')
 
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
+        im = np.zeros([64, 128])
+        for i in xrange(im.shape[0]):
+            im[i, :] = np.linspace(0., 0.3 * 2, im.shape[1])
 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
+        myinfo = {'field': 'dinosaurs', 'east_vector': np.array([1., 0., 0.]),
+                  'north_vector': np.array([0., 0., 1.]),
+                  'normal_vector': np.array([0., 1., 0.]),
+                  'width': 0.245, 'units': 'cm', 'type': 'rendering'}
 
-def test_image_array_rgb_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+        im_arr = ImageArray(im, info=myinfo)
+        im_arr.save('test_2d_ImageArray')
 
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    def test_image_array_rgb_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 3))
+        im_arr.write_png('standard.png')
 
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
+    def test_image_array_rgba_png(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.write_png('standard.png')
+        im_arr.write_png('non-scaled.png', rescale=False)
+        im_arr.write_png('black_bg.png', background='black')
+        im_arr.write_png('white_bg.png', background='white')
+        im_arr.write_png('green_bg.png', background=[0., 1., 0., 1.])
+        im_arr.write_png('transparent_bg.png', background=None)
 
-def test_image_array_rgba_png():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
+    def test_image_array_background(self):
+        im_arr = ImageArray(dummy_image(10.0, 4))
+        im_arr.rescale()
+        new_im = im_arr.add_background_color([1., 0., 0., 1.], inline=False)
+        new_im.write_png('red_bg.png')
+        im_arr.add_background_color('black')
+        im_arr.write_png('black_bg2.png')
 
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.write_png('standard.png')
-    im_arr.write_png('non-scaled.png', rescale=False)
-    im_arr.write_png('black_bg.png', background='black')
-    im_arr.write_png('white_bg.png', background='white')
-    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
-    im_arr.write_png('transparent_bg.png', background=None)
-
-
-def test_image_array_background():
-    # Perform I/O in safe place instead of yt main dir
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
-
-    im = np.zeros([64,128,4])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
-
-    im_arr = ImageArray(im)
-    im_arr.rescale()
-    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
-    new_im.write_png('red_bg.png')
-    im_arr.add_background_color('black')
-    im_arr.write_png('black_bg2.png')
- 
-    os.chdir(curdir)
-    # clean up
-    shutil.rmtree(tmpdir)
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def tearDown(self):
+        os.chdir(self.curdir)
+        # clean up
+        shutil.rmtree(self.tmpdir)

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,5 +1,6 @@
 from yt.testing import *
 import os
+import tempfile
 
 def setup():
     from yt.config import ytcfg
@@ -7,7 +8,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 def test_projection():
     for nprocs in [8, 1]:
@@ -37,7 +41,9 @@
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
                 pw = proj.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -27,6 +27,7 @@
 """
 import os
 import numpy as np
+import tempfile
 from nose.tools import raises
 from yt.testing import \
     fake_random_pf, assert_equal, assert_array_equal
@@ -42,7 +43,10 @@
 
 def teardown_func(fns):
     for fn in fns:
-        os.remove(fn)
+        try:
+            os.remove(fn)
+        except OSError:
+            pass
 
 
 def test_slice():
@@ -72,7 +76,9 @@
                 yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
                 yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
-                fns += pw.save()
+                tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                os.close(tmpfd)
+                fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -347,6 +347,7 @@
         # Figure out the starting and stopping times and redshift.
         self._calculate_simulation_bounds()
         # Get all possible datasets.
+        self.all_time_outputs = []
         self._get_all_outputs(find_outputs=find_outputs)
         
         self.print_key_parameters()

diff -r 21835af6a5ddb9721ca14d42954888eb5259c0f1 -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 yt/extern/__init__.py
--- /dev/null
+++ b/yt/extern/__init__.py
@@ -0,0 +1,4 @@
+"""
+This package contains Python packages that are bundled with yt
+and are developed by third parties upstream.
+"""

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/9833389da475/
Changeset:   9833389da475
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 21:14:15
Summary:     Adding preload_fields to _chunk_spatial definitions.
Affected #:  4 files

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -171,7 +171,7 @@
         # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for i,og in enumerate(sobjs):
             if ngz > 0:

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -379,7 +379,7 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz):
+    def _chunk_spatial(self, dobj, ngz, preload_fields = None):
         if ngz > 0:
             raise NotImplementedError
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -344,7 +344,7 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for i,og in enumerate(sobjs):
             if ngz > 0:

diff -r b311ba8d5d5db65d2436e756f26e7c078a6e3a46 -r 9833389da47525b9bbb506221743bb6793db9ed5 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -158,7 +158,7 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, None)
 
-    def _chunk_spatial(self, dobj, ngz, sort = None):
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         # We actually do not really use the data files except as input to the
         # ParticleOctreeSubset.

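Each frontend's _chunk_spatial now accepts a preload_fields keyword so the base geometry handler can pass field names down uniformly; frontends that cannot preload simply ignore it. A sketch of the call-compatible signature (the body here is elided, not taken from the diff):

    def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None):
        if preload_fields is None:
            preload_fields = []
        # Frontends that support it may preload these fields per chunk;
        # the others can simply ignore the list.
        pass
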

https://bitbucket.org/yt_analysis/yt/commits/f8c52554df0d/
Changeset:   f8c52554df0d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 22:10:59
Summary:     Fix for SPH datasets and a hotfix for ARTIO Cython speed.
Affected #:  3 files

diff -r f462e06e6a772cd5277184469803c52404d1ea12 -r f8c52554df0d446ee7a95319cd97d695bdb94243 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -718,6 +718,7 @@
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
+        cdef int j, oct_ind, level
         cdef np.int64_t sfc
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle

diff -r f462e06e6a772cd5277184469803c52404d1ea12 -r f8c52554df0d446ee7a95319cd97d695bdb94243 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -275,6 +275,7 @@
     def __init__(self, filename, data_style="OWLS", n_ref = 64,
                  over_refine_factor = 1):
         self.storage_filename = None
+        filename = os.path.abspath(filename)
         super(OWLSStaticOutput, self).__init__(
                                filename, data_style,
                                unit_base = None, n_ref = n_ref,

diff -r f462e06e6a772cd5277184469803c52404d1ea12 -r f8c52554df0d446ee7a95319cd97d695bdb94243 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -394,7 +394,7 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        err_msg = "Field values for %s not equal." % self.field
+        err_msg = "Field values for %s not equal." % (self.field,)
         if self.decimals is None:
             assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)


https://bitbucket.org/yt_analysis/yt/commits/e2a577d25a69/
Changeset:   e2a577d25a69
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-09 22:12:24
Summary:     Merging a few updates for patch IO.
Affected #:  4 files

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -718,6 +718,7 @@
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
+        cdef int j, oct_ind, level
         cdef np.int64_t sfc
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -275,6 +275,7 @@
     def __init__(self, filename, data_style="OWLS", n_ref = 64,
                  over_refine_factor = 1):
         self.storage_filename = None
+        filename = os.path.abspath(filename)
         super(OWLSStaticOutput, self).__init__(
                                filename, data_style,
                                unit_base = None, n_ref = n_ref,

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -394,7 +394,7 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        err_msg = "Field values for %s not equal." % self.field
+        err_msg = "Field values for %s not equal." % (self.field,)
         if self.decimals is None:
             assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)

diff -r 9833389da47525b9bbb506221743bb6793db9ed5 -r e2a577d25a69e15fe713eed31626052448a2ca7a yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1112,16 +1112,18 @@
         type = self._plot_type
         if type in ['Projection','OffAxisProjection']:
             weight = self.data_source.weight_field
+            if weight is not None:
+                weight = weight.replace(' ', '_')
         if 'Cutting' in self.data_source.__class__.__name__:
             type = 'OffAxisSlice'
         for k, v in self.plots.iteritems():
             if isinstance(k, types.TupleType):
                 k = k[1]
             if axis:
-                n = "%s_%s_%s_%s" % (name, type, axis, k)
+                n = "%s_%s_%s_%s" % (name, type, axis, k.replace(' ', '_'))
             else:
                 # for cutting planes
-                n = "%s_%s_%s" % (name, type, k)
+                n = "%s_%s_%s" % (name, type, k.replace(' ', '_'))
             if weight:
                 if isinstance(weight, tuple):
                     weight = weight[1]
@@ -1848,8 +1850,17 @@
 class WindowPlotMPL(ImagePlotMPL):
     def __init__(
             self, data, cbname, cmap, extent, aspect, zlim, size, fontsize,
-                figure, axes, cax):
-        fsize, axrect, caxrect = self._get_best_layout(size, fontsize)
+            figure, axes, cax):
+        self._draw_colorbar = True
+        self._draw_axes = True
+        self._cache_layout(size, fontsize)
+
+        # Make room for a colorbar
+        self.input_size = size
+        self.fsize = [size[0] + self._cbar_inches[self._draw_colorbar], size[1]]
+
+        # Compute layout
+        axrect, caxrect = self._get_best_layout(fontsize)
         if np.any(np.array(axrect) < 0):
             msg = 'The axis ratio of the requested plot is very narrow. ' \
                   'There is a good chance the plot will not look very good, ' \
@@ -1859,7 +1870,7 @@
             axrect  = (0.07, 0.10, 0.80, 0.80)
             caxrect = (0.87, 0.10, 0.04, 0.80)
         ImagePlotMPL.__init__(
-            self, fsize, axrect, caxrect, zlim, figure, axes, cax)
+            self, self.fsize, axrect, caxrect, zlim, figure, axes, cax)
         self._init_image(data, cbname, cmap, extent, aspect)
         self.image.axes.ticklabel_format(scilimits=(-2,3))
         if cbname == 'linear':
@@ -1867,31 +1878,74 @@
             self.cb.formatter.set_powerlimits((-2,3))
             self.cb.update_ticks()
 
-    def _get_best_layout(self, size, fontsize=18):
-        aspect = 1.0*size[0]/size[1]
-        fontscale = fontsize / 18.0
+    def _toggle_axes(self, choice):
+        self._draw_axes = choice
+        self.axes.get_xaxis().set_visible(choice)
+        self.axes.get_yaxis().set_visible(choice)
+        axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
 
-        # add room for a colorbar
-        cbar_inches = fontscale*0.7
-        newsize = [size[0] + cbar_inches, size[1]]
+    def _toggle_colorbar(self, choice):
+        self._draw_colorbar = choice
+        self.cax.set_visible(choice)
+        self.fsize = [self.input_size[0] + self._cbar_inches[choice], self.input_size[1]]
+        axrect, caxrect = self._get_best_layout()
+        self.axes.set_position(axrect)
+        self.cax.set_position(caxrect)
+
+    def hide_axes(self):
+        self._toggle_axes(False)
+        return self
+
+    def show_axes(self):
+        self._toggle_axes(True)
+        return self
+
+    def hide_colorbar(self):
+        self._toggle_colorbar(False)
+        return self
+
+    def show_colorbar(self):
+        self._toggle_colorbar(True)
+        return self
+
+    def _cache_layout(self, size, fontsize):
+        self._cbar_inches = {}
+        self._text_buffx = {}
+        self._text_bottomy = {}
+        self._text_topy = {}
+
+        self._aspect = 1.0*size[0]/size[1]
+        self._fontscale = fontsize / 18.0
+
+        # Leave room for a colorbar, if we are drawing it.
+        self._cbar_inches[True] = self._fontscale*0.7
+        self._cbar_inches[False] = 0
 
         # add buffers for text, and a bit of whitespace on top
-        text_buffx = fontscale * 1.0/(newsize[0])
-        text_bottomy = fontscale * 0.7/size[1]
-        text_topy = fontscale * 0.3/size[1]
+        self._text_buffx[True] = self._fontscale * 1.0/(size[0] + self._cbar_inches[True])
+        self._text_bottomy[True] = self._fontscale * 0.7/size[1]
+        self._text_topy[True] = self._fontscale * 0.3/size[1]
 
+        # No buffer for text if we're not drawing axes
+        self._text_buffx[False] = 0
+        self._text_bottomy[False] = 0
+        self._text_topy[False] = 0
+
+    def _get_best_layout(self, fontsize=18):
         # calculate how much room the colorbar takes
-        cbar_frac = cbar_inches/newsize[0]
+        cbar_frac = self._cbar_inches[self._draw_colorbar]/self.fsize[0]
 
         # Calculate y fraction, then use to make x fraction.
-        yfrac = 1.0-text_bottomy-text_topy
-        ysize = yfrac*size[1]
-        xsize = aspect*ysize
-        xfrac = xsize/newsize[0]
+        yfrac = 1.0-self._text_bottomy[self._draw_axes]-self._text_topy[self._draw_axes]
+        ysize = yfrac*self.fsize[1]
+        xsize = self._aspect*ysize
+        xfrac = xsize/self.fsize[0]
 
         # Now make sure it all fits!
-        xbig = xfrac + text_buffx + 2.0*cbar_frac
-        ybig = yfrac + text_bottomy + text_topy
+        xbig = xfrac + self._text_buffx[self._draw_axes] + 2.0*cbar_frac
+        ybig = yfrac + self._text_bottomy[self._draw_axes] + self._text_topy[self._draw_axes]
 
         if xbig > 1:
             xsize /= xbig
@@ -1899,9 +1953,20 @@
         if ybig > 1:
             xsize /= ybig
             ysize /= ybig
-        xfrac = xsize/newsize[0]
-        yfrac = ysize/newsize[1]
+        xfrac = xsize/self.fsize[0]
+        yfrac = ysize/self.fsize[1]
 
-        axrect = (text_buffx, text_bottomy, xfrac, yfrac )
-        caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
-        return newsize, axrect, caxrect
+        axrect = (
+            self._text_buffx[self._draw_axes],
+            self._text_bottomy[self._draw_axes],
+            xfrac,
+            yfrac
+        )
+
+        caxrect = (
+            self._text_buffx[self._draw_axes]+xfrac,
+            self._text_bottomy[self._draw_axes],
+            cbar_frac/4.,
+            yfrac
+        )
+        return axrect, caxrect

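The rework above caches both layout variants (with and without axes or a colorbar) up front, so the hide/show toggles only reposition the existing matplotlib axes, and each toggle returns self so calls can be chained. Assuming the usual access to per-field plot objects through a plot window's plots dictionary, usage might look like:

    slc = SlicePlot(pf, 'z', 'Density')    # pf is a previously loaded dataset
    plot = slc.plots['Density']            # the underlying WindowPlotMPL
    plot.hide_colorbar().hide_axes()       # chained toggles
    plot.show_colorbar()                   # bring the colorbar back
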

https://bitbucket.org/yt_analysis/yt/commits/8f5ac0b004f6/
Changeset:   8f5ac0b004f6
Branch:      yt-3.0
User:        xarthisius
Date:        2013-09-10 20:00:18
Summary:     [sph] Open parameter_file readonly
Affected #:  1 file

diff -r e2a577d25a69e15fe713eed31626052448a2ca7a -r 8f5ac0b004f63eddf3aad08b49643150444ad5d3 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -285,7 +285,7 @@
         return os.path.basename(self.parameter_filename).split(".")[0]
 
     def _parse_parameter_file(self):
-        handle = h5py.File(self.parameter_filename)
+        handle = h5py.File(self.parameter_filename, mode="r")
         hvals = {}
         hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
 


https://bitbucket.org/yt_analysis/yt/commits/aa55ea602e2f/
Changeset:   aa55ea602e2f
Branch:      yt-3.0
User:        xarthisius
Date:        2013-09-10 20:08:52
Summary:     [sph] use only explicit imports, pep8 and pyflakes compliance
Affected #:  1 file

diff -r 8f5ac0b004f63eddf3aad08b49643150444ad5d3 -r aa55ea602e2fd104ca5c5d95f554694e07f9e738 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -29,24 +29,18 @@
 import weakref
 import struct
 import glob
-from itertools import izip
+import os
 
 from yt.utilities.fortran_utils import read_record
-from yt.funcs import *
+from yt.utilities.logger import ytLogger as mylog
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
-from yt.geometry.geometry_handler import \
-    GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
-from yt.data_objects.octree_subset import \
-    OctreeSubset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from yt.utilities.physical_constants import \
     G, \
-    gravitational_constant_cgs, \
-    km_per_pc, \
     cm_per_kpc, \
     mass_sun_cgs
 from yt.utilities.cosmology import Cosmology
@@ -58,8 +52,6 @@
     TipsyFieldInfo, \
     KnownTipsyFields
 
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
 
 class ParticleFile(object):
     def __init__(self, pf, io, filename, file_id):
@@ -78,6 +70,7 @@
     def _calculate_offsets(self, fields):
         pass
 
+
 class GadgetBinaryFile(ParticleFile):
     def __init__(self, pf, io, filename, file_id):
         with open(filename, "rb") as f:
@@ -86,13 +79,13 @@
             f.seek(0, os.SEEK_END)
             self._file_size = f.tell()
 
-        super(GadgetBinaryFile, self).__init__(pf, io,
-                filename, file_id)
+        super(GadgetBinaryFile, self).__init__(pf, io, filename, file_id)
 
     def _calculate_offsets(self, field_list):
         self.field_offsets = self.io._calculate_field_offsets(
-                field_list, self.total_particles,
-                self._position_offset, self._file_size)
+            field_list, self.total_particles,
+            self._position_offset, self._file_size)
+
 
 class ParticleStaticOutput(StaticOutput):
     _unit_base = None
@@ -105,7 +98,7 @@
         self.units['1'] = 1.0
         DW = self.domain_right_edge - self.domain_left_edge
         self.units["unitary"] = 1.0 / DW.max()
-        # Check 
+        # Check
         base = None
         mpch = {}
         mpch.update(mpc_conversion)
@@ -114,20 +107,22 @@
             for unit in mpc_conversion:
                 mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
                 mpch['%shcm' % unit] = (mpch["%sh" % unit] *
-                                (1 + self.current_redshift))
+                                        (1 + self.current_redshift))
                 mpch['%scm' % unit] = mpch[unit] * (1 + self.current_redshift)
         # ud == unit destination
         # ur == unit registry
         for ud, ur in [(self.units, mpch), (self.time_units, sec_conversion)]:
             for unit in sorted(unit_base):
                 if unit in ur:
-                    ratio = (ur[unit] / ur['mpc'] )
+                    ratio = (ur[unit] / ur['mpc'])
                     base = unit_base[unit] * ratio
                     break
-            if base is None: continue
+            if base is None:
+                continue
             for unit in ur:
                 ud[unit] = ur[unit] / base
 
+
 class GadgetStaticOutput(ParticleStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
     _file_class = GadgetBinaryFile
@@ -151,12 +146,12 @@
                     ('FlagAge', 1, 'i'),
                     ('FlagMEtals', 1, 'i'),
                     ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i') )
+                    ('unused', 16, 'i'))
 
     def __init__(self, filename, data_style="gadget_binary",
-                 additional_fields = (),
-                 unit_base = None, n_ref = 64,
-                 over_refine_factor = 1):
+                 additional_fields=(),
+                 unit_base=None, n_ref=64,
+                 over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         self.storage_filename = None
@@ -180,7 +175,7 @@
         for i in hvals:
             if len(hvals[i]) == 1:
                 hvals[i] = hvals[i][0]
-        
+
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = "sph"
@@ -188,7 +183,6 @@
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         # Set standard values
 
-
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         nz = 1 << self.over_refine_factor
@@ -209,7 +203,7 @@
         # somehow, but opinions on this vary.
         if self.omega_lambda == 0.0:
             mylog.info("Omega Lambda is 0.0, so we are turning off Cosmology.")
-            self.hubble_constant = 1.0 # So that scaling comes out correct
+            self.hubble_constant = 1.0  # So that scaling comes out correct
             self.cosmological_simulation = 0
             self.current_redshift = 0.0
             # This may not be correct.
@@ -220,14 +214,14 @@
             # factor, not the actual integration time, so we re-calculate
             # global time from our Cosmology.
             cosmo = Cosmology(self.hubble_constant * 100.0,
-                        self.omega_matter, self.omega_lambda)
+                              self.omega_matter, self.omega_lambda)
             self.current_time = cosmo.UniverseAge(self.current_redshift)
             mylog.info("Calculating time from %0.3e to be %0.3e seconds",
                        hvals["Time"], self.current_time)
         self.parameters = hvals
 
         prefix = self.parameter_filename.split(".", 1)[0]
-        suffix = self.parameter_filename.rsplit(".", 1)[-1]
+        # suffix = self.parameter_filename.rsplit(".", 1)[-1]
 
         if hvals["NumFiles"] > 1:
             self.filename_template = "%s.%%(num)s" % (prefix)
@@ -243,14 +237,15 @@
         length_unit = self.units['cm']
         unit_base = self._unit_base or {}
         velocity_unit = unit_base.get("velocity", 1e5)
-        velocity_unit = unit_base.get("UnitVelocity_in_cm_per_s", velocity_unit)
+        velocity_unit = unit_base.get("UnitVelocity_in_cm_per_s",
+                                      velocity_unit)
         # We set hubble_constant = 1.0 for non-cosmology
         msun10 = mass_sun_cgs * 1e10 / self.hubble_constant
         mass_unit = unit_base.get("g", msun10)
         mass_unit = unit_base.get("UnitMass_in_g", mass_unit)
         self.conversion_factors["velocity"] = velocity_unit
         self.conversion_factors["mass"] = mass_unit
-        self.conversion_factors["density"] = mass_unit / length_unit**3
+        self.conversion_factors["density"] = mass_unit / length_unit ** 3
         # Currently, setting time_units is disabled.  The current_time is
         # accurately set, but until a time that we can confirm how
         # FormationTime for stars is set I am disabling these.
@@ -263,23 +258,23 @@
         # We do not allow load() of these files.
         return False
 
+
 class OWLSStaticOutput(GadgetStaticOutput):
     _hierarchy_class = ParticleGeometryHandler
     _file_class = ParticleFile
-    _fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
+    _fieldinfo_fallback = OWLSFieldInfo  # For now we have separate from Gadget
     _fieldinfo_known = KnownOWLSFields
     _particle_mass_name = "Mass"
     _particle_coordinates_name = "Coordinates"
-    _header_spec = None # Override so that there's no confusion
+    _header_spec = None  # Override so that there's no confusion
 
-    def __init__(self, filename, data_style="OWLS", n_ref = 64,
-                 over_refine_factor = 1):
+    def __init__(self, filename, data_style="OWLS", n_ref=64,
+                 over_refine_factor=1):
         self.storage_filename = None
         filename = os.path.abspath(filename)
         super(OWLSStaticOutput, self).__init__(
-                               filename, data_style,
-                               unit_base = None, n_ref = n_ref,
-                               over_refine_factor = over_refine_factor)
+            filename, data_style, unit_base=None, n_ref=n_ref,
+            over_refine_factor=over_refine_factor)
 
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]
@@ -316,7 +311,8 @@
 
         # To avoid having to open files twice
         self._unit_base = {}
-        self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
+        self._unit_base.update(
+            (str(k), v) for k, v in handle["/Units"].attrs.items())
         # Comoving cm is given in the Units
         self._unit_base['cmcm'] = 1.0 / self._unit_base["UnitLength_in_cm"]
 
@@ -325,7 +321,7 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
-            fileh = h5py.File(args[0],'r')
+            fileh = h5py.File(args[0], mode='r')
             if "Constants" in fileh["/"].keys() and \
                "Header" in fileh["/"].keys():
                 fileh.close()
@@ -335,6 +331,7 @@
             pass
         return False
 
+
 class TipsyFile(ParticleFile):
 
     def _calculate_offsets(self, field_list):
@@ -344,8 +341,7 @@
         # To go above 1 domain, we need to include an indexing step in the
         # IOHandler, rather than simply reading from a single file.
         assert file_id == 0
-        super(TipsyFile, self).__init__(pf, io,
-                filename, file_id)
+        super(TipsyFile, self).__init__(pf, io, filename, file_id)
         io._create_dtypes(self)
 
 
@@ -365,14 +361,14 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 endian = ">",
-                 field_dtypes = None,
-                 domain_left_edge = None,
-                 domain_right_edge = None,
-                 unit_base = None,
-                 cosmology_parameters = None,
-                 parameter_file = None,
-                 n_ref = 64, over_refine_factor = 1):
+                 endian=">",
+                 field_dtypes=None,
+                 domain_left_edge=None,
+                 domain_right_edge=None,
+                 unit_base=None,
+                 cosmology_parameters=None,
+                 parameter_file=None,
+                 n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         self.endian = endian
@@ -387,7 +383,8 @@
 
         # My understanding is that dtypes are set on a field by field basis,
         # not on a (particle type, field) basis
-        if field_dtypes is None: field_dtypes = {}
+        if field_dtypes is None:
+            field_dtypes = {}
         self._field_dtypes = field_dtypes
 
         self._unit_base = unit_base or {}
@@ -404,7 +401,7 @@
         # the snapshot time and particle counts.
 
         f = open(self.parameter_filename, "rb")
-        hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
+        hh = self.endian + "".join(["%s" % (b) for a, b in self._header_spec])
         hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
                      struct.unpack(hh, f.read(struct.calcsize(hh))))])
         self.parameters.update(hvals)
@@ -437,7 +434,7 @@
                 if l.startswith('#') or l == '':
                     continue
                 # parse parameters according to tipsy parameter type
-                param, val = (i.strip() for i in line.split('=',1))
+                param, val = (i.strip() for i in line.split('=', 1))
                 if param.startswith('n') or param.startswith('i'):
                     val = long(val)
                 elif param.startswith('d'):
@@ -457,10 +454,10 @@
         if self.parameters.get('bComove', True):
             self.cosmological_simulation = 1
             cosm = self._cosmology_parameters or {}
-            dcosm = dict(current_redshift = 0.0,
-                         omega_lambda = 0.0,
-                         omega_matter = 0.0,
-                         hubble_constant = 1.0)
+            dcosm = dict(current_redshift=0.0,
+                         omega_lambda=0.0,
+                         omega_matter=0.0,
+                         hubble_constant=1.0)
             for param in ['current_redshift', 'omega_lambda',
                           'omega_matter', 'hubble_constant']:
                 pval = cosm.get(param, dcosm[param])
@@ -481,14 +478,14 @@
             DW = (self.domain_right_edge - self.domain_left_edge).max()
             cosmo = Cosmology(self.hubble_constant * 100.0,
                               self.omega_matter, self.omega_lambda)
-            length_unit = DW * self.units['cm'] # Get it in proper cm
+            length_unit = DW * self.units['cm']  # Get it in proper cm
             density_unit = cosmo.CriticalDensity(self.current_redshift)
-            mass_unit = density_unit * length_unit**3
+            mass_unit = density_unit * length_unit ** 3
         else:
             mass_unit = self.parameters.get('dMsolUnit', 1.0) * mass_sun_cgs
             length_unit = self.parameters.get('dKpcUnit', 1.0) * cm_per_kpc
-            density_unit = mass_unit / length_unit**3
-        time_unit = 1.0 / np.sqrt(G*density_unit)
+            density_unit = mass_unit / length_unit ** 3
+        time_unit = 1.0 / np.sqrt(G * density_unit)
         velocity_unit = length_unit / time_unit
         self.conversion_factors["velocity"] = velocity_unit
         self.conversion_factors["mass"] = mass_unit


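In the non-cosmological branch above, the time unit is the dynamical time, time_unit = 1/sqrt(G * density_unit), which comes out in seconds when G and the density unit are in cgs. A quick numeric check with illustrative values (one kpc length unit, a diffuse density):

    import numpy as np

    G = 6.674e-8                  # cgs: cm^3 g^-1 s^-2
    density_unit = 1.0e-24        # g cm^-3, illustrative
    length_unit = 3.0857e21       # cm, i.e. one kpc
    time_unit = 1.0 / np.sqrt(G * density_unit)   # ~3.9e15 s
    velocity_unit = length_unit / time_unit       # ~8.0e5 cm/s, about 8 km/s
    print time_unit, velocity_unit
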
https://bitbucket.org/yt_analysis/yt/commits/5e799e71a2ec/
Changeset:   5e799e71a2ec
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-12 19:33:00
Summary:     Merging from latest yt-2.X tip
Affected #:  2 files

diff -r aa55ea602e2fd104ca5c5d95f554694e07f9e738 -r 5e799e71a2ec07d37e4a02d7e0e8c02e1da71065 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -65,7 +65,7 @@
     new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
         new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = np.argsort(data_source.gridLevels)
+    grid_order = np.argsort(data_source.grid_levels[:,0])
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


