[yt-svn] commit/yt-3.0: 69 new changesets

commits-noreply at bitbucket.org
Fri May 24 12:04:34 PDT 2013


69 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/c5eb6de911ff/
Changeset:   c5eb6de911ff
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-17 22:58:09
Summary:     Add OctreeSubset and make ART (not ARTIO) and RAMSES subclass it.

This is Phase 1.  Phase 2 will make OctreeSubset behave like GridPatch in
that it supports __getitem__ and so on.  Phase 3 will port this behavior to
ARTIO and the SPH codes.
Affected #:  4 files

diff -r d48a016b4b8ce1c8326e23c308fd90789d0b4ec0 -r c5eb6de911ff807044c607a0a01d75f265beb007 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -31,6 +31,9 @@
 from grid_patch import \
     AMRGridPatch
 
+from octree_subset import \
+    OctreeSubset
+
 from static_output import \
     StaticOutput
 

diff -r d48a016b4b8ce1c8326e23c308fd90789d0b4ec0 -r c5eb6de911ff807044c607a0a01d75f265beb007 yt/data_objects/octree_subset.py
--- /dev/null
+++ b/yt/data_objects/octree_subset.py
@@ -0,0 +1,65 @@
+"""
+Subsets of octrees
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class OctreeSubset(object):
+    def __init__(self, domain, mask, cell_count):
+        self.mask = mask
+        self.domain = domain
+        self.oct_handler = domain.pf.h.oct_handler
+        self.cell_count = cell_count
+        level_counts = self.oct_handler.count_levels(
+            self.domain.pf.max_level, self.domain.domain_id, mask)
+        assert(level_counts.sum() == cell_count)
+        level_counts[1:] = level_counts[:-1]
+        level_counts[0] = 0
+        self.level_counts = np.add.accumulate(level_counts)
+
+    def select_icoords(self, dobj):
+        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fcoords(self, dobj):
+        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fwidth(self, dobj):
+        # Recall domain_dimensions is the number of cells, not octs
+        base_dx = (self.domain.pf.domain_width /
+                   self.domain.pf.domain_dimensions)
+        widths = np.empty((self.cell_count, 3), dtype="float64")
+        dds = (2**self.select_ires(dobj))
+        for i in range(3):
+            widths[:,i] = base_dx[i] / dds
+        return widths
+
+    def select_ires(self, dobj):
+        return self.oct_handler.ires(self.domain.domain_id, self.mask,
+                                     self.cell_count,
+                                     self.level_counts.copy())
+

diff -r d48a016b4b8ce1c8326e23c308fd90789d0b4ec0 -r c5eb6de911ff807044c607a0a01d75f265beb007 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.data_objects.field_info_container import \
@@ -434,7 +436,7 @@
         return False
 
 
-class ARTDomainSubset(object):
+class ARTDomainSubset(OctreeSubset):
     def __init__(self, domain, mask, cell_count, domain_level):
         self.mask = mask
         self.domain = domain

diff -r d48a016b4b8ce1c8326e23c308fd90789d0b4ec0 -r c5eb6de911ff807044c607a0a01d75f265beb007 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,6 +35,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 
 from .definitions import ramses_header
 from yt.utilities.definitions import \
@@ -252,43 +254,7 @@
         self.select(selector)
         return self.count(selector)
 
-class RAMSESDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
+class RAMSESDomainSubset(OctreeSubset):
 
     def fill(self, content, fields):
         # Here we get a copy of the file, which we skip through and read the
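
The level_counts bookkeeping in OctreeSubset.__init__ above converts the
per-level cell counts into starting offsets for one flat, level-ordered cell
array.  A minimal standalone sketch of that shift-then-accumulate pattern,
using made-up counts in place of oct_handler.count_levels():

    import numpy as np

    # Hypothetical per-level cell counts (what count_levels() would return).
    level_counts = np.array([8, 64, 24, 4], dtype="int64")
    cell_count = level_counts.sum()

    # Shift right, zero the first slot, then accumulate: each entry becomes
    # the starting offset of that level's cells in the flat array
    # (an exclusive prefix sum).
    offsets = level_counts.copy()
    offsets[1:] = offsets[:-1]
    offsets[0] = 0
    offsets = np.add.accumulate(offsets)

    assert offsets[-1] + level_counts[-1] == cell_count  # offsets: [0 8 72 96]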


https://bitbucket.org/yt_analysis/yt-3.0/commits/57c67f583c86/
Changeset:   57c67f583c86
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-17 23:50:07
Summary:     Begin the process of making OctreeSubset a subclass of YTSelectionContainer
Affected #:  4 files

diff -r c5eb6de911ff807044c607a0a01d75f265beb007 -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -25,10 +25,34 @@
 
 import numpy as np
 
-class OctreeSubset(object):
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
+from .field_info_container import \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+
+class OctreeSubset(YTSelectionContainer):
+    _spatial = True
+    _num_ghost_zones = 0
+    _num_zones = 2
+    _type_name = 'octree_subset'
+    _skip_add = True
+    _con_args = ('domain', 'mask', 'cell_count')
+    _container_fields = ("dx", "dy", "dz")
+
     def __init__(self, domain, mask, cell_count):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
         self.mask = mask
+        self.n_oct = mask.shape[0]
         self.domain = domain
+        self.pf = domain.pf
+        self.hierarchy = self.pf.hierarchy
         self.oct_handler = domain.pf.h.oct_handler
         self.cell_count = cell_count
         level_counts = self.oct_handler.count_levels(
@@ -37,6 +61,8 @@
         level_counts[1:] = level_counts[:-1]
         level_counts[0] = 0
         self.level_counts = np.add.accumulate(level_counts)
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
 
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(self.domain.domain_id, self.mask,
@@ -63,3 +89,18 @@
                                      self.cell_count,
                                      self.level_counts.copy())
 
+    def __getitem__(self, key):
+        tr = super(OctreeSubset, self).__getitem__(key)
+        import pdb; pdb.set_trace()
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.pf._get_field_info(*fields[0])
+        if not finfo.particle_type:
+            nz = self._num_zones + 2*self._num_ghost_zones
+            dest_shape = (nz, nz, nz, self.n_oct)
+            return tr.reshape(dest_shape)
+        return tr
+
+

diff -r c5eb6de911ff807044c607a0a01d75f265beb007 -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -435,43 +435,10 @@
                 return False
         return False
 
-
 class ARTDomainSubset(OctreeSubset):
     def __init__(self, domain, mask, cell_count, domain_level):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
+        super(ARTDomainSubset, self).__init__(domain, mask, cell_count)
         self.domain_level = domain_level
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        base_dx = 1.0/self.domain.pf.domain_dimensions
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:, i] = base_dx[i] / dds
-        return widths
 
     def fill_root(self, content, ftfields):
         """

diff -r c5eb6de911ff807044c607a0a01d75f265beb007 -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -355,8 +355,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

diff -r c5eb6de911ff807044c607a0a01d75f265beb007 -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1098,3 +1098,44 @@
 
 grid_selector = GridSelector
 
+cdef class OctreeSubsetSelector(SelectorObject):
+    # This is a numpy array, which will be a bool of ndim 1
+    cdef object oct_mask
+
+    def __init__(self, dobj):
+        self.oct_mask = dobj.mask
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_octs(self, OctreeContainer octree):
+        return self.oct_mask
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void set_bounds(self,
+                         np.float64_t left_edge[3], np.float64_t right_edge[3],
+                         np.float64_t dds[3], int ind[3][2], int *check):
+        check[0] = 0
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        return 1
+
+
+octree_subset_selector = OctreeSubsetSelector
+
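
The OctreeSubsetSelector added above is unusual among selectors: the
selection was already decided when the subset was built, so select_octs just
hands back the stored mask and every cell inside a selected oct is accepted.
A toy pure-Python stand-in (the real class is a Cython SelectorObject; the
names here are illustrative only):

    import numpy as np

    class MaskBackedSelector(object):
        def __init__(self, dobj_mask):
            # One row of 8 child-cell flags per oct, as in dobj.mask above.
            self.oct_mask = np.asarray(dobj_mask, dtype=bool)

        def select_octs(self, octree=None):
            # Hand back the precomputed mask; the real selector later
            # intersects it with a specific domain inside the container.
            return self.oct_mask

        def select_cell(self, pos, dds):
            # Any cell belonging to a selected oct is accepted.
            return 1

    mask = np.zeros((4, 8), dtype=bool)
    mask[2, :] = True
    sel = MaskBackedSelector(mask)
    assert sel.select_octs().sum() == 8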


https://bitbucket.org/yt_analysis/yt-3.0/commits/1babf3636103/
Changeset:   1babf3636103
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-18 12:47:07
Summary:     Add an Octree selector that enables spatial chunking for Octrees.
Affected #:  3 files

diff -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 -r 1babf36361036e3b6d4658818aee47c0a1877a7a yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -49,7 +49,6 @@
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.mask = mask
-        self.n_oct = mask.shape[0]
         self.domain = domain
         self.pf = domain.pf
         self.hierarchy = self.pf.hierarchy
@@ -91,7 +90,6 @@
 
     def __getitem__(self, key):
         tr = super(OctreeSubset, self).__getitem__(key)
-        import pdb; pdb.set_trace()
         try:
             fields = self._determine_fields(key)
         except YTFieldTypeNotFound:
@@ -99,7 +97,8 @@
         finfo = self.pf._get_field_info(*fields[0])
         if not finfo.particle_type:
             nz = self._num_zones + 2*self._num_ghost_zones
-            dest_shape = (nz, nz, nz, self.n_oct)
+            n_oct = tr.shape[0] / (nz**3.0)
+            dest_shape = (nz, nz, nz, n_oct)
             return tr.reshape(dest_shape)
         return tr
 

diff -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 -r 1babf36361036e3b6d4658818aee47c0a1877a7a yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -666,6 +666,20 @@
                 count[cur.my_octs[i - cur.offset].domain - 1] += 1
         return count
 
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n, 
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            for i in range(8):
+                m2[o.local_ind, i] = mask[o.local_ind, i]
+        return m2
+
     def check(self, int curdom):
         cdef int dind, pi
         cdef Oct oct

diff -r 57c67f583c863a4f5bdc1f02185a22b130f5d859 -r 1babf36361036e3b6d4658818aee47c0a1877a7a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1101,15 +1101,26 @@
 cdef class OctreeSubsetSelector(SelectorObject):
     # This is a numpy array, which will be a bool of ndim 1
     cdef object oct_mask
+    cdef int domain_id
 
     def __init__(self, dobj):
         self.oct_mask = dobj.mask
+        self.domain_id = dobj.domain.domain_id
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def select_octs(self, OctreeContainer octree):
-        return self.oct_mask
+        cdef np.ndarray[np.uint8_t, ndim=2] m2
+        m2 = octree.domain_and(self.oct_mask, self.domain_id)
+        cdef int oi, i, a
+        for oi in range(m2.shape[0]):
+            a = 0
+            for i in range(8):
+                if m2[oi, i] == 1: a = 1
+            for i in range(8):
+                m2[oi, i] = a
+        return m2.astype("bool")
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
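
The select_octs loop above promotes the mask from per-cell to per-oct: if any
of an oct's 8 children is flagged, all 8 are flagged.  The same reduction in
vectorized NumPy, standalone, with an invented mask:

    import numpy as np

    # Three octs, 8 child cells each: none, one, and all cells selected.
    m2 = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 1, 0, 0, 0, 0, 0, 0],
                   [1, 1, 1, 1, 1, 1, 1, 1]], dtype="uint8")

    # Equivalent of the explicit oi/i loops: any() along the child axis,
    # then broadcast the per-oct answer back over the 8 children.
    per_oct = m2.any(axis=1)
    promoted = np.repeat(per_oct[:, np.newaxis], 8, axis=1)

    assert promoted.sum() == 16   # oct 1 and oct 2 are fully selected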


https://bitbucket.org/yt_analysis/yt-3.0/commits/77b261f38610/
Changeset:   77b261f38610
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-18 13:43:28
Summary:     Adding spatial chunking to particle frontends.
Affected #:  3 files

diff -r 1babf36361036e3b6d4658818aee47c0a1877a7a -r 77b261f38610ca068b32241820f144bf82bed38f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -59,6 +59,7 @@
     particle_types = ("all",)
     geometry = "cartesian"
     coordinates = None
+    max_level = 99
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):

diff -r 1babf36361036e3b6d4658818aee47c0a1877a7a -r 77b261f38610ca068b32241820f144bf82bed38f yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \
@@ -70,40 +72,8 @@
     def _calculate_offsets(self, fields):
         pass
 
-class ParticleDomainSubset(object):
-    def __init__(self, domain, mask, count):
-        self.domain = domain
-        self.mask = mask
-        self.cell_count = count
-        self.oct_handler = domain.pf.h.oct_handler
-        level_counts = self.oct_handler.count_levels(
-            99, self.domain.domain_id, mask)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count)
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count)
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count)
-
+class ParticleDomainSubset(OctreeSubset):
+    pass
 
 class ParticleGeometryHandler(OctreeGeometryHandler):
 
@@ -170,8 +140,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -216,6 +194,7 @@
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
         self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
         self.omega_lambda = hvals["OmegaLambda"]
         self.omega_matter = hvals["Omega0"]
@@ -317,6 +296,7 @@
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
 
@@ -411,6 +391,7 @@
         self.domain_left_edge = np.zeros(3, "float64") - 0.5
         self.domain_right_edge = np.ones(3, "float64") + 0.5
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
 

diff -r 1babf36361036e3b6d4658818aee47c0a1877a7a -r 77b261f38610ca068b32241820f144bf82bed38f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1055,7 +1055,8 @@
     @cython.cdivision(True)
     def icoords(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the integer positions of the cells
         #Limited to this domain and within the mask
         #Positions are binary; aside from the root mesh
@@ -1084,7 +1085,8 @@
     @cython.cdivision(True)
     def ires(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(cell_count, dtype="int64")
@@ -1104,7 +1106,8 @@
     @cython.cdivision(True)
     def fcoords(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="float64")
@@ -1423,4 +1426,17 @@
                 count[o.domain] += mask[oi,i]
         return count
 
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n, 
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            for i in range(8):
+                m2[o.local_ind, i] = mask[o.local_ind, i]
+        return m2
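
_chunk_spatial above is a generator: it walks the oct subsets attached to the
data object, skips the empty ones, and yields one single-subset chunk at a
time.  A schematic version with plain Python objects standing in for
YTDataChunk and the ghost-zone branch left out:

    from collections import namedtuple

    Chunk = namedtuple("Chunk", ["chunk_type", "objs", "size"])

    def iter_spatial_chunks(subsets):
        # 'subsets' is any iterable of objects with a cell_count attribute.
        for og in subsets:
            size = og.cell_count
            if size == 0:
                continue                      # nothing selected in this subset
            yield Chunk("spatial", [og], size)

    Subset = namedtuple("Subset", ["cell_count"])
    chunks = list(iter_spatial_chunks([Subset(12), Subset(0), Subset(3)]))
    assert [c.size for c in chunks] == [12, 3]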
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/b7949c2f0f27/
Changeset:   b7949c2f0f27
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-18 21:38:12
Summary:     Spatial chunking within data objects for Octree codes now works.
Affected #:  3 files

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r b7949c2f0f27d84a367fe70fc2301f4e58a7ef33 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -60,6 +60,8 @@
         level_counts[1:] = level_counts[:-1]
         level_counts[0] = 0
         self.level_counts = np.add.accumulate(level_counts)
+        self._last_mask = None
+        self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
 
@@ -98,8 +100,35 @@
         if not finfo.particle_type:
             nz = self._num_zones + 2*self._num_ghost_zones
             n_oct = tr.shape[0] / (nz**3.0)
-            dest_shape = (nz, nz, nz, n_oct)
-            return tr.reshape(dest_shape)
+            tr.shape = (n_oct, nz, nz, nz)
+            tr = np.rollaxis(tr, 0, 4)
+            return tr
         return tr
 
+    def deposit(self, positions, fields, method):
+        pass
 
+    def select(self, selector):
+        if id(selector) == self._last_selector_id:
+            return self._last_mask
+        self._last_mask = self.oct_handler.domain_mask(
+                self.mask, self.domain.domain_id)
+        if self._last_mask.sum() == 0: return None
+        self._last_selector_id = id(selector)
+        return self._last_mask
+
+    def count(self, selector):
+        if id(selector) == self._last_selector_id:
+            if self._last_mask is None: return 0
+            return self._last_mask.sum()
+        self.select(selector)
+        return self.count(selector)
+
+    def count_particles(self, selector, x, y, z):
+        # We don't cache the selector results
+        count = selector.count_points(x,y,z)
+        return count
+
+    def select_particles(self, selector, x, y, z):
+        mask = selector.select_points(x,y,z)
+        return mask

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r b7949c2f0f27d84a367fe70fc2301f4e58a7ef33 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -678,7 +678,43 @@
             o = &cur.my_octs[oi]
             for i in range(8):
                 m2[o.local_ind, i] = mask[o.local_ind, i]
-        return m2
+        return m2 # NOTE: This is uint8_t
+
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.local_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
 
     def check(self, int curdom):
         cdef int dind, pi

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r b7949c2f0f27d84a367fe70fc2301f4e58a7ef33 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1147,6 +1147,5 @@
                          int eterm[3]) nogil:
         return 1
 
-
 octree_subset_selector = OctreeSubsetSelector
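
With spatial chunking working, __getitem__ above now returns fluid fields as
per-oct blocks: the flat cell array is reshaped to (n_oct, nz, nz, nz) and
the oct axis is rolled to the end, giving (nz, nz, nz, n_oct) with
nz = _num_zones + 2 * _num_ghost_zones.  A standalone NumPy sketch of that
transformation (the field values are invented):

    import numpy as np

    num_zones, num_ghost_zones = 2, 0
    nz = num_zones + 2 * num_ghost_zones      # 2 cells per oct edge
    n_oct = 5                                 # pretend the subset holds 5 octs

    # Flat, oct-ordered cell data, nz**3 values per oct.
    tr = np.arange(n_oct * nz**3, dtype="float64")

    tr = tr.reshape((n_oct, nz, nz, nz))
    tr = np.rollaxis(tr, 0, 4)                # move the oct axis last

    assert tr.shape == (nz, nz, nz, n_oct)
    assert tr[0, 0, 0, 1] == nz**3            # first cell of the second oct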
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/05d417a01bbd/
Changeset:   05d417a01bbd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-18 21:54:39
Summary:     Adding domain_mask for particle octrees.
Affected #:  1 file

diff -r b7949c2f0f27d84a367fe70fc2301f4e58a7ef33 -r 05d417a01bbd9874ac07ab9a1ee743ee9ada59d0 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -690,7 +690,7 @@
         # Note also that typically when something calls domain_and, they will 
         # use a logical_any along the oct axis.  Here we don't do that.
         # Note also that we change the shape of the returned array.
-        cdef np.int64_t i, j, k, oi, n, nm
+        cdef np.int64_t i, j, k, oi, n, nm, use
         cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
         cdef Oct *o
         n = mask.shape[0]
@@ -1476,3 +1476,39 @@
                 m2[o.local_ind, i] = mask[o.local_ind, i]
         return m2
 
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.local_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
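
domain_mask above makes two passes: count the octs of this domain that have
at least one selected cell, allocate a (2, 2, 2, nm) array, then fill it
using the flat child index ii = ((k*2)+j)*2+i.  A pure-NumPy sketch of the
same mapping, with a plain ownership array standing in for walking oct_list
(the names here are illustrative):

    import numpy as np

    def domain_mask_sketch(mask, owner, domain_id):
        # mask:  global (n_oct, 8) boolean cell mask
        # owner: domain id of each oct
        mine = (owner == domain_id) & mask.any(axis=1)
        picked = mask[mine]                          # (nm, 8)
        nm = picked.shape[0]
        m2 = np.zeros((2, 2, 2, nm), dtype=bool)
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    ii = ((k * 2) + j) * 2 + i       # flat child index
                    m2[i, j, k, :] = picked[:, ii]
        return m2

    mask = np.zeros((3, 8), dtype=bool)
    mask[0, 3] = True                                # oct 0: one child selected
    owner = np.array([1, 1, 2])
    m2 = domain_mask_sketch(mask, owner, domain_id=1)
    assert m2.shape == (2, 2, 2, 1) and m2[1, 1, 0, 0]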


https://bitbucket.org/yt_analysis/yt-3.0/commits/94dce22865da/
Changeset:   94dce22865da
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-19 21:13:18
Summary:     Enabled PkdGrav output via endian and dtype specification.

Also added an exception for particles in Tipsy format that exceed the bounds
of the octree.
Affected #:  3 files

diff -r 05d417a01bbd9874ac07ab9a1ee743ee9ada59d0 -r 94dce22865dacd30b38dfeb8fe05c5ce709bff64 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -351,10 +351,27 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 root_dimensions = 64):
+                 root_dimensions = 64, endian = ">",
+                 field_dtypes = None,
+                 domain_left_edge = None,
+                 domain_right_edge = None):
+        self.endian = endian
         self._root_dimensions = root_dimensions
         # Set up the template for domain files
         self.storage_filename = None
+        if domain_left_edge is None:
+            domain_left_edge = np.zeros(3, "float64") - 0.5
+        if domain_right_edge is None:
+            domain_right_edge = np.ones(3, "float64") + 0.5
+
+        self.domain_left_edge = np.array(domain_left_edge, dtype="float64")
+        self.domain_right_edge = np.array(domain_right_edge, dtype="float64")
+
+        # My understanding is that dtypes are set on a field by field basis,
+        # not on a (particle type, field) basis
+        if field_dtypes is None: field_dtypes = {}
+        self._field_dtypes = field_dtypes
+
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):
@@ -373,7 +390,7 @@
         # in the GADGET-2 user guide.
 
         f = open(self.parameter_filename, "rb")
-        hh = ">" + "".join(["%s" % (b) for a,b in self._header_spec])
+        hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
         hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
                      struct.unpack(hh, f.read(struct.calcsize(hh))))])
         self._header_offset = f.tell()
@@ -388,8 +405,9 @@
         # This may not be correct.
         self.current_time = hvals["time"]
 
-        self.domain_left_edge = np.zeros(3, "float64") - 0.5
-        self.domain_right_edge = np.ones(3, "float64") + 0.5
+        # NOTE: These are now set in the main initializer.
+        #self.domain_left_edge = np.zeros(3, "float64") - 0.5
+        #self.domain_right_edge = np.ones(3, "float64") + 0.5
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
         self.periodicity = (True, True, True)
 

diff -r 05d417a01bbd9874ac07ab9a1ee743ee9ada59d0 -r 94dce22865dacd30b38dfeb8fe05c5ce709bff64 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -372,6 +372,7 @@
         return rv
 
     def _initialize_octree(self, domain, octree):
+        pf = domain.pf
         with open(domain.domain_filename, "rb") as f:
             f.seek(domain.pf._header_offset)
             for ptype in self._ptypes:
@@ -391,6 +392,11 @@
                             pos[:,1].min(), pos[:,1].max())
                 mylog.debug("Spanning: %0.3e .. %0.3e in z",
                             pos[:,2].min(), pos[:,2].max())
+                if np.any(pos.min(axis=0) < pf.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > pf.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                           pf.domain_left_edge,
+                                           pf.domain_right_edge)
                 del pp
                 octree.add(pos, domain.domain_id)
 
@@ -412,10 +418,12 @@
         for ptype, field in self._fields:
             pfields = []
             if tp[ptype] == 0: continue
+            dtbase = domain.pf._field_dtypes.get(field, 'f')
+            ff = "%s%s" % (domain.pf.endian, dtbase)
             if field in _vector_fields:
-                dt = (field, [('x', '>f'), ('y', '>f'), ('z', '>f')])
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
             else:
-                dt = (field, '>f')
+                dt = (field, ff)
             pds.setdefault(ptype, []).append(dt)
             field_list.append((ptype, field))
         for ptype in pds:

diff -r 05d417a01bbd9874ac07ab9a1ee743ee9ada59d0 -r 94dce22865dacd30b38dfeb8fe05c5ce709bff64 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -249,3 +249,14 @@
 
     def __str__(self):
         return "Data selector '%s' not implemented." % (self.class_name)
+
+class YTDomainOverflow(YTException):
+    def __init__(self, mi, ma, dle, dre):
+        self.mi = mi
+        self.ma = ma
+        self.dle = dle
+        self.dre = dre
+
+    def __str__(self):
+        return "Particle bounds %s and %s exceed domain bounds %s and %s" % (
+            self.mi, self.ma, self.dle, self.dre)
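
The io.py change above builds NumPy dtypes from the dataset's endianness plus
an optional per-field base dtype ('f' by default), expanding vector fields
into x/y/z components.  A small sketch of that construction; the field names
and the override shown here are hypothetical:

    import numpy as np

    endian = ">"                              # as stored on the parameter file
    field_dtypes = {"Coordinates": "d"}       # hypothetical per-field override

    def particle_dtype(field, is_vector):
        ff = "%s%s" % (endian, field_dtypes.get(field, "f"))
        if is_vector:
            return (field, [("x", ff), ("y", ff), ("z", ff)])
        return (field, ff)

    dt = np.dtype([particle_dtype("Coordinates", True),
                   particle_dtype("Mass", False)])
    # Coordinates read as big-endian doubles, Mass as a big-endian float32.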


https://bitbucket.org/yt_analysis/yt-3.0/commits/fa49ef603bcf/
Changeset:   fa49ef603bcf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-20 16:26:26
Summary:     Minor fixes for particle octrees for dx and domain size.
Affected #:  2 files

diff -r 94dce22865dacd30b38dfeb8fe05c5ce709bff64 -r fa49ef603bcf478298d6fefd914f7d050304c831 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -362,7 +362,7 @@
         if domain_left_edge is None:
             domain_left_edge = np.zeros(3, "float64") - 0.5
         if domain_right_edge is None:
-            domain_right_edge = np.ones(3, "float64") + 0.5
+            domain_right_edge = np.zeros(3, "float64") + 0.5
 
         self.domain_left_edge = np.array(domain_left_edge, dtype="float64")
         self.domain_right_edge = np.array(domain_right_edge, dtype="float64")

diff -r 94dce22865dacd30b38dfeb8fe05c5ce709bff64 -r fa49ef603bcf478298d6fefd914f7d050304c831 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1303,7 +1303,7 @@
                 #IND Corresponding integer index on the root octs
                 #CP Center  point of that oct
                 pp[i] = pos[p, i]
-                dds[i] = (self.DRE[i] + self.DLE[i])/self.nn[i]
+                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
                 ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
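
The one-character fix above matters because dds is the root cell width: it
must be (DRE - DLE) / nn, not a sum, before positions can be binned onto the
root mesh.  A standalone sketch of the binning (the domain edges and the
particle position are made up):

    import numpy as np

    DLE = np.array([-0.5, -0.5, -0.5])     # domain left edge
    DRE = np.array([0.5, 0.5, 0.5])        # domain right edge
    nn = np.array([64, 64, 64])            # root mesh dimensions

    dds = (DRE - DLE) / nn                 # root cell width (the fixed form)

    pos = np.array([0.1, -0.2, 0.49])
    ind = ((pos - DLE) / dds).astype("int64")   # root-mesh index of the particle
    cp = (ind + 0.5) * dds + DLE                # center of that root cell

    assert np.all((ind >= 0) & (ind < nn))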


https://bitbucket.org/yt_analysis/yt-3.0/commits/145a6c342daa/
Changeset:   145a6c342daa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-22 18:15:04
Summary:     When fields are already in field_data for octrees, we don't need to reshape them.
Affected #:  1 file

diff -r fa49ef603bcf478298d6fefd914f7d050304c831 -r 145a6c342daafe2991a40304daa02c09df0d2e5d yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -99,9 +99,12 @@
         finfo = self.pf._get_field_info(*fields[0])
         if not finfo.particle_type:
             nz = self._num_zones + 2*self._num_ghost_zones
-            n_oct = tr.shape[0] / (nz**3.0)
-            tr.shape = (n_oct, nz, nz, nz)
-            tr = np.rollaxis(tr, 0, 4)
+            # We may need to reshape the field, if it is being queried from
+            # field_data.  If it's already cached, it just passes through.
+            if len(tr.shape) < 4: 
+                n_oct = tr.shape[0] / (nz**3.0)
+                tr.shape = (n_oct, nz, nz, nz)
+                tr = np.rollaxis(tr, 0, 4)
             return tr
         return tr
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/0b7d1ec2011c/
Changeset:   0b7d1ec2011c
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-03-20 23:09:01
Summary:     Slight changes to file finding; should work with STG datasets
Affected #:  1 file

diff -r 4f194a75accbb4485b0185b95079d0be2a681d2a -r 0b7d1ec2011c66db46bc740ee74766bd6bfb681e yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -224,17 +224,19 @@
         particle header, star files, etc.
         """
         base_prefix, base_suffix = filename_pattern['amr']
-        possibles = glob.glob(os.path.dirname(file_amr)+"/*")
         for filetype, (prefix, suffix) in filename_pattern.iteritems():
-            # if this attribute is already set skip it
+            if "amr" in filetype: continue
             if getattr(self, "_file_"+filetype, None) is not None:
                 continue
             stripped = file_amr.replace(base_prefix, prefix)
             stripped = stripped.replace(base_suffix, suffix)
-            match, = difflib.get_close_matches(stripped, possibles, 1, 0.6)
-            if match is not None:
-                mylog.info('discovered %s:%s', filetype, match)
-                setattr(self, "_file_"+filetype, match)
+            path = "/%s*%s"%(prefix,suffix)
+            possibles = glob.glob(os.path.dirname(file_amr)+path)
+            matches = difflib.get_close_matches(stripped, possibles, 1, 0.85)
+            if len(matches) == 0: continue
+            if matches[0] is not None:
+                mylog.info('discovered %s:%s', filetype, matches[0])
+                setattr(self, "_file_"+filetype, matches[0])
             else:
                 setattr(self, "_file_"+filetype, None)
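
The revised discovery loop above narrows the glob to files matching each
filetype's own prefix/suffix before asking difflib for a close match, and
only accepts a match above a 0.85 similarity cutoff.  A standalone sketch of
that lookup; the example arguments are hypothetical:

    import os
    import glob
    import difflib

    def find_companion(file_amr, prefix, suffix, base_prefix, base_suffix):
        # Build the name we would expect for this filetype...
        stripped = file_amr.replace(base_prefix, prefix)
        stripped = stripped.replace(base_suffix, suffix)
        # ...glob only candidates with the right prefix and suffix...
        pattern = os.path.dirname(file_amr) + "/%s*%s" % (prefix, suffix)
        possibles = glob.glob(pattern)
        # ...and accept the single closest match above the cutoff, if any.
        matches = difflib.get_close_matches(stripped, possibles, 1, 0.85)
        return matches[0] if matches else None

    # Hypothetical usage:
    # find_companion("/data/run_amr.dat", "run_part", ".dat",
    #                "run_amr", ".dat")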
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/54a5da1fd2a5/
Changeset:   54a5da1fd2a5
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-03-20 23:09:16
Summary:     Ensure 64-bit data leaving io.py
Affected #:  1 file

diff -r 0b7d1ec2011c66db46bc740ee74766bd6bfb681e -r 54a5da1fd2a51f8f836d1fd0e1dbf1425ca7c174 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -356,7 +356,7 @@
                 data = np.concatenate((data, temp))
             else:
                 fh.seek(4*np_per_page, 1)
-    return data
+    return data.astype("f8")
 
 
 def read_star_field(file, field=None):


https://bitbucket.org/yt_analysis/yt-3.0/commits/c78f5ddaa98f/
Changeset:   c78f5ddaa98f
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-08 19:15:31
Summary:     replaced tabs with spaces
Affected #:  1 file

diff -r 54a5da1fd2a51f8f836d1fd0e1dbf1425ca7c174 -r c78f5ddaa98f189c80399bb67a4c814672371347 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -312,8 +312,8 @@
         Used for testing the periodic adjustment machinery
         of this derived quantity.
     include_particles : Bool
-	Should we add the mass contribution of particles
-	to calculate binding energy?
+    Should we add the mass contribution of particles
+    to calculate binding energy?
 
     Examples
     --------
@@ -332,13 +332,13 @@
                       (data["z-velocity"] - bv_z)**2)).sum()
 
     if (include_particles):
-	mass_to_use = data["TotalMass"]
+        mass_to_use = data["TotalMass"]
         kinetic += 0.5 * (data["Dark_Matter_Mass"] *
                           ((data["cic_particle_velocity_x"] - bv_x)**2 +
                            (data["cic_particle_velocity_y"] - bv_y)**2 +
                            (data["cic_particle_velocity_z"] - bv_z)**2)).sum()
     else:
-	mass_to_use = data["CellMass"]
+        mass_to_use = data["CellMass"]
     # Add thermal energy to kinetic energy
     if (include_thermal_energy):
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
@@ -375,8 +375,8 @@
     for label in ["x", "y", "z"]: # Separating CellMass from the for loop
         local_data[label] = data[label]
     local_data["CellMass"] = mass_to_use # Adding CellMass separately
-					 # NOTE: if include_particles = True, local_data["CellMass"]
-					 #       is not the same as data["CellMass"]!!!
+    # NOTE: if include_particles = True, local_data["CellMass"]
+    #       is not the same as data["CellMass"]!!!
     if periodic.any():
         # Adjust local_data to re-center the clump to remove the periodicity
         # by the gap calculated above.
@@ -431,7 +431,7 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = np.array([local_data["CellMass"][sel]], order='F')
+            vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
                np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.


https://bitbucket.org/yt_analysis/yt-3.0/commits/13db4ab10eae/
Changeset:   13db4ab10eae
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-08 19:41:17
Summary:     Merge
Affected #:  4 files

diff -r c78f5ddaa98f189c80399bb67a4c814672371347 -r 13db4ab10eae7872ef876d7d01094b5077a6aba7 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -33,6 +33,7 @@
     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
+    pf.omega_matter = 0.27
     for unit in mpc_conversion:
         pf.units[unit+'h'] = pf.units[unit]
         pf.units[unit+'cm'] = pf.units[unit]
@@ -72,7 +73,7 @@
         if not field.particle_type:
             assert_equal(v1, dd1["gas", self.field_name])
         if not needs_spatial:
-            assert_equal(v1, conv*field._function(field, dd2))
+            assert_array_almost_equal_nulp(v1, conv*field._function(field, dd2), 4)
         if not skip_grids:
             for g in pf.h.grids:
                 g.field_parameters.update(_sample_parameters)
@@ -80,7 +81,7 @@
                 v1 = g[self.field_name]
                 g.clear_data()
                 g.field_parameters.update(_sample_parameters)
-                assert_equal(v1, conv*field._function(field, g))
+                assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
 
 def test_all_fields():
     for field in FieldInfo:

diff -r c78f5ddaa98f189c80399bb67a4c814672371347 -r 13db4ab10eae7872ef876d7d01094b5077a6aba7 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -94,7 +94,12 @@
                              self.amr_header['ncpu']):
                 header = ( ('file_ilevel', 1, 'I'),
                            ('file_ncache', 1, 'I') )
-                hvals = fpu.read_attrs(f, header)
+                try:
+                    hvals = fpu.read_attrs(f, header, "=")
+                except AssertionError:
+                    print "You are running with the wrong number of fields."
+                    print "Please specify these in the load command."
+                    raise
                 if hvals['file_ncache'] == 0: continue
                 assert(hvals['file_ilevel'] == level+1)
                 if cpu + 1 == self.domain_id and level >= min_level:
@@ -143,6 +148,7 @@
             fpu.skip(f, 1)
             field_offsets[field] = f.tell()
         self.particle_field_offsets = field_offsets
+        self.particle_field_types = dict(particle_fields)
 
     def _read_amr_header(self):
         hvals = {}
@@ -268,7 +274,7 @@
         base_dx = (self.domain.pf.domain_width /
                    self.domain.pf.domain_dimensions)
         widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
+        dds = (2**self.select_ires(dobj))
         for i in range(3):
             widths[:,i] = base_dx[i] / dds
         return widths

diff -r c78f5ddaa98f189c80399bb67a4c814672371347 -r 13db4ab10eae7872ef876d7d01094b5077a6aba7 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -60,33 +60,45 @@
     def _read_particle_selection(self, chunks, selector, fields):
         size = 0
         masks = {}
+        chunks = list(chunks)
+        pos_fields = [("all","particle_position_%s" % ax) for ax in "xyz"]
         for chunk in chunks:
             for subset in chunk.objs:
                 # We read the whole thing, then feed it back to the selector
-                offsets = []
-                f = open(subset.domain.part_fn, "rb")
-                foffsets = subset.domain.particle_field_offsets
-                selection = {}
-                for ax in 'xyz':
-                    field = "particle_position_%s" % ax
-                    f.seek(foffsets[field])
-                    selection[ax] = fpu.read_vector(f, 'd')
-                mask = selector.select_points(selection['x'],
-                            selection['y'], selection['z'])
+                selection = self._read_particle_subset(subset, pos_fields)
+                mask = selector.select_points(
+                    selection["all", "particle_position_x"],
+                    selection["all", "particle_position_y"],
+                    selection["all", "particle_position_z"])
                 if mask is None: continue
+                #print "MASK", mask
                 size += mask.sum()
                 masks[id(subset)] = mask
         # Now our second pass
-        tr = dict((f, np.empty(size, dtype="float64")) for f in fields)
+        tr = {}
+        pos = 0
         for chunk in chunks:
             for subset in chunk.objs:
-                f = open(subset.domain.part_fn, "rb")
+                selection = self._read_particle_subset(subset, fields)
                 mask = masks.pop(id(subset), None)
                 if mask is None: continue
-                for ftype, fname in fields:
-                    offsets.append((foffsets[fname], (ftype,fname)))
-                for offset, field in sorted(offsets):
-                    f.seek(offset)
-                    tr[field] = fpu.read_vector(f, 'd')[mask]
+                count = mask.sum()
+                for field in fields:
+                    ti = selection.pop(field)[mask]
+                    if field not in tr:
+                        dt = subset.domain.particle_field_types[field[1]]
+                        tr[field] = np.empty(size, dt)
+                    tr[field][pos:pos+count] = ti
+                pos += count
         return tr
 
+    def _read_particle_subset(self, subset, fields):
+        f = open(subset.domain.part_fn, "rb")
+        foffsets = subset.domain.particle_field_offsets
+        tr = {}
+        #for field in sorted(fields, key=lambda a:foffsets[a]):
+        for field in fields:
+            f.seek(foffsets[field[1]])
+            dt = subset.domain.particle_field_types[field[1]]
+            tr[field] = fpu.read_vector(f, dt)
+        return tr

diff -r c78f5ddaa98f189c80399bb67a4c814672371347 -r 13db4ab10eae7872ef876d7d01094b5077a6aba7 yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -117,18 +117,21 @@
     >>> f = open("fort.3", "rb")
     >>> rv = read_vector(f, 'd')
     """
-    fmt = endian+"%s" % d
-    size = struct.calcsize(fmt)
-    padfmt = endian + "I"
-    padsize = struct.calcsize(padfmt)
-    length = struct.unpack(padfmt,f.read(padsize))[0]
-    if length % size!= 0:
+    pad_fmt = "%sI" % (endian)
+    pad_size = struct.calcsize(pad_fmt)
+    vec_len = struct.unpack(pad_fmt,f.read(pad_size))[0] # bytes
+    vec_fmt = "%s%s" % (endian, d)
+    vec_size = struct.calcsize(vec_fmt)
+    if vec_len % vec_size != 0:
         print "fmt = '%s' ; length = %s ; size= %s" % (fmt, length, size)
         raise RuntimeError
-    count = length/ size
-    tr = np.fromfile(f,fmt,count=count)
-    length2= struct.unpack(padfmt,f.read(padsize))[0]
-    assert(length == length2)
+    vec_num = vec_len / vec_size
+    if isinstance(f, file): # Needs to be explicitly a file
+        tr = np.fromfile(f, vec_fmt, count=vec_num)
+    else:
+        tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num)
+    vec_len2 = struct.unpack(pad_fmt,f.read(pad_size))[0]
+    assert(vec_len == vec_len2)
     return tr
 
 def skip(f, n=1, endian='='):
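
The rewritten read_vector above reads Fortran unformatted records: a 4-byte
byte count, the payload, then the same byte count repeated as a trailing pad.
A self-contained sketch of that layout using an in-memory buffer
(np.fromstring stands in for the fromfile/fromstring branch):

    import struct
    from io import BytesIO
    import numpy as np

    def read_fortran_record(f, d, endian="="):
        pad_fmt = "%sI" % endian
        pad_size = struct.calcsize(pad_fmt)
        vec_len = struct.unpack(pad_fmt, f.read(pad_size))[0]   # record bytes
        vec_fmt = "%s%s" % (endian, d)
        vec_size = struct.calcsize(vec_fmt)
        assert vec_len % vec_size == 0
        data = np.fromstring(f.read(vec_len), vec_fmt,
                             count=vec_len // vec_size)
        vec_len2 = struct.unpack(pad_fmt, f.read(pad_size))[0]  # trailing pad
        assert vec_len == vec_len2
        return data

    payload = np.arange(4, dtype="float64").tostring()
    record = (struct.pack("=I", len(payload)) + payload +
              struct.pack("=I", len(payload)))
    assert np.all(read_fortran_record(BytesIO(record), "d") == np.arange(4))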


https://bitbucket.org/yt_analysis/yt-3.0/commits/ad356961683b/
Changeset:   ad356961683b
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-08 19:46:13
Summary:     Allow quantities to use other quantities
Affected #:  1 file

diff -r 13db4ab10eae7872ef876d7d01094b5077a6aba7 -r ad356961683b903b11dc1666ca187bf44c70b091 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -59,6 +59,7 @@
     def __call__(self, *args, **kwargs):
         e = FieldDetector(flat = True)
         e.NumberOfParticles = 1
+        e.quantities = self._data_source.quantities
         fields = e.requested
         self.func(e, *args, **kwargs)
         retvals = [ [] for i in range(self.n_ret)]


https://bitbucket.org/yt_analysis/yt-3.0/commits/05d1d1d4d88e/
Changeset:   05d1d1d4d88e
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-08 21:02:24
Summary:     removing self.func; fake data breaks the isbound quantity
Affected #:  1 file

diff -r ad356961683b903b11dc1666ca187bf44c70b091 -r 05d1d1d4d88e926746425c3b8731aa3f686798a3 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -59,9 +59,7 @@
     def __call__(self, *args, **kwargs):
         e = FieldDetector(flat = True)
         e.NumberOfParticles = 1
-        e.quantities = self._data_source.quantities
         fields = e.requested
-        self.func(e, *args, **kwargs)
         retvals = [ [] for i in range(self.n_ret)]
         chunks = self._data_source.chunks([], chunking_style="io")
         for ds in parallel_objects(chunks, -1):
@@ -334,7 +332,7 @@
 
     if (include_particles):
         mass_to_use = data["TotalMass"]
-        kinetic += 0.5 * (data["Dark_Matter_Mass"] *
+        kinetic += 0.5 * (data["particle_mass"] *
                           ((data["cic_particle_velocity_x"] - bv_x)**2 +
                            (data["cic_particle_velocity_y"] - bv_y)**2 +
                            (data["cic_particle_velocity_z"] - bv_z)**2)).sum()


https://bitbucket.org/yt_analysis/yt-3.0/commits/44991b2d5f00/
Changeset:   44991b2d5f00
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-09 20:08:28
Summary:     first draft of oct deposit
Affected #:  3 files

diff -r 05d1d1d4d88e926746425c3b8731aa3f686798a3 -r 44991b2d5f009aafa4a6dfee00bb7f64ff9c714b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -120,7 +120,7 @@
             else:
                 domain._read_amr_level(self.oct_handler)
 
-    def _detect_fields(self):
+    def _detect_fields_original(self):
         self.particle_field_list = particle_fields
         self.field_list = set(fluid_fields + particle_fields +
                               particle_star_fields)
@@ -135,6 +135,30 @@
         else:
             self.parameter_file.particle_types = []
 
+    def _detect_fields(self):
+        #populate particle_field list and field_list
+        self.field_list = [('gas',f) for f in fluid_fields]
+        if "wspecies" in self.parameter_file.parameters.keys():
+            particle_field_list = [f for f in particle_fields]
+            self.parameter_file.particle_types = ["all", "darkmatter"]
+            if pf.file_particle_stars:
+                particle_field_list += particle_star_fields
+                self.parameter_file.particle_types.append("stars")
+            wspecies = self.parameter_file.parameters['wspecies']
+            nspecies = len(wspecies)
+            for specie in range(nspecies):
+                self.parameter_file.particle_types.append("specie%i" % specie)
+            self.particle_field_list = particle_field_list 
+            #maybe change this to (type, name) format?
+        else:
+            self.particle_field_list = []
+            self.parameter_file.particle_types = []
+        for particle_type in self.parameter_file.particle_types:
+            for particle_field in self.particle_field_list:
+                self.field_list.append([particle_type, particle_field])
+                self.field_list.append(["deposit_"+particle_type, 
+                                        particle_field])
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         super(ARTGeometryHandler, self)._setup_classes(dd)
@@ -473,6 +497,23 @@
             widths[:, i] = base_dx[i] / dds
         return widths
 
+    def deposit_particle_fields(self, ppos, pdata):
+        """
+        Given the x,y,z,particle_field data, do a particle deposition
+        using the oct_handler to accumulate values. We look up the particle
+        position again for every field, so this is inefficient
+        """
+        import pdb; pdb.set_trace()
+        fields = pdata.keys()
+        filled = {}
+        for field in fields:
+            dest = np.zeros(self.cell_count, 'float64')-1.
+            level = self.domain_level
+            oct_handler.deposit_particle_cumsum(ppos, pdata, self.mask, dest,
+                                                fields, self.domain.domain_id)
+            filled[field] = dest
+        return filled 
+
     def fill_root(self, content, ftfields):
         """
         This is called from IOHandler. It takes content

diff -r 05d1d1d4d88e926746425c3b8731aa3f686798a3 -r 44991b2d5f009aafa4a6dfee00bb7f64ff9c714b yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -55,16 +55,50 @@
                 f = open(subset.domain.pf._file_amr, "rb")
                 # This contains the boundary information, so we skim through
                 # and pick off the right vectors
-                if subset.domain_level == 0:
-                    rv = subset.fill_root(f, fields)
-                else:
-                    rv = subset.fill_level(f, fields)
+                if ft == 'gas':
+                    if subset.domain_level == 0:
+                        rv = subset.fill_root(f, fields)
+                    else:
+                        rv = subset.fill_level(f, fields)
+                elif ft.startswith('deposit'):
+                    import pdb; pdb.set_trace()
+                    # We will find all particles in this chunk and deposit them;
+                    # this means the particles will be read again for every
+                    # hydro chunk, unfortunately.
+                    # This is also complicated because particle deposition
+                    # is ideally a derived field formed from the position
+                    # fields plus another particle field, but we are treating
+                    # it like a first-class native field
+                    if ft == "deposit":
+                        ft = "particle"
+                    else:
+                        #accept "deposit_stars" field for example
+                        ft = ft.replace("deposit_","")
+                    mylog.debug("Deposit L%i particles", 
+                                subset.domain_level)
+                    coords = [(ft, 'particle_position_%s'%ax ) for ax \
+                                in 'xyz']
+                    fnames = [(ft, f) for oft,f in fields ]
+                    pfields = [c for c in coords]
+                    for f in fnames:
+                        if f in pfields: 
+                            continue
+                        pfields.append(f)
+                    pdata = self._read_particle_selection(chunk,selector,
+                                                          pfields)
+                    x, y, z = (pdata[c] for c in coords)
+                    ppos = np.array([x,y,z]).T
+                    del x,y,z
+                    for c in coords:
+                        if c not in fnames:
+                            del pdata[c]
+                    rv = subset.deposit_particle_fields(ppos, pdata)
                 for ft, f in fields:
-                    mylog.debug("Filling L%i %s with %s (%0.3e %0.3e) (%s:%s)",
-                                subset.domain_level,
-                                f, subset.cell_count, rv[f].min(), rv[f].max(),
-                                cp, cp+subset.cell_count)
-                    tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
+                        mylog.debug("Fill L%i %s with %s (%0.3e %0.3e) (%s:%s)",
+                                    subset.domain_level, f, subset.cell_count, 
+                                    rv[f].min(), rv[f].max(),
+                                    cp, cp+subset.cell_count)
+                        tr[(ft, f)][cp:cp+subset.cell_count] = rv.pop(f)
                 cp += subset.cell_count
         return tr
 

diff -r 05d1d1d4d88e926746425c3b8731aa3f686798a3 -r 44991b2d5f009aafa4a6dfee00bb7f64ff9c714b yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -167,6 +167,44 @@
             cur = cur.children[ind[0]][ind[1]][ind[2]]
         return cur
 
+    @cython.boundscheck(True)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef Oct *get_octant(self, ppos):
+        # This does a bit more than the built in get() function
+        # by also computing the index of the octant the point is in
+        cdef np.int64_t ind[3]
+        cdef np.float64_t dds[3], cp[3], pp[3]
+        cdef Oct *cur
+        cdef int i
+        cdef int ii
+        for i in range(3):
+            pp[i] = ppos[i] - self.DLE[i]
+            dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
+            cp[i] = (ind[i] + 0.5) * dds[i]
+        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        while cur.children[0][0][0] != NULL:
+            for i in range(3):
+                dds[i] = dds[i] / 2.0
+                if cp[i] > pp[i]:
+                    ind[i] = 0
+                    cp[i] -= dds[i] / 2.0
+                else:
+                    ind[i] = 1
+                    cp[i] += dds[i]/2.0
+            cur = cur.children[ind[0]][ind[1]][ind[2]]
+        for i in range(3):
+            dds[i] = dds[i] / 2.0
+            if cp[i] > pp[i]:
+                ind[i] = 0
+                cp[i] -= dds[i] / 2.0
+            else:
+                ind[i] = 1
+                cp[i] += dds[i]/2.0
+        ii = ((ind[2]*2)+ind[1])*2+ind[0]
+        return cur, ii 
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -887,6 +925,26 @@
 
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
     #this class is specifically for the NMSU ART
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def deposit_particle_cumsum(np.ndarray[np.float64_t, ndim=3] ppos, 
+                                np.ndarray[np.float64_t, ndim=1] pdata,
+                                np.ndarray[np.float64_t, ndim=1] mask,
+                                np.ndarray[np.float64_t, ndim=1] dest,
+                                fields, int domain):
+        cdef Oct *o
+        cdef OctAllocationContainer *dom = self.domains[domain - 1]
+        cdef np.float64_t pos[3]
+        cdef int ii
+        cdef int no = ppos.shape[0]
+        for n in range(no):
+            pos = ppos[n]
+            *o, ii = dom.get_octant(pos) 
+            if mask[o.local_ind,ii]==0: continue
+            dest[o.ind+ii] += pdata[n]
+        return dest
+
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)
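
As a rough illustration of what deposit_particle_cumsum does above (look up each particle's cell, skip masked cells, accumulate its value), here is a hedged pure-Python sketch; cell_index is a stand-in for the oct container's get_octant()/get() lookup, not part of the real API.

    import numpy as np

    def deposit_particle_cumsum(ppos, pdata, mask, dest, cell_index):
        # For each particle, find the flat cell it falls in, skip masked-out
        # cells, and accumulate its value into dest.
        for n in range(ppos.shape[0]):
            flat = cell_index(ppos[n])
            if not mask[flat]:
                continue
            dest[flat] += pdata[n]
        return dest

    # Toy cell_index: 4 bins along x in [0, 1)
    nbins = 4
    cell_index = lambda pos: min(int(pos[0] * nbins), nbins - 1)
    ppos = np.random.random((50, 3))
    dest = deposit_particle_cumsum(ppos, np.ones(50),
                                   np.ones(nbins, dtype=bool),
                                   np.zeros(nbins), cell_index)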


https://bitbucket.org/yt_analysis/yt-3.0/commits/48a93884ff89/
Changeset:   48a93884ff89
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-17 01:28:37
Summary:     Merge
Affected #:  4 files

diff -r ae0003cdf0a5c5c11d3722d37796c67b0b84428a -r 48a93884ff89a3623b166185a943f56182774db7 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -167,6 +167,44 @@
             cur = cur.children[ind[0]][ind[1]][ind[2]]
         return cur
 
+    @cython.boundscheck(True)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef Oct *get_octant(self, ppos):
+        # This does a bit more than the built in get() function
+        # by also computing the index of the octant the point is in
+        cdef np.int64_t ind[3]
+        cdef np.float64_t dds[3], cp[3], pp[3]
+        cdef Oct *cur
+        cdef int i
+        cdef int ii
+        for i in range(3):
+            pp[i] = ppos[i] - self.DLE[i]
+            dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+            ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
+            cp[i] = (ind[i] + 0.5) * dds[i]
+        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        while cur.children[0][0][0] != NULL:
+            for i in range(3):
+                dds[i] = dds[i] / 2.0
+                if cp[i] > pp[i]:
+                    ind[i] = 0
+                    cp[i] -= dds[i] / 2.0
+                else:
+                    ind[i] = 1
+                    cp[i] += dds[i]/2.0
+            cur = cur.children[ind[0]][ind[1]][ind[2]]
+        for i in range(3):
+            dds[i] = dds[i] / 2.0
+            if cp[i] > pp[i]:
+                ind[i] = 0
+                cp[i] -= dds[i] / 2.0
+            else:
+                ind[i] = 1
+                cp[i] += dds[i]/2.0
+        ii = ((ind[2]*2)+ind[1])*2+ind[0]
+        return cur, ii 
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -887,6 +925,26 @@
 
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
     #this class is specifically for the NMSU ART
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def deposit_particle_cumsum(np.ndarray[np.float64_t, ndim=3] ppos, 
+                                np.ndarray[np.float64_t, ndim=1] pdata,
+                                np.ndarray[np.float64_t, ndim=1] mask,
+                                np.ndarray[np.float64_t, ndim=1] dest,
+                                fields, int domain):
+        cdef Oct *o
+        cdef OctAllocationContainer *dom = self.domains[domain - 1]
+        cdef np.float64_t pos[3]
+        cdef int ii
+        cdef int no = ppos.shape[0]
+        for n in range(no):
+            pos = ppos[n]
+            *o, ii = dom.get_octant(pos) 
+            if mask[o.local_ind,ii]==0: continue
+            dest[o.ind+ii] += pdata[n]
+        return dest
+
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)


https://bitbucket.org/yt_analysis/yt-3.0/commits/897796b0b4aa/
Changeset:   897796b0b4aa
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-17 05:41:57
Summary:     commenting the sph kernel code
Affected #:  1 file

diff -r 48a93884ff89a3623b166185a943f56182774db7 -r 897796b0b4aabc85319cc561d1a252898ece2555 yt/frontends/sph/smoothing_kernel.pyx
--- a/yt/frontends/sph/smoothing_kernel.pyx
+++ b/yt/frontends/sph/smoothing_kernel.pyx
@@ -53,21 +53,28 @@
     for p in range(ngas):
         kernel_sum[p] = 0.0
         skip = 0
+        # Find the number of cells spanned by the kernel
         for i in range(3):
             pos[i] = ppos[p, i]
+            # Get particle root grid integer index
             ind[i] = <int>((pos[i] - left_edge[i]) / dds[i])
+            # Number of root grid cells the smoothing length spans, plus one
             half_len = <int>(hsml[p]/dds[i]) + 1
+            # Left and right integer indices of the smoothing range
+            # If the smoothing length is small, both may fall in the same bin
             ib0[i] = ind[i] - half_len
             ib1[i] = ind[i] + half_len
             #pos[i] = ppos[p, i] - left_edge[i]
             #ind[i] = <int>(pos[i] / dds[i])
             #ib0[i] = <int>((pos[i] - hsml[i]) / dds[i]) - 1
             #ib1[i] = <int>((pos[i] + hsml[i]) / dds[i]) + 1
+            # Skip if outside our root grid
             if ib0[i] >= dims[i] or ib1[i] < 0:
                 skip = 1
             ib0[i] = iclip(ib0[i], 0, dims[i] - 1)
             ib1[i] = iclip(ib1[i], 0, dims[i] - 1)
         if skip == 1: continue
+        # Having found the kernel shape, calculate the kernel weight
         for i from ib0[0] <= i <= ib1[0]:
             idist[0] = (ind[0] - i) * (ind[0] - i) * sdds[0]
             for j from ib0[1] <= j <= ib1[1]:
@@ -75,10 +82,14 @@
                 for k from ib0[2] <= k <= ib1[2]:
                     idist[2] = (ind[2] - k) * (ind[2] - k) * sdds[2]
                     dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / hsml[p]
+                    # Kernel is 3D but save the elements in a 1D array
                     gi = ((i * dims[1] + j) * dims[2]) + k
                     pdist[gi] = sph_kernel(dist)
+                    # Save sum to normalize later
                     kernel_sum[p] += pdist[gi]
+        # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
             for j from ib0[1] <= j <= ib1[1]:
                 for k from ib0[2] <= k <= ib1[2]:
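
The comments above describe a two-pass pattern: compute kernel weights over the cells spanned by the smoothing length, then deposit the normalized contributions. A hedged NumPy sketch of that pattern for a single particle on a small uniform grid, with the standard cubic-spline shape function (all names illustrative, not the yt API):

    import numpy as np

    def sph_kernel(q):
        # Standard cubic spline in q = r / h (unnormalized shape function)
        if q <= 0.5:
            return 1.0 - 6.0 * q * q * (1.0 - q)
        elif q <= 1.0:
            return 2.0 * (1.0 - q)**3
        return 0.0

    def deposit_one_particle(grid, left_edge, dds, pos, hsml, value):
        # Integer index of the particle's cell and the span of the kernel
        ind = ((pos - left_edge) / dds).astype(int)
        half_len = (hsml / dds).astype(int) + 1
        ib0 = np.clip(ind - half_len, 0, np.array(grid.shape) - 1)
        ib1 = np.clip(ind + half_len, 0, np.array(grid.shape) - 1)
        # First pass: kernel weights and their sum, for normalization
        weights, ksum = {}, 0.0
        for i in range(ib0[0], ib1[0] + 1):
            for j in range(ib0[1], ib1[1] + 1):
                for k in range(ib0[2], ib1[2] + 1):
                    r = np.sqrt((((ind - [i, j, k]) * dds)**2).sum())
                    w = sph_kernel(r / hsml)
                    weights[(i, j, k)] = w
                    ksum += w
        # Second pass: deposit the normalized contribution
        if ksum > 0.0:
            for ijk, w in weights.items():
                grid[ijk] += value * w / ksum

    grid = np.zeros((8, 8, 8))
    deposit_one_particle(grid, np.zeros(3), np.ones(3) / 8.0,
                         np.array([0.5, 0.5, 0.5]), 0.2, 1.0)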


https://bitbucket.org/yt_analysis/yt-3.0/commits/39377274e8c8/
Changeset:   39377274e8c8
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-17 05:42:47
Summary:     wrote pseudo code for oct deposit
Affected #:  1 file

diff -r 897796b0b4aabc85319cc561d1a252898ece2555 -r 39377274e8c8c41d545f3259bc1ab168670fe31f yt/geometry/oct_deposit.pyx
--- /dev/null
+++ b/yt/geometry/oct_deposit.pyx
@@ -0,0 +1,68 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from libc.stdlib cimport malloc, free, qsort
+from libc.math cimport floor
+cimport numpy as np
+import numpy as np
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+cimport cython
+
+cdef np.float64_t kernel_sph(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
+#modes = count, sum, diff
+modes = {'count': opt_count, 'sum': opt_sum, 'diff': opt_diff}
+selections = {'direct': select_nearest, 'cic': select_radius}
+kernels = {'unitary': kernel_unitary, 'sph': kernel_sph}
+cdef deposit_direct(oct_handler, 
+        np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
+        np.ndarray[np.float64_t, ndim=2] pd, # particle fields
+        np.ndarray[np.float64_t, ndim=1] pr, # particle radius
+        np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
+        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
+        mode='count', selection='direct', kernel='sph'):
+    fopt = modes[mode]
+    fsel = selections[selection]
+    fker = kernels[kernel]
+    for pi in np.arange(particles):
+        octs = fsel(oct_handler, pr[pi])
+        for oct in octs:
+            w = fker(pr[pi],oct) 
+            weights.append(w)
+        norm = weights.sum()
+        for w, oct in zip(weights, octs):
+            fopt(pd[pi], w/norm, oct.index, data_in, data_out)
+
+
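
A runnable pure-Python analog of the table-driven deposit sketched above, with the count/sum/diff modes and a trivial stand-in "selection" that bins particles along x; the real oct selection and kernel weighting are only stubbed here.

    import numpy as np

    # Mode functions: how a particle's value is combined into the output cell
    def opt_count(pdata, weight, index, data_out, data_in):
        data_out[index] += 1.0

    def opt_sum(pdata, weight, index, data_out, data_in):
        data_out[index] += pdata

    def opt_diff(pdata, weight, index, data_out, data_in):
        data_out[index] += data_in[index] - pdata

    modes = {"count": opt_count, "sum": opt_sum, "diff": opt_diff}

    def deposit(ppos, pd, data_out, data_in, nbins, mode="count"):
        # Trivial "selection": bin each particle into nbins cells along x in [0, 1)
        fopt = modes[mode]
        for pi in range(ppos.shape[0]):
            index = min(int(ppos[pi, 0] * nbins), nbins - 1)
            fopt(pd[pi], 1.0, index, data_out, data_in)
        return data_out

    ppos = np.random.random((100, 3))
    pd = np.ones(100)
    out = deposit(ppos, pd, np.zeros(16), np.zeros(16), 16, mode="sum")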


https://bitbucket.org/yt_analysis/yt-3.0/commits/c950797b59b9/
Changeset:   c950797b59b9
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-17 05:44:02
Summary:     realized octs have cells
Affected #:  1 file

diff -r 39377274e8c8c41d545f3259bc1ab168670fe31f -r c950797b59b91e6ccbd4b1417d3ac8e81f783caa yt/geometry/oct_deposit.pyx
--- a/yt/geometry/oct_deposit.pyx
+++ b/yt/geometry/oct_deposit.pyx
@@ -59,10 +59,12 @@
     for pi in np.arange(particles):
         octs = fsel(oct_handler, pr[pi])
         for oct in octs:
-            w = fker(pr[pi],oct) 
-            weights.append(w)
+            for cell in oct.cells:
+                w = fker(pr[pi],cell) 
+                weights.append(w)
         norm = weights.sum()
         for w, oct in zip(weights, octs):
-            fopt(pd[pi], w/norm, oct.index, data_in, data_out)
+            for cell in oct.cells:
+                fopt(pd[pi], w/norm, oct.index, data_in, data_out)
 
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/d298743d7787/
Changeset:   d298743d7787
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-17 08:05:59
Summary:     added kernel function, deposit operations
Affected #:  1 file

diff -r c950797b59b91e6ccbd4b1417d3ac8e81f783caa -r d298743d77873b4db7326165b2e5f42bf1e5a885 yt/geometry/oct_deposit.pyx
--- a/yt/geometry/oct_deposit.pyx
+++ b/yt/geometry/oct_deposit.pyx
@@ -25,13 +25,77 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-from libc.stdlib cimport malloc, free, qsort
-from libc.math cimport floor
+from libc.stdlib cimport malloc, free
 cimport numpy as np
 import numpy as np
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 cimport cython
 
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+
+# Mode functions
+ctypedef np.float64_t (*type_opt)(np.float64_t, np.float64_t)
+cdef np.float64_t opt_count(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += 1.0
+
+cdef np.float64_t opt_sum(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += pdata 
+
+cdef np.float64_t opt_diff(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += (data_in[index] - pdata) 
+
+cdef np.float64_t opt_wcount(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += weight
+
+cdef np.float64_t opt_wsum(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += pdata * weight
+
+cdef np.float64_t opt_wdiff(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += (data_in[index] - pdata) * weight
+
+# Selection functions
+ctypedef NOTSURE (*type_sel)(OctreeContainer, 
+                                np.ndarray[np.float64_t, ndim=1],
+                                np.float64_t)
+cdef NOTSURE select_nearest(OctreeContainer oct_handler,
+                            np.ndarray[np.float64_t, ndim=1] pos,
+                            np.float64_t radius):
+    #return only the nearest oct
+    pass
+
+
+cdef NOTSURE select_radius(OctreeContainer oct_handler,
+                            np.ndarray[np.float64_t, ndim=1] pos,
+                            np.float64_t radius):
+    #return a list of octs within the radius
+    pass
+    
+
+# Kernel functions
+ctypedef np.float64_t (*type_ker)(np.float64_t)
 cdef np.float64_t kernel_sph(np.float64_t x) nogil:
     cdef np.float64_t kernel
     if x <= 0.5:
@@ -42,22 +106,46 @@
         kernel = 0.
     return kernel
 
-#modes = count, sum, diff
-modes = {'count': opt_count, 'sum': opt_sum, 'diff': opt_diff}
-selections = {'direct': select_nearest, 'cic': select_radius}
-kernels = {'unitary': kernel_unitary, 'sph': kernel_sph}
-cdef deposit_direct(oct_handler, 
+cdef np.float64_t kernel_null(np.float64_t x) nogil: return 1.0
+
+cdef deposit(OctreeContainer oct_handler, 
         np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
         np.ndarray[np.float64_t, ndim=2] pd, # particle fields
         np.ndarray[np.float64_t, ndim=1] pr, # particle radius
         np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
         np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
-        mode='count', selection='direct', kernel='sph'):
-    fopt = modes[mode]
-    fsel = selections[selection]
-    fker = kernels[kernel]
-    for pi in np.arange(particles):
-        octs = fsel(oct_handler, pr[pi])
+        mode='count', selection='nearest', kernel='null'):
+    cdef type_opt fopt
+    cdef type_sel fsel
+    cdef type_ker fker
+    cdef long pi #particle index
+    cdef long nocts #number of octs in selection
+    cdef Oct oct 
+    cdef np.float64_t w
+    # Can we do this with dicts?
+    # Setup the function pointers
+    if mode == 'count':
+        fopt = opt_count
+    elif mode == 'sum':
+        fopt = opt_sum
+    elif mode == 'diff':
+        fopt = opt_diff
+    elif mode == 'wcount':
+        fopt = opt_wcount
+    elif mode == 'wsum':
+        fopt = opt_wsum
+    elif mode == 'wdiff':
+        fopt = opt_wdiff
+    if selection == 'nearest':
+        fsel = select_nearest
+    elif selection == 'radius':
+        fsel = select_radius
+    if kernel == 'null':
+        fker = kernel_null
+    if kernel == 'sph':
+        fker = kernel_sph
+    for pi in range(particles):
+        octs = fsel(oct_handler, ppos[pi], pr[pi])
         for oct in octs:
             for cell in oct.cells:
                 w = fker(pr[pi],cell) 
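
For reference, the piecewise kernel used above can be sanity-checked in plain Python; a small sketch confirming both branches agree at x = 0.5 and the kernel vanishes for x >= 1:

    def kernel_sph(x):
        # Same piecewise cubic spline as in the diff above
        if x <= 0.5:
            return 1.0 - 6.0 * x * x * (1.0 - x)
        elif x <= 1.0:
            return 2.0 * (1.0 - x)**3
        return 0.0

    # Continuity at the breakpoints: both branches give 0.25 at x = 0.5,
    # and the kernel is zero at and beyond x = 1.0
    assert abs(kernel_sph(0.5) - 2.0 * 0.5**3) < 1e-12
    assert kernel_sph(1.0) == 0.0 and kernel_sph(1.5) == 0.0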


https://bitbucket.org/yt_analysis/yt-3.0/commits/01481a357802/
Changeset:   01481a357802
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-17 08:10:53
Summary:     changing in/out order
Affected #:  1 file

diff -r d298743d77873b4db7326165b2e5f42bf1e5a885 -r 01481a3578024cd054fce0a3ca9d5fa5198ea244 yt/geometry/oct_deposit.pyx
--- a/yt/geometry/oct_deposit.pyx
+++ b/yt/geometry/oct_deposit.pyx
@@ -112,8 +112,8 @@
         np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
         np.ndarray[np.float64_t, ndim=2] pd, # particle fields
         np.ndarray[np.float64_t, ndim=1] pr, # particle radius
+        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
         np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
-        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
         mode='count', selection='nearest', kernel='null'):
     cdef type_opt fopt
     cdef type_sel fsel


https://bitbucket.org/yt_analysis/yt-3.0/commits/b65117530f45/
Changeset:   b65117530f45
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-17 20:17:19
Summary:     Consolidate get and get_octant and make the code compilable.
Affected #:  2 files

diff -r 01481a3578024cd054fce0a3ca9d5fa5198ea244 -r b65117530f45a23fe46db5f16ec0dedfd838df9d yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -54,7 +54,7 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
-    cdef Oct* get(self, ppos)
+    cdef Oct* get(self, np.float64_t ppos[3], int *ii = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
 

diff -r 01481a3578024cd054fce0a3ca9d5fa5198ea244 -r b65117530f45a23fe46db5f16ec0dedfd838df9d yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -142,7 +142,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, ppos):
+    cdef Oct *get(self, np.float64_t ppos[3], int *ii = NULL):
         #Given a floating point position, retrieve the most
         #refined oct at that time
         cdef np.int64_t ind[3]
@@ -165,45 +165,14 @@
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
             cur = cur.children[ind[0]][ind[1]][ind[2]]
-        return cur
-
-    @cython.boundscheck(True)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *get_octant(self, ppos):
-        # This does a bit more than the built in get() function
-        # by also computing the index of the octant the point is in
-        cdef np.int64_t ind[3]
-        cdef np.float64_t dds[3], cp[3], pp[3]
-        cdef Oct *cur
-        cdef int i
-        cdef int ii
+        if ii == NULL: return cur
         for i in range(3):
-            pp[i] = ppos[i] - self.DLE[i]
-            dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
-            cp[i] = (ind[i] + 0.5) * dds[i]
-        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        while cur.children[0][0][0] != NULL:
-            for i in range(3):
-                dds[i] = dds[i] / 2.0
-                if cp[i] > pp[i]:
-                    ind[i] = 0
-                    cp[i] -= dds[i] / 2.0
-                else:
-                    ind[i] = 1
-                    cp[i] += dds[i]/2.0
-            cur = cur.children[ind[0]][ind[1]][ind[2]]
-        for i in range(3):
-            dds[i] = dds[i] / 2.0
             if cp[i] > pp[i]:
                 ind[i] = 0
-                cp[i] -= dds[i] / 2.0
             else:
                 ind[i] = 1
-                cp[i] += dds[i]/2.0
-        ii = ((ind[2]*2)+ind[1])*2+ind[0]
-        return cur, ii 
+        ii[0] = ((ind[2]*2)+ind[1])*2+ind[0]
+        return cur
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -298,14 +267,17 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def get_neighbor_boundaries(self, ppos):
+    def get_neighbor_boundaries(self, oppos):
+        cdef int i, ii
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
         cdef np.ndarray[np.float64_t, ndim=2] bounds
         cdef np.float64_t corner[3], size[3]
         bounds = np.zeros((27,6), dtype="float64")
-        cdef int i, ii
         tnp = 0
         for i in range(27):
             self.oct_bounds(neighbors[i], corner, size)
@@ -928,7 +900,8 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def deposit_particle_cumsum(np.ndarray[np.float64_t, ndim=3] ppos, 
+    def deposit_particle_cumsum(self,
+                                np.ndarray[np.float64_t, ndim=2] ppos, 
                                 np.ndarray[np.float64_t, ndim=1] pdata,
                                 np.ndarray[np.float64_t, ndim=1] mask,
                                 np.ndarray[np.float64_t, ndim=1] dest,
@@ -939,8 +912,9 @@
         cdef int ii
         cdef int no = ppos.shape[0]
         for n in range(no):
-            pos = ppos[n]
-            *o, ii = dom.get_octant(pos) 
+            for j in range(3):
+                pos[j] = ppos[n,j]
+            o = self.get(pos, &ii) 
             if mask[o.local_ind,ii]==0: continue
             dest[o.ind+ii] += pdata[n]
         return dest
@@ -1435,12 +1409,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_neighbor_particles(self, ppos):
+    def count_neighbor_particles(self, oppos):
         #How many particles are in my neighborhood
+        cdef int i, ni, dl, tnp
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
-        cdef int i, ni, dl, tnp
         tnp = 0
         for i in range(27):
             if neighbors[i].sd != NULL:
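
The consolidated get() above descends by halving the cell width at each level and, when asked, also reports the octant index via the ii pointer. A hedged pure-Python analog using a toy nested-dict octree (not the Cython container):

    import numpy as np

    # Toy node: 'children' is None for a leaf, else a dict keyed by (i, j, k) in {0, 1}^3
    def get_with_octant(root, pos, left_edge, width):
        # Descend to the deepest node containing pos, halving the width each level,
        # then compute the flattened octant index the same way ii is computed in get()
        node = root
        le = np.array(left_edge, dtype="float64")
        w = np.array(width, dtype="float64")
        while node["children"] is not None:
            w = w / 2.0
            ind = [0 if pos[i] < le[i] + w[i] else 1 for i in range(3)]
            le = le + w * ind
            node = node["children"][tuple(ind)]
        w = w / 2.0
        ind = [0 if pos[i] < le[i] + w[i] else 1 for i in range(3)]
        ii = ((ind[2] * 2) + ind[1]) * 2 + ind[0]
        return node, ii

    leaf = lambda: {"children": None}
    root = {"children": {(i, j, k): leaf()
                         for i in (0, 1) for j in (0, 1) for k in (0, 1)}}
    node, ii = get_with_octant(root, [0.8, 0.1, 0.6], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
    print(ii)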


https://bitbucket.org/yt_analysis/yt-3.0/commits/3b6dbcc91214/
Changeset:   3b6dbcc91214
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-17 21:20:05
Summary:     Stub of deposit function.
Affected #:  1 file

diff -r b65117530f45a23fe46db5f16ec0dedfd838df9d -r 3b6dbcc91214a4ada89232c502a2fa0690fb95af yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -513,6 +513,11 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
+    def deposit(self, positions, fields, op):
+        assert(self._current_chunk.chunk_type == "spatial")
+        fields = ensure_list(fields)
+        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
+
     @contextmanager
     def _field_lock(self):
         self._locked = True


https://bitbucket.org/yt_analysis/yt-3.0/commits/702d94e75e9c/
Changeset:   702d94e75e9c
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 00:18:27
Summary:     added fake octree test
Affected #:  3 files

diff -r b65117530f45a23fe46db5f16ec0dedfd838df9d -r 702d94e75e9c58f686f7ffc6e19f69ebee21bf5b yt/geometry/oct_deposit.pyx
--- a/yt/geometry/oct_deposit.pyx
+++ b/yt/geometry/oct_deposit.pyx
@@ -106,7 +106,7 @@
         kernel = 0.
     return kernel
 
-cdef np.float64_t kernel_null(np.float64_t x) nogil: return 1.0
+cdef np.float64_t kernel_null(np.float64_t x) nogil: return 0.0
 
 cdef deposit(OctreeContainer oct_handler, 
         np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z

diff -r b65117530f45a23fe46db5f16ec0dedfd838df9d -r 702d94e75e9c58f686f7ffc6e19f69ebee21bf5b yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -23,6 +23,13 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd"])
+    config.add_extension("fake_octree", 
+                ["yt/geometry/fake_octree.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd"])
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r b65117530f45a23fe46db5f16ec0dedfd838df9d -r 702d94e75e9c58f686f7ffc6e19f69ebee21bf5b yt/geometry/tests/fake_octree.py
--- /dev/null
+++ b/yt/geometry/tests/fake_octree.py
@@ -0,0 +1,12 @@
+from yt.geometry.fake_octree import create_fake_octree
+import numpy as np
+
+max_leaf = 100
+max_level = 5
+dn = 2
+dd = np.ones(3,dtype='i4')*dn
+dle = np.ones(3,dtype='f8')*0.0
+dre = np.ones(3,dtype='f8')
+fsub = 0.90
+
+octtree = create_fake_octree(max_leaf, max_level, dd, dle, dre, fsub)


https://bitbucket.org/yt_analysis/yt-3.0/commits/de92cbbf84f8/
Changeset:   de92cbbf84f8
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 00:18:44
Summary:     fake octree creator; compiles now
Affected #:  1 file

diff -r 702d94e75e9c58f686f7ffc6e19f69ebee21bf5b -r de92cbbf84f841abd203982ec819359b8d43586a yt/geometry/fake_octree.pyx
--- /dev/null
+++ b/yt/geometry/fake_octree.pyx
@@ -0,0 +1,95 @@
+"""
+Make a fake octree, deposit particle at every leaf
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from libc.stdlib cimport malloc, free, rand, RAND_MAX
+cimport numpy as np
+import numpy as np
+cimport cython
+
+from oct_container cimport Oct, RAMSESOctreeContainer
+
+# Defined only by N leaves
+# Randomly decide if a branch should be subdivided; recurse one level if so
+# Once done, create a position array of len(leaves) with smoothing lengths = oct_size
+
+# Note that with this algorithm the octree won't be balanced once you hit
+# the maximum number of desired leaves
+
+# Use next_child(domain, int[3] octant, Oct parent)
+
+def create_fake_octree(long noct,
+                       long max_level,
+                       np.ndarray[np.int32_t, ndim=1] ndd,
+                       np.ndarray[np.float64_t, ndim=1] dle,
+                       np.ndarray[np.float64_t, ndim=1] dre,
+                       float fsubdivide):
+    cdef RAMSESOctreeContainer oct_handler = RAMSESOctreeContainer(ndd,dle,dre)
+    cdef int[3] ind #hold the octant index
+    cdef int[3] dd #hold the octant index
+    cdef long i
+    for i in range(3):
+        ind[i] = 0
+        dd[i] = ndd[i]
+    cdef long total_oct = (dd[0]*dd[1]*dd[2]) + noct
+    print 'starting'
+    print ind[0], ind[1], ind[2]
+    print 'allocate'
+    print total_oct
+    oct_handler.allocate_domains([total_oct])
+    print 'parent'
+    parent = oct_handler.next_root(oct_handler.max_domain, ind)
+    print 'subdiv'
+    subdivide(oct_handler,ind, dd, parent, 0, 0, noct,
+              max_level, fsubdivide)
+    return oct_handler
+
+cdef subdivide(RAMSESOctreeContainer oct_handler, int ind[3], 
+               int dd[3],
+               Oct *parent, long cur_level, long cur_leaf,
+               long noct, long max_level, float fsubdivide):
+    print "entrance"
+    cdef int ddr[3]
+    cdef long i,j,k
+    cdef float rf #random float from 0-1
+    if cur_level >= max_level: 
+        return
+    if cur_leaf >= noct: 
+        return
+    print "loop over cells"
+    for i in range(3):
+        ind[i] = <int> rand() / RAND_MAX * dd[i]
+        ddr[i] = 2
+    rf = rand() / RAND_MAX
+    print ind[0], ind[1], ind[2]
+    print rf
+    if rf > fsubdivide:
+        #this will mark the octant ind as subdivided
+        print 'subdivide'
+        oct = oct_handler.next_child(1, ind, parent)
+        print 'recurse'
+        subdivide(oct_handler, ind, ddr, oct, cur_level + 1, 
+                  cur_leaf + 1, noct, max_level, fsubdivide)
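
A pure-Python analog of the random subdivision above, using a nested-dict octree instead of the RAMSESOctreeContainer; here a node subdivides with probability fsubdivide, a simplification of the rand()/RAND_MAX test in the Cython version.

    import random

    def make_fake_octree(max_level, fsubdivide, level=0):
        # Each node either stays a leaf or splits into 8 children, up to max_level
        if level >= max_level or random.random() > fsubdivide:
            return {"level": level, "children": None}
        return {"level": level,
                "children": [make_fake_octree(max_level, fsubdivide, level + 1)
                             for _ in range(8)]}

    def count_leaves(node):
        if node["children"] is None:
            return 1
        return sum(count_leaves(c) for c in node["children"])

    random.seed(0)
    tree = make_fake_octree(max_level=5, fsubdivide=0.25)
    print(count_leaves(tree))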


https://bitbucket.org/yt_analysis/yt-3.0/commits/13f4fb73e78a/
Changeset:   13f4fb73e78a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 01:44:24
Summary:     now creates a balanced octree
Affected #:  1 file

diff -r de92cbbf84f841abd203982ec819359b8d43586a -r 13f4fb73e78ae85c67e50bd333d65df53e132b87 yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -41,55 +41,54 @@
 
 # Use next_child(domain, int[3] octant, Oct parent)
 
-def create_fake_octree(long noct,
+def create_fake_octree(RAMSESOctreeContainer oct_handler,
+                       long noct,
                        long max_level,
                        np.ndarray[np.int32_t, ndim=1] ndd,
                        np.ndarray[np.float64_t, ndim=1] dle,
                        np.ndarray[np.float64_t, ndim=1] dre,
                        float fsubdivide):
-    cdef RAMSESOctreeContainer oct_handler = RAMSESOctreeContainer(ndd,dle,dre)
     cdef int[3] ind #hold the octant index
     cdef int[3] dd #hold the octant index
     cdef long i
+    cdef long cur_noct = 0
     for i in range(3):
         ind[i] = 0
         dd[i] = ndd[i]
-    cdef long total_oct = (dd[0]*dd[1]*dd[2]) + noct
+    assert dd[0]*dd[1]*dd[2] <= noct
     print 'starting'
     print ind[0], ind[1], ind[2]
     print 'allocate'
-    print total_oct
-    oct_handler.allocate_domains([total_oct])
+    print noct
+    oct_handler.allocate_domains([noct])
+    print 'n_assigned', oct_handler.domains[0].n_assigned
     print 'parent'
     parent = oct_handler.next_root(oct_handler.max_domain, ind)
     print 'subdiv'
-    subdivide(oct_handler,ind, dd, parent, 0, 0, noct,
-              max_level, fsubdivide)
-    return oct_handler
+    while oct_handler.domains[0].n_assigned < noct:
+        cur_noct = subdivide(oct_handler,ind, dd, parent, 0, 0, noct,
+                  max_level, fsubdivide)
 
-cdef subdivide(RAMSESOctreeContainer oct_handler, int ind[3], 
+cdef long subdivide(RAMSESOctreeContainer oct_handler, int ind[3], 
                int dd[3],
-               Oct *parent, long cur_level, long cur_leaf,
+               Oct *parent, long cur_level, long cur_noct,
                long noct, long max_level, float fsubdivide):
-    print "entrance"
+    print cur_level, ' n_assigned ', oct_handler.domains[0].n_assigned, 
+    print ' n', oct_handler.domains[0].n
     cdef int ddr[3]
     cdef long i,j,k
     cdef float rf #random float from 0-1
     if cur_level >= max_level: 
-        return
-    if cur_leaf >= noct: 
-        return
-    print "loop over cells"
+        return cur_noct
+    if oct_handler.domains[0].n_assigned >= noct: 
+        return cur_noct
     for i in range(3):
-        ind[i] = <int> rand() / RAND_MAX * dd[i]
+        ind[i] = <int> ((rand() * 1.0 / RAND_MAX) * dd[i])
         ddr[i] = 2
-    rf = rand() / RAND_MAX
-    print ind[0], ind[1], ind[2]
-    print rf
+    rf = rand() * 1.0 / RAND_MAX
     if rf > fsubdivide:
         #this will mark the octant ind as subdivided
-        print 'subdivide'
         oct = oct_handler.next_child(1, ind, parent)
-        print 'recurse'
         subdivide(oct_handler, ind, ddr, oct, cur_level + 1, 
-                  cur_leaf + 1, noct, max_level, fsubdivide)
+                  cur_noct+ 1, noct, max_level, fsubdivide)
+    return cur_noct


https://bitbucket.org/yt_analysis/yt-3.0/commits/ae7f57c8e2d9/
Changeset:   ae7f57c8e2d9
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 01:44:41
Summary:     updating the tests for the octree
Affected #:  1 file

diff -r 13f4fb73e78ae85c67e50bd333d65df53e132b87 -r ae7f57c8e2d9dea4d974c26badf796a53cb67db3 yt/geometry/tests/fake_octree.py
--- a/yt/geometry/tests/fake_octree.py
+++ b/yt/geometry/tests/fake_octree.py
@@ -1,12 +1,25 @@
 from yt.geometry.fake_octree import create_fake_octree
+from yt.geometry.oct_container import RAMSESOctreeContainer
 import numpy as np
 
-max_leaf = 100
-max_level = 5
+nocts= 100
+max_level = 12
 dn = 2
 dd = np.ones(3,dtype='i4')*dn
 dle = np.ones(3,dtype='f8')*0.0
 dre = np.ones(3,dtype='f8')
-fsub = 0.90
+fsub = 0.10
+domain = 0
 
-octtree = create_fake_octree(max_leaf, max_level, dd, dle, dre, fsub)
+oct_handler = RAMSESOctreeContainer(dd,dle,dre)
+create_fake_octree(oct_handler, nocts, max_level, dd, dle, dre, fsub)
+print "filled"
+print oct_handler.check(domain, print_all=1)
+mask = np.ones(nocts,dtype='bool')
+print mask
+cell_count = nocts*8
+level_counts = np.array([nocts]) # not used anyway
+fc = oct_handler.fcoords(domain,mask,cell_count)
+print fc
+print fc.shape
+


https://bitbucket.org/yt_analysis/yt-3.0/commits/861231d91808/
Changeset:   861231d91808
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 01:44:58
Summary:     Merge
Affected #:  1 file

diff -r ae7f57c8e2d9dea4d974c26badf796a53cb67db3 -r 861231d918087390f934d55775406563de920dab yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -513,6 +513,11 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
+    def deposit(self, positions, fields, op):
+        assert(self._current_chunk.chunk_type == "spatial")
+        fields = ensure_list(fields)
+        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
+
     @contextmanager
     def _field_lock(self):
         self._locked = True


https://bitbucket.org/yt_analysis/yt-3.0/commits/9e8ac81fa888/
Changeset:   9e8ac81fa888
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 01:47:10
Summary:     Merge
Affected #:  6 files

diff -r 861231d918087390f934d55775406563de920dab -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -31,6 +31,9 @@
 from grid_patch import \
     AMRGridPatch
 
+from octree_subset import \
+    OctreeSubset
+
 from static_output import \
     StaticOutput
 

diff -r 861231d918087390f934d55775406563de920dab -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a yt/data_objects/octree_subset.py
--- /dev/null
+++ b/yt/data_objects/octree_subset.py
@@ -0,0 +1,106 @@
+"""
+Subsets of octrees
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
+from .field_info_container import \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+
+class OctreeSubset(YTSelectionContainer):
+    _spatial = True
+    _num_ghost_zones = 0
+    _num_zones = 2
+    _type_name = 'octree_subset'
+    _skip_add = True
+    _con_args = ('domain', 'mask', 'cell_count')
+    _container_fields = ("dx", "dy", "dz")
+
+    def __init__(self, domain, mask, cell_count):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.mask = mask
+        self.n_oct = mask.shape[0]
+        self.domain = domain
+        self.pf = domain.pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = domain.pf.h.oct_handler
+        self.cell_count = cell_count
+        level_counts = self.oct_handler.count_levels(
+            self.domain.pf.max_level, self.domain.domain_id, mask)
+        assert(level_counts.sum() == cell_count)
+        level_counts[1:] = level_counts[:-1]
+        level_counts[0] = 0
+        self.level_counts = np.add.accumulate(level_counts)
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+
+    def select_icoords(self, dobj):
+        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fcoords(self, dobj):
+        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fwidth(self, dobj):
+        # Recall domain_dimensions is the number of cells, not octs
+        base_dx = (self.domain.pf.domain_width /
+                   self.domain.pf.domain_dimensions)
+        widths = np.empty((self.cell_count, 3), dtype="float64")
+        dds = (2**self.select_ires(dobj))
+        for i in range(3):
+            widths[:,i] = base_dx[i] / dds
+        return widths
+
+    def select_ires(self, dobj):
+        return self.oct_handler.ires(self.domain.domain_id, self.mask,
+                                     self.cell_count,
+                                     self.level_counts.copy())
+
+    def __getitem__(self, key):
+        tr = super(OctreeSubset, self).__getitem__(key)
+        import pdb; pdb.set_trace()
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.pf._get_field_info(*fields[0])
+        if not finfo.particle_type:
+            nz = self._num_zones + 2*self._num_ghost_zones
+            dest_shape = (nz, nz, nz, self.n_oct)
+            return tr.reshape(dest_shape)
+        return tr
+
+
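
The __getitem__ override above reshapes the flat selection of fluid values into per-oct blocks of shape (nz, nz, nz, n_oct); a minimal NumPy sketch of that reshape with nz = 2 zones and no ghost zones (how the flat ordering maps onto the axes is up to the oct handler):

    import numpy as np

    nz, n_oct = 2, 5                                  # _num_zones = 2, no ghost zones
    tr = np.arange(nz**3 * n_oct, dtype="float64")    # flat cell values from the selection
    dest_shape = (nz, nz, nz, n_oct)                  # same shape as in __getitem__
    blocks = tr.reshape(dest_shape)
    print(blocks.shape)                               # (2, 2, 2, 5)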

diff -r 861231d918087390f934d55775406563de920dab -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.data_objects.field_info_container import \
@@ -433,43 +435,10 @@
                 return False
         return False
 
-
-class ARTDomainSubset(object):
+class ARTDomainSubset(OctreeSubset):
     def __init__(self, domain, mask, cell_count, domain_level):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
+        super(ARTDomainSubset, self).__init__(domain, mask, cell_count)
         self.domain_level = domain_level
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        base_dx = 1.0/self.domain.pf.domain_dimensions
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:, i] = base_dx[i] / dds
-        return widths
 
     def fill_root(self, content, ftfields):
         """

diff -r 861231d918087390f934d55775406563de920dab -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -50,7 +50,6 @@
         return (exceptions.KeyError, hdf5_light_reader.ReadingError)
 
     def _read_particle_selection_by_type(self, chunks, selector, fields):
-        # Active particles don't have the particle_ prefix.
         rv = {}
         ptypes = list(set([ftype for ftype, fname in fields]))
         fields = list(set(fields))
@@ -94,7 +93,7 @@
         # Now we have to do something unpleasant
         if any((ftype != "all" for ftype, fname in fields)):
             type_fields = [(ftype, fname) for ftype, fname in fields
-                           if ftype != all]
+                           if ftype != "all"]
             rv.update(self._read_particle_selection_by_type(
                       chunks, selector, type_fields))
             if len(rv) == len(fields): return rv
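
The one-character enzo fix above is easy to miss: `ftype != all` compares the string against the Python builtin `all`, which is never equal, so the filter kept every field; comparing against the string "all" is what was intended. A quick illustration:

    fields = [("all", "particle_mass"), ("io", "particle_mass")]
    # Buggy: every ftype differs from the builtin function `all`, so nothing is filtered
    print([f for f in fields if f[0] != all])      # keeps both tuples
    # Fixed: compare against the string "all"
    print([f for f in fields if f[0] != "all"])    # keeps only ('io', 'particle_mass')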

diff -r 861231d918087390f934d55775406563de920dab -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,6 +35,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 
 from .definitions import ramses_header
 from yt.utilities.definitions import \
@@ -252,43 +254,7 @@
         self.select(selector)
         return self.count(selector)
 
-class RAMSESDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
+class RAMSESDomainSubset(OctreeSubset):
 
     def fill(self, content, fields):
         # Here we get a copy of the file, which we skip through and read the
@@ -389,8 +355,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

diff -r 861231d918087390f934d55775406563de920dab -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1098,3 +1098,44 @@
 
 grid_selector = GridSelector
 
+cdef class OctreeSubsetSelector(SelectorObject):
+    # This is a numpy array, which will be a bool of ndim 1
+    cdef object oct_mask
+
+    def __init__(self, dobj):
+        self.oct_mask = dobj.mask
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_octs(self, OctreeContainer octree):
+        return self.oct_mask
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void set_bounds(self,
+                         np.float64_t left_edge[3], np.float64_t right_edge[3],
+                         np.float64_t dds[3], int ind[3][2], int *check):
+        check[0] = 0
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def select_grids(self,
+                     np.ndarray[np.float64_t, ndim=2] left_edges,
+                     np.ndarray[np.float64_t, ndim=2] right_edges,
+                     np.ndarray[np.int32_t, ndim=2] levels):
+        raise RuntimeError
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3],
+                         int eterm[3]) nogil:
+        return 1
+
+
+octree_subset_selector = OctreeSubsetSelector
+
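
The OctreeSubsetSelector above is essentially a pass-through: it returns the data object's precomputed oct mask and accepts every cell. A hedged plain-Python caricature of that shape (not the Cython SelectorObject API):

    import numpy as np

    class MaskPassthroughSelector(object):
        # Holds the data object's precomputed oct mask; oct selection returns it
        # unchanged, and cell-level selection always succeeds.
        def __init__(self, dobj):
            self.oct_mask = dobj.mask

        def select_octs(self, octree=None):
            return self.oct_mask

        def select_cell(self, pos, dds):
            return 1

    class FakeDataObject(object):
        mask = np.ones((4, 8), dtype=bool)

    print(MaskPassthroughSelector(FakeDataObject()).select_octs().shape)   # (4, 8)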


https://bitbucket.org/yt_analysis/yt-3.0/commits/acadab069a03/
Changeset:   acadab069a03
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 02:55:53
Summary:     adding more debug prints
Affected #:  1 file

diff -r 9e8ac81fa888ddce960f8c4b7a07fe20dfb3633a -r acadab069a03a501d1ca1e8e1010e68b69da83aa yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -676,7 +676,7 @@
                 count[cur.my_octs[i - cur.offset].domain - 1] += 1
         return count
 
-    def check(self, int curdom):
+    def check(self, int curdom, int print_all = 0):
         cdef int dind, pi
         cdef Oct oct
         cdef OctAllocationContainer *cont = self.domains[curdom - 1]
@@ -685,6 +685,9 @@
         cdef int unassigned = 0
         for pi in range(cont.n_assigned):
             oct = cont.my_octs[pi]
+            if print_all==1:
+                print pi, oct.level, oct.domain,
+                print oct.pos[0],oct.pos[1],oct.pos[2]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):


https://bitbucket.org/yt_analysis/yt-3.0/commits/a69ba0a7a97a/
Changeset:   a69ba0a7a97a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 02:56:27
Summary:     assign domain=1 to octs. fixes segfault
Affected #:  1 file

diff -r acadab069a03a501d1ca1e8e1010e68b69da83aa -r a69ba0a7a97a4060aa6dd14bb8c7227f9aaef601 yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -52,6 +52,7 @@
     cdef int[3] dd #hold the octant index
     cdef long i
     cdef long cur_noct = 0
+    cdef long cur_leaf= 0
     for i in range(3):
         ind[i] = 0
         dd[i] = ndd[i]
@@ -63,18 +64,21 @@
     oct_handler.allocate_domains([noct])
     print 'n_assigned', oct_handler.domains[0].n_assigned
     print 'parent'
-    parent = oct_handler.next_root(oct_handler.max_domain, ind)
+    parent = oct_handler.next_root(1, ind)
+    parent.domain = 1
+    cur_leaf = 8 #we've added one parent...
     print 'subdiv'
     while oct_handler.domains[0].n_assigned < noct:
-        cur_noct = subdivide(oct_handler,ind, dd, parent, 0, 0, noct,
+        cur_noct = subdivide(oct_handler,ind, dd, cur_leaf, parent, 0, 0, noct,
                   max_level, fsubdivide)
 
 cdef long subdivide(RAMSESOctreeContainer oct_handler, int ind[3], 
-               int dd[3],
+               int dd[3], long cur_leaf,
                Oct *parent, long cur_level, long cur_noct,
                long noct, long max_level, float fsubdivide):
-    print cur_level, ' n_assigned ', oct_handler.domains[0].n_assigned, 
-    print ' n', oct_handler.domains[0].n
+    print cur_level, ' na ', oct_handler.domains[0].n_assigned, 
+    print ' n', oct_handler.domains[0].n,
+    print 'pos ', parent.pos[0], parent.pos[1], parent.pos[2]
     cdef int ddr[3]
     cdef long i,j,k
     cdef float rf #random float from 0-1
@@ -89,6 +93,7 @@
     if rf > fsubdivide:
         #this will mark the octant ind as subdivided
         oct = oct_handler.next_child(1, ind, parent)
+        oct.domain = 1
         subdivide(oct_handler, ind, ddr, oct, cur_level + 1, 
                   cur_noct+ 1, noct, max_level, fsubdivide)
     return cur_noct


https://bitbucket.org/yt_analysis/yt-3.0/commits/634a0e8835d7/
Changeset:   634a0e8835d7
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 02:57:04
Summary:     tests are ok; need to stop adding octs based on a leaf-count condition, not an oct-count condition
Affected #:  1 file

diff -r a69ba0a7a97a4060aa6dd14bb8c7227f9aaef601 -r 634a0e8835d7a007a4355a54b4aa2044206ec698 yt/geometry/tests/fake_octree.py
--- a/yt/geometry/tests/fake_octree.py
+++ b/yt/geometry/tests/fake_octree.py
@@ -8,18 +8,23 @@
 dd = np.ones(3,dtype='i4')*dn
 dle = np.ones(3,dtype='f8')*0.0
 dre = np.ones(3,dtype='f8')
-fsub = 0.10
-domain = 0
+fsub = 0.25
+domain = 1
 
 oct_handler = RAMSESOctreeContainer(dd,dle,dre)
 create_fake_octree(oct_handler, nocts, max_level, dd, dle, dre, fsub)
 print "filled"
-print oct_handler.check(domain, print_all=1)
-mask = np.ones(nocts,dtype='bool')
-print mask
+print oct_handler.check(1, print_all=1)
+mask = np.ones((nocts,8),dtype='bool')
 cell_count = nocts*8
-level_counts = np.array([nocts]) # not used anyway
-fc = oct_handler.fcoords(domain,mask,cell_count)
+level_counts = oct_handler.count_levels(max_level, 1, mask)
+print level_counts
+print "fcoords"
+fc = oct_handler.fcoords(domain,mask,cell_count,level_counts)
+print level_counts, level_counts.sum()
+print [np.unique(fc[:,ax]).shape[0] for ax in range(3)]
 print fc
 print fc.shape
+import pdb; pdb.set_trace()
 
+#Now take the particles and recreate the same octree
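
Each oct carries eight cells, so the selection mask switches here from a flat
(nocts,) array to (nocts, 8), and cell_count is simply nocts*8 when nothing is
masked out. A tiny NumPy sketch of that relationship (fcoords is then expected
to hand back one (x, y, z) center per unmasked cell):

    import numpy as np

    nocts = 3
    mask = np.ones((nocts, 8), dtype="bool")   # one row of eight cells per oct
    cell_count = int(mask.sum())               # == nocts * 8 with nothing masked
    assert cell_count == nocts * 8
    # fcoords(domain, mask, cell_count, level_counts) should then return an
    # array of shape (cell_count, 3), one center per unmasked cell.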


https://bitbucket.org/yt_analysis/yt-3.0/commits/ff7fe030cc38/
Changeset:   ff7fe030cc38
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 04:22:35
Summary:     cleaned; fixed level counts
Affected #:  1 file

diff -r 634a0e8835d7a007a4355a54b4aa2044206ec698 -r ff7fe030cc382be44bb9083efcc64e66847e4aa4 yt/geometry/tests/fake_octree.py
--- a/yt/geometry/tests/fake_octree.py
+++ b/yt/geometry/tests/fake_octree.py
@@ -13,18 +13,10 @@
 
 oct_handler = RAMSESOctreeContainer(dd,dle,dre)
 create_fake_octree(oct_handler, nocts, max_level, dd, dle, dre, fsub)
-print "filled"
-print oct_handler.check(1, print_all=1)
 mask = np.ones((nocts,8),dtype='bool')
 cell_count = nocts*8
-level_counts = oct_handler.count_levels(max_level, 1, mask)
-print level_counts
-print "fcoords"
-fc = oct_handler.fcoords(domain,mask,cell_count,level_counts)
-print level_counts, level_counts.sum()
-print [np.unique(fc[:,ax]).shape[0] for ax in range(3)]
-print fc
-print fc.shape
-import pdb; pdb.set_trace()
+oct_counts = oct_handler.count_levels(max_level, 1, mask)
+level_counts = np.concatenate(([0,],np.cumsum(oct_counts)))
+fc = oct_handler.fcoords(domain,mask,cell_count, level_counts.copy())
 
 #Now take the particles and recreate the same octree
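
The level_counts handed to fcoords here is just a prefix sum over the
per-level oct counts, so each level knows its starting offset into the
flattened coordinate array (presumably why the caller passes a .copy() the
Cython side can use as a running cursor). A minimal NumPy sketch of that
bookkeeping, with made-up counts:

    import numpy as np

    # Hypothetical per-level oct counts, e.g. as returned by count_levels().
    oct_counts = np.array([1, 2, 5], dtype="int64")

    # Prefix-sum offsets: level_counts[l] is where level l starts in the
    # flattened per-cell arrays; level_counts[-1] is the total.
    level_counts = np.concatenate(([0], np.cumsum(oct_counts)))

    assert level_counts[0] == 0
    assert level_counts[-1] == oct_counts.sum()
    for l in range(len(oct_counts)):
        # Level l occupies the half-open slice [level_counts[l], level_counts[l+1]).
        assert level_counts[l + 1] - level_counts[l] == oct_counts[l]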


https://bitbucket.org/yt_analysis/yt-3.0/commits/7bd2fd1466e2/
Changeset:   7bd2fd1466e2
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 07:37:34
Summary:     added leaf counting
Affected #:  1 file

diff -r ff7fe030cc382be44bb9083efcc64e66847e4aa4 -r 7bd2fd1466e2a92c6a6b4e9961a2de239e58f071 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -196,6 +196,39 @@
                 count[o.domain - 1] += mask[o.local_ind,i]
         return count
 
+    @cython.boundscheck(True)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_leaves(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        # Modified to work when not all octs are assigned
+        cdef int i, j, k, ii
+        cdef np.int64_t oi
+        # pos here is CELL center, not OCT center.
+        cdef np.float64_t pos[3]
+        cdef int n = mask.shape[0]
+        cdef np.ndarray[np.int64_t, ndim=1] count
+        count = np.zeros(self.max_domain, 'int64')
+        # 
+        cur = self.cont
+        for oi in range(n):
+            if oi - cur.offset >= cur.n_assigned:
+                cur = cur.next
+                if cur == NULL:
+                    break
+            o = &cur.my_octs[oi - cur.offset]
+            # skip if unassigned
+            if o == NULL:
+                continue
+            if o.domain == -1: 
+                continue
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if o.children[i][j][k] == NULL:
+                            ii = ((k*2)+j)*2+i
+                            count[o.domain - 1] += mask[o.local_ind,ii]
+        return count
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
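
count_leaves walks every assigned oct and counts the masked cells whose child
pointer is NULL, flattening the (i, j, k) cell into ii = ((k*2)+j)*2+i. A rough
pure-Python equivalent, with hypothetical children/mask/domain arrays standing
in for the C structs:

    import numpy as np

    def count_leaves_py(children, mask, domains, max_domain):
        # children: (nocts, 2, 2, 2) bool, True where a child oct exists
        # mask:     (nocts, 8) bool, selector mask per cell
        # domains:  (nocts,) int, 1-based domain id per oct, -1 for unassigned
        count = np.zeros(max_domain, dtype="int64")
        for oi in range(children.shape[0]):
            if domains[oi] == -1:
                continue
            for i in range(2):
                for j in range(2):
                    for k in range(2):
                        if not children[oi, i, j, k]:
                            ii = ((k * 2) + j) * 2 + i
                            count[domains[oi] - 1] += mask[oi, ii]
        return count

    # One unrefined oct in domain 1 with all cells selected -> 8 leaves.
    children = np.zeros((1, 2, 2, 2), dtype=bool)
    mask = np.ones((1, 8), dtype=bool)
    assert count_leaves_py(children, mask, np.array([1]), 1)[0] == 8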


https://bitbucket.org/yt_analysis/yt-3.0/commits/390b77a76c9f/
Changeset:   390b77a76c9f
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 07:38:05
Summary:     cleaned fake_octree code
Affected #:  1 file

diff -r 7bd2fd1466e2a92c6a6b4e9961a2de239e58f071 -r 390b77a76c9fc3b0e7a517b1c7600fb2977746e2 yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -32,68 +32,61 @@
 
 from oct_container cimport Oct, RAMSESOctreeContainer
 
-# Defined only by N leaves
-# Randomly decide if a branch should be subdivide, recurse one level if so
-# Once done, create a position array of len(leafes) with smoothing lengths = oct_size
-
-# Note that with this algorithm the octree won't be balanced once you hit
-# the maximum number of desired leaves
-
-# Use next_child(domain, int[3] octant, Oct parent)
-
+# Create a balanced octree by a random walk that recursively
+# subdivides
 def create_fake_octree(RAMSESOctreeContainer oct_handler,
-                       long noct,
+                       long max_noct,
                        long max_level,
                        np.ndarray[np.int32_t, ndim=1] ndd,
                        np.ndarray[np.float64_t, ndim=1] dle,
                        np.ndarray[np.float64_t, ndim=1] dre,
                        float fsubdivide):
+    cdef int[3] dd #hold the octant index
     cdef int[3] ind #hold the octant index
-    cdef int[3] dd #hold the octant index
     cdef long i
-    cdef long cur_noct = 0
-    cdef long cur_leaf= 0
+    cdef long cur_leaf = 0
+    cdef long leaves = 0
+    cdef np.ndarray[np.uint8_t, ndim=2] mask
     for i in range(3):
         ind[i] = 0
         dd[i] = ndd[i]
-    assert dd[0]*dd[1]*dd[2] <= noct
-    print 'starting'
-    print ind[0], ind[1], ind[2]
-    print 'allocate'
-    print noct
-    oct_handler.allocate_domains([noct])
-    print 'n_assigned', oct_handler.domains[0].n_assigned
-    print 'parent'
+    oct_handler.allocate_domains([max_noct])
     parent = oct_handler.next_root(1, ind)
     parent.domain = 1
     cur_leaf = 8 #we've added one parent...
-    print 'subdiv'
-    while oct_handler.domains[0].n_assigned < noct:
-        cur_noct = subdivide(oct_handler,ind, dd, cur_leaf, parent, 0, 0, noct,
-                  max_level, fsubdivide)
+    mask = np.ones((max_noct,8),dtype='uint8')
+    while oct_handler.domains[0].n_assigned < max_noct:
+        print "root: nocts ", oct_handler.domains[0].n_assigned
+        cur_leaf = subdivide(oct_handler, parent, ind, dd, cur_leaf, 0,
+                             max_noct, max_level, fsubdivide, mask)
+                             
+    leaves = oct_handler.count_leaves(mask)
+    assert cur_leaf == leaves
 
-cdef long subdivide(RAMSESOctreeContainer oct_handler, int ind[3], 
-               int dd[3], long cur_leaf,
-               Oct *parent, long cur_level, long cur_noct,
-               long noct, long max_level, float fsubdivide):
-    print cur_level, ' na ', oct_handler.domains[0].n_assigned, 
-    print ' n', oct_handler.domains[0].n,
-    print 'pos ', parent.pos[0], parent.pos[1], parent.pos[2]
+cdef long subdivide(RAMSESOctreeContainer oct_handler, 
+                    Oct *parent,
+                    int ind[3], int dd[3], 
+                    long cur_leaf, long cur_level, 
+                    long max_noct, long max_level, float fsubdivide,
+                    np.ndarray[np.uint8_t, ndim=2] mask):
+    print "child", parent.ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
     cdef int ddr[3]
     cdef long i,j,k
     cdef float rf #random float from 0-1
     if cur_level >= max_level: 
-        return cur_noct
-    if oct_handler.domains[0].n_assigned >= noct: 
-        return cur_noct
+        return cur_leaf
+    if oct_handler.domains[0].n_assigned >= max_noct:
+        return cur_leaf
     for i in range(3):
         ind[i] = <int> ((rand() * 1.0 / RAND_MAX) * dd[i])
         ddr[i] = 2
     rf = rand() * 1.0 / RAND_MAX
     if rf > fsubdivide:
-        #this will mark the octant ind as subdivided
+        if parent.children[ind[0]][ind[1]][ind[2]] == NULL:
+            cur_leaf += 7 
         oct = oct_handler.next_child(1, ind, parent)
         oct.domain = 1
-        subdivide(oct_handler, ind, ddr, oct, cur_level + 1, 
-                  cur_noct+ 1, noct, max_level, fsubdivide)
-    return cur_noct
+        cur_leaf = subdivide(oct_handler, oct, ind, ddr, cur_leaf, 
+                             cur_level + 1, max_noct, max_level, 
+                             fsubdivide, mask)
+    return cur_leaf
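
The cur_leaf arithmetic in the cleaned-up walk follows from two facts: a fresh
root oct contributes eight leaf cells, and refining a previously unrefined cell
swaps one leaf for eight new ones, a net gain of seven (hence the
`cur_leaf += 7` guarded by the NULL-child check). In outline:

    cur_leaf = 8        # one root oct: 8 leaf cells
    cur_leaf += 7       # refine an unrefined cell: -1 old leaf, +8 new -> 15
    cur_leaf += 7       # refine another unrefined cell -> 22
    # Descending into an already-refined cell creates no new oct, so the
    # count is left alone -- exactly what the NULL check above enforces.
    assert cur_leaf == 8 + 7 * 2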


https://bitbucket.org/yt_analysis/yt-3.0/commits/fd90ac91d0d8/
Changeset:   fd90ac91d0d8
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-04-18 08:07:43
Summary:     octree is successfully instantiated; particle octree does not yet match
Affected #:  2 files

diff -r 390b77a76c9fc3b0e7a517b1c7600fb2977746e2 -r fd90ac91d0d8ecb8d46f13c7b3d9cb389292e190 yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -45,7 +45,6 @@
     cdef int[3] ind #hold the octant index
     cdef long i
     cdef long cur_leaf = 0
-    cdef long leaves = 0
     cdef np.ndarray[np.uint8_t, ndim=2] mask
     for i in range(3):
         ind[i] = 0
@@ -59,9 +58,8 @@
         print "root: nocts ", oct_handler.domains[0].n_assigned
         cur_leaf = subdivide(oct_handler, parent, ind, dd, cur_leaf, 0,
                              max_noct, max_level, fsubdivide, mask)
+    return cur_leaf
                              
-    leaves = oct_handler.count_leaves(mask)
-    assert cur_leaf == leaves
 
 cdef long subdivide(RAMSESOctreeContainer oct_handler, 
                     Oct *parent,

diff -r 390b77a76c9fc3b0e7a517b1c7600fb2977746e2 -r fd90ac91d0d8ecb8d46f13c7b3d9cb389292e190 yt/geometry/tests/fake_octree.py
--- a/yt/geometry/tests/fake_octree.py
+++ b/yt/geometry/tests/fake_octree.py
@@ -1,8 +1,8 @@
 from yt.geometry.fake_octree import create_fake_octree
-from yt.geometry.oct_container import RAMSESOctreeContainer
+from yt.geometry.oct_container import RAMSESOctreeContainer, ParticleOctreeContainer
 import numpy as np
 
-nocts= 100
+nocts= 3
 max_level = 12
 dn = 2
 dd = np.ones(3,dtype='i4')*dn
@@ -12,11 +12,27 @@
 domain = 1
 
 oct_handler = RAMSESOctreeContainer(dd,dle,dre)
-create_fake_octree(oct_handler, nocts, max_level, dd, dle, dre, fsub)
+leaves = create_fake_octree(oct_handler, nocts, max_level, dd, dle, dre, fsub)
 mask = np.ones((nocts,8),dtype='bool')
 cell_count = nocts*8
 oct_counts = oct_handler.count_levels(max_level, 1, mask)
 level_counts = np.concatenate(([0,],np.cumsum(oct_counts)))
 fc = oct_handler.fcoords(domain,mask,cell_count, level_counts.copy())
+leavesb = oct_handler.count_leaves(mask)
+assert leaves == leavesb
 
-#Now take the particles and recreate the same octree
+#Now take the fcoords, call them particles and recreate the same octree
+print "particle-based recreate"
+oct_handler2 = ParticleOctreeContainer(dd,dle,dre)
+oct_handler2.allocate_domains([nocts])
+oct_handler2.n_ref = 1 #specifically make a maximum of 1 particle per oct
+oct_handler2.add(fc, 1)
+print "added particles"
+cell_count2 = nocts*8
+oct_counts2 = oct_handler.count_levels(max_level, 1, mask)
+level_counts2 = np.concatenate(([0,],np.cumsum(oct_counts)))
+fc2 = oct_handler.fcoords(domain,mask,cell_count, level_counts.copy())
+leaves2 = oct_handler2.count_leaves(mask)
+assert leaves == leaves2
+
+print "success"


https://bitbucket.org/yt_analysis/yt-3.0/commits/1b3764566f4c/
Changeset:   1b3764566f4c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-18 17:31:43
Summary:     Merging in the basics of particle deposition
Affected #:  11 files

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -513,6 +513,11 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
+    def deposit(self, positions, fields, op):
+        assert(self._current_chunk.chunk_type == "spatial")
+        fields = ensure_list(fields)
+        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
+
     @contextmanager
     def _field_lock(self):
         self._locked = True

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/frontends/sph/smoothing_kernel.pyx
--- a/yt/frontends/sph/smoothing_kernel.pyx
+++ b/yt/frontends/sph/smoothing_kernel.pyx
@@ -53,21 +53,28 @@
     for p in range(ngas):
         kernel_sum[p] = 0.0
         skip = 0
+        # Find the # of cells of the kernel
         for i in range(3):
             pos[i] = ppos[p, i]
+            # Get particle root grid integer index
             ind[i] = <int>((pos[i] - left_edge[i]) / dds[i])
+            # How many root grid cells does the smoothing length span + 1
             half_len = <int>(hsml[p]/dds[i]) + 1
+            # Left and right integer indices of the smoothing range
+            # If smoothing len is small could be inside the same bin
             ib0[i] = ind[i] - half_len
             ib1[i] = ind[i] + half_len
             #pos[i] = ppos[p, i] - left_edge[i]
             #ind[i] = <int>(pos[i] / dds[i])
             #ib0[i] = <int>((pos[i] - hsml[i]) / dds[i]) - 1
             #ib1[i] = <int>((pos[i] + hsml[i]) / dds[i]) + 1
+            # Skip if outside out root grid
             if ib0[i] >= dims[i] or ib1[i] < 0:
                 skip = 1
             ib0[i] = iclip(ib0[i], 0, dims[i] - 1)
             ib1[i] = iclip(ib1[i], 0, dims[i] - 1)
         if skip == 1: continue
+        # Having found the kernel shape, calculate the kernel weight
         for i from ib0[0] <= i <= ib1[0]:
             idist[0] = (ind[0] - i) * (ind[0] - i) * sdds[0]
             for j from ib0[1] <= j <= ib1[1]:
@@ -75,10 +82,14 @@
                 for k from ib0[2] <= k <= ib1[2]:
                     idist[2] = (ind[2] - k) * (ind[2] - k) * sdds[2]
                     dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / hsml[p]
+                    # Kernel is 3D but save the elements in a 1D array
                     gi = ((i * dims[1] + j) * dims[2]) + k
                     pdist[gi] = sph_kernel(dist)
+                    # Save sum to normalize later
                     kernel_sum[p] += pdist[gi]
+        # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
             for j from ib0[1] <= j <= ib1[1]:
                 for k from ib0[2] <= k <= ib1[2]:
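
The annotated loop above is the normalize-then-deposit pattern: for each
particle, evaluate the kernel at every cell within the smoothing length,
accumulate kernel_sum, and only then spread the particle's value so each
particle deposits exactly its total. A 1D NumPy sketch of the same idea (the
cubic-spline form below follows kernel_sph from the deposition draft later in
this series; the function names and signatures are illustrative):

    import numpy as np

    def cubic_spline(x):
        # Unnormalized cubic spline, nonzero for 0 <= x <= 1.
        if x <= 0.5:
            return 1.0 - 6.0 * x * x * (1.0 - x)
        if x <= 1.0:
            return 2.0 * (1.0 - x) ** 3
        return 0.0

    def deposit_1d(ppos, pval, hsml, left_edge, dx, ncells):
        grid = np.zeros(ncells)
        for p in range(len(ppos)):
            ind = int((ppos[p] - left_edge) / dx)          # root-grid index
            half = int(hsml[p] / dx) + 1                   # cells spanned + 1
            i0, i1 = max(ind - half, 0), min(ind + half, ncells - 1)
            if i0 > i1:
                continue
            cells = np.arange(i0, i1 + 1)
            w = np.array([cubic_spline(abs(ind - i) * dx / hsml[p])
                          for i in cells])
            if w.sum() == 0.0:
                continue
            grid[cells] += pval[p] * w / w.sum()           # normalized deposit
        return grid

    grid = deposit_1d(np.array([0.5]), np.array([3.0]), np.array([0.2]),
                      left_edge=0.0, dx=0.1, ncells=10)
    assert abs(grid.sum() - 3.0) < 1e-12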

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/geometry/fake_octree.pyx
--- /dev/null
+++ b/yt/geometry/fake_octree.pyx
@@ -0,0 +1,90 @@
+"""
+Make a fake octree, deposit particle at every leaf
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from libc.stdlib cimport malloc, free, rand, RAND_MAX
+cimport numpy as np
+import numpy as np
+cimport cython
+
+from oct_container cimport Oct, RAMSESOctreeContainer
+
+# Create a balanced octree by a random walk that recursively
+# subdivides
+def create_fake_octree(RAMSESOctreeContainer oct_handler,
+                       long max_noct,
+                       long max_level,
+                       np.ndarray[np.int32_t, ndim=1] ndd,
+                       np.ndarray[np.float64_t, ndim=1] dle,
+                       np.ndarray[np.float64_t, ndim=1] dre,
+                       float fsubdivide):
+    cdef int[3] dd #hold the octant index
+    cdef int[3] ind #hold the octant index
+    cdef long i
+    cdef long cur_leaf = 0
+    cdef np.ndarray[np.uint8_t, ndim=2] mask
+    for i in range(3):
+        ind[i] = 0
+        dd[i] = ndd[i]
+    oct_handler.allocate_domains([max_noct])
+    parent = oct_handler.next_root(1, ind)
+    parent.domain = 1
+    cur_leaf = 8 #we've added one parent...
+    mask = np.ones((max_noct,8),dtype='uint8')
+    while oct_handler.domains[0].n_assigned < max_noct:
+        print "root: nocts ", oct_handler.domains[0].n_assigned
+        cur_leaf = subdivide(oct_handler, parent, ind, dd, cur_leaf, 0,
+                             max_noct, max_level, fsubdivide, mask)
+    return cur_leaf
+                             
+
+cdef long subdivide(RAMSESOctreeContainer oct_handler, 
+                    Oct *parent,
+                    int ind[3], int dd[3], 
+                    long cur_leaf, long cur_level, 
+                    long max_noct, long max_level, float fsubdivide,
+                    np.ndarray[np.uint8_t, ndim=2] mask):
+    print "child", parent.ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
+    cdef int ddr[3]
+    cdef long i,j,k
+    cdef float rf #random float from 0-1
+    if cur_level >= max_level: 
+        return cur_leaf
+    if oct_handler.domains[0].n_assigned >= max_noct:
+        return cur_leaf
+    for i in range(3):
+        ind[i] = <int> ((rand() * 1.0 / RAND_MAX) * dd[i])
+        ddr[i] = 2
+    rf = rand() * 1.0 / RAND_MAX
+    if rf > fsubdivide:
+        if parent.children[ind[0]][ind[1]][ind[2]] == NULL:
+            cur_leaf += 7 
+        oct = oct_handler.next_child(1, ind, parent)
+        oct.domain = 1
+        cur_leaf = subdivide(oct_handler, oct, ind, ddr, cur_leaf, 
+                             cur_level + 1, max_noct, max_level, 
+                             fsubdivide, mask)
+    return cur_leaf

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -54,7 +54,7 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
-    cdef Oct* get(self, ppos)
+    cdef Oct* get(self, np.float64_t ppos[3], int *ii = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
 

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -142,7 +142,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, ppos):
+    cdef Oct *get(self, np.float64_t ppos[3], int *ii = NULL):
         #Given a floating point position, retrieve the most
         #refined oct at that time
         cdef np.int64_t ind[3]
@@ -165,6 +165,13 @@
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
             cur = cur.children[ind[0]][ind[1]][ind[2]]
+        if ii != NULL: return cur
+        for i in range(3):
+            if cp[i] > pp[i]:
+                ind[i] = 0
+            else:
+                ind[i] = 1
+        ii[0] = ((ind[2]*2)+ind[1])*2+ind[0]
         return cur
 
     @cython.boundscheck(False)
@@ -189,6 +196,39 @@
                 count[o.domain - 1] += mask[o.local_ind,i]
         return count
 
+    @cython.boundscheck(True)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_leaves(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        # Modified to work when not all octs are assigned
+        cdef int i, j, k, ii
+        cdef np.int64_t oi
+        # pos here is CELL center, not OCT center.
+        cdef np.float64_t pos[3]
+        cdef int n = mask.shape[0]
+        cdef np.ndarray[np.int64_t, ndim=1] count
+        count = np.zeros(self.max_domain, 'int64')
+        # 
+        cur = self.cont
+        for oi in range(n):
+            if oi - cur.offset >= cur.n_assigned:
+                cur = cur.next
+                if cur == NULL:
+                    break
+            o = &cur.my_octs[oi - cur.offset]
+            # skip if unassigned
+            if o == NULL:
+                continue
+            if o.domain == -1: 
+                continue
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if o.children[i][j][k] == NULL:
+                            ii = ((k*2)+j)*2+i
+                            count[o.domain - 1] += mask[o.local_ind,ii]
+        return count
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -260,14 +300,17 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def get_neighbor_boundaries(self, ppos):
+    def get_neighbor_boundaries(self, oppos):
+        cdef int i, ii
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
         cdef np.ndarray[np.float64_t, ndim=2] bounds
         cdef np.float64_t corner[3], size[3]
         bounds = np.zeros((27,6), dtype="float64")
-        cdef int i, ii
         tnp = 0
         for i in range(27):
             self.oct_bounds(neighbors[i], corner, size)
@@ -680,7 +723,7 @@
                 m2[o.local_ind, i] = mask[o.local_ind, i]
         return m2
 
-    def check(self, int curdom):
+    def check(self, int curdom, int print_all = 0):
         cdef int dind, pi
         cdef Oct oct
         cdef OctAllocationContainer *cont = self.domains[curdom - 1]
@@ -689,6 +732,9 @@
         cdef int unassigned = 0
         for pi in range(cont.n_assigned):
             oct = cont.my_octs[pi]
+            if print_all==1:
+                print pi, oct.level, oct.domain,
+                print oct.pos[0],oct.pos[1],oct.pos[2]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
@@ -901,6 +947,28 @@
 
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
     #this class is specifically for the NMSU ART
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def deposit_particle_cumsum(self,
+                                np.ndarray[np.float64_t, ndim=2] ppos, 
+                                np.ndarray[np.float64_t, ndim=1] pdata,
+                                np.ndarray[np.float64_t, ndim=1] mask,
+                                np.ndarray[np.float64_t, ndim=1] dest,
+                                fields, int domain):
+        cdef Oct *o
+        cdef OctAllocationContainer *dom = self.domains[domain - 1]
+        cdef np.float64_t pos[3]
+        cdef int ii
+        cdef int no = ppos.shape[0]
+        for n in range(no):
+            for j in range(3):
+                pos[j] = ppos[n,j]
+            o = self.get(pos, &ii) 
+            if mask[o.local_ind,ii]==0: continue
+            dest[o.ind+ii] += pdata[n]
+        return dest
+
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -1394,12 +1462,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_neighbor_particles(self, ppos):
+    def count_neighbor_particles(self, oppos):
         #How many particles are in my neighborhood
+        cdef int i, ni, dl, tnp
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
-        cdef int i, ni, dl, tnp
         tnp = 0
         for i in range(27):
             if neighbors[i].sd != NULL:
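
get() now optionally reports which of the oct's eight cells the point lands in:
compare the position to the oct center per axis and pack the three resulting
bits as ((iz*2)+iy)*2+ix. (As committed, the `if ii != NULL: return cur` guard
looks inverted relative to that intent, since the flat index is only written on
the NULL branch.) A small pure-Python sketch of the index computation, with
hypothetical inputs:

    def cell_index(ppos, oct_center):
        # ind[ax] is 0 on the low side of the oct center, 1 on the high side.
        ind = [0 if oct_center[ax] > ppos[ax] else 1 for ax in range(3)]
        return ((ind[2] * 2) + ind[1]) * 2 + ind[0]

    # A point in the (+x, -y, +z) octant of an oct centered at the origin:
    assert cell_index((0.1, -0.1, 0.1), (0.0, 0.0, 0.0)) == 5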

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/geometry/oct_deposit.pyx
--- /dev/null
+++ b/yt/geometry/oct_deposit.pyx
@@ -0,0 +1,158 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from libc.stdlib cimport malloc, free
+cimport numpy as np
+import numpy as np
+cimport cython
+
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+
+# Mode functions
+ctypedef np.float64_t (*type_opt)(np.float64_t, np.float64_t)
+cdef np.float64_t opt_count(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += 1.0
+
+cdef np.float64_t opt_sum(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += pdata 
+
+cdef np.float64_t opt_diff(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += (data_in[index] - pdata) 
+
+cdef np.float64_t opt_wcount(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += weight
+
+cdef np.float64_t opt_wsum(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += pdata * weight
+
+cdef np.float64_t opt_wdiff(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += (data_in[index] - pdata) * weight
+
+# Selection functions
+ctypedef NOTSURE (*type_sel)(OctreeContainer, 
+                                np.ndarray[np.float64_t, ndim=1],
+                                np.float64_t)
+cdef NOTSURE select_nearest(OctreeContainer oct_handler,
+                            np.ndarray[np.float64_t, ndim=1] pos,
+                            np.float64_t radius):
+    #return only the nearest oct
+    pass
+
+
+cdef NOTSURE select_radius(OctreeContainer oct_handler,
+                            np.ndarray[np.float64_t, ndim=1] pos,
+                            np.float64_t radius):
+    #return a list of octs within the radius
+    pass
+    
+
+# Kernel functions
+ctypedef np.float64_t (*type_ker)(np.float64_t)
+cdef np.float64_t kernel_sph(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
+cdef np.float64_t kernel_null(np.float64_t x) nogil: return 0.0
+
+cdef deposit(OctreeContainer oct_handler, 
+        np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
+        np.ndarray[np.float64_t, ndim=2] pd, # particle fields
+        np.ndarray[np.float64_t, ndim=1] pr, # particle radius
+        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
+        np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
+        mode='count', selection='nearest', kernel='null'):
+    cdef type_opt fopt
+    cdef type_sel fsel
+    cdef type_ker fker
+    cdef long pi #particle index
+    cdef long nocts #number of octs in selection
+    cdef Oct oct 
+    cdef np.float64_t w
+    # Can we do this with dicts?
+    # Setup the function pointers
+    if mode == 'count':
+        fopt = opt_count
+    elif mode == 'sum':
+        fopt = opt_sum
+    elif mode == 'diff':
+        fopt = opt_diff
+    if mode == 'wcount':
+        fopt = opt_count
+    elif mode == 'wsum':
+        fopt = opt_sum
+    elif mode == 'wdiff':
+        fopt = opt_diff
+    if selection == 'nearest':
+        fsel = select_nearest
+    elif selection == 'radius':
+        fsel = select_radius
+    if kernel == 'null':
+        fker = kernel_null
+    if kernel == 'sph':
+        fker = kernel_sph
+    for pi in range(particles):
+        octs = fsel(oct_handler, ppos[pi], pr[pi])
+        for oct in octs:
+            for cell in oct.cells:
+                w = fker(pr[pi],cell) 
+                weights.append(w)
+        norm = weights.sum()
+        for w, oct in zip(weights, octs):
+            for cell in oct.cells:
+                fopt(pd[pi], w/norm, oct.index, data_in, data_out)
+
+
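
The if/elif ladder in deposit() (which even asks "Can we do this with dicts?")
is plain function-pointer dispatch; raw cdef function pointers cannot sit in a
Python dict without wrapping, which is presumably why the explicit chain
exists. Note also that the draft maps the weighted modes ('wcount', 'wsum',
'wdiff') back onto the unweighted opt_* functions, which looks like a
copy-paste slip. At the Python level the same dispatch collapses to a lookup
table; a hypothetical sketch:

    import numpy as np

    def opt_count(pdata, weight, index, data_out, data_in=None):
        data_out[index] += 1.0

    def opt_sum(pdata, weight, index, data_out, data_in=None):
        data_out[index] += pdata

    def opt_wsum(pdata, weight, index, data_out, data_in=None):
        data_out[index] += pdata * weight

    # Lookup table instead of the if/elif chain.
    MODES = {"count": opt_count, "sum": opt_sum, "wsum": opt_wsum}

    data_out = np.zeros(4)
    MODES["wsum"](pdata=2.0, weight=0.5, index=1, data_out=data_out)
    assert data_out[1] == 1.0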

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -23,6 +23,13 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd"])
+    config.add_extension("fake_octree", 
+                ["yt/geometry/fake_octree.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/selection_routines.pxd"])
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 77b261f38610ca068b32241820f144bf82bed38f -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce yt/geometry/tests/fake_octree.py
--- /dev/null
+++ b/yt/geometry/tests/fake_octree.py
@@ -0,0 +1,38 @@
+from yt.geometry.fake_octree import create_fake_octree
+from yt.geometry.oct_container import RAMSESOctreeContainer, ParticleOctreeContainer
+import numpy as np
+
+nocts= 3
+max_level = 12
+dn = 2
+dd = np.ones(3,dtype='i4')*dn
+dle = np.ones(3,dtype='f8')*0.0
+dre = np.ones(3,dtype='f8')
+fsub = 0.25
+domain = 1
+
+oct_handler = RAMSESOctreeContainer(dd,dle,dre)
+leaves = create_fake_octree(oct_handler, nocts, max_level, dd, dle, dre, fsub)
+mask = np.ones((nocts,8),dtype='bool')
+cell_count = nocts*8
+oct_counts = oct_handler.count_levels(max_level, 1, mask)
+level_counts = np.concatenate(([0,],np.cumsum(oct_counts)))
+fc = oct_handler.fcoords(domain,mask,cell_count, level_counts.copy())
+leavesb = oct_handler.count_leaves(mask)
+assert leaves == leavesb
+
+#Now take the fcoords, call them particles and recreate the same octree
+print "particle-based recreate"
+oct_handler2 = ParticleOctreeContainer(dd,dle,dre)
+oct_handler2.allocate_domains([nocts])
+oct_handler2.n_ref = 1 #specifically make a maximum of 1 particle per oct
+oct_handler2.add(fc, 1)
+print "added particles"
+cell_count2 = nocts*8
+oct_counts2 = oct_handler.count_levels(max_level, 1, mask)
+level_counts2 = np.concatenate(([0,],np.cumsum(oct_counts)))
+fc2 = oct_handler.fcoords(domain,mask,cell_count, level_counts.copy())
+leaves2 = oct_handler2.count_leaves(mask)
+assert leaves == leaves2
+
+print "success"


https://bitbucket.org/yt_analysis/yt-3.0/commits/36e0d142b508/
Changeset:   36e0d142b508
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-18 21:38:12
Summary:     Spatial chunking within data objects for Octree codes now works.
Affected #:  3 files

diff -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce -r 36e0d142b508b5432051fc45937ffdd0125b32af yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -60,6 +60,8 @@
         level_counts[1:] = level_counts[:-1]
         level_counts[0] = 0
         self.level_counts = np.add.accumulate(level_counts)
+        self._last_mask = None
+        self._last_selector_id = None
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
 
@@ -98,8 +100,35 @@
         if not finfo.particle_type:
             nz = self._num_zones + 2*self._num_ghost_zones
             n_oct = tr.shape[0] / (nz**3.0)
-            dest_shape = (nz, nz, nz, n_oct)
-            return tr.reshape(dest_shape)
+            tr.shape = (n_oct, nz, nz, nz)
+            tr = np.rollaxis(tr, 0, 4)
+            return tr
         return tr
 
+    def deposit(self, positions, fields, method):
+        pass
 
+    def select(self, selector):
+        if id(selector) == self._last_selector_id:
+            return self._last_mask
+        self._last_mask = self.oct_handler.domain_mask(
+                self.mask, self.domain.domain_id)
+        if self._last_mask.sum() == 0: return None
+        self._last_selector_id = id(selector)
+        return self._last_mask
+
+    def count(self, selector):
+        if id(selector) == self._last_selector_id:
+            if self._last_mask is None: return 0
+            return self._last_mask.sum()
+        self.select(selector)
+        return self.count(selector)
+
+    def count_particles(self, selector, x, y, z):
+        # We don't cache the selector results
+        count = selector.count_points(x,y,z)
+        return count
+
+    def select_particles(self, selector, x, y, z):
+        mask = selector.select_points(x,y,z)
+        return mask

diff -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce -r 36e0d142b508b5432051fc45937ffdd0125b32af yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -721,7 +721,43 @@
             o = &cur.my_octs[oi]
             for i in range(8):
                 m2[o.local_ind, i] = mask[o.local_ind, i]
-        return m2
+        return m2 # NOTE: This is uint8_t
+
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.local_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
 
     def check(self, int curdom, int print_all = 0):
         cdef int dind, pi

diff -r 1b3764566f4c0177f819f842fc7e84f45d2b2cce -r 36e0d142b508b5432051fc45937ffdd0125b32af yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1147,6 +1147,5 @@
                          int eterm[3]) nogil:
         return 1
 
-
 octree_subset_selector = OctreeSubsetSelector
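
The reshape in octree_subset.py turns the flat per-cell field returned by the
IO layer into an (nz, nz, nz, n_oct) block: view it as one (nz, nz, nz) cube
per oct, then roll the oct axis to the end. A quick NumPy check of that
transformation:

    import numpy as np

    nz, n_oct = 2, 5                    # e.g. 2 zones per oct edge, 5 octs
    tr = np.arange(n_oct * nz**3, dtype="float64")

    tr.shape = (n_oct, nz, nz, nz)      # flat -> one cube per oct
    tr = np.rollaxis(tr, 0, 4)          # oct axis becomes the last axis

    assert tr.shape == (nz, nz, nz, n_oct)
    # Cell (i, j, k) of oct o is now tr[i, j, k, o]:
    assert tr[1, 0, 0, 2] == 2 * nz**3 + 1 * nz**2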
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/54328cbc3d8d/
Changeset:   54328cbc3d8d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-22 19:45:07
Summary:     Adding a domain_ind function to enable particle deposition.
Affected #:  2 files

diff -r 145a6c342daafe2991a40304daa02c09df0d2e5d -r 54328cbc3d8df9b3c90882f64721d72dcdbe473b yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -108,7 +108,17 @@
             return tr
         return tr
 
+    _domain_ind = None
+
+    @property
+    def domain_ind(self):
+        if self._domain_ind is None:
+            di = self.oct_handler.domain_ind(self.mask, self.domain.domain_id)
+            self._domain_ind = di
+        return self._domain_ind
+
     def deposit(self, positions, fields, method):
+        # Here we perform our particle deposition.
         pass
 
     def select(self, selector):

diff -r 145a6c342daafe2991a40304daa02c09df0d2e5d -r 54328cbc3d8df9b3c90882f64721d72dcdbe473b yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1490,6 +1490,7 @@
         cdef Oct *o
         n = mask.shape[0]
         nm = 0
+        # This could perhaps be faster if we 
         for oi in range(n):
             o = self.oct_list[oi]
             if o.domain != domain_id: continue
@@ -1512,3 +1513,30 @@
                         use = m2[i, j, k, nm] = 1
             nm += use
         return m2.astype("bool")
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # Here we once again do something similar to the other functions.  We
+        # need a set of indices into the final reduced, masked values.  The
+        # indices will be domain.n long, and will be of type int64.  This way,
+        # we can get the Oct through a .get() call, then use Oct.ind as an
+        # index into this newly created array, then finally use the returned
+        # index into the domain subset array for deposition.
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef Oct *o
+        offset = self.dom_offsets[domain_id]
+        noct = self.dom_offsets[domain_id + 1] - offset
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
+        nm = 0
+        for oi in range(noct):
+            ind[oi] = -1
+            o = self.oct_list[oi + offset]
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            if use == 1:
+                ind[oi] = nm
+            nm += use
+        return ind
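
domain_ind builds a compaction map for one domain's octs: each oct with at
least one selected cell gets its index among the surviving octs, and fully
masked-out octs get -1, so Oct.ind can later be translated into an offset in
the reduced deposition array. A NumPy sketch over a hypothetical (noct, 8)
mask:

    import numpy as np

    def domain_ind_py(mask):
        # mask: (noct, 8) bool for one domain's octs, in domain order.
        ind = np.full(mask.shape[0], -1, dtype="int64")
        nm = 0
        for oi in range(mask.shape[0]):
            if mask[oi].any():
                ind[oi] = nm
                nm += 1
        return ind

    mask = np.array([[True] * 8,
                     [False] * 8,
                     [True] + [False] * 7])
    assert domain_ind_py(mask).tolist() == [0, -1, 1]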


https://bitbucket.org/yt_analysis/yt-3.0/commits/5177df2099d2/
Changeset:   5177df2099d2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-22 19:49:47
Summary:     Merging octree spatial work.
Affected #:  5 files

diff -r 36e0d142b508b5432051fc45937ffdd0125b32af -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -99,13 +99,26 @@
         finfo = self.pf._get_field_info(*fields[0])
         if not finfo.particle_type:
             nz = self._num_zones + 2*self._num_ghost_zones
-            n_oct = tr.shape[0] / (nz**3.0)
-            tr.shape = (n_oct, nz, nz, nz)
-            tr = np.rollaxis(tr, 0, 4)
+            # We may need to reshape the field, if it is being queried from
+            # field_data.  If it's already cached, it just passes through.
+            if len(tr.shape) < 4: 
+                n_oct = tr.shape[0] / (nz**3.0)
+                tr.shape = (n_oct, nz, nz, nz)
+                tr = np.rollaxis(tr, 0, 4)
             return tr
         return tr
 
+    _domain_ind = None
+
+    @property
+    def domain_ind(self):
+        if self._domain_ind is None:
+            di = self.oct_handler.domain_ind(self.mask, self.domain.domain_id)
+            self._domain_ind = di
+        return self._domain_ind
+
     def deposit(self, positions, fields, method):
+        # Here we perform our particle deposition.
         pass
 
     def select(self, selector):

diff -r 36e0d142b508b5432051fc45937ffdd0125b32af -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -351,10 +351,27 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 root_dimensions = 64):
+                 root_dimensions = 64, endian = ">",
+                 field_dtypes = None,
+                 domain_left_edge = None,
+                 domain_right_edge = None):
+        self.endian = endian
         self._root_dimensions = root_dimensions
         # Set up the template for domain files
         self.storage_filename = None
+        if domain_left_edge is None:
+            domain_left_edge = np.zeros(3, "float64") - 0.5
+        if domain_right_edge is None:
+            domain_right_edge = np.zeros(3, "float64") + 0.5
+
+        self.domain_left_edge = np.array(domain_left_edge, dtype="float64")
+        self.domain_right_edge = np.array(domain_right_edge, dtype="float64")
+
+        # My understanding is that dtypes are set on a field by field basis,
+        # not on a (particle type, field) basis
+        if field_dtypes is None: field_dtypes = {}
+        self._field_dtypes = field_dtypes
+
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):
@@ -373,7 +390,7 @@
         # in the GADGET-2 user guide.
 
         f = open(self.parameter_filename, "rb")
-        hh = ">" + "".join(["%s" % (b) for a,b in self._header_spec])
+        hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
         hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
                      struct.unpack(hh, f.read(struct.calcsize(hh))))])
         self._header_offset = f.tell()
@@ -388,8 +405,9 @@
         # This may not be correct.
         self.current_time = hvals["time"]
 
-        self.domain_left_edge = np.zeros(3, "float64") - 0.5
-        self.domain_right_edge = np.ones(3, "float64") + 0.5
+        # NOTE: These are now set in the main initializer.
+        #self.domain_left_edge = np.zeros(3, "float64") - 0.5
+        #self.domain_right_edge = np.ones(3, "float64") + 0.5
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
         self.periodicity = (True, True, True)
 

diff -r 36e0d142b508b5432051fc45937ffdd0125b32af -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -372,6 +372,7 @@
         return rv
 
     def _initialize_octree(self, domain, octree):
+        pf = domain.pf
         with open(domain.domain_filename, "rb") as f:
             f.seek(domain.pf._header_offset)
             for ptype in self._ptypes:
@@ -391,6 +392,11 @@
                             pos[:,1].min(), pos[:,1].max())
                 mylog.debug("Spanning: %0.3e .. %0.3e in z",
                             pos[:,2].min(), pos[:,2].max())
+                if np.any(pos.min(axis=0) < pf.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > pf.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                           pf.domain_left_edge,
+                                           pf.domain_right_edge)
                 del pp
                 octree.add(pos, domain.domain_id)
 
@@ -412,10 +418,12 @@
         for ptype, field in self._fields:
             pfields = []
             if tp[ptype] == 0: continue
+            dtbase = domain.pf._field_dtypes.get(field, 'f')
+            ff = "%s%s" % (domain.pf.endian, dtbase)
             if field in _vector_fields:
-                dt = (field, [('x', '>f'), ('y', '>f'), ('z', '>f')])
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
             else:
-                dt = (field, '>f')
+                dt = (field, ff)
             pds.setdefault(ptype, []).append(dt)
             field_list.append((ptype, field))
         for ptype in pds:

diff -r 36e0d142b508b5432051fc45937ffdd0125b32af -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -733,7 +733,7 @@
         # Note also that typically when something calls domain_and, they will 
         # use a logical_any along the oct axis.  Here we don't do that.
         # Note also that we change the shape of the returned array.
-        cdef np.int64_t i, j, k, oi, n, nm
+        cdef np.int64_t i, j, k, oi, n, nm, use
         cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
         cdef Oct *o
         n = mask.shape[0]
@@ -1371,7 +1371,7 @@
                 #IND Corresponding integer index on the root octs
                 #CP Center  point of that oct
                 pp[i] = pos[p, i]
-                dds[i] = (self.DRE[i] + self.DLE[i])/self.nn[i]
+                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
                 ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
@@ -1547,3 +1547,67 @@
                 m2[o.local_ind, i] = mask[o.local_ind, i]
         return m2
 
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        # This could perhaps be faster if we 
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.local_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # Here we once again do something similar to the other functions.  We
+        # need a set of indices into the final reduced, masked values.  The
+        # indices will be domain.n long, and will be of type int64.  This way,
+        # we can get the Oct through a .get() call, then use Oct.ind as an
+        # index into this newly created array, then finally use the returned
+        # index into the domain subset array for deposition.
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef Oct *o
+        offset = self.dom_offsets[domain_id]
+        noct = self.dom_offsets[domain_id + 1] - offset
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
+        nm = 0
+        for oi in range(noct):
+            ind[oi] = -1
+            o = self.oct_list[oi + offset]
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            if use == 1:
+                ind[oi] = nm
+            nm += use
+        return ind

diff -r 36e0d142b508b5432051fc45937ffdd0125b32af -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -249,3 +249,14 @@
 
     def __str__(self):
         return "Data selector '%s' not implemented." % (self.class_name)
+
+class YTDomainOverflow(YTException):
+    def __init__(self, mi, ma, dle, dre):
+        self.mi = mi
+        self.ma = ma
+        self.dle = dle
+        self.dre = dre
+
+    def __str__(self):
+        return "Particle bounds %s and %s exceed domain bounds %s and %s" % (
+            self.mi, self.ma, self.dle, self.dre)
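
The new guard in the SPH octree initialization simply compares per-axis
particle extrema against the domain edges before insertion and raises
YTDomainOverflow if anything falls outside. A NumPy sketch of that check, with
a stand-in exception class:

    import numpy as np

    class DomainOverflow(Exception):
        pass

    def check_bounds(pos, dle, dre):
        # pos: (N, 3) particle positions; dle/dre: domain left/right edges.
        pmin, pmax = pos.min(axis=0), pos.max(axis=0)
        if np.any(pmin < dle) or np.any(pmax > dre):
            raise DomainOverflow("Particle bounds %s and %s exceed domain "
                                 "bounds %s and %s" % (pmin, pmax, dle, dre))

    dle = np.zeros(3, "float64") - 0.5
    dre = np.zeros(3, "float64") + 0.5
    check_bounds(np.random.uniform(-0.5, 0.5, size=(128, 3)), dle, dre)  # ok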


https://bitbucket.org/yt_analysis/yt-3.0/commits/71d137cd09df/
Changeset:   71d137cd09df
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-25 12:44:40
Summary:     Particle deposit first draft.

This renames Chris's oct_deposit to particle_deposit and adds the first pass at
a class for creating deposition routines.
Affected #:  4 files

diff -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 -r 71d137cd09df25305b367ea7d33afb9f990fa5ce yt/geometry/oct_deposit.pyx
--- a/yt/geometry/oct_deposit.pyx
+++ /dev/null
@@ -1,158 +0,0 @@
-"""
-Particle Deposition onto Octs
-
-Author: Christopher Moody <chris.e.moody at gmail.com>
-Affiliation: UC Santa Cruz
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
-License:
-  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from libc.stdlib cimport malloc, free
-cimport numpy as np
-import numpy as np
-cimport cython
-
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
-
-# Mode functions
-ctypedef np.float64_t (*type_opt)(np.float64_t, np.float64_t)
-cdef np.float64_t opt_count(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += 1.0
-
-cdef np.float64_t opt_sum(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += pdata 
-
-cdef np.float64_t opt_diff(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += (data_in[index] - pdata) 
-
-cdef np.float64_t opt_wcount(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += weight
-
-cdef np.float64_t opt_wsum(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += pdata * weight
-
-cdef np.float64_t opt_wdiff(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += (data_in[index] - pdata) * weight
-
-# Selection functions
-ctypedef NOTSURE (*type_sel)(OctreeContainer, 
-                                np.ndarray[np.float64_t, ndim=1],
-                                np.float64_t)
-cdef NOTSURE select_nearest(OctreeContainer oct_handler,
-                            np.ndarray[np.float64_t, ndim=1] pos,
-                            np.float64_t radius):
-    #return only the nearest oct
-    pass
-
-
-cdef NOTSURE select_radius(OctreeContainer oct_handler,
-                            np.ndarray[np.float64_t, ndim=1] pos,
-                            np.float64_t radius):
-    #return a list of octs within the radius
-    pass
-    
-
-# Kernel functions
-ctypedef np.float64_t (*type_ker)(np.float64_t)
-cdef np.float64_t kernel_sph(np.float64_t x) nogil:
-    cdef np.float64_t kernel
-    if x <= 0.5:
-        kernel = 1.-6.*x*x*(1.-x)
-    elif x>0.5 and x<=1.0:
-        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
-    else:
-        kernel = 0.
-    return kernel
-
-cdef np.float64_t kernel_null(np.float64_t x) nogil: return 0.0
-
-cdef deposit(OctreeContainer oct_handler, 
-        np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
-        np.ndarray[np.float64_t, ndim=2] pd, # particle fields
-        np.ndarray[np.float64_t, ndim=1] pr, # particle radius
-        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
-        np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
-        mode='count', selection='nearest', kernel='null'):
-    cdef type_opt fopt
-    cdef type_sel fsel
-    cdef type_ker fker
-    cdef long pi #particle index
-    cdef long nocts #number of octs in selection
-    cdef Oct oct 
-    cdef np.float64_t w
-    # Can we do this with dicts?
-    # Setup the function pointers
-    if mode == 'count':
-        fopt = opt_count
-    elif mode == 'sum':
-        fopt = opt_sum
-    elif mode == 'diff':
-        fopt = opt_diff
-    if mode == 'wcount':
-        fopt = opt_count
-    elif mode == 'wsum':
-        fopt = opt_sum
-    elif mode == 'wdiff':
-        fopt = opt_diff
-    if selection == 'nearest':
-        fsel = select_nearest
-    elif selection == 'radius':
-        fsel = select_radius
-    if kernel == 'null':
-        fker = kernel_null
-    if kernel == 'sph':
-        fker = kernel_sph
-    for pi in range(particles):
-        octs = fsel(oct_handler, ppos[pi], pr[pi])
-        for oct in octs:
-            for cell in oct.cells:
-                w = fker(pr[pi],cell) 
-                weights.append(w)
-        norm = weights.sum()
-        for w, oct in zip(weights, octs):
-            for cell in oct.cells:
-                fopt(pd[pi], w/norm, oct.index, data_in, data_out)
-
-

diff -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 -r 71d137cd09df25305b367ea7d33afb9f990fa5ce yt/geometry/particle_deposit.pxd
--- /dev/null
+++ b/yt/geometry/particle_deposit.pxd
@@ -0,0 +1,47 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport cython
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef inline int gind(int i, int j, int k, int dims[3]):
+    return ((k*dims[1])+j)*dims[0]+i
+
+cdef class ParticleDepositOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef np.int64_t nvals
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.int64_t offset,
+                      np.float64_t ppos[3], np.float64_t *fields)
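
For reference, the inline gind() helper added here flattens an (i, j, k)
cell index into the flat buffers the deposit operations write into.  A
small plain-Python sketch of the same arithmetic (the NumPy cross-check is
purely illustrative and not part of the commit):

import numpy as np

def gind(i, j, k, dims):
    # Same arithmetic as the cdef inline helper: i varies fastest.
    return ((k * dims[1]) + j) * dims[0] + i

dims = (2, 2, 2)
for k in range(dims[2]):
    for j in range(dims[1]):
        for i in range(dims[0]):
            # Agrees with Fortran-ordered raveling of a dims-shaped block.
            assert gind(i, j, k, dims) == np.ravel_multi_index(
                (i, j, k), dims, order="F")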

diff -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 -r 71d137cd09df25305b367ea7d33afb9f990fa5ce yt/geometry/particle_deposit.pyx
--- /dev/null
+++ b/yt/geometry/particle_deposit.pyx
@@ -0,0 +1,232 @@
+"""
+Particle Deposition onto Cells
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport cython
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+
+cdef class ParticleDepositOperation:
+    def __init__(self, nvals):
+        self.nvals = nvals
+
+    def initialize(self, *args):
+        raise NotImplementedError
+
+    def finalize(self, *args):
+        raise NotImplementedError
+
+    def process_octree(self, OctreeContainer octree,
+                     np.ndarray[np.int64_t, ndim=1] dom_ind,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None):
+        raise NotImplementedError
+
+    def process_grid(self, gobj,
+                     np.ndarray[np.float64_t, ndim=2] positions,
+                     fields = None):
+        cdef int nf, i, j
+        if fields is None:
+            fields = []
+        nf = len(fields)
+        cdef np.float64_t **field_pointers, *field_vals, pos[3]
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        cdef np.float64_t dds[3], left_edge[3]
+        cdef int dims[3]
+        for i in range(3):
+            dds[i] = gobj.dds[i]
+            left_edge[i] = gobj.LeftEdge[i]
+            dims[i] = gobj.ActiveDimensions[i]
+        for i in range(positions.shape[0]):
+            # Now we process
+            for j in range(nf):
+                field_vals[j] = field_pointers[j][i]
+            for j in range(3):
+                pos[j] = positions[i, j]
+            self.process(dims, left_edge, dds, 0, pos, field_vals)
+
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.int64_t offset,
+                      np.float64_t ppos[3], np.float64_t *fields):
+        raise NotImplementedError
+
+cdef class CountParticles(ParticleDepositOperation):
+    cdef np.float64_t *count # float, for ease
+    cdef object ocount
+    def initialize(self):
+        self.ocount = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.ocount
+        self.count = <np.float64_t*> arr.data
+
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, # offset into IO field
+                      np.float64_t ppos[3], # this particle's position
+                      np.float64_t *fields # any other fields we need
+                      ):
+        # here we do our thing; this is the kernel
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+        self.count[gind(ii[0], ii[1], ii[2], dim)] += 1
+        
+    def finalize(self):
+        return self.ocount
+
+"""
+# Mode functions
+ctypedef np.float64_t (*type_opt)(np.float64_t, np.float64_t)
+cdef np.float64_t opt_count(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += 1.0
+
+cdef np.float64_t opt_sum(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += pdata 
+
+cdef np.float64_t opt_diff(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += (data_in[index] - pdata) 
+
+cdef np.float64_t opt_wcount(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += weight
+
+cdef np.float64_t opt_wsum(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += pdata * weight
+
+cdef np.float64_t opt_wdiff(np.float64_t pdata,
+                            np.float64_t weight,
+                            np.int64_t index,
+                            np.ndarray[np.float64_t, ndim=2] data_out, 
+                            np.ndarray[np.float64_t, ndim=2] data_in):
+    data_out[index] += (data_in[index] - pdata) * weight
+
+# Selection functions
+ctypedef NOTSURE (*type_sel)(OctreeContainer, 
+                                np.ndarray[np.float64_t, ndim=1],
+                                np.float64_t)
+cdef NOTSURE select_nearest(OctreeContainer oct_handler,
+                            np.ndarray[np.float64_t, ndim=1] pos,
+                            np.float64_t radius):
+    #return only the nearest oct
+    pass
+
+
+cdef NOTSURE select_radius(OctreeContainer oct_handler,
+                            np.ndarray[np.float64_t, ndim=1] pos,
+                            np.float64_t radius):
+    #return a list of octs within the radius
+    pass
+    
+
+# Kernel functions
+ctypedef np.float64_t (*type_ker)(np.float64_t)
+cdef np.float64_t kernel_sph(np.float64_t x) nogil:
+    cdef np.float64_t kernel
+    if x <= 0.5:
+        kernel = 1.-6.*x*x*(1.-x)
+    elif x>0.5 and x<=1.0:
+        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
+    else:
+        kernel = 0.
+    return kernel
+
+cdef np.float64_t kernel_null(np.float64_t x) nogil: return 0.0
+
+cdef deposit(OctreeContainer oct_handler, 
+        np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
+        np.ndarray[np.float64_t, ndim=2] pd, # particle fields
+        np.ndarray[np.float64_t, ndim=1] pr, # particle radius
+        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
+        np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
+        mode='count', selection='nearest', kernel='null'):
+    cdef type_opt fopt
+    cdef type_sel fsel
+    cdef type_ker fker
+    cdef long pi #particle index
+    cdef long nocts #number of octs in selection
+    cdef Oct oct 
+    cdef np.float64_t w
+    # Can we do this with dicts?
+    # Setup the function pointers
+    if mode == 'count':
+        fopt = opt_count
+    elif mode == 'sum':
+        fopt = opt_sum
+    elif mode == 'diff':
+        fopt = opt_diff
+    if mode == 'wcount':
+        fopt = opt_count
+    elif mode == 'wsum':
+        fopt = opt_sum
+    elif mode == 'wdiff':
+        fopt = opt_diff
+    if selection == 'nearest':
+        fsel = select_nearest
+    elif selection == 'radius':
+        fsel = select_radius
+    if kernel == 'null':
+        fker = kernel_null
+    if kernel == 'sph':
+        fker = kernel_sph
+    for pi in range(particles):
+        octs = fsel(oct_handler, ppos[pi], pr[pi])
+        for oct in octs:
+            for cell in oct.cells:
+                w = fker(pr[pi],cell) 
+                weights.append(w)
+        norm = weights.sum()
+        for w, oct in zip(weights, octs):
+            for cell in oct.cells:
+                fopt(pd[pi], w/norm, oct.index, data_in, data_out)
+"""
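
To make the new kernel concrete, here is a rough NumPy rendition of what
process_grid plus CountParticles.process amount to for a single grid: each
particle position becomes an integer cell index relative to the grid's left
edge and cell width, and a counter is bumped in that cell.  This is an
illustrative sketch, not code from the commit:

import numpy as np

def count_particles(positions, left_edge, dds, dims):
    counts = np.zeros(dims, dtype="float64")
    for ppos in positions:
        # Integer cell index of this particle, as in CountParticles.process.
        ii = ((ppos - left_edge) / dds).astype("int64")
        counts[ii[0], ii[1], ii[2]] += 1.0
    return counts

positions = np.array([[0.1, 0.1, 0.1], [0.9, 0.9, 0.9], [0.95, 0.9, 0.9]])
counts = count_particles(positions, left_edge=np.zeros(3),
                         dds=np.full(3, 0.5), dims=(2, 2, 2))
# counts[0, 0, 0] == 1.0 and counts[1, 1, 1] == 2.0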

diff -r 5177df2099d2a49f8ac3cb2cce448f8cae16b2b1 -r 71d137cd09df25305b367ea7d33afb9f990fa5ce yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -23,6 +23,13 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/selection_routines.pxd"])
+    config.add_extension("particle_deposit", 
+                ["yt/geometry/particle_deposit.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/oct_container.pxd",
+                         "yt/geometry/particle_deposit.pxd"])
     config.add_extension("fake_octree", 
                 ["yt/geometry/fake_octree.pyx"],
                 include_dirs=["yt/utilities/lib/"],


https://bitbucket.org/yt_analysis/yt-3.0/commits/05a99508b3cd/
Changeset:   05a99508b3cd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-25 16:25:26
Summary:     First draft of .deposit() for OctreeSubset objects.

This includes a few fixes to how particles are assigned to ParticleOctree
elements.
Affected #:  5 files

diff -r 71d137cd09df25305b367ea7d33afb9f990fa5ce -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -35,6 +35,7 @@
     NeedsDataField, \
     NeedsProperty, \
     NeedsParameter
+import yt.geometry.particle_deposit as particle_deposit
 
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
@@ -98,16 +99,20 @@
             return tr
         finfo = self.pf._get_field_info(*fields[0])
         if not finfo.particle_type:
-            nz = self._num_zones + 2*self._num_ghost_zones
             # We may need to reshape the field, if it is being queried from
             # field_data.  If it's already cached, it just passes through.
-            if len(tr.shape) < 4: 
-                n_oct = tr.shape[0] / (nz**3.0)
-                tr.shape = (n_oct, nz, nz, nz)
-                tr = np.rollaxis(tr, 0, 4)
+            if len(tr.shape) < 4:
+                tr = self._reshape_vals(tr)
             return tr
         return tr
 
+    def _reshape_vals(self, arr):
+        nz = self._num_zones + 2*self._num_ghost_zones
+        n_oct = arr.shape[0] / (nz**3.0)
+        arr.shape = (n_oct, nz, nz, nz)
+        arr = np.rollaxis(arr, 0, 4)
+        return arr
+
     _domain_ind = None
 
     @property
@@ -117,9 +122,17 @@
             self._domain_ind = di
         return self._domain_ind
 
-    def deposit(self, positions, fields, method):
+    def deposit(self, positions, fields = None, method = None):
         # Here we perform our particle deposition.
-        pass
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nvals = (self.domain_ind >= 0).sum() * 8
+        op = cls(nvals) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields)
+        vals = op.finalize()
+        return self._reshape_vals(vals)
 
     def select(self, selector):
         if id(selector) == self._last_selector_id:
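
The _reshape_vals helper introduced here is easy to misread, so a minimal
standalone illustration (assuming nz = 2 and no ghost zones): the flat
per-zone buffer coming back from a deposit operation is first viewed as
(n_oct, nz, nz, nz), and the oct axis is then rolled to the end.

import numpy as np

nz, n_oct = 2, 3
arr = np.arange(n_oct * nz**3, dtype="float64")   # flat, one value per zone
arr.shape = (n_oct, nz, nz, nz)                   # group zones by oct
arr = np.rollaxis(arr, 0, 4)                      # oct axis moves to the end
assert arr.shape == (2, 2, 2, 3)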

diff -r 71d137cd09df25305b367ea7d33afb9f990fa5ce -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -39,6 +39,10 @@
     Oct *children[2][2][2]
     Oct *parent
 
+cdef struct OctInfo:
+    np.float64_t left_edge[3]
+    np.float64_t dds[3]
+
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
     np.int64_t n
@@ -54,7 +58,7 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
-    cdef Oct* get(self, np.float64_t ppos[3], int *ii = ?)
+    cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
 

diff -r 71d137cd09df25305b367ea7d33afb9f990fa5ce -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -142,7 +142,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, np.float64_t ppos[3], int *ii = NULL):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
         #Given a floating point position, retrieve the most
         #refined oct at that time
         cdef np.int64_t ind[3]
@@ -150,28 +150,24 @@
         cdef Oct *cur
         cdef int i
         for i in range(3):
-            pp[i] = ppos[i] - self.DLE[i]
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
-            cp[i] = (ind[i] + 0.5) * dds[i]
+            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
         cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
         while cur.children[0][0][0] != NULL:
             for i in range(3):
                 dds[i] = dds[i] / 2.0
-                if cp[i] > pp[i]:
+                if cp[i] > ppos[i]:
                     ind[i] = 0
                     cp[i] -= dds[i] / 2.0
                 else:
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
             cur = cur.children[ind[0]][ind[1]][ind[2]]
-        if ii != NULL: return cur
+        if oinfo == NULL: return cur
         for i in range(3):
-            if cp[i] > pp[i]:
-                ind[i] = 0
-            else:
-                ind[i] = 1
-        ii[0] = ((ind[2]*2)+ind[1])*2+ind[0]
+            oinfo.dds[i] = dds[i] # Cell width
+            oinfo.left_edge[i] = cp[i] - dds[i]
         return cur
 
     @cython.boundscheck(False)
@@ -982,28 +978,6 @@
 
 
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
-    #this class is specifically for the NMSU ART
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def deposit_particle_cumsum(self,
-                                np.ndarray[np.float64_t, ndim=2] ppos, 
-                                np.ndarray[np.float64_t, ndim=1] pdata,
-                                np.ndarray[np.float64_t, ndim=1] mask,
-                                np.ndarray[np.float64_t, ndim=1] dest,
-                                fields, int domain):
-        cdef Oct *o
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef np.float64_t pos[3]
-        cdef int ii
-        cdef int no = ppos.shape[0]
-        for n in range(no):
-            for j in range(3):
-                pos[j] = ppos[n,j]
-            o = self.get(pos, &ii) 
-            if mask[o.local_ind,ii]==0: continue
-            dest[o.ind+ii] += pdata[n]
-        return dest
 
     @cython.boundscheck(True)
     @cython.wraparound(False)
@@ -1262,6 +1236,7 @@
         cdef int max_level = 0
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
         cdef np.int64_t i = 0
+        cdef np.int64_t dom_ind
         cdef ParticleArrays *c = self.first_sd
         while c != NULL:
             self.oct_list[i] = c.oct
@@ -1280,11 +1255,15 @@
         self.dom_offsets = <np.int64_t *>malloc(sizeof(np.int64_t) *
                                                 (self.max_domain + 3))
         self.dom_offsets[0] = 0
+        dom_ind = 0
         for i in range(self.nocts):
             self.oct_list[i].local_ind = i
+            self.oct_list[i].ind = dom_ind
+            dom_ind += 1
             if self.oct_list[i].domain > cur_dom:
                 cur_dom = self.oct_list[i].domain
                 self.dom_offsets[cur_dom + 1] = i
+                dom_ind = 0
         self.dom_offsets[cur_dom + 2] = self.nocts
 
     cdef Oct* allocate_oct(self):
@@ -1597,8 +1576,9 @@
         # index into the domain subset array for deposition.
         cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
         cdef Oct *o
-        offset = self.dom_offsets[domain_id]
-        noct = self.dom_offsets[domain_id + 1] - offset
+        # For particle octrees, domain 0 is special and means non-leaf nodes.
+        offset = self.dom_offsets[domain_id + 1]
+        noct = self.dom_offsets[domain_id + 2] - offset
         cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
         nm = 0
         for oi in range(noct):

diff -r 71d137cd09df25305b367ea7d33afb9f990fa5ce -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -31,7 +31,8 @@
 cimport cython
 
 from fp_utils cimport *
-from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+from oct_container cimport Oct, OctAllocationContainer, \
+    OctreeContainer, OctInfo
 
 cdef class ParticleDepositOperation:
     def __init__(self, nvals):
@@ -47,8 +48,34 @@
                      np.ndarray[np.int64_t, ndim=1] dom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None):
-        raise NotImplementedError
-
+        cdef int nf, i, j
+        if fields is None:
+            fields = []
+        nf = len(fields)
+        cdef np.float64_t **field_pointers, *field_vals, pos[3]
+        cdef np.ndarray[np.float64_t, ndim=1] tarr
+        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
+        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
+        for i in range(nf):
+            tarr = fields[i]
+            field_pointers[i] = <np.float64_t *> tarr.data
+        cdef int dims[3]
+        dims[0] = dims[1] = dims[2] = 2
+        cdef OctInfo oi
+        cdef np.int64_t offset
+        cdef Oct *oct
+        for i in range(positions.shape[0]):
+            # We should check if particle remains inside the Oct here
+            for j in range(nf):
+                field_vals[j] = field_pointers[j][i]
+            for j in range(3):
+                pos[j] = positions[i, j]
+            oct = octree.get(pos, &oi)
+            #print oct.local_ind, oct.pos[0], oct.pos[1], oct.pos[2]
+            offset = dom_ind[oct.ind]
+            self.process(dims, oi.left_edge, oi.dds,
+                         offset, pos, field_vals)
+        
     def process_grid(self, gobj,
                      np.ndarray[np.float64_t, ndim=2] positions,
                      fields = None):
@@ -84,12 +111,13 @@
 
 cdef class CountParticles(ParticleDepositOperation):
     cdef np.float64_t *count # float, for ease
-    cdef object ocount
+    cdef public object ocount
     def initialize(self):
         self.ocount = np.zeros(self.nvals, dtype="float64")
         cdef np.ndarray arr = self.ocount
         self.count = <np.float64_t*> arr.data
 
+    @cython.cdivision(True)
     cdef void process(self, int dim[3],
                       np.float64_t left_edge[3], 
                       np.float64_t dds[3],
@@ -101,11 +129,15 @@
         cdef int ii[3], i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        self.count[gind(ii[0], ii[1], ii[2], dim)] += 1
+        #print "Depositing into", offset,
+        #print gind(ii[0], ii[1], ii[2], dim)
+        self.count[gind(ii[0], ii[1], ii[2], dim) + offset] += 1
         
     def finalize(self):
         return self.ocount
 
+deposit_count = CountParticles
+
 """
 # Mode functions
 ctypedef np.float64_t (*type_opt)(np.float64_t, np.float64_t)
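
Stripped of the Cython pointer handling, the per-particle loop added to
process_octree follows this pattern.  This is a hypothetical pure-Python
outline: octree.get() returning an (oct, oct_info) pair and the attribute
names are stand-ins for the pointer-based interface above.

def process_octree(octree, dom_ind, positions, fields, process):
    dims = (2, 2, 2)                    # each oct contributes 2x2x2 zones
    for i, pos in enumerate(positions):
        field_vals = [f[i] for f in fields]
        o, oct_info = octree.get(pos)   # oct_info carries left_edge and dds
        offset = dom_ind[o.ind]         # slot of this oct in the output
        process(dims, oct_info.left_edge, oct_info.dds,
                offset, pos, field_vals)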

diff -r 71d137cd09df25305b367ea7d33afb9f990fa5ce -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -250,6 +250,13 @@
     def __str__(self):
         return "Data selector '%s' not implemented." % (self.class_name)
 
+class YTParticleDepositionNotImplemented(YTException):
+    def __init__(self, class_name):
+        self.class_name = class_name
+
+    def __str__(self):
+        return "Particle deposition method '%s' not implemented." % (self.class_name)
+
 class YTDomainOverflow(YTException):
     def __init__(self, mi, ma, dle, dre):
         self.mi = mi


https://bitbucket.org/yt_analysis/yt-3.0/commits/2219b4075dac/
Changeset:   2219b4075dac
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-25 16:59:18
Summary:     The number of octs should be half the number of zones for particle octrees.
Affected #:  1 file

diff -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 -r 2219b4075dacf4f176372adf425174e23eb49a33 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,7 +96,7 @@
         total_particles = sum(sum(d.total_particles.values())
                               for d in self.domains)
         self.oct_handler = ParticleOctreeContainer(
-            self.parameter_file.domain_dimensions,
+            self.parameter_file.domain_dimensions/2,
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         self.oct_handler.n_ref = 64
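
A quick check on the factor of two (illustrative numbers, not from the
commit): domain_dimensions counts zones along each edge, and each root oct
spans two zones per edge, so the particle octree wants half as many root
octs per edge.

domain_dimensions = (64, 64, 64)                      # zones per edge
root_octs = tuple(n // 2 for n in domain_dimensions)  # -> (32, 32, 32)
zones_per_oct = 2 ** 3                                # 8 zones inside each oct
assert all(2 * o == d for o, d in zip(root_octs, domain_dimensions))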


https://bitbucket.org/yt_analysis/yt-3.0/commits/3ee0f2ec3e20/
Changeset:   3ee0f2ec3e20
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-25 19:25:24
Summary:     Adding domain_ind for fluid octrees, fixing an oct/cell width confusion.
Affected #:  1 file

diff -r 2219b4075dacf4f176372adf425174e23eb49a33 -r 3ee0f2ec3e2054f16867d3125aadedcdacc3ecba yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -166,8 +166,8 @@
             cur = cur.children[ind[0]][ind[1]][ind[2]]
         if oinfo == NULL: return cur
         for i in range(3):
-            oinfo.dds[i] = dds[i] # Cell width
-            oinfo.left_edge[i] = cp[i] - dds[i]
+            oinfo.dds[i] = dds[i]/2.0 # Cell width
+            oinfo.left_edge[i] = cp[i] - dds[i]/2.0
         return cur
 
     @cython.boundscheck(False)
@@ -755,6 +755,27 @@
             nm += use
         return m2.astype("bool")
 
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        # For particle octrees, domain 0 is special and means non-leaf nodes.
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n_assigned, 'int64')
+        nm = 0
+        for oi in range(cur.n_assigned):
+            ind[oi] = -1
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.local_ind, i] == 1: use = 1
+            if use == 1:
+                ind[o.ind] = nm
+            nm += use
+        return ind
+
     def check(self, int curdom, int print_all = 0):
         cdef int dind, pi
         cdef Oct oct
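
The new domain_ind method is essentially building a compaction map: octs
with at least one selected cell get consecutive output slots, everything
else gets -1, so deposition can later write into a densely packed buffer.
A NumPy sketch of that intent (an approximation, not the Cython itself):

import numpy as np

def domain_ind(mask):
    # mask: (n_octs, 8) booleans, one flag per cell of each oct
    ind = np.full(mask.shape[0], -1, dtype="int64")
    used = mask.any(axis=1)
    ind[used] = np.arange(used.sum())
    return ind

mask = np.array([[0] * 8, [1] + [0] * 7, [0] * 8, [1] * 8], dtype=bool)
print(domain_ind(mask))   # -> [-1  0 -1  1]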


https://bitbucket.org/yt_analysis/yt-3.0/commits/dbc1ac2e558e/
Changeset:   dbc1ac2e558e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-04-26 12:00:13
Summary:     A few changes to get octree deposition closer to working.
Affected #:  4 files

diff -r 3ee0f2ec3e2054f16867d3125aadedcdacc3ecba -r dbc1ac2e558e1c170a765648c6120555dcf3f79e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -127,10 +127,11 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (self.domain_ind >= 0).sum() * 8
+        nvals = self.domain_ind.size * 8
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields)
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+                          self.domain.domain_id)
         vals = op.finalize()
         return self._reshape_vals(vals)
 

diff -r 3ee0f2ec3e2054f16867d3125aadedcdacc3ecba -r dbc1ac2e558e1c170a765648c6120555dcf3f79e yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -96,7 +96,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ires.size, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)

diff -r 3ee0f2ec3e2054f16867d3125aadedcdacc3ecba -r dbc1ac2e558e1c170a765648c6120555dcf3f79e yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -153,8 +153,10 @@
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
-        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        while cur.children[0][0][0] != NULL:
+        next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        # We want to stop recursing when there's nowhere else to go
+        while next != NULL:
+            cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
                 if cp[i] > ppos[i]:
@@ -163,7 +165,7 @@
                 else:
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
-            cur = cur.children[ind[0]][ind[1]][ind[2]]
+            next = cur.children[ind[0]][ind[1]][ind[2]]
         if oinfo == NULL: return cur
         for i in range(3):
             oinfo.dds[i] = dds[i]/2.0 # Cell width
@@ -707,7 +709,7 @@
 
     def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
                    int domain_id):
-        cdef np.int64_t i, oi, n, 
+        cdef np.int64_t i, oi, n,  use
         cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
         cdef Oct *o
         cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
@@ -715,6 +717,7 @@
         n = mask.shape[0]
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
+            use = 0
             for i in range(8):
                 m2[o.local_ind, i] = mask[o.local_ind, i]
         return m2 # NOTE: This is uint8_t
@@ -763,10 +766,9 @@
         cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
         cdef Oct *o
         # For particle octrees, domain 0 is special and means non-leaf nodes.
-        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n_assigned, 'int64')
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n, 'int64') - 1
         nm = 0
-        for oi in range(cur.n_assigned):
-            ind[oi] = -1
+        for oi in range(cur.n):
             o = &cur.my_octs[oi]
             use = 0
             for i in range(8):
@@ -804,6 +806,33 @@
         print "DOMAIN % 3i HAS % 9i MISSED OCTS" % (curdom, nmissed)
         print "DOMAIN % 3i HAS % 9i UNASSIGNED OCTS" % (curdom, unassigned)
 
+    def check_refinement(self, int curdom):
+        cdef int pi, i, j, k, some_refined, some_unrefined
+        cdef Oct *oct
+        cdef int bad = 0
+        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
+        for pi in range(cont.n_assigned):
+            oct = &cont.my_octs[pi]
+            some_unrefined = 0
+            some_refined = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if oct.children[i][j][k] == NULL:
+                            some_unrefined = 1
+                        else:
+                            some_refined = 1
+            if some_unrefined == some_refined == 1:
+                #print "BAD", oct.ind, oct.local_ind
+                bad += 1
+                if curdom == 10 or curdom == 72:
+                    for i in range(2):
+                        for j in range(2):
+                            for k in range(2):
+                                print (oct.children[i][j][k] == NULL),
+                    print
+        print "BAD TOTAL", curdom, bad, cont.n_assigned
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -1535,7 +1564,7 @@
 
     def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
                    int domain_id):
-        cdef np.int64_t i, oi, n, 
+        cdef np.int64_t i, oi, n, use
         cdef Oct *o
         cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
                 np.zeros((mask.shape[0], 8), 'uint8')
@@ -1543,6 +1572,7 @@
         for oi in range(n):
             o = self.oct_list[oi]
             if o.domain != domain_id: continue
+            use = 0
             for i in range(8):
                 m2[o.local_ind, i] = mask[o.local_ind, i]
         return m2
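
The reworked loop in get() tracks the prospective child separately and only
stops once that child is missing, so the deepest *existing* oct is returned
even when an oct is only partially refined.  A 1-D pure-Python sketch of the
same control flow (illustrative only; the real code walks 2x2x2 children):

class Node1D:
    def __init__(self, children=(None, None)):
        self.children = list(children)   # child 0 (left), child 1 (right)

def descend(root, pos, center, width):
    cur = nxt = root
    while nxt is not None:
        cur = nxt
        width /= 2.0
        if center > pos:
            ind, center = 0, center - width / 2.0
        else:
            ind, center = 1, center + width / 2.0
        nxt = cur.children[ind]
    return cur, center, width   # deepest existing oct, running center, width

# A root oct on [0, 1) that is refined only on its right half:
root = Node1D(children=(None, Node1D()))
leaf, center, width = descend(root, pos=0.8, center=0.5, width=1.0)
# leaf is the right child.  Testing only children[0], as the old loop did,
# would never have descended here, since child 0 is missing even though
# child 1 exists.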

diff -r 3ee0f2ec3e2054f16867d3125aadedcdacc3ecba -r dbc1ac2e558e1c170a765648c6120555dcf3f79e yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -47,7 +47,7 @@
     def process_octree(self, OctreeContainer octree,
                      np.ndarray[np.int64_t, ndim=1] dom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
-                     fields = None):
+                     fields = None, int domain_id = -1):
         cdef int nf, i, j
         if fields is None:
             fields = []
@@ -72,7 +72,8 @@
                 pos[j] = positions[i, j]
             oct = octree.get(pos, &oi)
             #print oct.local_ind, oct.pos[0], oct.pos[1], oct.pos[2]
-            offset = dom_ind[oct.ind]
+            offset = dom_ind[oct.ind] * 8
+            # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
         


https://bitbucket.org/yt_analysis/yt-3.0/commits/a86882ab8661/
Changeset:   a86882ab8661
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-01 22:29:56
Summary:     Adding a few comments and container fields for oct subsets.
Affected #:  3 files

diff -r dbc1ac2e558e1c170a765648c6120555dcf3f79e -r a86882ab86610ebc6cfceb10ca9b37b0fd8a8f5f yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -66,6 +66,16 @@
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
 
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dx":
+            return self._current_chunk.fwidth[:,0]
+        elif field == "dy":
+            return self._current_chunk.fwidth[:,1]
+        elif field == "dz":
+            return self._current_chunk.fwidth[:,2]
+
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(self.domain.domain_id, self.mask,
                                         self.cell_count,

diff -r dbc1ac2e558e1c170a765648c6120555dcf3f79e -r a86882ab86610ebc6cfceb10ca9b37b0fd8a8f5f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -168,8 +168,16 @@
             next = cur.children[ind[0]][ind[1]][ind[2]]
         if oinfo == NULL: return cur
         for i in range(3):
-            oinfo.dds[i] = dds[i]/2.0 # Cell width
-            oinfo.left_edge[i] = cp[i] - dds[i]/2.0
+            # This will happen *after* we quit out, so we need to back out the
+            # last change to cp
+            if ind[i] == 1:
+                cp[i] -= dds[i]/2.0 # Now centered
+            else:
+                cp[i] += dds[i]/2.0
+            # We don't need to change dds[i] as it has been halved from the
+            # oct width, thus making it already the cell width
+            oinfo.dds[i] = dds[i] # Cell width
+            oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
         return cur
 
     @cython.boundscheck(False)
@@ -513,7 +521,7 @@
         n = mask.shape[0]
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="int64")
-        ci=0
+        ci = 0
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
             for k in range(2):
@@ -521,6 +529,9 @@
                     for i in range(2):
                         ii = ((k*2)+j)*2+i
                         if mask[o.local_ind, ii] == 0: continue
+                        # Note that we bit shift because o.pos is oct position,
+                        # not cell position, and it is with respect to octs,
+                        # not cells.
                         coords[ci, 0] = (o.pos[0] << 1) + i
                         coords[ci, 1] = (o.pos[1] << 1) + j
                         coords[ci, 2] = (o.pos[2] << 1) + k
@@ -636,7 +647,6 @@
                             ii = ((k*2)+j)*2+i
                             if mask[o.local_ind, ii] == 0: continue
                             dest[local_filled + offset] = source[o.local_ind*8+ii]
-                            # print 'oct_container.pyx:sourcemasked',o.level,local_filled, o.local_ind*8+ii, source[o.local_ind*8+ii]
                             local_filled += 1
         return local_filled
 
@@ -765,7 +775,6 @@
         cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
         cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
         cdef Oct *o
-        # For particle octrees, domain 0 is special and means non-leaf nodes.
         cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n, 'int64') - 1
         nm = 0
         for oi in range(cur.n):
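
The "back out the last change to cp" comment above is the key point: the
descent loop shifts the running center by half the (already halved) width
toward the chosen child even on its final pass, when that child turns out
not to exist.  Undoing that last shift recovers the center of the oct that
is actually returned, and the final dds is already the cell width, i.e.
half the oct width.  Continuing the 1-D numbers from the earlier descend()
sketch, where pos = 0.8 ended up in the oct covering [0.5, 1.0):

def backed_out_center(cp, dds, last_ind):
    # cp, dds: running center and (already halved) width when the loop exits;
    # last_ind: 0 if the final shift was -dds/2, 1 if it was +dds/2.
    return cp + dds / 2.0 if last_ind == 0 else cp - dds / 2.0

center = backed_out_center(cp=0.875, dds=0.25, last_ind=1)   # -> 0.75
left_edge = center - 0.25   # center minus the cell width -> 0.5, the oct's
                            # left edge, which is what process() needs to
                            # pick one of its 2x2x2 cells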

diff -r dbc1ac2e558e1c170a765648c6120555dcf3f79e -r a86882ab86610ebc6cfceb10ca9b37b0fd8a8f5f yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -130,8 +130,6 @@
         cdef int ii[3], i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        #print "Depositing into", offset,
-        #print gind(ii[0], ii[1], ii[2], dim)
         self.count[gind(ii[0], ii[1], ii[2], dim) + offset] += 1
         
     def finalize(self):


https://bitbucket.org/yt_analysis/yt-3.0/commits/ff219faca878/
Changeset:   ff219faca878
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-01 22:54:20
Summary:     Sometimes particles belong to ghost zones.

This sidesteps the issue, but it must still be addressed.
Affected #:  1 file

diff -r a86882ab86610ebc6cfceb10ca9b37b0fd8a8f5f -r ff219faca878615de3cf9761fd86abbd34af55be yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -71,7 +71,10 @@
             for j in range(3):
                 pos[j] = positions[i, j]
             oct = octree.get(pos, &oi)
-            #print oct.local_ind, oct.pos[0], oct.pos[1], oct.pos[2]
+            # This next line is unfortunate.  Basically it says, sometimes we
+            # might have particles that belong to octs outside our domain.
+            if oct.domain != domain_id: continue
+            #print domain_id, oct.local_ind, oct.ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
             offset = dom_ind[oct.ind] * 8
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,


https://bitbucket.org/yt_analysis/yt-3.0/commits/0eb93875a7c0/
Changeset:   0eb93875a7c0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-11 00:35:51
Summary:     Renaming Oct members ind and local_ind to file_ind and domain_ind.

This clarifies what each index actually means.  I've also added some comments
and an offset-obtaining function.
Affected #:  4 files

diff -r ff219faca878615de3cf9761fd86abbd34af55be -r 0eb93875a7c0a8bfa7e039bba9a5bd2eae9ee0c9 yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -67,7 +67,7 @@
                     long cur_leaf, long cur_level, 
                     long max_noct, long max_level, float fsubdivide,
                     np.ndarray[np.uint8_t, ndim=2] mask):
-    print "child", parent.ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
+    print "child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
     cdef int ddr[3]
     cdef long i,j,k
     cdef float rf #random float from 0-1

diff -r ff219faca878615de3cf9761fd86abbd34af55be -r 0eb93875a7c0a8bfa7e039bba9a5bd2eae9ee0c9 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -30,8 +30,12 @@
 
 cdef struct Oct
 cdef struct Oct:
-    np.int64_t ind          # index
-    np.int64_t local_ind
+    np.int64_t file_ind     # index with respect to the order in which it was
+                            # added
+    np.int64_t domain_ind   # index within the global set of domains
+                            # note that moving to a local index will require
+                            # moving to split-up masks, which is part of a
+                            # bigger refactor
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
@@ -61,6 +65,9 @@
     cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
+    # This function must return the offset from global-to-local domains; i.e.,
+    # OctAllocationContainer.offset if such a thing exists.
+    cdef np.int64_t get_domain_offset(self, int domain_id)
 
 cdef class ARTIOOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains
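
In plain-Python terms the renamed members can be pictured like this; a
hypothetical stand-in for the Cython struct, recorded only to make the two
meanings explicit:

from dataclasses import dataclass

@dataclass
class OctRecord:
    file_ind: int    # order in which the oct was added for its file/domain
    domain_ind: int  # position in the global list spanning all domains
    domain: int      # which domain (file) the oct belongs to

# e.g. the third oct read from domain 2 might be the tenth oct overall:
example = OctRecord(file_ind=2, domain_ind=9, domain=2)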

diff -r ff219faca878615de3cf9761fd86abbd34af55be -r 0eb93875a7c0a8bfa7e039bba9a5bd2eae9ee0c9 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -56,8 +56,8 @@
     for n in range(n_octs):
         oct = &n_cont.my_octs[n]
         oct.parent = NULL
-        oct.ind = oct.domain = -1
-        oct.local_ind = n + n_cont.offset
+        oct.file_ind = oct.domain = -1
+        oct.domain_ind = n + n_cont.offset
         oct.level = -1
         for i in range(2):
             for j in range(2):
@@ -130,7 +130,7 @@
         while cur != NULL:
             for i in range(cur.n_assigned):
                 this = &cur.my_octs[i]
-                yield (this.ind, this.local_ind, this.domain)
+                yield (this.file_ind, this.domain_ind, this.domain)
             cur = cur.next
 
     cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
@@ -139,6 +139,9 @@
             size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
             corner[i] = o.pos[i] * size[i] + self.DLE[i]
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return 0
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -199,7 +202,7 @@
                 cur = cur.next
             o = &cur.my_octs[oi - cur.offset]
             for i in range(8):
-                count[o.domain - 1] += mask[o.local_ind,i]
+                count[o.domain - 1] += mask[o.domain_ind,i]
         return count
 
     @cython.boundscheck(True)
@@ -232,7 +235,7 @@
                     for k in range(2):
                         if o.children[i][j][k] == NULL:
                             ii = ((k*2)+j)*2+i
-                            count[o.domain - 1] += mask[o.local_ind,ii]
+                            count[o.domain - 1] += mask[o.domain_ind,ii]
         return count
 
     @cython.boundscheck(False)
@@ -493,7 +496,7 @@
             next_oct.pos[i] = pos[i]
         next_oct.domain = curdom
         next_oct.parent = cur
-        next_oct.ind = 1
+        next_oct.file_ind = 1
         next_oct.level = curlevel
         return next_oct
 
@@ -528,7 +531,7 @@
                 for j in range(2):
                     for i in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         # Note that we bit shift because o.pos is oct position,
                         # not cell position, and it is with respect to octs,
                         # not cells.
@@ -574,7 +577,7 @@
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -613,7 +616,7 @@
                 for j in range(2):
                     for i in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = pos[0] + dx[0] * i
                         coords[ci, 1] = pos[1] + dx[1] * j
                         coords[ci, 2] = pos[2] + dx[2] * k
@@ -645,13 +648,17 @@
                     for j in range(2):
                         for i in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.local_ind*8+ii]
+                            if mask[o.domain_ind, ii] == 0: continue
+                            dest[local_filled + offset] = source[o.domain_ind*8+ii]
                             local_filled += 1
         return local_filled
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        return cont.offset
+
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
         if next != NULL: return next
@@ -729,7 +736,7 @@
             o = &cur.my_octs[oi]
             use = 0
             for i in range(8):
-                m2[o.local_ind, i] = mask[o.local_ind, i]
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
         return m2 # NOTE: This is uint8_t
 
     def domain_mask(self,
@@ -751,7 +758,7 @@
             o = &cur.my_octs[oi]
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             nm += use
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, nm), 'uint8')
@@ -763,7 +770,7 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         use = m2[i, j, k, nm] = 1
             nm += use
         return m2.astype("bool")
@@ -781,9 +788,9 @@
             o = &cur.my_octs[oi]
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             if use == 1:
-                ind[o.ind] = nm
+                ind[o.file_ind] = nm
             nm += use
         return ind
 
@@ -832,7 +839,7 @@
                         else:
                             some_refined = 1
             if some_unrefined == some_refined == 1:
-                #print "BAD", oct.ind, oct.local_ind
+                #print "BAD", oct.file_ind, oct.domain_ind
                 bad += 1
                 if curdom == 10 or curdom == 72:
                     for i in range(2):
@@ -890,7 +897,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             if local == 1:
-                cur.ind = p
+                cur.file_ind = p
             cur.level = curlevel
         return cont.n_assigned - initial
 
@@ -914,7 +921,7 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         ci = level_counts[o.level]
                         coords[ci, 0] = (o.pos[0] << 1) + i
                         coords[ci, 1] = (o.pos[1] << 1) + j
@@ -959,7 +966,7 @@
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -997,7 +1004,7 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         ci = level_counts[o.level]
                         coords[ci, 0] = pos[0] + dx[0] * i
                         coords[ci, 1] = pos[1] + dx[1] * j
@@ -1029,8 +1036,8 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.ind, ii]
+                            if mask[o.domain_ind, ii] == 0: continue
+                            dest[local_filled + offset] = source[o.file_ind, ii]
                             local_filled += 1
         return local_filled
 
@@ -1061,7 +1068,7 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                index = o.ind-subchunk_offset
+                index = o.file_ind-subchunk_offset
                 if o.level != level: continue
                 if index < 0: continue
                 if index >= subchunk_max: 
@@ -1072,7 +1079,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             dest[local_filled + offset] = \
                                 source[index,ii]
                             local_filled += 1
@@ -1112,7 +1119,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             ox = (o.pos[0] << 1) + i
                             oy = (o.pos[1] << 1) + j
                             oz = (o.pos[2] << 1) + k
@@ -1316,8 +1323,8 @@
         self.dom_offsets[0] = 0
         dom_ind = 0
         for i in range(self.nocts):
-            self.oct_list[i].local_ind = i
-            self.oct_list[i].ind = dom_ind
+            self.oct_list[i].domain_ind = i
+            self.oct_list[i].file_ind = dom_ind
             dom_ind += 1
             if self.oct_list[i].domain > cur_dom:
                 cur_dom = self.oct_list[i].domain
@@ -1334,8 +1341,8 @@
         cdef ParticleArrays *sd = <ParticleArrays*> \
             malloc(sizeof(ParticleArrays))
         cdef int i, j, k
-        my_oct.ind = my_oct.domain = -1
-        my_oct.local_ind = self.nocts - 1
+        my_oct.file_ind = my_oct.domain = -1
+        my_oct.domain_ind = self.nocts - 1
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
         my_oct.sd = sd
@@ -1386,7 +1393,7 @@
         for oi in range(ndo):
             o = self.oct_list[oi + doff]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -1583,7 +1590,7 @@
             if o.domain != domain_id: continue
             use = 0
             for i in range(8):
-                m2[o.local_ind, i] = mask[o.local_ind, i]
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
         return m2
 
     def domain_mask(self,
@@ -1606,7 +1613,7 @@
             if o.domain != domain_id: continue
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             nm += use
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, nm), 'uint8')
@@ -1619,7 +1626,7 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         use = m2[i, j, k, nm] = 1
             nm += use
         return m2.astype("bool")
@@ -1631,7 +1638,7 @@
         # Here we once again do something similar to the other functions.  We
         # need a set of indices into the final reduced, masked values.  The
         # indices will be domain.n long, and will be of type int64.  This way,
-        # we can get the Oct through a .get() call, then use Oct.ind as an
+        # we can get the Oct through a .get() call, then use Oct.file_ind as an
         # index into this newly created array, then finally use the returned
         # index into the domain subset array for deposition.
         cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
@@ -1646,7 +1653,7 @@
             o = self.oct_list[oi + offset]
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             if use == 1:
                 ind[oi] = nm
             nm += use

diff -r ff219faca878615de3cf9761fd86abbd34af55be -r 0eb93875a7c0a8bfa7e039bba9a5bd2eae9ee0c9 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -201,7 +201,7 @@
             this_level = 0
         if res == 0:
             for i in range(8):
-                mask[root.local_ind,i] = 0
+                mask[root.domain_ind,i] = 0
             # If this level *is* being selected (i.e., no early termination)
             # then we know no child zones will be selected.
             if this_level == 1:
@@ -217,11 +217,11 @@
                     ii = ((k*2)+j)*2+i
                     ch = root.children[i][j][k]
                     if next_level == 1 and ch != NULL:
-                        mask[root.local_ind, ii] = 0
+                        mask[root.domain_ind, ii] = 0
                         self.recursively_select_octs(
                             ch, spos, sdds, mask, level + 1)
                     elif this_level == 1:
-                        mask[root.local_ind, ii] = \
+                        mask[root.domain_ind, ii] = \
                             self.select_cell(spos, sdds, eterm)
                     spos[2] += sdds[2]
                 spos[1] += sdds[1]


https://bitbucket.org/yt_analysis/yt-3.0/commits/c2c281051ffa/
Changeset:   c2c281051ffa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-11 00:37:01
Summary:     Try using the actual domain_ind minus the global domain offset when computing the output oct index.
Affected #:  1 file

diff -r 0eb93875a7c0a8bfa7e039bba9a5bd2eae9ee0c9 -r c2c281051ffadb10da23d52c19c782393bbad7ed yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -62,8 +62,9 @@
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = 2
         cdef OctInfo oi
-        cdef np.int64_t offset
+        cdef np.int64_t offset, moff
         cdef Oct *oct
+        moff = octree.get_domain_offset(domain_id)
         for i in range(positions.shape[0]):
             # We should check if particle remains inside the Oct here
             for j in range(nf):
@@ -75,7 +76,8 @@
             # might have particles that belong to octs outside our domain.
             if oct.domain != domain_id: continue
             #print domain_id, oct.local_ind, oct.ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
-            offset = dom_ind[oct.ind] * 8
+            # Note that this has to be our local index, not our in-file index.
+            offset = dom_ind[oct.domain_ind - moff] * 8
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
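
With toy numbers (purely illustrative), the bookkeeping in the changed line
is: dom_ind is addressed by domain-local oct number, while the oct returned
by the tree carries a global domain_ind, so the domain's starting offset
comes off first and the result is scaled by the 8 zones per oct.

import numpy as np

dom_ind = np.array([0, -1, 1, 2])  # local oct number -> output slot (-1 unused)
moff = 128                         # global domain_ind of this domain's first oct
oct_domain_ind = 130               # global index of the oct a particle landed in
offset = dom_ind[oct_domain_ind - moff] * 8   # -> dom_ind[2] * 8 == 8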


https://bitbucket.org/yt_analysis/yt-3.0/commits/069e11b50b20/
Changeset:   069e11b50b20
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-13 01:28:22
Summary:     Experimental adjustment to make oct-traversal more straightforward for RAMSES.

This works for projections, etc., but I believe it may be slightly more costly
for filling from files.  However, I think we can mitigate this by eliminating
the "level_count" attribute and by pursuing further optimizations such as
splitting up the mask.
Affected #:  1 file

diff -r c2c281051ffadb10da23d52c19c782393bbad7ed -r 069e11b50b204653e98255ef7ec9b88e781b3e57 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -790,7 +790,7 @@
             for i in range(8):
                 if mask[o.domain_ind, i] == 1: use = 1
             if use == 1:
-                ind[o.file_ind] = nm
+                ind[o.domain_ind - cur.offset] = nm
             nm += use
         return ind
 
@@ -915,6 +915,7 @@
         n = mask.shape[0]
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="int64")
+        ci = 0
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(2):
@@ -922,11 +923,10 @@
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
                         if mask[o.domain_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
                         coords[ci, 0] = (o.pos[0] << 1) + i
                         coords[ci, 1] = (o.pos[1] << 1) + j
                         coords[ci, 2] = (o.pos[2] << 1) + k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
 
     @cython.boundscheck(False)
@@ -948,9 +948,8 @@
             o = &cur.my_octs[oi]
             for i in range(8):
                 if mask[oi + cur.offset, i] == 0: continue
-                ci = level_counts[o.level]
                 levels[ci] = o.level
-                level_counts[o.level] += 1
+                ci += 1
         return levels
 
     @cython.boundscheck(False)
@@ -991,6 +990,7 @@
             # position.  Note that the positions will also all be offset by
             # dx/2.0.  This is also for *oct grids*, not cells.
             base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+        ci = 0
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
             for i in range(3):
@@ -1005,11 +1005,10 @@
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
                         if mask[o.domain_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
                         coords[ci, 0] = pos[0] + dx[0] * i
                         coords[ci, 1] = pos[1] + dx[1] * j
                         coords[ci, 2] = pos[2] + dx[2] * k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
 
     @cython.boundscheck(False)
@@ -1031,18 +1030,15 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                if o.level != level: continue
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.domain_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.file_ind, ii]
-                            local_filled += 1
+                for ii in range(8):
+                    # We iterate and check here to keep our counts consistent
+                    # when filling different levels.
+                    if mask[o.domain_ind, ii] == 0: continue
+                    if o.level == level: 
+                        dest[local_filled] = source[o.file_ind, ii]
+                    local_filled += 1
         return local_filled
 
-
-
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
     @cython.boundscheck(True)
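
A minimal sketch of the new indexing (toy arrays, not yt code): masked cells are
written out in plain oct-traversal order with a single running counter, instead
of being grouped by level through the level_counts array.

    import numpy as np

    def masked_cell_icoords(oct_positions, mask):
        # oct_positions: (n_octs, 3) integer oct positions
        # mask: (n_octs, 8) selection of cells within each oct
        coords = np.empty((int(mask.sum()), 3), dtype="int64")
        ci = 0                              # single running cell index
        for oi in range(len(oct_positions)):
            for i in range(2):
                for j in range(2):
                    for k in range(2):
                        ii = ((k * 2) + j) * 2 + i
                        if not mask[oi, ii]:
                            continue
                        coords[ci, 0] = (oct_positions[oi, 0] << 1) + i
                        coords[ci, 1] = (oct_positions[oi, 1] << 1) + j
                        coords[ci, 2] = (oct_positions[oi, 2] << 1) + k
                        ci += 1
        return coords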


https://bitbucket.org/yt_analysis/yt-3.0/commits/88be57c26d09/
Changeset:   88be57c26d09
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-13 20:09:47
Summary:     This change allows spatially-defined fields to match the ordering of
non-spatially defined fields for octrees.
Affected #:  2 files

diff -r 069e11b50b204653e98255ef7ec9b88e781b3e57 -r 88be57c26d09ba78525268c07f52a74d813dfc77 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -249,7 +249,13 @@
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     mask = self._current_chunk.objs[0].select(self.selector)
                     if mask is None: continue
-                    data = self[field][mask]
+                    data = self[field]
+                    if len(data.shape) == 4:
+                        # This is how we keep it consistent between oct ordering
+                        # and grid ordering.
+                        data = data.T[mask.T]
+                    else:
+                        data = data[mask]
                     rv[ind:ind+data.size] = data
                     ind += data.size
         else:

diff -r 069e11b50b204653e98255ef7ec9b88e781b3e57 -r 88be57c26d09ba78525268c07f52a74d813dfc77 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -119,8 +119,7 @@
     def _reshape_vals(self, arr):
         nz = self._num_zones + 2*self._num_ghost_zones
         n_oct = arr.shape[0] / (nz**3.0)
-        arr.shape = (n_oct, nz, nz, nz)
-        arr = np.rollaxis(arr, 0, 4)
+        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
         return arr
 
     _domain_ind = None
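
A minimal sketch (toy shapes, not yt code) of why the reshape above uses Fortran
order and why data.T[mask.T] appears in the data_containers.py hunk: with octs
on the last axis, transposing both the data and the mask before boolean indexing
yields the selected cells in the same order as the flat, non-spatial array.

    import numpy as np

    nz, n_oct = 2, 3
    flat = np.arange(n_oct * nz**3, dtype="float64")    # non-spatial ordering
    data = flat.reshape((nz, nz, nz, n_oct), order="F") # spatial (oct) ordering
    mask_flat = flat % 3 == 0                           # arbitrary selection
    mask = mask_flat.reshape((nz, nz, nz, n_oct), order="F")
    # Transposing both arrays recovers exactly the flat ordering:
    assert np.all(data.T[mask.T] == flat[mask_flat])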


https://bitbucket.org/yt_analysis/yt-3.0/commits/54cc305a4ff4/
Changeset:   54cc305a4ff4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-14 19:53:49
Summary:     Enable domain offsets for Particle octrees.
Affected #:  2 files

diff -r 88be57c26d09ba78525268c07f52a74d813dfc77 -r 54cc305a4ff4df1b0f20fa2ca2a4ad2bb3ad13cc yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -1190,6 +1190,16 @@
                 free(o.sd.pos)
         free(o)
 
+    def __iter__(self):
+        #Get the next oct, will traverse domains
+        #Note that oct containers can be sorted 
+        #so that consecutive octs are on the same domain
+        cdef int oi
+        cdef Oct *o
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            yield (o.file_ind, o.domain_ind, o.domain)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -1328,6 +1338,9 @@
                 dom_ind = 0
         self.dom_offsets[cur_dom + 2] = self.nocts
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return self.dom_offsets[domain_id + 1]
+
     cdef Oct* allocate_oct(self):
         #Allocate the memory, set to NULL or -1
         #We reserve space for n_ref particles, but keep

diff -r 88be57c26d09ba78525268c07f52a74d813dfc77 -r 54cc305a4ff4df1b0f20fa2ca2a4ad2bb3ad13cc yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -54,7 +54,7 @@
         Returns (in code units) the smallest cell size in the simulation.
         """
         return (self.parameter_file.domain_width /
-                (2**self.max_level)).min()
+                (2**(self.max_level+1))).min()
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
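
A worked example of the smallest-cell-size change above, with toy numbers and
ignoring the root-grid dimensions: an oct at max_level has width
domain_width / 2**max_level, and its cells are half that width, hence the extra
power of two.

    domain_width, max_level = 1.0, 5
    oct_width = domain_width / 2**max_level          # 0.03125
    cell_width = domain_width / 2**(max_level + 1)   # 0.015625, the smallest dx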


https://bitbucket.org/yt_analysis/yt-3.0/commits/29aa47d1b2c1/
Changeset:   29aa47d1b2c1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-15 00:00:50
Summary:     Adding a deposit method to the FieldDetector.
Affected #:  1 file

diff -r 54cc305a4ff4df1b0f20fa2ca2a4ad2bb3ad13cc -r 29aa47d1b2c1a6f7a8fc7a744e1429a7cc8933e1 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -286,6 +286,9 @@
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
+    def deposit(self, *args, **kwargs):
+        return np.random.random((self.nd, self.nd, self.nd))
+
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)


https://bitbucket.org/yt_analysis/yt-3.0/commits/6eaa2394b591/
Changeset:   6eaa2394b591
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-21 20:52:30
Summary:     Removing ARTIOOctreeContainer (S.L. authorized.)
Affected #:  2 files

diff -r 29aa47d1b2c1a6f7a8fc7a744e1429a7cc8933e1 -r 6eaa2394b59160d94d04da9ae653ac69c9f64e00 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -69,13 +69,6 @@
     # OctAllocationContainer.offset if such a thing exists.
     cdef np.int64_t get_domain_offset(self, int domain_id)
 
-cdef class ARTIOOctreeContainer(OctreeContainer):
-    cdef OctAllocationContainer **domains
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3])
-    cdef Oct *next_free_oct( self, int curdom )
-    cdef int valid_domain_oct(self, int curdom, Oct *parent)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, int curlevel, double pp[3])
-
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains
     cdef Oct *next_root(self, int domain_id, int ind[3])

diff -r 29aa47d1b2c1a6f7a8fc7a744e1429a7cc8933e1 -r 6eaa2394b59160d94d04da9ae653ac69c9f64e00 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -328,331 +328,6 @@
                 bounds[i, 3+ii] = size[ii]
         return bounds
 
-cdef class ARTIOOctreeContainer(OctreeContainer):
-
-    def allocate_domains(self, domain_counts):
-        cdef int count, i
-        cdef OctAllocationContainer *cur = self.cont
-        assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
-        self.domains = <OctAllocationContainer **> malloc(
-            sizeof(OctAllocationContainer *) * len(domain_counts))
-        for i, count in enumerate(domain_counts):
-            cur = allocate_octs(count, cur)
-            if self.cont == NULL: self.cont = cur
-            self.domains[i] = cur
-        
-    def __dealloc__(self):
-        # This gets called BEFORE the superclass deallocation.  But, both get
-        # called.
-        if self.domains != NULL: free(self.domains)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count(self, np.ndarray[np.uint8_t, ndim=1, cast=True] mask,
-                     split = False):
-        cdef int n = mask.shape[0]
-        cdef int i, dom
-        cdef OctAllocationContainer *cur
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # This is the idiom for iterating over many containers.
-        cur = self.cont
-        for i in range(n):
-            if i - cur.offset >= cur.n: cur = cur.next
-            if mask[i] == 1:
-                count[cur.my_octs[i - cur.offset].domain - 1] += 1
-        return count
-
-    def check(self, int curdom):
-        cdef int dind, pi
-        cdef Oct oct
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        cdef int nbad = 0
-        for pi in range(cont.n_assigned):
-            oct = cont.my_octs[pi]
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if oct.children[i][j][k] != NULL and \
-                           oct.children[i][j][k].level != oct.level + 1:
-                            if curdom == 61:
-                                print pi, oct.children[i][j][k].level,
-                                print oct.level
-                            nbad += 1
-        print "DOMAIN % 3i HAS % 9i BAD OCTS (%s / %s / %s)" % (curdom, nbad, 
-            cont.n - cont.n_assigned, cont.n_assigned, cont.n)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *next_free_oct( self, int curdom ) :
-        cdef OctAllocationContainer *cont
-        cdef Oct *next_oct
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            print "Error, invalid domain or unallocated domains"
-            raise RuntimeError
-        
-        cont = self.domains[curdom - 1]
-        if cont.n_assigned >= cont.n :
-            print "Error, ran out of octs in domain curdom"
-            raise RuntimeError
-
-        self.nocts += 1
-        next_oct = &cont.my_octs[cont.n_assigned]
-        cont.n_assigned += 1
-        return next_oct
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int valid_domain_oct(self, int curdom, Oct *parent) :
-        cdef OctAllocationContainer *cont
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            raise RuntimeError
-        cont = self.domains[curdom - 1]
-
-        if parent == NULL or parent < &cont.my_octs[0] or \
-                parent > &cont.my_octs[cont.n_assigned] :
-            return 0
-        else :
-            return 1
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3]):
-        cdef np.int64_t ind[3]
-        cdef np.float64_t dds
-        cdef int i
-        for i in range(3):
-            dds = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> floor((ppos[i]-self.DLE[i])/dds)
-        return self.root_mesh[ind[0]][ind[1]][ind[2]]
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, 
-                    int curlevel, np.float64_t pp[3]):
-
-        cdef int level, i, ind[3]
-        cdef Oct *cur, *next_oct
-        cdef np.int64_t pos[3]
-        cdef np.float64_t dds
-
-        if curlevel < 0 :
-            raise RuntimeError
-        for i in range(3):
-            if pp[i] < self.DLE[i] or pp[i] > self.DRE[i] :
-                raise RuntimeError
-            dds = (self.DRE[i] - self.DLE[i])/(<np.int64_t>self.nn[i])
-            pos[i] = <np.int64_t> floor((pp[i]-self.DLE[i])*<np.float64_t>(1<<curlevel)/dds)
-
-        if curlevel == 0 :
-            cur = NULL
-        elif parent == NULL :
-            cur = self.get_root_oct(pp)
-            assert( cur != NULL )
-
-            # Now we find the location we want
-            for level in range(1,curlevel):
-                # At every level, find the cell this oct lives inside
-                for i in range(3) :
-                    if pos[i] < (2*cur.pos[i]+1)<<(curlevel-level) :
-                        ind[i] = 0
-                    else :
-                        ind[i] = 1
-                cur = cur.children[ind[0]][ind[1]][ind[2]]
-                if cur == NULL:
-                    # in ART we don't allocate down to curlevel 
-                    # if parent doesn't exist
-                    print "Error, no oct exists at that level"
-                    raise RuntimeError
-        else :
-            if not self.valid_domain_oct(curdom,parent) or \
-                    parent.level != curlevel - 1:
-                raise RuntimeError
-            cur = parent
- 
-        next_oct = self.next_free_oct( curdom )
-        if cur == NULL :
-            self.root_mesh[pos[0]][pos[1]][pos[2]] = next_oct
-        else :
-            for i in range(3) :
-                if pos[i] < 2*cur.pos[i]+1 :
-                    ind[i] = 0
-                else :
-                    ind[i] = 1
-            if cur.level != curlevel - 1 or  \
-                    cur.children[ind[0]][ind[1]][ind[2]] != NULL :
-                print "Error in add_oct: child already filled!"
-                raise RuntimeError
-
-            cur.children[ind[0]][ind[1]][ind[2]] = next_oct
-        for i in range(3) :
-            next_oct.pos[i] = pos[i]
-        next_oct.domain = curdom
-        next_oct.parent = cur
-        next_oct.file_ind = 1
-        next_oct.level = curlevel
-        return next_oct
-
-    # ii:mask/art ; ci=ramses loop backward (k<-fast, j ,i<-slow) 
-    # ii=0 000 art 000 ci 000 
-    # ii=1 100 art 100 ci 001 
-    # ii=2 010 art 010 ci 010 
-    # ii=3 110 art 110 ci 011
-    # ii=4 001 art 001 ci 100
-    # ii=5 101 art 011 ci 101
-    # ii=6 011 art 011 ci 110
-    # ii=7 111 art 111 ci 111
-    # keep coords ints so multiply by pow(2,1) when increasing level.
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii, level
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        # Note that we bit shift because o.pos is oct position,
-                        # not cell position, and it is with respect to octs,
-                        # not cells.
-                        coords[ci, 0] = (o.pos[0] << 1) + i
-                        coords[ci, 1] = (o.pos[1] << 1) + j
-                        coords[ci, 2] = (o.pos[2] << 1) + k
-                        ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] levels
-        levels = np.empty(cell_count, dtype="int64")
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[oi + cur.offset, i] == 0: continue
-                levels[ci] = o.level
-                ci +=1
-        return levels
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[o.domain_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.float64_t pos[3]
-        cdef np.float64_t base_dx[3], dx[3]
-        n = mask.shape[0]
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        ci =0 
-        for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.domain_ind, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci +=1 
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_mask(self, int domain, dest_fields, source_fields,
-                   np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
-        cdef np.ndarray[np.float32_t, ndim=1] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
-        cdef int n
-        cdef int i, j, k, ii
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        for key in dest_fields:
-            local_filled = 0
-            dest = dest_fields[key]
-            source = source_fields[key]
-            # snl: an alternative to filling level 0 yt-octs is to produce a 
-            # mapping between the mask and the source read order
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                for k in range(2):
-                    for j in range(2):
-                        for i in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.domain_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.domain_ind*8+ii]
-                            local_filled += 1
-        return local_filled
-
 cdef class RAMSESOctreeContainer(OctreeContainer):
 
     cdef np.int64_t get_domain_offset(self, int domain_id):


https://bitbucket.org/yt_analysis/yt-3.0/commits/1fb97d1d3a5d/
Changeset:   1fb97d1d3a5d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-21 22:25:16
Summary:     We only want to deposit into an array of the correct size.  Also, skip bad offsets.
Affected #:  2 files

diff -r 6eaa2394b59160d94d04da9ae653ac69c9f64e00 -r 1fb97d1d3a5dad27ea29dda7b187404cdd261ecd yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -136,7 +136,7 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = self.domain_ind.size * 8
+        nvals = (self.domain_ind >= 0).sum() * 8
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
         op.process_octree(self.oct_handler, self.domain_ind, positions, fields,

diff -r 6eaa2394b59160d94d04da9ae653ac69c9f64e00 -r 1fb97d1d3a5dad27ea29dda7b187404cdd261ecd yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -78,6 +78,7 @@
             #print domain_id, oct.local_ind, oct.ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
             # Note that this has to be our local index, not our in-file index.
             offset = dom_ind[oct.domain_ind - moff] * 8
+            if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
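
A minimal sketch of the sizing change above (toy values, not yt code): the
output buffer covers only octs with a non-negative entry in domain_ind, eight
cells each, and any deposition whose mapped offset comes back negative is
skipped.

    import numpy as np

    domain_ind = np.array([0, -1, 1, -1, 2])   # -1 marks octs outside the subset
    nvals = (domain_ind >= 0).sum() * 8        # 3 selected octs -> 24 cells
    buf = np.zeros(nvals, dtype="float64")
    for oct_index in range(domain_ind.size):
        offset = domain_ind[oct_index] * 8
        if offset < 0:
            continue                           # mirrors "skip bad offsets"
        buf[offset] += 1.0                     # deposit into that oct's cells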


https://bitbucket.org/yt_analysis/yt-3.0/commits/85d4b7d54649/
Changeset:   85d4b7d54649
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-08 02:51:21
Summary:     Fixed particle IO when the number of particles exceeds 4096^2
Affected #:  1 file

diff -r 05a99508b3cd77bd87d83bae2d4a0f850a013e00 -r 85d4b7d546498f40f3951b18c93a3db56132bca2 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -330,32 +330,58 @@
     f.seek(pos)
     return unitary_center, fl, iocts, nLevel, root_level
 
+def get_ranges(skip, count, field, words=6, real_size=4, np_per_page=4096**2, 
+                  num_pages=1):
+    #translate every particle index into a file position ranges
+    ranges = []
+    arr_size = np_per_page * real_size
+    page_size = words * np_per_page * real_size
+    idxa, idxb = 0, 0
+    posa, posb = 0, 0
+    left = count
+    for page in range(num_pages):
+        idxb += np_per_page
+        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
+            posb += arr_size
+            if i == field or fname == field:
+                if skip < np_per_page and count > 0:
+                    left_in_page = np_per_page - skip
+                    this_count = min(left_in_page, count)
+                    count -= this_count
+                    start = posa + skip * real_size
+                    end = posa + this_count * real_size
+                    ranges.append((start, this_count))
+                    skip = 0
+                    assert end <= posb
+                else:
+                    skip -= np_per_page
+            posa += arr_size
+        idxa += np_per_page
+    assert count == 0
+    return ranges
 
-def read_particles(file, Nrow, idxa=None, idxb=None, field=None):
+
+def read_particles(file, Nrow, idxa, idxb, field):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
-    np_per_page = Nrow**2  # defined in ART a_setup.h
+    np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
     data = np.array([], 'f4')
     fh = open(file, 'r')
-    totalp = idxb-idxa
-    left = totalp
-    for page in range(num_pages):
-        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
-            if i == field or fname == field:
-                if idxa is not None:
-                    fh.seek(real_size*idxa, 1)
-                    count = min(np_per_page, left)
-                    temp = np.fromfile(fh, count=count, dtype='>f4')
-                    pageleft = np_per_page-count-idxa
-                    fh.seek(real_size*pageleft, 1)
-                    left -= count
-                else:
-                    count = np_per_page
-                    temp = np.fromfile(fh, count=count, dtype='>f4')
-                data = np.concatenate((data, temp))
-            else:
-                fh.seek(4*np_per_page, 1)
+    skip, count = idxa, idxb - idxa
+    import pdb; pdb.set_trace()
+    kwargs = dict(words=words, real_size=real_size, 
+                  np_per_page=np_per_page, num_pages=num_pages)
+    ranges = get_ranges(skip, count, field, **kwargs)
+    data = None
+    for seek, this_count in ranges:
+        fh.seek(seek)
+        temp = np.fromfile(fh, count=this_count, dtype='>f4')
+        if data is None:
+            data = temp
+        else:
+            data = np.concatenate((data, temp))
+    fh.close()
     return data
 
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/14d07dcd9760/
Changeset:   14d07dcd9760
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-08 02:51:32
Summary:     Removed pdb
Affected #:  1 file

diff -r 85d4b7d546498f40f3951b18c93a3db56132bca2 -r 14d07dcd97608a40b3634d065ff47aca3ea9919a yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -369,7 +369,6 @@
     data = np.array([], 'f4')
     fh = open(file, 'r')
     skip, count = idxa, idxb - idxa
-    import pdb; pdb.set_trace()
     kwargs = dict(words=words, real_size=real_size, 
                   np_per_page=np_per_page, num_pages=num_pages)
     ranges = get_ranges(skip, count, field, **kwargs)
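
A small worked example of the paged layout that get_ranges (called above) walks,
with toy numbers: each page stores np_per_page values for each of the six
particle fields back to back, so the byte offset of one field on one page, plus
a particle skip, has the closed form below, matching the running posa
accumulated inside get_ranges.

    # Toy page of 4 particles, 4-byte reals, six fields (x, y, z, vx, vy, vz).
    words, real_size, np_per_page = 6, 4, 4
    arr_size = np_per_page * real_size             # one field on one page
    page_size = words * arr_size                   # one full page

    # Field 'y' (index 1) on page 2, starting at particle 3 within that page.
    page, field_index, skip = 2, 1, 3
    seek = page * page_size + field_index * arr_size + skip * real_size
    print(seek)                                    # 2*96 + 1*16 + 3*4 = 220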


https://bitbucket.org/yt_analysis/yt-3.0/commits/648cf68c5c16/
Changeset:   648cf68c5c16
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-21 22:40:33
Summary:     Added prototype std and sum methods
Affected #:  1 file

diff -r 14d07dcd97608a40b3634d065ff47aca3ea9919a -r 648cf68c5c16115802f3722a541147dfbf8df23a yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -113,9 +113,11 @@
     cdef np.float64_t *count # float, for ease
     cdef public object ocount
     def initialize(self):
+        # Create a numpy array accessible to python
         self.ocount = np.zeros(self.nvals, dtype="float64")
         cdef np.ndarray arr = self.ocount
-        self.count = <np.float64_t*> arr.data
+        # alias the C-view for use in cython
+        self.count = <np.int64_t*> arr.data
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -138,127 +140,64 @@
 
 deposit_count = CountParticles
 
-"""
-# Mode functions
-ctypedef np.float64_t (*type_opt)(np.float64_t, np.float64_t)
-cdef np.float64_t opt_count(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += 1.0
+cdef class SumParticleField(ParticleDepositOperation):
+    cdef np.float64_t *count # float, for ease
+    cdef public object ocount
+    def initialize(self):
+        self.osum = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.osum
+        self.sum = <np.float64_t*> arr.data
 
-cdef np.float64_t opt_sum(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += pdata 
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, # offset into IO field
+                      np.float64_t ppos[3], # this particle's position
+                      np.float64_t *fields # any other fields we need
+                      ):
+        # here we do our thing; this is the kernel
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+        #print "Depositing into", offset,
+        #print gind(ii[0], ii[1], ii[2], dim)
+        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[i]
+        
+    def finalize(self):
+        return self.sum
 
-cdef np.float64_t opt_diff(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += (data_in[index] - pdata) 
+deposit_sum = SumParticleField
 
-cdef np.float64_t opt_wcount(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += weight
+cdef class StdParticleField(ParticleDepositOperation):
+    # Thanks to Britton and MJ Turk for the link
+    # to a single-pass STD
+    # http://www.cs.berkeley.edu/~mhoemmen/cs194/Tutorials/variance.pdf
+    cdef np.float64_t *count # float, for ease
+    cdef public object ocount
+    def initialize(self):
+        self.osum = np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray arr = self.osum
+        self.sum = <np.float64_t*> arr.data
 
-cdef np.float64_t opt_wsum(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += pdata * weight
+    @cython.cdivision(True)
+    cdef void process(self, int dim[3],
+                      np.float64_t left_edge[3], 
+                      np.float64_t dds[3],
+                      np.int64_t offset, # offset into IO field
+                      np.float64_t ppos[3], # this particle's position
+                      np.float64_t *fields # any other fields we need
+                      ):
+        # here we do our thing; this is the kernel
+        cdef int ii[3], i
+        for i in range(3):
+            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
+        #print "Depositing into", offset,
+        #print gind(ii[0], ii[1], ii[2], dim)
+        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[i]
+        
+    def finalize(self):
+        return self.sum
 
-cdef np.float64_t opt_wdiff(np.float64_t pdata,
-                            np.float64_t weight,
-                            np.int64_t index,
-                            np.ndarray[np.float64_t, ndim=2] data_out, 
-                            np.ndarray[np.float64_t, ndim=2] data_in):
-    data_out[index] += (data_in[index] - pdata) * weight
+deposit_sum = SumParticleField
 
-# Selection functions
-ctypedef NOTSURE (*type_sel)(OctreeContainer, 
-                                np.ndarray[np.float64_t, ndim=1],
-                                np.float64_t)
-cdef NOTSURE select_nearest(OctreeContainer oct_handler,
-                            np.ndarray[np.float64_t, ndim=1] pos,
-                            np.float64_t radius):
-    #return only the nearest oct
-    pass
-
-
-cdef NOTSURE select_radius(OctreeContainer oct_handler,
-                            np.ndarray[np.float64_t, ndim=1] pos,
-                            np.float64_t radius):
-    #return a list of octs within the radius
-    pass
-    
-
-# Kernel functions
-ctypedef np.float64_t (*type_ker)(np.float64_t)
-cdef np.float64_t kernel_sph(np.float64_t x) nogil:
-    cdef np.float64_t kernel
-    if x <= 0.5:
-        kernel = 1.-6.*x*x*(1.-x)
-    elif x>0.5 and x<=1.0:
-        kernel = 2.*(1.-x)*(1.-x)*(1.-x)
-    else:
-        kernel = 0.
-    return kernel
-
-cdef np.float64_t kernel_null(np.float64_t x) nogil: return 0.0
-
-cdef deposit(OctreeContainer oct_handler, 
-        np.ndarray[np.float64_t, ndim=2] ppos, #positions,columns are x,y,z
-        np.ndarray[np.float64_t, ndim=2] pd, # particle fields
-        np.ndarray[np.float64_t, ndim=1] pr, # particle radius
-        np.ndarray[np.float64_t, ndim=2] data_in, #used to calc diff, same shape as data_out
-        np.ndarray[np.float64_t, ndim=2] data_out, #write deposited here
-        mode='count', selection='nearest', kernel='null'):
-    cdef type_opt fopt
-    cdef type_sel fsel
-    cdef type_ker fker
-    cdef long pi #particle index
-    cdef long nocts #number of octs in selection
-    cdef Oct oct 
-    cdef np.float64_t w
-    # Can we do this with dicts?
-    # Setup the function pointers
-    if mode == 'count':
-        fopt = opt_count
-    elif mode == 'sum':
-        fopt = opt_sum
-    elif mode == 'diff':
-        fopt = opt_diff
-    if mode == 'wcount':
-        fopt = opt_count
-    elif mode == 'wsum':
-        fopt = opt_sum
-    elif mode == 'wdiff':
-        fopt = opt_diff
-    if selection == 'nearest':
-        fsel = select_nearest
-    elif selection == 'radius':
-        fsel = select_radius
-    if kernel == 'null':
-        fker = kernel_null
-    if kernel == 'sph':
-        fker = kernel_sph
-    for pi in range(particles):
-        octs = fsel(oct_handler, ppos[pi], pr[pi])
-        for oct in octs:
-            for cell in oct.cells:
-                w = fker(pr[pi],cell) 
-                weights.append(w)
-        norm = weights.sum()
-        for w, oct in zip(weights, octs):
-            for cell in oct.cells:
-                fopt(pd[pi], w/norm, oct.index, data_in, data_out)
-"""


https://bitbucket.org/yt_analysis/yt-3.0/commits/03eb8bf54a6d/
Changeset:   03eb8bf54a6d
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-21 22:59:43
Summary:     Fixing std
Affected #:  1 file

diff -r 648cf68c5c16115802f3722a541147dfbf8df23a -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -114,7 +114,7 @@
     cdef public object ocount
     def initialize(self):
         # Create a numpy array accessible to python
-        self.ocount = np.zeros(self.nvals, dtype="float64")
+        self.ocount = np.zeros(self.nvals, dtype="int64")
         cdef np.ndarray arr = self.ocount
         # alias the C-view for use in cython
         self.count = <np.int64_t*> arr.data
@@ -176,9 +176,18 @@
     cdef np.float64_t *count # float, for ease
     cdef public object ocount
     def initialize(self):
-        self.osum = np.zeros(self.nvals, dtype="float64")
-        cdef np.ndarray arr = self.osum
-        self.sum = <np.float64_t*> arr.data
+        # we do this in a single pass, but need two scalar
+        # per cell, M_k, and Q_k and also the number of particles
+        # deposited into each one
+        self.omk= np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray omkarr= self.omk
+        self.mk= <np.float64_t*> omkarr.data
+        self.oqk= np.zeros(self.nvals, dtype="float64")
+        cdef np.ndarray oqkarr= self.oqk
+        self.qk= <np.float64_t*> oqkarr.data
+        self.oi = np.zeros(self.nvals, dtype="int64")
+        cdef np.ndarray oiarr = self.oi
+        self.qk= <np.float64_t*> oiarr.data
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -189,12 +198,19 @@
                       np.float64_t *fields # any other fields we need
                       ):
         # here we do our thing; this is the kernel
-        cdef int ii[3], i
+        cdef int ii[3], i, cell_index
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
         #print "Depositing into", offset,
         #print gind(ii[0], ii[1], ii[2], dim)
-        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[i]
+        cell_index = gind(ii[0], ii[1], ii[2], dim) + offset
+        if self.mk[cell_index] == -1:
+            self.mk[cell_index] = fields[i]
+        else:
+            self.mk[cell_index] = self.mk[cell_index] + (fields[i] - self.mk[cell_index]) / k
+
+
+        if self.sum
         
     def finalize(self):
         return self.sum
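
The M_k / Q_k accumulators above appear to follow the single-pass (Welford-style)
recurrence from the notes linked in the previous changeset; a minimal pure-Python
sketch of that update, keeping one (k, M_k, Q_k) triple per cell, not yt code:

    def welford_update(k, mk, qk, x):
        # k: samples seen so far, mk: running mean, qk: running sum of squared
        # deviations; returns the updated triple after seeing x.
        k += 1
        delta = x - mk
        mk += delta / k
        qk += delta * (x - mk)
        return k, mk, qk

    k, mk, qk = 0, 0.0, 0.0
    for x in (1.0, 2.0, 4.0):
        k, mk, qk = welford_update(k, mk, qk, x)
    variance = qk / k              # population variance; std is its square root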


https://bitbucket.org/yt_analysis/yt-3.0/commits/c8e92b6c8878/
Changeset:   c8e92b6c8878
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-21 23:40:17
Summary:     Forgot to commit after the merge; the merge is now mixed in with
particle deposit changes and the spatial chunk update to NMSU ART
Affected #:  12 files

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -249,7 +249,13 @@
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     mask = self._current_chunk.objs[0].select(self.selector)
                     if mask is None: continue
-                    data = self[field][mask]
+                    data = self[field]
+                    if len(data.shape) == 4:
+                        # This is how we keep it consistent between oct ordering
+                        # and grid ordering.
+                        data = data.T[mask.T]
+                    else:
+                        data = data[mask]
                     rv[ind:ind+data.size] = data
                     ind += data.size
         else:

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -286,6 +286,9 @@
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
+    def deposit(self, *args, **kwargs):
+        return np.random.random((self.nd, self.nd, self.nd))
+
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -66,6 +66,16 @@
         self._current_particle_type = 'all'
         self._current_fluid_type = self.pf.default_fluid_type
 
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dx":
+            return self._current_chunk.fwidth[:,0]
+        elif field == "dy":
+            return self._current_chunk.fwidth[:,1]
+        elif field == "dz":
+            return self._current_chunk.fwidth[:,2]
+
     def select_icoords(self, dobj):
         return self.oct_handler.icoords(self.domain.domain_id, self.mask,
                                         self.cell_count,
@@ -109,8 +119,7 @@
     def _reshape_vals(self, arr):
         nz = self._num_zones + 2*self._num_ghost_zones
         n_oct = arr.shape[0] / (nz**3.0)
-        arr.shape = (n_oct, nz, nz, nz)
-        arr = np.rollaxis(arr, 0, 4)
+        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
         return arr
 
     _domain_ind = None
@@ -130,7 +139,8 @@
         nvals = (self.domain_ind >= 0).sum() * 8
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields)
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+                          self.domain.domain_id)
         vals = op.finalize()
         return self._reshape_vals(vals)
 

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -96,7 +96,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ires.size, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -173,8 +173,16 @@
         # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         """

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -96,7 +96,7 @@
         total_particles = sum(sum(d.total_particles.values())
                               for d in self.domains)
         self.oct_handler = ParticleOctreeContainer(
-            self.parameter_file.domain_dimensions,
+            self.parameter_file.domain_dimensions/2,
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         self.oct_handler.n_ref = 64

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/geometry/fake_octree.pyx
--- a/yt/geometry/fake_octree.pyx
+++ b/yt/geometry/fake_octree.pyx
@@ -67,7 +67,7 @@
                     long cur_leaf, long cur_level, 
                     long max_noct, long max_level, float fsubdivide,
                     np.ndarray[np.uint8_t, ndim=2] mask):
-    print "child", parent.ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
+    print "child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
     cdef int ddr[3]
     cdef long i,j,k
     cdef float rf #random float from 0-1

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -30,8 +30,12 @@
 
 cdef struct Oct
 cdef struct Oct:
-    np.int64_t ind          # index
-    np.int64_t local_ind
+    np.int64_t file_ind     # index with respect to the order in which it was
+                            # added
+    np.int64_t domain_ind   # index within the global set of domains
+                            # note that moving to a local index will require
+                            # moving to split-up masks, which is part of a
+                            # bigger refactor
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
@@ -61,13 +65,9 @@
     cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
-
-cdef class ARTIOOctreeContainer(OctreeContainer):
-    cdef OctAllocationContainer **domains
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3])
-    cdef Oct *next_free_oct( self, int curdom )
-    cdef int valid_domain_oct(self, int curdom, Oct *parent)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, int curlevel, double pp[3])
+    # This function must return the offset from global-to-local domains; i.e.,
+    # OctAllocationContainer.offset if such a thing exists.
+    cdef np.int64_t get_domain_offset(self, int domain_id)
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -56,8 +56,8 @@
     for n in range(n_octs):
         oct = &n_cont.my_octs[n]
         oct.parent = NULL
-        oct.ind = oct.domain = -1
-        oct.local_ind = n + n_cont.offset
+        oct.file_ind = oct.domain = -1
+        oct.domain_ind = n + n_cont.offset
         oct.level = -1
         for i in range(2):
             for j in range(2):
@@ -130,7 +130,7 @@
         while cur != NULL:
             for i in range(cur.n_assigned):
                 this = &cur.my_octs[i]
-                yield (this.ind, this.local_ind, this.domain)
+                yield (this.file_ind, this.domain_ind, this.domain)
             cur = cur.next
 
     cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
@@ -139,6 +139,9 @@
             size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
             corner[i] = o.pos[i] * size[i] + self.DLE[i]
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return 0
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -153,8 +156,10 @@
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
-        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        while cur.children[0][0][0] != NULL:
+        next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        # We want to stop recursing when there's nowhere else to go
+        while next != NULL:
+            cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
                 if cp[i] > ppos[i]:
@@ -163,11 +168,19 @@
                 else:
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
-            cur = cur.children[ind[0]][ind[1]][ind[2]]
+            next = cur.children[ind[0]][ind[1]][ind[2]]
         if oinfo == NULL: return cur
         for i in range(3):
+            # This will happen *after* we quit out, so we need to back out the
+            # last change to cp
+            if ind[i] == 1:
+                cp[i] -= dds[i]/2.0 # Now centered
+            else:
+                cp[i] += dds[i]/2.0
+            # We don't need to change dds[i] as it has been halved from the
+            # oct width, thus making it already the cell width
             oinfo.dds[i] = dds[i] # Cell width
-            oinfo.left_edge[i] = cp[i] - dds[i]
+            oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
         return cur
 
     @cython.boundscheck(False)
@@ -189,7 +202,7 @@
                 cur = cur.next
             o = &cur.my_octs[oi - cur.offset]
             for i in range(8):
-                count[o.domain - 1] += mask[o.local_ind,i]
+                count[o.domain - 1] += mask[o.domain_ind,i]
         return count
 
     @cython.boundscheck(True)
@@ -222,7 +235,7 @@
                     for k in range(2):
                         if o.children[i][j][k] == NULL:
                             ii = ((k*2)+j)*2+i
-                            count[o.domain - 1] += mask[o.local_ind,ii]
+                            count[o.domain - 1] += mask[o.domain_ind,ii]
         return count
 
     @cython.boundscheck(False)
@@ -315,330 +328,11 @@
                 bounds[i, 3+ii] = size[ii]
         return bounds
 
-cdef class ARTIOOctreeContainer(OctreeContainer):
+cdef class RAMSESOctreeContainer(OctreeContainer):
 
-    def allocate_domains(self, domain_counts):
-        cdef int count, i
-        cdef OctAllocationContainer *cur = self.cont
-        assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
-        self.domains = <OctAllocationContainer **> malloc(
-            sizeof(OctAllocationContainer *) * len(domain_counts))
-        for i, count in enumerate(domain_counts):
-            cur = allocate_octs(count, cur)
-            if self.cont == NULL: self.cont = cur
-            self.domains[i] = cur
-        
-    def __dealloc__(self):
-        # This gets called BEFORE the superclass deallocation.  But, both get
-        # called.
-        if self.domains != NULL: free(self.domains)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count(self, np.ndarray[np.uint8_t, ndim=1, cast=True] mask,
-                     split = False):
-        cdef int n = mask.shape[0]
-        cdef int i, dom
-        cdef OctAllocationContainer *cur
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # This is the idiom for iterating over many containers.
-        cur = self.cont
-        for i in range(n):
-            if i - cur.offset >= cur.n: cur = cur.next
-            if mask[i] == 1:
-                count[cur.my_octs[i - cur.offset].domain - 1] += 1
-        return count
-
-    def check(self, int curdom):
-        cdef int dind, pi
-        cdef Oct oct
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        cdef int nbad = 0
-        for pi in range(cont.n_assigned):
-            oct = cont.my_octs[pi]
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if oct.children[i][j][k] != NULL and \
-                           oct.children[i][j][k].level != oct.level + 1:
-                            if curdom == 61:
-                                print pi, oct.children[i][j][k].level,
-                                print oct.level
-                            nbad += 1
-        print "DOMAIN % 3i HAS % 9i BAD OCTS (%s / %s / %s)" % (curdom, nbad, 
-            cont.n - cont.n_assigned, cont.n_assigned, cont.n)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *next_free_oct( self, int curdom ) :
-        cdef OctAllocationContainer *cont
-        cdef Oct *next_oct
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            print "Error, invalid domain or unallocated domains"
-            raise RuntimeError
-        
-        cont = self.domains[curdom - 1]
-        if cont.n_assigned >= cont.n :
-            print "Error, ran out of octs in domain curdom"
-            raise RuntimeError
-
-        self.nocts += 1
-        next_oct = &cont.my_octs[cont.n_assigned]
-        cont.n_assigned += 1
-        return next_oct
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int valid_domain_oct(self, int curdom, Oct *parent) :
-        cdef OctAllocationContainer *cont
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            raise RuntimeError
-        cont = self.domains[curdom - 1]
-
-        if parent == NULL or parent < &cont.my_octs[0] or \
-                parent > &cont.my_octs[cont.n_assigned] :
-            return 0
-        else :
-            return 1
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3]):
-        cdef np.int64_t ind[3]
-        cdef np.float64_t dds
-        cdef int i
-        for i in range(3):
-            dds = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> floor((ppos[i]-self.DLE[i])/dds)
-        return self.root_mesh[ind[0]][ind[1]][ind[2]]
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, 
-                    int curlevel, np.float64_t pp[3]):
-
-        cdef int level, i, ind[3]
-        cdef Oct *cur, *next_oct
-        cdef np.int64_t pos[3]
-        cdef np.float64_t dds
-
-        if curlevel < 0 :
-            raise RuntimeError
-        for i in range(3):
-            if pp[i] < self.DLE[i] or pp[i] > self.DRE[i] :
-                raise RuntimeError
-            dds = (self.DRE[i] - self.DLE[i])/(<np.int64_t>self.nn[i])
-            pos[i] = <np.int64_t> floor((pp[i]-self.DLE[i])*<np.float64_t>(1<<curlevel)/dds)
-
-        if curlevel == 0 :
-            cur = NULL
-        elif parent == NULL :
-            cur = self.get_root_oct(pp)
-            assert( cur != NULL )
-
-            # Now we find the location we want
-            for level in range(1,curlevel):
-                # At every level, find the cell this oct lives inside
-                for i in range(3) :
-                    if pos[i] < (2*cur.pos[i]+1)<<(curlevel-level) :
-                        ind[i] = 0
-                    else :
-                        ind[i] = 1
-                cur = cur.children[ind[0]][ind[1]][ind[2]]
-                if cur == NULL:
-                    # in ART we don't allocate down to curlevel 
-                    # if parent doesn't exist
-                    print "Error, no oct exists at that level"
-                    raise RuntimeError
-        else :
-            if not self.valid_domain_oct(curdom,parent) or \
-                    parent.level != curlevel - 1:
-                raise RuntimeError
-            cur = parent
- 
-        next_oct = self.next_free_oct( curdom )
-        if cur == NULL :
-            self.root_mesh[pos[0]][pos[1]][pos[2]] = next_oct
-        else :
-            for i in range(3) :
-                if pos[i] < 2*cur.pos[i]+1 :
-                    ind[i] = 0
-                else :
-                    ind[i] = 1
-            if cur.level != curlevel - 1 or  \
-                    cur.children[ind[0]][ind[1]][ind[2]] != NULL :
-                print "Error in add_oct: child already filled!"
-                raise RuntimeError
-
-            cur.children[ind[0]][ind[1]][ind[2]] = next_oct
-        for i in range(3) :
-            next_oct.pos[i] = pos[i]
-        next_oct.domain = curdom
-        next_oct.parent = cur
-        next_oct.ind = 1
-        next_oct.level = curlevel
-        return next_oct
-
-    # ii:mask/art ; ci=ramses loop backward (k<-fast, j ,i<-slow) 
-    # ii=0 000 art 000 ci 000 
-    # ii=1 100 art 100 ci 001 
-    # ii=2 010 art 010 ci 010 
-    # ii=3 110 art 110 ci 011
-    # ii=4 001 art 001 ci 100
-    # ii=5 101 art 011 ci 101
-    # ii=6 011 art 011 ci 110
-    # ii=7 111 art 111 ci 111
-    # keep coords ints so multiply by pow(2,1) when increasing level.
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii, level
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        ci=0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        coords[ci, 0] = (o.pos[0] << 1) + i
-                        coords[ci, 1] = (o.pos[1] << 1) + j
-                        coords[ci, 2] = (o.pos[2] << 1) + k
-                        ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] levels
-        levels = np.empty(cell_count, dtype="int64")
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[oi + cur.offset, i] == 0: continue
-                levels[ci] = o.level
-                ci +=1
-        return levels
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.float64_t pos[3]
-        cdef np.float64_t base_dx[3], dx[3]
-        n = mask.shape[0]
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        ci =0 
-        for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci +=1 
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_mask(self, int domain, dest_fields, source_fields,
-                   np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
-        cdef np.ndarray[np.float32_t, ndim=1] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
-        cdef int n
-        cdef int i, j, k, ii
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        for key in dest_fields:
-            local_filled = 0
-            dest = dest_fields[key]
-            source = source_fields[key]
-            # snl: an alternative to filling level 0 yt-octs is to produce a 
-            # mapping between the mask and the source read order
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                for k in range(2):
-                    for j in range(2):
-                        for i in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.local_ind*8+ii]
-                            # print 'oct_container.pyx:sourcemasked',o.level,local_filled, o.local_ind*8+ii, source[o.local_ind*8+ii]
-                            local_filled += 1
-        return local_filled
-
-cdef class RAMSESOctreeContainer(OctreeContainer):
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        return cont.offset
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
@@ -707,7 +401,7 @@
 
     def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
                    int domain_id):
-        cdef np.int64_t i, oi, n, 
+        cdef np.int64_t i, oi, n,  use
         cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
         cdef Oct *o
         cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
@@ -715,8 +409,9 @@
         n = mask.shape[0]
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
+            use = 0
             for i in range(8):
-                m2[o.local_ind, i] = mask[o.local_ind, i]
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
         return m2 # NOTE: This is uint8_t
 
     def domain_mask(self,
@@ -738,7 +433,7 @@
             o = &cur.my_octs[oi]
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             nm += use
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, nm), 'uint8')
@@ -750,11 +445,30 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         use = m2[i, j, k, nm] = 1
             nm += use
         return m2.astype("bool")
 
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n, 'int64') - 1
+        nm = 0
+        for oi in range(cur.n):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            if use == 1:
+                ind[o.domain_ind - cur.offset] = nm
+            nm += use
+        return ind
+
     def check(self, int curdom, int print_all = 0):
         cdef int dind, pi
         cdef Oct oct
@@ -783,6 +497,33 @@
         print "DOMAIN % 3i HAS % 9i MISSED OCTS" % (curdom, nmissed)
         print "DOMAIN % 3i HAS % 9i UNASSIGNED OCTS" % (curdom, unassigned)
 
+    def check_refinement(self, int curdom):
+        cdef int pi, i, j, k, some_refined, some_unrefined
+        cdef Oct *oct
+        cdef int bad = 0
+        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
+        for pi in range(cont.n_assigned):
+            oct = &cont.my_octs[pi]
+            some_unrefined = 0
+            some_refined = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if oct.children[i][j][k] == NULL:
+                            some_unrefined = 1
+                        else:
+                            some_refined = 1
+            if some_unrefined == some_refined == 1:
+                #print "BAD", oct.file_ind, oct.domain_ind
+                bad += 1
+                if curdom == 10 or curdom == 72:
+                    for i in range(2):
+                        for j in range(2):
+                            for k in range(2):
+                                print (oct.children[i][j][k] == NULL),
+                    print
+        print "BAD TOTAL", curdom, bad, cont.n_assigned
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -831,7 +572,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             if local == 1:
-                cur.ind = p
+                cur.file_ind = p
             cur.level = curlevel
         return cont.n_assigned - initial
 
@@ -849,18 +590,18 @@
         n = mask.shape[0]
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="int64")
+        ci = 0
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = (o.pos[0] << 1) + i
                         coords[ci, 1] = (o.pos[1] << 1) + j
                         coords[ci, 2] = (o.pos[2] << 1) + k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
 
     @cython.boundscheck(False)
@@ -882,9 +623,8 @@
             o = &cur.my_octs[oi]
             for i in range(8):
                 if mask[oi + cur.offset, i] == 0: continue
-                ci = level_counts[o.level]
                 levels[ci] = o.level
-                level_counts[o.level] += 1
+                ci += 1
         return levels
 
     @cython.boundscheck(False)
@@ -900,7 +640,7 @@
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -925,6 +665,7 @@
             # position.  Note that the positions will also all be offset by
             # dx/2.0.  This is also for *oct grids*, not cells.
             base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+        ci = 0
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
             for i in range(3):
@@ -938,12 +679,11 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = pos[0] + dx[0] * i
                         coords[ci, 1] = pos[1] + dx[1] * j
                         coords[ci, 2] = pos[2] + dx[2] * k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
 
     @cython.boundscheck(False)
@@ -965,18 +705,15 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                if o.level != level: continue
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.ind, ii]
-                            local_filled += 1
+                for ii in range(8):
+                    # We iterate and check here to keep our counts consistent
+                    # when filling different levels.
+                    if mask[o.domain_ind, ii] == 0: continue
+                    if o.level == level: 
+                        dest[local_filled] = source[o.file_ind, ii]
+                    local_filled += 1
         return local_filled
 
-
-
 cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
     @cython.boundscheck(True)
@@ -1002,7 +739,7 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                index = o.ind-subchunk_offset
+                index = o.file_ind-subchunk_offset
                 if o.level != level: continue
                 if index < 0: continue
                 if index >= subchunk_max: 
@@ -1013,7 +750,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             dest[local_filled + offset] = \
                                 source[index,ii]
                             local_filled += 1
@@ -1053,7 +790,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             ox = (o.pos[0] << 1) + i
                             oy = (o.pos[1] << 1) + j
                             oz = (o.pos[2] << 1) + k
@@ -1128,6 +865,16 @@
                 free(o.sd.pos)
         free(o)
 
+    def __iter__(self):
+        #Get the next oct, will traverse domains
+        #Note that oct containers can be sorted 
+        #so that consecutive octs are on the same domain
+        cdef int oi
+        cdef Oct *o
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            yield (o.file_ind, o.domain_ind, o.domain)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -1257,8 +1004,8 @@
         self.dom_offsets[0] = 0
         dom_ind = 0
         for i in range(self.nocts):
-            self.oct_list[i].local_ind = i
-            self.oct_list[i].ind = dom_ind
+            self.oct_list[i].domain_ind = i
+            self.oct_list[i].file_ind = dom_ind
             dom_ind += 1
             if self.oct_list[i].domain > cur_dom:
                 cur_dom = self.oct_list[i].domain
@@ -1266,6 +1013,9 @@
                 dom_ind = 0
         self.dom_offsets[cur_dom + 2] = self.nocts
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return self.dom_offsets[domain_id + 1]
+
     cdef Oct* allocate_oct(self):
         #Allocate the memory, set to NULL or -1
         #We reserve space for n_ref particles, but keep
@@ -1275,8 +1025,8 @@
         cdef ParticleArrays *sd = <ParticleArrays*> \
             malloc(sizeof(ParticleArrays))
         cdef int i, j, k
-        my_oct.ind = my_oct.domain = -1
-        my_oct.local_ind = self.nocts - 1
+        my_oct.file_ind = my_oct.domain = -1
+        my_oct.domain_ind = self.nocts - 1
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
         my_oct.sd = sd
@@ -1327,7 +1077,7 @@
         for oi in range(ndo):
             o = self.oct_list[oi + doff]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -1514,7 +1264,7 @@
 
     def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
                    int domain_id):
-        cdef np.int64_t i, oi, n, 
+        cdef np.int64_t i, oi, n, use
         cdef Oct *o
         cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
                 np.zeros((mask.shape[0], 8), 'uint8')
@@ -1522,8 +1272,9 @@
         for oi in range(n):
             o = self.oct_list[oi]
             if o.domain != domain_id: continue
+            use = 0
             for i in range(8):
-                m2[o.local_ind, i] = mask[o.local_ind, i]
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
         return m2
 
     def domain_mask(self,
@@ -1546,7 +1297,7 @@
             if o.domain != domain_id: continue
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             nm += use
         cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
                 np.zeros((2, 2, 2, nm), 'uint8')
@@ -1559,7 +1310,7 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
+                        if mask[o.domain_ind, ii] == 0: continue
                         use = m2[i, j, k, nm] = 1
             nm += use
         return m2.astype("bool")
@@ -1571,7 +1322,7 @@
         # Here we once again do something similar to the other functions.  We
         # need a set of indices into the final reduced, masked values.  The
         # indices will be domain.n long, and will be of type int64.  This way,
-        # we can get the Oct through a .get() call, then use Oct.ind as an
+        # we can get the Oct through a .get() call, then use Oct.file_ind as an
         # index into this newly created array, then finally use the returned
         # index into the domain subset array for deposition.
         cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
@@ -1586,7 +1337,7 @@
             o = self.oct_list[oi + offset]
             use = 0
             for i in range(8):
-                if mask[o.local_ind, i] == 1: use = 1
+                if mask[o.domain_ind, i] == 1: use = 1
             if use == 1:
                 ind[oi] = nm
             nm += use
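
The new domain_ind() methods above compact each domain's oct list down to just the octs touched by the selector mask: selected octs receive consecutive indices, everything else gets -1. A toy numpy sketch of that mapping (the mask values are invented, not from a real dataset):

    import numpy as np

    # One row of 8 cell flags per oct in this domain (invented mask).
    mask = np.array([[1, 0, 0, 0, 0, 0, 0, 0],    # oct 0: selected
                     [0, 0, 0, 0, 0, 0, 0, 0],    # oct 1: not selected
                     [0, 1, 1, 0, 0, 0, 0, 0]],   # oct 2: selected
                    dtype="uint8")
    ind = np.zeros(mask.shape[0], dtype="int64") - 1
    nm = 0
    for oi in range(mask.shape[0]):
        use = 1 if mask[oi].any() else 0
        if use == 1:
            ind[oi] = nm
        nm += use
    print ind    # [ 0 -1  1] -- one entry per oct in the domain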

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -54,7 +54,7 @@
         Returns (in code units) the smallest cell size in the simulation.
         """
         return (self.parameter_file.domain_width /
-                (2**self.max_level)).min()
+                (2**(self.max_level+1))).min()
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -47,7 +47,7 @@
     def process_octree(self, OctreeContainer octree,
                      np.ndarray[np.int64_t, ndim=1] dom_ind,
                      np.ndarray[np.float64_t, ndim=2] positions,
-                     fields = None):
+                     fields = None, int domain_id = -1):
         cdef int nf, i, j
         if fields is None:
             fields = []
@@ -62,8 +62,9 @@
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = 2
         cdef OctInfo oi
-        cdef np.int64_t offset
+        cdef np.int64_t offset, moff
         cdef Oct *oct
+        moff = octree.get_domain_offset(domain_id)
         for i in range(positions.shape[0]):
             # We should check if particle remains inside the Oct here
             for j in range(nf):
@@ -71,8 +72,14 @@
             for j in range(3):
                 pos[j] = positions[i, j]
             oct = octree.get(pos, &oi)
-            #print oct.local_ind, oct.pos[0], oct.pos[1], oct.pos[2]
-            offset = dom_ind[oct.ind]
+            # This next line is unfortunate.  Basically it says, sometimes we
+            # might have particles that belong to octs outside our domain.
+            if oct.domain != domain_id: continue
+            #print domain_id, oct.local_ind, oct.ind, oct.domain, oct.pos[0], oct.pos[1], oct.pos[2]
+            # Note that this has to be our local index, not our in-file index.
+            offset = dom_ind[oct.domain_ind - moff] * 8
+            if offset < 0: continue
+            # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
                          offset, pos, field_vals)
         
@@ -110,7 +117,7 @@
         raise NotImplementedError
 
 cdef class CountParticles(ParticleDepositOperation):
-    cdef np.float64_t *count # float, for ease
+    cdef np.int64_t *count # float, for ease
     cdef public object ocount
     def initialize(self):
         # Create a numpy array accessible to python
@@ -131,18 +138,16 @@
         cdef int ii[3], i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        #print "Depositing into", offset,
-        #print gind(ii[0], ii[1], ii[2], dim)
         self.count[gind(ii[0], ii[1], ii[2], dim) + offset] += 1
         
     def finalize(self):
-        return self.ocount
+        return self.ocount.astype('f8')
 
 deposit_count = CountParticles
 
 cdef class SumParticleField(ParticleDepositOperation):
-    cdef np.float64_t *count # float, for ease
-    cdef public object ocount
+    cdef np.float64_t *sum
+    cdef public object osum
     def initialize(self):
         self.osum = np.zeros(self.nvals, dtype="float64")
         cdef np.ndarray arr = self.osum
@@ -152,20 +157,17 @@
     cdef void process(self, int dim[3],
                       np.float64_t left_edge[3], 
                       np.float64_t dds[3],
-                      np.int64_t offset, # offset into IO field
-                      np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields # any other fields we need
+                      np.int64_t offset, 
+                      np.float64_t ppos[3],
+                      np.float64_t *fields 
                       ):
-        # here we do our thing; this is the kernel
         cdef int ii[3], i
         for i in range(3):
-            ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        #print "Depositing into", offset,
-        #print gind(ii[0], ii[1], ii[2], dim)
-        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[i]
+            ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
+        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0]
         
     def finalize(self):
-        return self.sum
+        return self.osum
 
 deposit_sum = SumParticleField
 
@@ -173,47 +175,56 @@
     # Thanks to Britton and MJ Turk for the link
     # to a single-pass STD
     # http://www.cs.berkeley.edu/~mhoemmen/cs194/Tutorials/variance.pdf
-    cdef np.float64_t *count # float, for ease
-    cdef public object ocount
+    cdef np.float64_t *mk
+    cdef np.float64_t *qk
+    cdef np.float64_t *i
+    cdef public object omk
+    cdef public object oqk
+    cdef public object oi
     def initialize(self):
         # we do this in a single pass, but need two scalar
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
+        # the M_k term
         self.omk= np.zeros(self.nvals, dtype="float64")
         cdef np.ndarray omkarr= self.omk
         self.mk= <np.float64_t*> omkarr.data
+        # the Q_k term
         self.oqk= np.zeros(self.nvals, dtype="float64")
         cdef np.ndarray oqkarr= self.oqk
         self.qk= <np.float64_t*> oqkarr.data
+        # particle count
         self.oi = np.zeros(self.nvals, dtype="int64")
         cdef np.ndarray oiarr = self.oi
-        self.qk= <np.float64_t*> oiarr.data
+        self.i = <np.float64_t*> oiarr.data
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
                       np.float64_t left_edge[3], 
                       np.float64_t dds[3],
-                      np.int64_t offset, # offset into IO field
-                      np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields # any other fields we need
+                      np.int64_t offset,
+                      np.float64_t ppos[3],
+                      np.float64_t *fields
                       ):
-        # here we do our thing; this is the kernel
         cdef int ii[3], i, cell_index
+        cdef float k
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        #print "Depositing into", offset,
-        #print gind(ii[0], ii[1], ii[2], dim)
         cell_index = gind(ii[0], ii[1], ii[2], dim) + offset
-        if self.mk[cell_index] == -1:
-            self.mk[cell_index] = fields[i]
+        k = <float> self.i[cell_index]
+        if self.i[cell_index] == 0:
+            # Initialize cell values
+            self.mk[cell_index] = fields[0]
         else:
-            self.mk[cell_index] = self.mk[cell_index] + (fields[i] - self.mk[cell_index]) / k
-
-
-        if self.sum
+            self.mk[cell_index] = self.mk[cell_index] + \
+                                  (fields[0] - self.mk[cell_index]) / k
+            self.qk[cell_index] = self.qk[cell_index] + \
+                                  (k - 1.0) * (fields[0] - 
+                                             self.mk[cell_index]) ** 2.0 / k
+        self.qk[cell_index] += 1
         
     def finalize(self):
         return self.sum
 
-deposit_sum = SumParticleField
+deposit_std = StdParticleField
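
The process_octree() changes above now skip particles whose oct belongs to another domain and index the output buffer through the compacted domain_ind mapping shifted by get_domain_offset(). A toy sketch of that offset arithmetic (all values invented):

    import numpy as np

    moff = 10                                       # get_domain_offset(domain_id)
    dom_ind = np.array([0, -1, 1], dtype="int64")   # from domain_ind(): -1 = masked out
    oct_domain_ind = 12                             # domain_ind slot of some oct we hit
    offset = dom_ind[oct_domain_ind - moff] * 8
    print offset    # 8 -> this oct's 8 cells start at slot 8 of the deposit buffer;
                    # a negative offset means the oct was masked out and is skipped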
 

diff -r 03eb8bf54a6dcfc7a951aa50f20d2aec30d56b32 -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -201,7 +201,7 @@
             this_level = 0
         if res == 0:
             for i in range(8):
-                mask[root.local_ind,i] = 0
+                mask[root.domain_ind,i] = 0
             # If this level *is* being selected (i.e., no early termination)
             # then we know no child zones will be selected.
             if this_level == 1:
@@ -217,11 +217,11 @@
                     ii = ((k*2)+j)*2+i
                     ch = root.children[i][j][k]
                     if next_level == 1 and ch != NULL:
-                        mask[root.local_ind, ii] = 0
+                        mask[root.domain_ind, ii] = 0
                         self.recursively_select_octs(
                             ch, spos, sdds, mask, level + 1)
                     elif this_level == 1:
-                        mask[root.local_ind, ii] = \
+                        mask[root.domain_ind, ii] = \
                             self.select_cell(spos, sdds, eterm)
                     spos[2] += sdds[2]
                 spos[1] += sdds[1]


https://bitbucket.org/yt_analysis/yt-3.0/commits/de8847abae8a/
Changeset:   de8847abae8a
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-22 00:09:16
Summary:     ensure returned datatypes are f64
Affected #:  1 file

diff -r c8e92b6c8878a74f03c39f26149b55f1d7af8aed -r de8847abae8a3bca439c47b80e5dc5fb812796fa yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -140,7 +140,7 @@
                     temp[-nstars:] = data
                     tr[field] = temp
                     del data
-                tr[field] = tr[field][mask]
+                tr[field] = tr[field][mask].astype('f8')
                 ftype_old = ftype
                 fields_read.append(field)
         if tr == {}:


https://bitbucket.org/yt_analysis/yt-3.0/commits/13b5ec11dfb7/
Changeset:   13b5ec11dfb7
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-22 00:35:11
Summary:     std deposit works (I think?)
Affected #:  1 file
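
For reference, the reworked StdParticleField kernel below follows the single-pass mean/variance recurrence from the variance notes linked earlier in this series (up to the exact indexing convention used for the running count); with x_k the k-th value deposited into a cell:

    M_k   = M_{k-1} + (x_k - M_{k-1}) / k
    Q_k   = Q_{k-1} + (k - 1) * (x_k - M_{k-1})^2 / k
    Var   = Q_n / n

A later changeset in this series returns sqrt(Q_n / n) as the deposited standard deviation.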

diff -r de8847abae8a3bca439c47b80e5dc5fb812796fa -r 13b5ec11dfb7cf67ad05788dc0e808d1a9970c2e yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -194,7 +194,7 @@
         cdef np.ndarray oqkarr= self.oqk
         self.qk= <np.float64_t*> oqkarr.data
         # particle count
-        self.oi = np.zeros(self.nvals, dtype="int64")
+        self.oi = np.zeros(self.nvals, dtype="float64")
         cdef np.ndarray oiarr = self.oi
         self.i = <np.float64_t*> oiarr.data
 
@@ -207,24 +207,28 @@
                       np.float64_t *fields
                       ):
         cdef int ii[3], i, cell_index
-        cdef float k
+        cdef float k, mk, qk
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
         cell_index = gind(ii[0], ii[1], ii[2], dim) + offset
-        k = <float> self.i[cell_index]
-        if self.i[cell_index] == 0:
+        k = self.i[cell_index] 
+        mk = self.mk[cell_index]
+        qk = self.qk[cell_index] 
+        #print k, mk, qk, cell_index
+        if k == 0.0:
             # Initialize cell values
             self.mk[cell_index] = fields[0]
         else:
-            self.mk[cell_index] = self.mk[cell_index] + \
-                                  (fields[0] - self.mk[cell_index]) / k
-            self.qk[cell_index] = self.qk[cell_index] + \
-                                  (k - 1.0) * (fields[0] - 
-                                             self.mk[cell_index]) ** 2.0 / k
-        self.qk[cell_index] += 1
+            self.mk[cell_index] = mk + (fields[0] - mk) / k
+            self.qk[cell_index] = qk + (k - 1.0) * (fields[0] - mk)**2.0 / k
+        self.i[cell_index] += 1
         
     def finalize(self):
-        return self.sum
+        # This is the standard variance
+        # if we want sample variance divide by (self.oi - 1.0)
+        std = self.oqk / self.oi
+        std[~np.isfinite(std)] = 0.0
+        return std
 
 deposit_std = StdParticleField
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/d38104697610/
Changeset:   d38104697610
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-22 01:59:02
Summary:     added conversion factors for gas, particle velocities
Affected #:  1 file

diff -r 13b5ec11dfb7cf67ad05788dc0e808d1a9970c2e -r d38104697610551cd1ac189704a3c572d618912b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -324,7 +324,8 @@
         self.conversion_factors = cf
 
         for ax in 'xyz':
-            self.conversion_factors["%s-velocity" % ax] = 1.0
+            self.conversion_factors["%s-velocity" % ax] = cf["Velocity"]
+            self.conversion_factors["particle_velocity_%s" % ax] = cf["Velocity"]
         for pt in particle_fields:
             if pt not in self.conversion_factors.keys():
                 self.conversion_factors[pt] = 1.0
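
With this changeset the velocity conversions come from cf["Velocity"] rather than 1.0; since convert() in the geometry handler simply indexes conversion_factors, the lookup path amounts to the following toy sketch (the factor value is invented):

    # Toy sketch of the conversion lookup; 1.0e5 is an invented factor.
    cf = {"Velocity": 1.0e5}
    conversion_factors = {}
    for ax in 'xyz':
        conversion_factors["%s-velocity" % ax] = cf["Velocity"]
        conversion_factors["particle_velocity_%s" % ax] = cf["Velocity"]

    def convert(unit):
        # mirrors convert(): parameter_file.conversion_factors[unit]
        return conversion_factors[unit]

    print convert("x-velocity")    # 1.0e5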


https://bitbucket.org/yt_analysis/yt-3.0/commits/55e98d3c0933/
Changeset:   55e98d3c0933
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-22 02:11:23
Summary:     adding particle velocity fields and their correct conversions
Affected #:  1 file

diff -r d38104697610551cd1ac189704a3c572d618912b -r 55e98d3c0933198e1d3cd04efd35f9bac6477800 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -49,19 +49,6 @@
     add_art_field(f, function=NullFunc, take_log=True,
                   validators=[ValidateDataField(f)])
 
-for f in particle_fields:
-    add_art_field(f, function=NullFunc, take_log=True,
-                  validators=[ValidateDataField(f)],
-                  particle_type=True)
-add_art_field("particle_mass", function=NullFunc, take_log=True,
-              validators=[ValidateDataField(f)],
-              particle_type=True,
-              convert_function=lambda x: x.convert("particle_mass"))
-add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
-              validators=[ValidateDataField(f)],
-              particle_type=True,
-              convert_function=lambda x: x.convert("particle_mass"))
-
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
@@ -213,6 +200,24 @@
 ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
 # Particle fields
+for f in particle_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)],
+                  particle_type=True)
+for ax in "xyz":
+    add_art_field("particle_velocity_%s" % ax, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)],
+                  particle_type=True,
+                  convert_function=lambda x: x.convert("particle_velocity_%s" % ax))
+add_art_field("particle_mass", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr


https://bitbucket.org/yt_analysis/yt-3.0/commits/440a76cf232d/
Changeset:   440a76cf232d
Branch:      yt-3.0
User:        juxtaposicion
Date:        2013-05-22 02:45:13
Summary:     return the sqrt of the variance
Affected #:  1 file

diff -r 55e98d3c0933198e1d3cd04efd35f9bac6477800 -r 440a76cf232df87a3e27304dfd8507f4f965d3f9 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -226,9 +226,9 @@
     def finalize(self):
         # This is the standard variance
         # if we want sample variance divide by (self.oi - 1.0)
-        std = self.oqk / self.oi
-        std[~np.isfinite(std)] = 0.0
-        return std
+        std2 = self.oqk / self.oi
+        std2[self.oi == 0.0] = 0.0
+        return np.sqrt(std2)
 
 deposit_std = StdParticleField
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/18deb7f7cfbb/
Changeset:   18deb7f7cfbb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-22 16:19:14
Summary:     Adding grid patch particle deposition.  Allowing failures for preloading data.

The preload issue comes about specifically when doing something like find_max
of a particle deposition field where the particle field relies on a
multi-component field like Coordinates.  I hope this will change over time once
we have proper vector field support baked in.
Affected #:  3 files
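
As context for the grid-patch deposition hook added below: deposit methods are resolved by name, so "count" maps to the deposit_count operation in yt.geometry.particle_deposit. A minimal sketch of that dispatch, assuming only what the diffs in this series show (the nvals argument and the elided process_* call are placeholders):

    import yt.geometry.particle_deposit as particle_deposit

    method = "count"
    cls = getattr(particle_deposit, "deposit_%s" % method, None)
    if cls is None:
        raise NotImplementedError("unknown deposition method %r" % method)
    op = cls(8)        # output size; AMRGridPatch.deposit passes ActiveDimensions.prod()
    op.initialize()
    # ... op.process_grid(grid, positions, fields) or
    #     op.process_octree(octree, dom_ind, positions, fields, domain_id) ...
    vals = op.finalize()    # for "count": float64 zeros until particles are processed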

diff -r 440a76cf232df87a3e27304dfd8507f4f965d3f9 -r 18deb7f7cfbb2f87b8ffe24b403b1e2a56f0cc17 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -60,7 +60,10 @@
         e = FieldDetector(flat = True)
         e.NumberOfParticles = 1
         fields = e.requested
-        self.func(e, *args, **kwargs)
+        try:
+            self.func(e, *args, **kwargs)
+        except:
+            mylog.error("Could not preload for quantity %s, IO speed may suffer", self.__name__)
         retvals = [ [] for i in range(self.n_ret)]
         chunks = self._data_source.chunks([], chunking_style="io")
         for ds in parallel_objects(chunks, -1):

diff -r 440a76cf232df87a3e27304dfd8507f4f965d3f9 -r 18deb7f7cfbb2f87b8ffe24b403b1e2a56f0cc17 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -233,6 +233,7 @@
         if pf is None:
             # required attrs
             pf = fake_parameter_file(lambda: 1)
+            pf["Massarr"] = np.ones(6)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.cosmological_simulation = 0.0
             pf.hubble_constant = 0.7

diff -r 440a76cf232df87a3e27304dfd8507f4f965d3f9 -r 18deb7f7cfbb2f87b8ffe24b403b1e2a56f0cc17 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -44,6 +44,7 @@
     NeedsProperty, \
     NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
+import yt.geometry.particle_deposit as particle_deposit
 
 class AMRGridPatch(YTSelectionContainer):
     _spatial = True
@@ -474,6 +475,17 @@
         dt, t = dobj.selector.get_dt(self)
         return dt, t
 
+    def deposit(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
     def select(self, selector):
         if id(selector) == self._last_selector_id:
             return self._last_mask


https://bitbucket.org/yt_analysis/yt-3.0/commits/b8521d5e0e89/
Changeset:   b8521d5e0e89
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-05-22 20:39:50
Summary:     Merging from octtraversal bookmark
Affected #:  25 files

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -31,6 +31,9 @@
 from grid_patch import \
     AMRGridPatch
 
+from octree_subset import \
+    OctreeSubset
+
 from static_output import \
     StaticOutput
 

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -249,7 +249,13 @@
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     mask = self._current_chunk.objs[0].select(self.selector)
                     if mask is None: continue
-                    data = self[field][mask]
+                    data = self[field]
+                    if len(data.shape) == 4:
+                        # This is how we keep it consistent between oct ordering
+                        # and grid ordering.
+                        data = data.T[mask.T]
+                    else:
+                        data = data[mask]
                     rv[ind:ind+data.size] = data
                     ind += data.size
         else:
@@ -513,6 +519,11 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
+    def deposit(self, positions, fields, op):
+        assert(self._current_chunk.chunk_type == "spatial")
+        fields = ensure_list(fields)
+        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
+
     @contextmanager
     def _field_lock(self):
         self._locked = True

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -60,7 +60,10 @@
         e = FieldDetector(flat = True)
         e.NumberOfParticles = 1
         fields = e.requested
-        self.func(e, *args, **kwargs)
+        try:
+            self.func(e, *args, **kwargs)
+        except:
+            mylog.error("Could not preload for quantity %s, IO speed may suffer", self.__name__)
         retvals = [ [] for i in range(self.n_ret)]
         chunks = self._data_source.chunks([], chunking_style="io")
         for ds in parallel_objects(chunks, -1):

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -233,6 +233,7 @@
         if pf is None:
             # required attrs
             pf = fake_parameter_file(lambda: 1)
+            pf["Massarr"] = np.ones(6)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.cosmological_simulation = 0.0
             pf.hubble_constant = 0.7
@@ -286,6 +287,9 @@
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
+    def deposit(self, *args, **kwargs):
+        return np.random.random((self.nd, self.nd, self.nd))
+
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -44,6 +44,7 @@
     NeedsProperty, \
     NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
+import yt.geometry.particle_deposit as particle_deposit
 
 class AMRGridPatch(YTSelectionContainer):
     _spatial = True
@@ -474,6 +475,17 @@
         dt, t = dobj.selector.get_dt(self)
         return dt, t
 
+    def deposit(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
     def select(self, selector):
         if id(selector) == self._last_selector_id:
             return self._last_mask

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/octree_subset.py
--- /dev/null
+++ b/yt/data_objects/octree_subset.py
@@ -0,0 +1,170 @@
+"""
+Subsets of octrees
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
+from .field_info_container import \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+import yt.geometry.particle_deposit as particle_deposit
+
+class OctreeSubset(YTSelectionContainer):
+    _spatial = True
+    _num_ghost_zones = 0
+    _num_zones = 2
+    _type_name = 'octree_subset'
+    _skip_add = True
+    _con_args = ('domain', 'mask', 'cell_count')
+    _container_fields = ("dx", "dy", "dz")
+
+    def __init__(self, domain, mask, cell_count):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.mask = mask
+        self.domain = domain
+        self.pf = domain.pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = domain.pf.h.oct_handler
+        self.cell_count = cell_count
+        level_counts = self.oct_handler.count_levels(
+            self.domain.pf.max_level, self.domain.domain_id, mask)
+        assert(level_counts.sum() == cell_count)
+        level_counts[1:] = level_counts[:-1]
+        level_counts[0] = 0
+        self.level_counts = np.add.accumulate(level_counts)
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dx":
+            return self._current_chunk.fwidth[:,0]
+        elif field == "dy":
+            return self._current_chunk.fwidth[:,1]
+        elif field == "dz":
+            return self._current_chunk.fwidth[:,2]
+
+    def select_icoords(self, dobj):
+        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fcoords(self, dobj):
+        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fwidth(self, dobj):
+        # Recall domain_dimensions is the number of cells, not octs
+        base_dx = (self.domain.pf.domain_width /
+                   self.domain.pf.domain_dimensions)
+        widths = np.empty((self.cell_count, 3), dtype="float64")
+        dds = (2**self.select_ires(dobj))
+        for i in range(3):
+            widths[:,i] = base_dx[i] / dds
+        return widths
+
+    def select_ires(self, dobj):
+        return self.oct_handler.ires(self.domain.domain_id, self.mask,
+                                     self.cell_count,
+                                     self.level_counts.copy())
+
+    def __getitem__(self, key):
+        tr = super(OctreeSubset, self).__getitem__(key)
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.pf._get_field_info(*fields[0])
+        if not finfo.particle_type:
+            # We may need to reshape the field, if it is being queried from
+            # field_data.  If it's already cached, it just passes through.
+            if len(tr.shape) < 4:
+                tr = self._reshape_vals(tr)
+            return tr
+        return tr
+
+    def _reshape_vals(self, arr):
+        nz = self._num_zones + 2*self._num_ghost_zones
+        n_oct = arr.shape[0] / (nz**3.0)
+        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        return arr
+
+    _domain_ind = None
+
+    @property
+    def domain_ind(self):
+        if self._domain_ind is None:
+            di = self.oct_handler.domain_ind(self.mask, self.domain.domain_id)
+            self._domain_ind = di
+        return self._domain_ind
+
+    def deposit(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nvals = (self.domain_ind >= 0).sum() * 8
+        op = cls(nvals) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+                          self.domain.domain_id)
+        vals = op.finalize()
+        return self._reshape_vals(vals)
+
+    def select(self, selector):
+        if id(selector) == self._last_selector_id:
+            return self._last_mask
+        self._last_mask = self.oct_handler.domain_mask(
+                self.mask, self.domain.domain_id)
+        if self._last_mask.sum() == 0: return None
+        self._last_selector_id = id(selector)
+        return self._last_mask
+
+    def count(self, selector):
+        if id(selector) == self._last_selector_id:
+            if self._last_mask is None: return 0
+            return self._last_mask.sum()
+        self.select(selector)
+        return self.count(selector)
+
+    def count_particles(self, selector, x, y, z):
+        # We don't cache the selector results
+        count = selector.count_points(x,y,z)
+        return count
+
+    def select_particles(self, selector, x, y, z):
+        mask = selector.select_points(x,y,z)
+        return mask
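
A standalone illustration of the Fortran-ordered reshape that _reshape_vals() performs above (toy shapes, not taken from a real dataset): with order="F" the first index varies fastest, so consecutive entries of the flat per-cell array walk through the 8 cells of one oct before moving on to the next oct.

    import numpy as np

    nz, n_oct = 2, 3                                  # 2 zones per side, 3 octs (toy values)
    flat = np.arange(nz**3 * n_oct, dtype="float64")  # 8 flat cell values per oct
    vals = flat.reshape((nz, nz, nz, n_oct), order="F")
    print vals[:, :, :, 0].ravel(order="F")           # [ 0. 1. 2. 3. 4. 5. 6. 7.]
    print vals[0, 0, 0, 1]                            # 8.0 -- first cell of the second oct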

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -59,6 +59,7 @@
     particle_types = ("all",)
     geometry = "cartesian"
     coordinates = None
+    max_level = 99
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -96,7 +96,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ires.size, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.data_objects.field_info_container import \
@@ -171,8 +173,16 @@
         # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         """
@@ -314,7 +324,8 @@
         self.conversion_factors = cf
 
         for ax in 'xyz':
-            self.conversion_factors["%s-velocity" % ax] = 1.0
+            self.conversion_factors["%s-velocity" % ax] = cf["Velocity"]
+            self.conversion_factors["particle_velocity_%s" % ax] = cf["Velocity"]
         for pt in particle_fields:
             if pt not in self.conversion_factors.keys():
                 self.conversion_factors[pt] = 1.0
@@ -433,43 +444,10 @@
                 return False
         return False
 
-
-class ARTDomainSubset(object):
+class ARTDomainSubset(OctreeSubset):
     def __init__(self, domain, mask, cell_count, domain_level):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
+        super(ARTDomainSubset, self).__init__(domain, mask, cell_count)
         self.domain_level = domain_level
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        base_dx = 1.0/self.domain.pf.domain_dimensions
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:, i] = base_dx[i] / dds
-        return widths
 
     def fill_root(self, content, ftfields):
         """

diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -49,19 +49,6 @@
     add_art_field(f, function=NullFunc, take_log=True,
                   validators=[ValidateDataField(f)])
 
-for f in particle_fields:
-    add_art_field(f, function=NullFunc, take_log=True,
-                  validators=[ValidateDataField(f)],
-                  particle_type=True)
-add_art_field("particle_mass", function=NullFunc, take_log=True,
-              validators=[ValidateDataField(f)],
-              particle_type=True,
-              convert_function=lambda x: x.convert("particle_mass"))
-add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
-              validators=[ValidateDataField(f)],
-              particle_type=True,
-              convert_function=lambda x: x.convert("particle_mass"))
-
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
@@ -213,6 +200,24 @@
 ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
 # Particle fields
+for f in particle_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)],
+                  particle_type=True)
+for ax in "xyz":
+    add_art_field("particle_velocity_%s" % ax, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)],
+                  particle_type=True,
+                  convert_function=lambda x: x.convert("particle_velocity_%s" % ax))
+add_art_field("particle_mass", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr

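One Python subtlety worth noting about the particle-velocity loop above: a lambda defined inside a for loop closes over the loop variable by reference, so by the time such conversion functions are called, ax refers to its final value unless it is captured as a default argument. The sketch below is plain Python, independent of yt, and only illustrates the language behavior and the usual workaround.

    # Late binding: all three closures see the final value of ax.
    funcs = []
    for ax in "xyz":
        funcs.append(lambda: "particle_velocity_%s" % ax)
    print([f() for f in funcs])
    # ['particle_velocity_z', 'particle_velocity_z', 'particle_velocity_z']

    # Capturing ax as a default argument binds it at definition time.
    funcs = []
    for ax in "xyz":
        funcs.append(lambda ax=ax: "particle_velocity_%s" % ax)
    print([f() for f in funcs])
    # ['particle_velocity_x', 'particle_velocity_y', 'particle_velocity_z']
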
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -140,7 +140,7 @@
                     temp[-nstars:] = data
                     tr[field] = temp
                     del data
-                tr[field] = tr[field][mask]
+                tr[field] = tr[field][mask].astype('f8')
                 ftype_old = ftype
                 fields_read.append(field)
         if tr == {}:
@@ -330,32 +330,57 @@
     f.seek(pos)
     return unitary_center, fl, iocts, nLevel, root_level
 
+def get_ranges(skip, count, field, words=6, real_size=4, np_per_page=4096**2, 
+                  num_pages=1):
+    # translate the requested particle index window into file position ranges
+    ranges = []
+    arr_size = np_per_page * real_size
+    page_size = words * np_per_page * real_size
+    idxa, idxb = 0, 0
+    posa, posb = 0, 0
+    left = count
+    for page in range(num_pages):
+        idxb += np_per_page
+        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
+            posb += arr_size
+            if i == field or fname == field:
+                if skip < np_per_page and count > 0:
+                    left_in_page = np_per_page - skip
+                    this_count = min(left_in_page, count)
+                    count -= this_count
+                    start = posa + skip * real_size
+                    end = posa + this_count * real_size
+                    ranges.append((start, this_count))
+                    skip = 0
+                    assert end <= posb
+                else:
+                    skip -= np_per_page
+            posa += arr_size
+        idxa += np_per_page
+    assert count == 0
+    return ranges
 
-def read_particles(file, Nrow, idxa=None, idxb=None, field=None):
+
+def read_particles(file, Nrow, idxa, idxb, field):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
-    np_per_page = Nrow**2  # defined in ART a_setup.h
+    np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
     data = np.array([], 'f4')
     fh = open(file, 'r')
-    totalp = idxb-idxa
-    left = totalp
-    for page in range(num_pages):
-        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
-            if i == field or fname == field:
-                if idxa is not None:
-                    fh.seek(real_size*idxa, 1)
-                    count = min(np_per_page, left)
-                    temp = np.fromfile(fh, count=count, dtype='>f4')
-                    pageleft = np_per_page-count-idxa
-                    fh.seek(real_size*pageleft, 1)
-                    left -= count
-                else:
-                    count = np_per_page
-                    temp = np.fromfile(fh, count=count, dtype='>f4')
-                data = np.concatenate((data, temp))
-            else:
-                fh.seek(4*np_per_page, 1)
+    skip, count = idxa, idxb - idxa
+    kwargs = dict(words=words, real_size=real_size, 
+                  np_per_page=np_per_page, num_pages=num_pages)
+    ranges = get_ranges(skip, count, field, **kwargs)
+    data = None
+    for seek, this_count in ranges:
+        fh.seek(seek)
+        temp = np.fromfile(fh, count=this_count, dtype='>f4')
+        if data is None:
+            data = temp
+        else:
+            data = np.concatenate((data, temp))
+    fh.close()
     return data
 
 

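The rewritten reader above treats the particle file as a sequence of pages, each holding the six arrays x, y, z, vx, vy, vz back to back, and get_ranges translates a particle index window [idxa, idxb) into (byte offset, count) pairs for one field. The following is a simplified pure-Python re-derivation of that arithmetic with toy page sizes (4 particles per page rather than 4096**2); the function name and numbers are illustrative only.

    def field_byte_ranges(idxa, idxb, field_index,
                          np_per_page=4, real_size=4, words=6, num_pages=3):
        # Each page stores `words` arrays of np_per_page values, one field
        # after another, so field f within page p begins at
        # p * page_bytes + f * np_per_page * real_size.
        ranges = []
        page_bytes = words * np_per_page * real_size
        for page in range(num_pages):
            lo, hi = page * np_per_page, (page + 1) * np_per_page
            a, b = max(idxa, lo), min(idxb, hi)
            if a >= b:
                continue
            field_start = page * page_bytes + field_index * np_per_page * real_size
            ranges.append((field_start + (a - lo) * real_size, b - a))
        return ranges

    # particles 2..9 of the fourth array ("vx"), with 4-particle pages
    print(field_byte_ranges(2, 10, 3))       # [(56, 2), (144, 4), (240, 2)]
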
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,6 +35,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 
 from .definitions import ramses_header
 from yt.utilities.definitions import \
@@ -252,43 +254,7 @@
         self.select(selector)
         return self.count(selector)
 
-class RAMSESDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
+class RAMSESDomainSubset(OctreeSubset):
 
     def fill(self, content, fields):
         # Here we get a copy of the file, which we skip through and read the
@@ -389,8 +355,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

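The new _chunk_spatial above yields one spatial chunk per non-empty subset, fetching ghost zones only when ngz > 0. A stand-in sketch of that control flow, using a hypothetical FakeSubset class rather than yt objects, is shown below.

    class FakeSubset(object):
        # Hypothetical stand-in for an octree subset.
        def __init__(self, cell_count):
            self.cell_count = cell_count
        def retrieve_ghost_zones(self, ngz, fields, smoothed=True):
            return self   # the real call would return a padded subset

    def chunk_spatial(subsets, ngz=0):
        for og in subsets:
            g = og.retrieve_ghost_zones(ngz, [], smoothed=True) if ngz > 0 else og
            if og.cell_count == 0:
                continue                     # empty subsets are skipped
            yield ("spatial", [g], og.cell_count)

    sizes = [s for _, _, s in chunk_spatial([FakeSubset(0), FakeSubset(12), FakeSubset(5)])]
    print(sizes)                             # [12, 5]
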
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \
@@ -70,40 +72,8 @@
     def _calculate_offsets(self, fields):
         pass
 
-class ParticleDomainSubset(object):
-    def __init__(self, domain, mask, count):
-        self.domain = domain
-        self.mask = mask
-        self.cell_count = count
-        self.oct_handler = domain.pf.h.oct_handler
-        level_counts = self.oct_handler.count_levels(
-            99, self.domain.domain_id, mask)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count)
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count)
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count)
-
+class ParticleDomainSubset(OctreeSubset):
+    pass
 
 class ParticleGeometryHandler(OctreeGeometryHandler):
 
@@ -126,7 +96,7 @@
         total_particles = sum(sum(d.total_particles.values())
                               for d in self.domains)
         self.oct_handler = ParticleOctreeContainer(
-            self.parameter_file.domain_dimensions,
+            self.parameter_file.domain_dimensions/2,
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         self.oct_handler.n_ref = 64
@@ -170,8 +140,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -216,6 +194,7 @@
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
         self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
         self.omega_lambda = hvals["OmegaLambda"]
         self.omega_matter = hvals["Omega0"]
@@ -317,6 +296,7 @@
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
 
@@ -371,10 +351,27 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 root_dimensions = 64):
+                 root_dimensions = 64, endian = ">",
+                 field_dtypes = None,
+                 domain_left_edge = None,
+                 domain_right_edge = None):
+        self.endian = endian
         self._root_dimensions = root_dimensions
         # Set up the template for domain files
         self.storage_filename = None
+        if domain_left_edge is None:
+            domain_left_edge = np.zeros(3, "float64") - 0.5
+        if domain_right_edge is None:
+            domain_right_edge = np.zeros(3, "float64") + 0.5
+
+        self.domain_left_edge = np.array(domain_left_edge, dtype="float64")
+        self.domain_right_edge = np.array(domain_right_edge, dtype="float64")
+
+        # My understanding is that dtypes are set on a field by field basis,
+        # not on a (particle type, field) basis
+        if field_dtypes is None: field_dtypes = {}
+        self._field_dtypes = field_dtypes
+
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):
@@ -393,7 +390,7 @@
         # in the GADGET-2 user guide.
 
         f = open(self.parameter_filename, "rb")
-        hh = ">" + "".join(["%s" % (b) for a,b in self._header_spec])
+        hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
         hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
                      struct.unpack(hh, f.read(struct.calcsize(hh))))])
         self._header_offset = f.tell()
@@ -408,9 +405,11 @@
         # This may not be correct.
         self.current_time = hvals["time"]
 
-        self.domain_left_edge = np.zeros(3, "float64") - 0.5
-        self.domain_right_edge = np.ones(3, "float64") + 0.5
+        # NOTE: These are now set in the main initializer.
+        #self.domain_left_edge = np.zeros(3, "float64") - 0.5
+        #self.domain_right_edge = np.ones(3, "float64") + 0.5
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
 

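With the constructor changes above, the Tipsy frontend can be told the byte order, per-field base dtypes, and domain edges at load time instead of assuming big-endian floats on a unit box. A usage sketch follows; the snapshot path is hypothetical and the keyword values are only examples of the new parameters.

    from yt.frontends.sph.data_structures import TipsyStaticOutput

    # Hypothetical little-endian snapshot with double-precision positions.
    pf = TipsyStaticOutput("galaxy.00300", endian="<",
                           field_dtypes={"Coordinates": "d"},
                           domain_left_edge=[-0.5, -0.5, -0.5],
                           domain_right_edge=[0.5, 0.5, 0.5])
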
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -372,6 +372,7 @@
         return rv
 
     def _initialize_octree(self, domain, octree):
+        pf = domain.pf
         with open(domain.domain_filename, "rb") as f:
             f.seek(domain.pf._header_offset)
             for ptype in self._ptypes:
@@ -391,6 +392,11 @@
                             pos[:,1].min(), pos[:,1].max())
                 mylog.debug("Spanning: %0.3e .. %0.3e in z",
                             pos[:,2].min(), pos[:,2].max())
+                if np.any(pos.min(axis=0) < pf.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > pf.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                           pf.domain_left_edge,
+                                           pf.domain_right_edge)
                 del pp
                 octree.add(pos, domain.domain_id)
 
@@ -412,10 +418,12 @@
         for ptype, field in self._fields:
             pfields = []
             if tp[ptype] == 0: continue
+            dtbase = domain.pf._field_dtypes.get(field, 'f')
+            ff = "%s%s" % (domain.pf.endian, dtbase)
             if field in _vector_fields:
-                dt = (field, [('x', '>f'), ('y', '>f'), ('z', '>f')])
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
             else:
-                dt = (field, '>f')
+                dt = (field, ff)
             pds.setdefault(ptype, []).append(dt)
             field_list.append((ptype, field))
         for ptype in pds:

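The reader above now builds its struct dtypes from the output's endian character plus a per-field base type instead of a hard-coded '>f'. The NumPy sketch below shows the dtypes that construction produces, assuming (for illustration only) a little-endian file and a float32 base type.

    import numpy as np

    endian, dtbase = "<", "f"                # assumed values for illustration
    ff = "%s%s" % (endian, dtbase)
    vec = np.dtype([("Coordinates", [("x", ff), ("y", ff), ("z", ff)])])
    scal = np.dtype([("Mass", ff)])
    print(vec.itemsize, scal.itemsize)       # 12 4
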
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/frontends/sph/smoothing_kernel.pyx
--- a/yt/frontends/sph/smoothing_kernel.pyx
+++ b/yt/frontends/sph/smoothing_kernel.pyx
@@ -53,21 +53,28 @@
     for p in range(ngas):
         kernel_sum[p] = 0.0
         skip = 0
+        # Find the extent of the kernel in root-grid cells
         for i in range(3):
             pos[i] = ppos[p, i]
+            # Get particle root grid integer index
             ind[i] = <int>((pos[i] - left_edge[i]) / dds[i])
+            # Number of root-grid cells spanned by the smoothing length, plus one
             half_len = <int>(hsml[p]/dds[i]) + 1
+            # Left and right integer indices of the smoothing range;
+            # if the smoothing length is small, both may fall in the same bin
             ib0[i] = ind[i] - half_len
             ib1[i] = ind[i] + half_len
             #pos[i] = ppos[p, i] - left_edge[i]
             #ind[i] = <int>(pos[i] / dds[i])
             #ib0[i] = <int>((pos[i] - hsml[i]) / dds[i]) - 1
             #ib1[i] = <int>((pos[i] + hsml[i]) / dds[i]) + 1
+            # Skip if outside our root grid
             if ib0[i] >= dims[i] or ib1[i] < 0:
                 skip = 1
             ib0[i] = iclip(ib0[i], 0, dims[i] - 1)
             ib1[i] = iclip(ib1[i], 0, dims[i] - 1)
         if skip == 1: continue
+        # Having found the kernel shape, calculate the kernel weight
         for i from ib0[0] <= i <= ib1[0]:
             idist[0] = (ind[0] - i) * (ind[0] - i) * sdds[0]
             for j from ib0[1] <= j <= ib1[1]:
@@ -75,10 +82,14 @@
                 for k from ib0[2] <= k <= ib1[2]:
                     idist[2] = (ind[2] - k) * (ind[2] - k) * sdds[2]
                     dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / hsml[p]
+                    # Kernel is 3D but save the elements in a 1D array
                     gi = ((i * dims[1] + j) * dims[2]) + k
                     pdist[gi] = sph_kernel(dist)
+                    # Save sum to normalize later
                     kernel_sum[p] += pdist[gi]
+        # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
             for j from ib0[1] <= j <= ib1[1]:
                 for k from ib0[2] <= k <= ib1[2]:

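The comments added above describe how a particle's kernel extent is turned into root-grid index bounds before deposition. The NumPy sketch below reproduces that bookkeeping with toy numbers (a 16^3 grid on the unit cube and a single particle); the values are illustrative only.

    import numpy as np

    dims = np.array([16, 16, 16])
    left_edge = np.zeros(3)
    dds = 1.0 / dims                              # root-grid cell width
    pos = np.array([0.52, 0.10, 0.97])            # one particle position
    hsml = 0.09                                   # its smoothing length

    ind = ((pos - left_edge) / dds).astype(int)   # root-grid cell of the particle
    half_len = (hsml / dds).astype(int) + 1       # cells spanned by hsml, plus one
    ib0 = np.clip(ind - half_len, 0, dims - 1)    # clipped lower index bound
    ib1 = np.clip(ind + half_len, 0, dims - 1)    # clipped upper index bound
    print(ind, ib0, ib1)                          # [ 8  1 15] [ 6  0 13] [10  3 15]
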
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/geometry/fake_octree.pyx
--- /dev/null
+++ b/yt/geometry/fake_octree.pyx
@@ -0,0 +1,90 @@
+"""
+Make a fake octree, deposit particle at every leaf
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from libc.stdlib cimport malloc, free, rand, RAND_MAX
+cimport numpy as np
+import numpy as np
+cimport cython
+
+from oct_container cimport Oct, RAMSESOctreeContainer
+
+# Create a balanced octree by a random walk that recursively
+# subdivides
+def create_fake_octree(RAMSESOctreeContainer oct_handler,
+                       long max_noct,
+                       long max_level,
+                       np.ndarray[np.int32_t, ndim=1] ndd,
+                       np.ndarray[np.float64_t, ndim=1] dle,
+                       np.ndarray[np.float64_t, ndim=1] dre,
+                       float fsubdivide):
+    cdef int[3] dd #hold the root grid dimensions
+    cdef int[3] ind #hold the octant index
+    cdef long i
+    cdef long cur_leaf = 0
+    cdef np.ndarray[np.uint8_t, ndim=2] mask
+    for i in range(3):
+        ind[i] = 0
+        dd[i] = ndd[i]
+    oct_handler.allocate_domains([max_noct])
+    parent = oct_handler.next_root(1, ind)
+    parent.domain = 1
+    cur_leaf = 8 #we've added one parent...
+    mask = np.ones((max_noct,8),dtype='uint8')
+    while oct_handler.domains[0].n_assigned < max_noct:
+        print "root: nocts ", oct_handler.domains[0].n_assigned
+        cur_leaf = subdivide(oct_handler, parent, ind, dd, cur_leaf, 0,
+                             max_noct, max_level, fsubdivide, mask)
+    return cur_leaf
+                             
+
+cdef long subdivide(RAMSESOctreeContainer oct_handler, 
+                    Oct *parent,
+                    int ind[3], int dd[3], 
+                    long cur_leaf, long cur_level, 
+                    long max_noct, long max_level, float fsubdivide,
+                    np.ndarray[np.uint8_t, ndim=2] mask):
+    print "child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
+    cdef int ddr[3]
+    cdef long i,j,k
+    cdef float rf #random float from 0-1
+    if cur_level >= max_level: 
+        return cur_leaf
+    if oct_handler.domains[0].n_assigned >= max_noct:
+        return cur_leaf
+    for i in range(3):
+        ind[i] = <int> ((rand() * 1.0 / RAND_MAX) * dd[i])
+        ddr[i] = 2
+    rf = rand() * 1.0 / RAND_MAX
+    if rf > fsubdivide:
+        if parent.children[ind[0]][ind[1]][ind[2]] == NULL:
+            cur_leaf += 7 
+        oct = oct_handler.next_child(1, ind, parent)
+        oct.domain = 1
+        cur_leaf = subdivide(oct_handler, oct, ind, ddr, cur_leaf, 
+                             cur_level + 1, max_noct, max_level, 
+                             fsubdivide, mask)
+    return cur_leaf

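create_fake_octree above grows the tree by a random walk: each accepted subdivision turns one leaf into eight children, a net gain of seven leaves, which is why cur_leaf advances by 7. The toy loop below only reproduces that leaf accounting in plain Python; it builds no octree and the probability threshold is a stand-in.

    import random

    random.seed(0)
    leaves, nocts, max_noct = 8, 1, 20        # start: one root oct, eight leaves
    while nocts < max_noct:
        if random.random() > 0.25:            # stand-in for the fsubdivide test
            leaves += 7                       # one leaf becomes eight children
            nocts += 1
    print(nocts, leaves)                      # 20 141  (8 + 7 * 19)
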
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -30,8 +30,12 @@
 
 cdef struct Oct
 cdef struct Oct:
-    np.int64_t ind          # index
-    np.int64_t local_ind
+    np.int64_t file_ind     # index with respect to the order in which it was
+                            # added
+    np.int64_t domain_ind   # index within the global set of domains
+                            # note that moving to a local index will require
+                            # moving to split-up masks, which is part of a
+                            # bigger refactor
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
@@ -39,6 +43,10 @@
     Oct *children[2][2][2]
     Oct *parent
 
+cdef struct OctInfo:
+    np.float64_t left_edge[3]
+    np.float64_t dds[3]
+
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
     np.int64_t n
@@ -54,16 +62,12 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
-    cdef Oct* get(self, ppos)
+    cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
-
-cdef class ARTIOOctreeContainer(OctreeContainer):
-    cdef OctAllocationContainer **domains
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3])
-    cdef Oct *next_free_oct( self, int curdom )
-    cdef int valid_domain_oct(self, int curdom, Oct *parent)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, int curlevel, double pp[3])
+    # This function must return the offset from global-to-local domains; i.e.,
+    # OctAllocationContainer.offset if such a thing exists.
+    cdef np.int64_t get_domain_offset(self, int domain_id)
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains

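The header above renames Oct.ind to file_ind and local_ind to domain_ind, and adds an OctInfo struct carrying the left edge and cell width reported by get(). As a reading aid only, the pure-Python mirror below restates what each field holds; the real definitions are the cdef structs in the .pxd.

    class OctInfo(object):
        def __init__(self):
            self.left_edge = [0.0, 0.0, 0.0]  # left edge of the selected cell
            self.dds = [0.0, 0.0, 0.0]        # cell width at that oct's level

    class Oct(object):
        def __init__(self):
            self.file_ind = -1     # order in which the oct was added
            self.domain_ind = -1   # index within the global set of domains
            self.domain = -1       # owning domain id
            self.pos = [0, 0, 0]   # integer position at this level
            self.level = -1
            self.parent = None
            self.children = None   # a 2x2x2 array of child pointers in Cython
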
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -56,8 +56,8 @@
     for n in range(n_octs):
         oct = &n_cont.my_octs[n]
         oct.parent = NULL
-        oct.ind = oct.domain = -1
-        oct.local_ind = n + n_cont.offset
+        oct.file_ind = oct.domain = -1
+        oct.domain_ind = n + n_cont.offset
         oct.level = -1
         for i in range(2):
             for j in range(2):
@@ -130,7 +130,7 @@
         while cur != NULL:
             for i in range(cur.n_assigned):
                 this = &cur.my_octs[i]
-                yield (this.ind, this.local_ind, this.domain)
+                yield (this.file_ind, this.domain_ind, this.domain)
             cur = cur.next
 
     cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
@@ -139,10 +139,13 @@
             size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
             corner[i] = o.pos[i] * size[i] + self.DLE[i]
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return 0
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, ppos):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
         #Given a floating point position, retrieve the most
         #refined oct at that time
         cdef np.int64_t ind[3]
@@ -150,21 +153,34 @@
         cdef Oct *cur
         cdef int i
         for i in range(3):
-            pp[i] = ppos[i] - self.DLE[i]
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
-            cp[i] = (ind[i] + 0.5) * dds[i]
-        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        while cur.children[0][0][0] != NULL:
+            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+        next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        # We want to stop recursing when there's nowhere else to go
+        while next != NULL:
+            cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
-                if cp[i] > pp[i]:
+                if cp[i] > ppos[i]:
                     ind[i] = 0
                     cp[i] -= dds[i] / 2.0
                 else:
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
-            cur = cur.children[ind[0]][ind[1]][ind[2]]
+            next = cur.children[ind[0]][ind[1]][ind[2]]
+        if oinfo == NULL: return cur
+        for i in range(3):
+            # This will happen *after* we quit out, so we need to back out the
+            # last change to cp
+            if ind[i] == 1:
+                cp[i] -= dds[i]/2.0 # Now centered
+            else:
+                cp[i] += dds[i]/2.0
+            # We don't need to change dds[i] as it has been halved from the
+            # oct width, thus making it already the cell width
+            oinfo.dds[i] = dds[i] # Cell width
+            oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
         return cur
 
     @cython.boundscheck(False)
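
The rewritten get() above walks from the root-mesh cell down to the finest oct containing ppos by halving dds at each level and stepping the running center cp toward the position; when an OctInfo is supplied, the last half-step is backed out to recover the cell's left edge and width. Below is a one-dimensional pure-Python sketch of the same descent logic, with illustrative names and numbers.

    def descend_1d(pos, left=0.0, right=1.0, levels=4):
        dds = right - left
        cp = left + 0.5 * dds                 # center of the root cell
        path = []
        for _ in range(levels):
            dds /= 2.0                        # width of the child at this level
            if cp > pos:                      # position is in the low child
                path.append(0)
                cp -= dds / 2.0
            else:                             # position is in the high child
                path.append(1)
                cp += dds / 2.0
            # cp is now the center of the chosen child; dds is its width
        return path, cp, dds

    print(descend_1d(0.30))                   # ([0, 1, 0, 0], 0.28125, 0.0625)
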
@@ -186,7 +202,40 @@
                 cur = cur.next
             o = &cur.my_octs[oi - cur.offset]
             for i in range(8):
-                count[o.domain - 1] += mask[o.local_ind,i]
+                count[o.domain - 1] += mask[o.domain_ind,i]
+        return count
+
+    @cython.boundscheck(True)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_leaves(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        # Modified to work when not all octs are assigned
+        cdef int i, j, k, ii
+        cdef np.int64_t oi
+        # pos here is CELL center, not OCT center.
+        cdef np.float64_t pos[3]
+        cdef int n = mask.shape[0]
+        cdef np.ndarray[np.int64_t, ndim=1] count
+        count = np.zeros(self.max_domain, 'int64')
+        # 
+        cur = self.cont
+        for oi in range(n):
+            if oi - cur.offset >= cur.n_assigned:
+                cur = cur.next
+                if cur == NULL:
+                    break
+            o = &cur.my_octs[oi - cur.offset]
+            # skip if unassigned
+            if o == NULL:
+                continue
+            if o.domain == -1: 
+                continue
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if o.children[i][j][k] == NULL:
+                            ii = ((k*2)+j)*2+i
+                            count[o.domain - 1] += mask[o.domain_ind,ii]
         return count
 
     @cython.boundscheck(False)
@@ -260,14 +309,17 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def get_neighbor_boundaries(self, ppos):
+    def get_neighbor_boundaries(self, oppos):
+        cdef int i, ii
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
         cdef np.ndarray[np.float64_t, ndim=2] bounds
         cdef np.float64_t corner[3], size[3]
         bounds = np.zeros((27,6), dtype="float64")
-        cdef int i, ii
         tnp = 0
         for i in range(27):
             self.oct_bounds(neighbors[i], corner, size)
@@ -276,330 +328,11 @@
                 bounds[i, 3+ii] = size[ii]
         return bounds
 
-cdef class ARTIOOctreeContainer(OctreeContainer):
+cdef class RAMSESOctreeContainer(OctreeContainer):
 
-    def allocate_domains(self, domain_counts):
-        cdef int count, i
-        cdef OctAllocationContainer *cur = self.cont
-        assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
-        self.domains = <OctAllocationContainer **> malloc(
-            sizeof(OctAllocationContainer *) * len(domain_counts))
-        for i, count in enumerate(domain_counts):
-            cur = allocate_octs(count, cur)
-            if self.cont == NULL: self.cont = cur
-            self.domains[i] = cur
-        
-    def __dealloc__(self):
-        # This gets called BEFORE the superclass deallocation.  But, both get
-        # called.
-        if self.domains != NULL: free(self.domains)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count(self, np.ndarray[np.uint8_t, ndim=1, cast=True] mask,
-                     split = False):
-        cdef int n = mask.shape[0]
-        cdef int i, dom
-        cdef OctAllocationContainer *cur
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # This is the idiom for iterating over many containers.
-        cur = self.cont
-        for i in range(n):
-            if i - cur.offset >= cur.n: cur = cur.next
-            if mask[i] == 1:
-                count[cur.my_octs[i - cur.offset].domain - 1] += 1
-        return count
-
-    def check(self, int curdom):
-        cdef int dind, pi
-        cdef Oct oct
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        cdef int nbad = 0
-        for pi in range(cont.n_assigned):
-            oct = cont.my_octs[pi]
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if oct.children[i][j][k] != NULL and \
-                           oct.children[i][j][k].level != oct.level + 1:
-                            if curdom == 61:
-                                print pi, oct.children[i][j][k].level,
-                                print oct.level
-                            nbad += 1
-        print "DOMAIN % 3i HAS % 9i BAD OCTS (%s / %s / %s)" % (curdom, nbad, 
-            cont.n - cont.n_assigned, cont.n_assigned, cont.n)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *next_free_oct( self, int curdom ) :
-        cdef OctAllocationContainer *cont
-        cdef Oct *next_oct
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            print "Error, invalid domain or unallocated domains"
-            raise RuntimeError
-        
-        cont = self.domains[curdom - 1]
-        if cont.n_assigned >= cont.n :
-            print "Error, ran out of octs in domain curdom"
-            raise RuntimeError
-
-        self.nocts += 1
-        next_oct = &cont.my_octs[cont.n_assigned]
-        cont.n_assigned += 1
-        return next_oct
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int valid_domain_oct(self, int curdom, Oct *parent) :
-        cdef OctAllocationContainer *cont
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            raise RuntimeError
-        cont = self.domains[curdom - 1]
-
-        if parent == NULL or parent < &cont.my_octs[0] or \
-                parent > &cont.my_octs[cont.n_assigned] :
-            return 0
-        else :
-            return 1
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3]):
-        cdef np.int64_t ind[3]
-        cdef np.float64_t dds
-        cdef int i
-        for i in range(3):
-            dds = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> floor((ppos[i]-self.DLE[i])/dds)
-        return self.root_mesh[ind[0]][ind[1]][ind[2]]
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, 
-                    int curlevel, np.float64_t pp[3]):
-
-        cdef int level, i, ind[3]
-        cdef Oct *cur, *next_oct
-        cdef np.int64_t pos[3]
-        cdef np.float64_t dds
-
-        if curlevel < 0 :
-            raise RuntimeError
-        for i in range(3):
-            if pp[i] < self.DLE[i] or pp[i] > self.DRE[i] :
-                raise RuntimeError
-            dds = (self.DRE[i] - self.DLE[i])/(<np.int64_t>self.nn[i])
-            pos[i] = <np.int64_t> floor((pp[i]-self.DLE[i])*<np.float64_t>(1<<curlevel)/dds)
-
-        if curlevel == 0 :
-            cur = NULL
-        elif parent == NULL :
-            cur = self.get_root_oct(pp)
-            assert( cur != NULL )
-
-            # Now we find the location we want
-            for level in range(1,curlevel):
-                # At every level, find the cell this oct lives inside
-                for i in range(3) :
-                    if pos[i] < (2*cur.pos[i]+1)<<(curlevel-level) :
-                        ind[i] = 0
-                    else :
-                        ind[i] = 1
-                cur = cur.children[ind[0]][ind[1]][ind[2]]
-                if cur == NULL:
-                    # in ART we don't allocate down to curlevel 
-                    # if parent doesn't exist
-                    print "Error, no oct exists at that level"
-                    raise RuntimeError
-        else :
-            if not self.valid_domain_oct(curdom,parent) or \
-                    parent.level != curlevel - 1:
-                raise RuntimeError
-            cur = parent
- 
-        next_oct = self.next_free_oct( curdom )
-        if cur == NULL :
-            self.root_mesh[pos[0]][pos[1]][pos[2]] = next_oct
-        else :
-            for i in range(3) :
-                if pos[i] < 2*cur.pos[i]+1 :
-                    ind[i] = 0
-                else :
-                    ind[i] = 1
-            if cur.level != curlevel - 1 or  \
-                    cur.children[ind[0]][ind[1]][ind[2]] != NULL :
-                print "Error in add_oct: child already filled!"
-                raise RuntimeError
-
-            cur.children[ind[0]][ind[1]][ind[2]] = next_oct
-        for i in range(3) :
-            next_oct.pos[i] = pos[i]
-        next_oct.domain = curdom
-        next_oct.parent = cur
-        next_oct.ind = 1
-        next_oct.level = curlevel
-        return next_oct
-
-    # ii:mask/art ; ci=ramses loop backward (k<-fast, j ,i<-slow) 
-    # ii=0 000 art 000 ci 000 
-    # ii=1 100 art 100 ci 001 
-    # ii=2 010 art 010 ci 010 
-    # ii=3 110 art 110 ci 011
-    # ii=4 001 art 001 ci 100
-    # ii=5 101 art 011 ci 101
-    # ii=6 011 art 011 ci 110
-    # ii=7 111 art 111 ci 111
-    # keep coords ints so multiply by pow(2,1) when increasing level.
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii, level
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        ci=0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        coords[ci, 0] = (o.pos[0] << 1) + i
-                        coords[ci, 1] = (o.pos[1] << 1) + j
-                        coords[ci, 2] = (o.pos[2] << 1) + k
-                        ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] levels
-        levels = np.empty(cell_count, dtype="int64")
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[oi + cur.offset, i] == 0: continue
-                levels[ci] = o.level
-                ci +=1
-        return levels
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.float64_t pos[3]
-        cdef np.float64_t base_dx[3], dx[3]
-        n = mask.shape[0]
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        ci =0 
-        for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci +=1 
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_mask(self, int domain, dest_fields, source_fields,
-                   np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
-        cdef np.ndarray[np.float32_t, ndim=1] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
-        cdef int n
-        cdef int i, j, k, ii
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        for key in dest_fields:
-            local_filled = 0
-            dest = dest_fields[key]
-            source = source_fields[key]
-            # snl: an alternative to filling level 0 yt-octs is to produce a 
-            # mapping between the mask and the source read order
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                for k in range(2):
-                    for j in range(2):
-                        for i in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.local_ind*8+ii]
-                            # print 'oct_container.pyx:sourcemasked',o.level,local_filled, o.local_ind*8+ii, source[o.local_ind*8+ii]
-                            local_filled += 1
-        return local_filled
-
-cdef class RAMSESOctreeContainer(OctreeContainer):
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        return cont.offset
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
@@ -666,7 +399,77 @@
                 count[cur.my_octs[i - cur.offset].domain - 1] += 1
         return count
 
-    def check(self, int curdom):
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n,  use
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
+        return m2 # NOTE: This is uint8_t
+
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.domain_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n, 'int64') - 1
+        nm = 0
+        for oi in range(cur.n):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            if use == 1:
+                ind[o.domain_ind - cur.offset] = nm
+            nm += use
+        return ind
+
+    def check(self, int curdom, int print_all = 0):
         cdef int dind, pi
         cdef Oct oct
         cdef OctAllocationContainer *cont = self.domains[curdom - 1]
@@ -675,6 +478,9 @@
         cdef int unassigned = 0
         for pi in range(cont.n_assigned):
             oct = cont.my_octs[pi]
+            if print_all==1:
+                print pi, oct.level, oct.domain,
+                print oct.pos[0],oct.pos[1],oct.pos[2]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
@@ -691,6 +497,33 @@
         print "DOMAIN % 3i HAS % 9i MISSED OCTS" % (curdom, nmissed)
         print "DOMAIN % 3i HAS % 9i UNASSIGNED OCTS" % (curdom, unassigned)
 
+    def check_refinement(self, int curdom):
+        cdef int pi, i, j, k, some_refined, some_unrefined
+        cdef Oct *oct
+        cdef int bad = 0
+        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
+        for pi in range(cont.n_assigned):
+            oct = &cont.my_octs[pi]
+            some_unrefined = 0
+            some_refined = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if oct.children[i][j][k] == NULL:
+                            some_unrefined = 1
+                        else:
+                            some_refined = 1
+            if some_unrefined == some_refined == 1:
+                #print "BAD", oct.file_ind, oct.domain_ind
+                bad += 1
+                if curdom == 10 or curdom == 72:
+                    for i in range(2):
+                        for j in range(2):
+                            for k in range(2):
+                                print (oct.children[i][j][k] == NULL),
+                    print
+        print "BAD TOTAL", curdom, bad, cont.n_assigned
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
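
check_refinement above flags octs that are partially refined, i.e. that have some NULL and some non-NULL children. A minimal stand-in for that test, with children represented as a flat list of eight entries and None meaning unrefined:

    def partially_refined(children):
        refined = [c is not None for c in children]
        return any(refined) and not all(refined)

    print(partially_refined([None] * 8))                  # False: fully unrefined
    print(partially_refined(["oct"] * 8))                 # False: fully refined
    print(partially_refined(["oct"] * 3 + [None] * 5))    # True: mixed, counted as bad
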
@@ -739,7 +572,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             if local == 1:
-                cur.ind = p
+                cur.file_ind = p
             cur.level = curlevel
         return cont.n_assigned - initial
 
@@ -757,18 +590,18 @@
         n = mask.shape[0]
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="int64")
+        ci = 0
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = (o.pos[0] << 1) + i
                         coords[ci, 1] = (o.pos[1] << 1) + j
                         coords[ci, 2] = (o.pos[2] << 1) + k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
 
     @cython.boundscheck(False)
@@ -790,9 +623,8 @@
             o = &cur.my_octs[oi]
             for i in range(8):
                 if mask[oi + cur.offset, i] == 0: continue
-                ci = level_counts[o.level]
                 levels[ci] = o.level
-                level_counts[o.level] += 1
+                ci += 1
         return levels
 
     @cython.boundscheck(False)
@@ -808,7 +640,7 @@
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -833,6 +665,7 @@
             # position.  Note that the positions will also all be offset by
             # dx/2.0.  This is also for *oct grids*, not cells.
             base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+        ci = 0
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
             for i in range(3):
@@ -846,12 +679,11 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = pos[0] + dx[0] * i
                         coords[ci, 1] = pos[1] + dx[1] * j
                         coords[ci, 2] = pos[2] + dx[2] * k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
 
     @cython.boundscheck(False)
@@ -873,20 +705,17 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                if o.level != level: continue
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.ind, ii]
-                            local_filled += 1
+                for ii in range(8):
+                    # We iterate and check here to keep our counts consistent
+                    # when filling different levels.
+                    if mask[o.domain_ind, ii] == 0: continue
+                    if o.level == level: 
+                        dest[local_filled] = source[o.file_ind, ii]
+                    local_filled += 1
         return local_filled
 
+cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
-
-cdef class ARTOctreeContainer(RAMSESOctreeContainer):
-    #this class is specifically for the NMSU ART
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -910,7 +739,7 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                index = o.ind-subchunk_offset
+                index = o.file_ind-subchunk_offset
                 if o.level != level: continue
                 if index < 0: continue
                 if index >= subchunk_max: 
@@ -921,7 +750,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             dest[local_filled + offset] = \
                                 source[index,ii]
                             local_filled += 1
@@ -961,7 +790,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             ox = (o.pos[0] << 1) + i
                             oy = (o.pos[1] << 1) + j
                             oz = (o.pos[2] << 1) + k
@@ -1036,12 +865,23 @@
                 free(o.sd.pos)
         free(o)
 
+    def __iter__(self):
+        #Get the next oct, will traverse domains
+        #Note that oct containers can be sorted 
+        #so that consecutive octs are on the same domain
+        cdef int oi
+        cdef Oct *o
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            yield (o.file_ind, o.domain_ind, o.domain)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def icoords(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the integer positions of the cells
         #Limited to this domain and within the mask
         #Positions are binary; aside from the root mesh
@@ -1070,7 +910,8 @@
     @cython.cdivision(True)
     def ires(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(cell_count, dtype="int64")
@@ -1090,7 +931,8 @@
     @cython.cdivision(True)
     def fcoords(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="float64")
@@ -1141,6 +983,7 @@
         cdef int max_level = 0
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
         cdef np.int64_t i = 0
+        cdef np.int64_t dom_ind
         cdef ParticleArrays *c = self.first_sd
         while c != NULL:
             self.oct_list[i] = c.oct
@@ -1159,13 +1002,20 @@
         self.dom_offsets = <np.int64_t *>malloc(sizeof(np.int64_t) *
                                                 (self.max_domain + 3))
         self.dom_offsets[0] = 0
+        dom_ind = 0
         for i in range(self.nocts):
-            self.oct_list[i].local_ind = i
+            self.oct_list[i].domain_ind = i
+            self.oct_list[i].file_ind = dom_ind
+            dom_ind += 1
             if self.oct_list[i].domain > cur_dom:
                 cur_dom = self.oct_list[i].domain
                 self.dom_offsets[cur_dom + 1] = i
+                dom_ind = 0
         self.dom_offsets[cur_dom + 2] = self.nocts
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return self.dom_offsets[domain_id + 1]
+
     cdef Oct* allocate_oct(self):
         #Allocate the memory, set to NULL or -1
         #We reserve space for n_ref particles, but keep
@@ -1175,8 +1025,8 @@
         cdef ParticleArrays *sd = <ParticleArrays*> \
             malloc(sizeof(ParticleArrays))
         cdef int i, j, k
-        my_oct.ind = my_oct.domain = -1
-        my_oct.local_ind = self.nocts - 1
+        my_oct.file_ind = my_oct.domain = -1
+        my_oct.domain_ind = self.nocts - 1
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
         my_oct.sd = sd
@@ -1227,7 +1077,7 @@
         for oi in range(ndo):
             o = self.oct_list[oi + doff]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -1250,7 +1100,7 @@
                 #IND Corresponding integer index on the root octs
                 #CP Center  point of that oct
                 pp[i] = pos[p, i]
-                dds[i] = (self.DRE[i] + self.DLE[i])/self.nn[i]
+                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
                 ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
@@ -1377,12 +1227,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_neighbor_particles(self, ppos):
+    def count_neighbor_particles(self, oppos):
         #How many particles are in my neighborhood
+        cdef int i, ni, dl, tnp
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
-        cdef int i, ni, dl, tnp
         tnp = 0
         for i in range(27):
             if neighbors[i].sd != NULL:
@@ -1409,4 +1262,83 @@
                 count[o.domain] += mask[oi,i]
         return count
 
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n, use
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
+        return m2
 
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this from domain_and is that the input mask
+        # covers the whole domain, while the output covers only the much
+        # smaller subset of octs that are in the given domain *and* the mask.
+        # Note also that callers of domain_and typically apply a logical any
+        # along the oct axis; here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        # This could perhaps be faster if we 
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.domain_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # As in the functions above, we build a set of indices into the final
+        # reduced, masked values.  The index array is domain.n long and of
+        # type int64.  This way, we can get the Oct through a .get() call,
+        # then use Oct.file_ind as an index into this newly created array,
+        # and finally use the returned index to address the domain subset
+        # array for deposition.
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef Oct *o
+        # For particle octrees, domain 0 is special and means non-leaf nodes.
+        offset = self.dom_offsets[domain_id + 1]
+        noct = self.dom_offsets[domain_id + 2] - offset
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
+        nm = 0
+        for oi in range(noct):
+            ind[oi] = -1
+            o = self.oct_list[oi + offset]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            if use == 1:
+                ind[oi] = nm
+            nm += use
+        return ind

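A side note on the domain_ind logic above: it boils down to mapping each oct that has at least one selected cell to its position in the reduced, masked output, and every other oct to -1.  A minimal NumPy sketch of that bookkeeping (ignoring the per-domain offsets; domain_index_map is a hypothetical stand-in, not a yt function):

    import numpy as np

    def domain_index_map(mask):
        # mask: (noct, 8) boolean array; an oct is "used" if any of its
        # eight cells is selected.  Return an int64 array mapping used
        # octs to 0..n_used-1 and unused octs to -1.
        used = mask.any(axis=1)
        ind = np.full(mask.shape[0], -1, dtype="int64")
        ind[used] = np.arange(used.sum(), dtype="int64")
        return ind

    mask = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0, 0, 0],
                     [0, 1, 1, 0, 0, 0, 0, 0]], dtype=bool)
    print(domain_index_map(mask))   # [ 0 -1  1]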
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -54,7 +54,7 @@
         Returns (in code units) the smallest cell size in the simulation.
         """
         return (self.parameter_file.domain_width /
-                (2**self.max_level)).min()
+                (2**(self.max_level+1))).min()
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]

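For what it's worth, the +1 in the exponent above reflects that an oct at max_level still contains two cells per dimension.  A toy check with made-up numbers (a unit box, ignoring the root mesh dimensions, which the real code folds in via domain_width):

    import numpy as np

    domain_width = np.array([1.0, 1.0, 1.0])   # illustrative unit box
    max_level = 5
    # Each oct at max_level holds 2 cells per dimension, hence the +1.
    print((domain_width / (2 ** (max_level + 1))).min())   # 0.015625 == 1/64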
diff -r 74c2c00d1078b5743660abeecdfb359f8266c9bd -r b8521d5e0e89939669e98c352ddc933f9d40eb89 yt/geometry/particle_deposit.pxd
--- /dev/null
+++ b/yt/geometry/particle_deposit.pxd
@@ -0,0 +1,47 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport cython
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef inline int gind(int i, int j, int k, int dims[3]):
+    return ((k*dims[1])+j)*dims[0]+i
+
+cdef class ParticleDepositOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef np.int64_t nvals
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.int64_t offset,
+                      np.float64_t ppos[3], np.float64_t *fields)

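The gind() helper in this header is just a Fortran-ordered flat index.  A quick pure-Python equivalence check against NumPy (nothing beyond NumPy itself is assumed):

    import numpy as np

    def gind(i, j, k, dims):
        # Fortran-ordered flat index, mirroring the inline gind() above.
        return ((k * dims[1]) + j) * dims[0] + i

    dims = (4, 3, 2)
    assert gind(1, 2, 1, dims) == np.ravel_multi_index((1, 2, 1), dims, order="F")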
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/1c607a2db728/
Changeset:   1c607a2db728
Branch:      yt-3.0
User:        ngoldbaum
Date:        2013-05-24 21:03:16
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #32)

Implement initial spatial chunking for octrees
Affected #:  25 files

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -31,6 +31,9 @@
 from grid_patch import \
     AMRGridPatch
 
+from octree_subset import \
+    OctreeSubset
+
 from static_output import \
     StaticOutput
 

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -249,7 +249,13 @@
                 for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
                     mask = self._current_chunk.objs[0].select(self.selector)
                     if mask is None: continue
-                    data = self[field][mask]
+                    data = self[field]
+                    if len(data.shape) == 4:
+                        # This is how we keep it consistent between oct ordering
+                        # and grid ordering.
+                        data = data.T[mask.T]
+                    else:
+                        data = data[mask]
                     rv[ind:ind+data.size] = data
                     ind += data.size
         else:
@@ -513,6 +519,11 @@
                         if f not in fields_to_generate:
                             fields_to_generate.append(f)
 
+    def deposit(self, positions, fields, op):
+        assert(self._current_chunk.chunk_type == "spatial")
+        fields = ensure_list(fields)
+        self.hierarchy._deposit_particle_fields(self, positions, fields, op)
+
     @contextmanager
     def _field_lock(self):
         self._locked = True

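Regarding the data.T[mask.T] branch in data_containers.py above: transposing both arrays puts the oct axis first, so the extracted values come out grouped oct by oct instead of interleaved.  A small NumPy illustration with a toy two-oct array (shapes and values are made up):

    import numpy as np

    # Two octs' worth of (2, 2, 2) cell data, oct index on the last axis.
    data = np.arange(16).reshape((2, 2, 2, 2), order="F")
    mask = np.zeros(data.shape, dtype=bool)
    mask[0, 0, 0, 0] = True      # one cell of oct 0
    mask[1, 1, 1, 0] = True      # another cell of oct 0
    mask[:, :, :, 1] = True      # every cell of oct 1

    print(data.T[mask.T])   # [ 0  7  8  9 10 11 12 13 14 15] -- grouped per oct
    print(data[mask])       # oct-0 value 7 lands in the middle of oct 1's values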
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -65,7 +65,10 @@
         e = FieldDetector(flat = True)
         e.NumberOfParticles = 1
         fields = e.requested
-        self.func(e, *args, **kwargs)
+        try:
+            self.func(e, *args, **kwargs)
+        except:
+            mylog.error("Could not preload for quantity %s, IO speed may suffer", self.__name__)
         retvals = [ [] for i in range(self.n_ret)]
         chunks = self._data_source.chunks([], chunking_style="io")
         for ds in parallel_objects(chunks, -1):

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -233,6 +233,7 @@
         if pf is None:
             # required attrs
             pf = fake_parameter_file(lambda: 1)
+            pf["Massarr"] = np.ones(6)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.cosmological_simulation = 0.0
             pf.hubble_constant = 0.7
@@ -286,6 +287,9 @@
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
+    def deposit(self, *args, **kwargs):
+        return np.random.random((self.nd, self.nd, self.nd))
+
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -44,6 +44,7 @@
     NeedsProperty, \
     NeedsParameter
 from yt.geometry.selection_routines import convert_mask_to_indices
+import yt.geometry.particle_deposit as particle_deposit
 
 class AMRGridPatch(YTSelectionContainer):
     _spatial = True
@@ -474,6 +475,17 @@
         dt, t = dobj.selector.get_dt(self)
         return dt, t
 
+    def deposit(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        return vals.reshape(self.ActiveDimensions, order="F")
+
     def select(self, selector):
         if id(selector) == self._last_selector_id:
             return self._last_mask

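The deposit() method above relies on an initialize / process / finalize protocol, with the operation class looked up by name ("deposit_%s" % method).  A minimal sketch of an operation following that protocol, assuming a made-up CountParticles class and a plain integer zone index rather than yt's real process_grid signature:

    import numpy as np

    class CountParticles(object):
        # Hypothetical deposit operation: allocate per-zone storage,
        # accumulate one particle at a time, then hand the buffer back.
        def __init__(self, nvals):
            self.nvals = nvals
        def initialize(self):
            self.count = np.zeros(self.nvals, dtype="float64")
        def process(self, zone_index):
            self.count[zone_index] += 1
        def finalize(self):
            return self.count

    op = CountParticles(nvals=8)
    op.initialize()
    for zone in [0, 3, 3, 7]:
        op.process(zone)
    print(op.finalize())   # [ 1.  0.  0.  2.  0.  0.  0.  1.]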
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/octree_subset.py
--- /dev/null
+++ b/yt/data_objects/octree_subset.py
@@ -0,0 +1,170 @@
+"""
+Subsets of octrees
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
+from .field_info_container import \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+import yt.geometry.particle_deposit as particle_deposit
+
+class OctreeSubset(YTSelectionContainer):
+    _spatial = True
+    _num_ghost_zones = 0
+    _num_zones = 2
+    _type_name = 'octree_subset'
+    _skip_add = True
+    _con_args = ('domain', 'mask', 'cell_count')
+    _container_fields = ("dx", "dy", "dz")
+
+    def __init__(self, domain, mask, cell_count):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.mask = mask
+        self.domain = domain
+        self.pf = domain.pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = domain.pf.h.oct_handler
+        self.cell_count = cell_count
+        level_counts = self.oct_handler.count_levels(
+            self.domain.pf.max_level, self.domain.domain_id, mask)
+        assert(level_counts.sum() == cell_count)
+        level_counts[1:] = level_counts[:-1]
+        level_counts[0] = 0
+        self.level_counts = np.add.accumulate(level_counts)
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dx":
+            return self._current_chunk.fwidth[:,0]
+        elif field == "dy":
+            return self._current_chunk.fwidth[:,1]
+        elif field == "dz":
+            return self._current_chunk.fwidth[:,2]
+
+    def select_icoords(self, dobj):
+        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fcoords(self, dobj):
+        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
+                                        self.cell_count,
+                                        self.level_counts.copy())
+
+    def select_fwidth(self, dobj):
+        # Recall domain_dimensions is the number of cells, not octs
+        base_dx = (self.domain.pf.domain_width /
+                   self.domain.pf.domain_dimensions)
+        widths = np.empty((self.cell_count, 3), dtype="float64")
+        dds = (2**self.select_ires(dobj))
+        for i in range(3):
+            widths[:,i] = base_dx[i] / dds
+        return widths
+
+    def select_ires(self, dobj):
+        return self.oct_handler.ires(self.domain.domain_id, self.mask,
+                                     self.cell_count,
+                                     self.level_counts.copy())
+
+    def __getitem__(self, key):
+        tr = super(OctreeSubset, self).__getitem__(key)
+        try:
+            fields = self._determine_fields(key)
+        except YTFieldTypeNotFound:
+            return tr
+        finfo = self.pf._get_field_info(*fields[0])
+        if not finfo.particle_type:
+            # We may need to reshape the field, if it is being queried from
+            # field_data.  If it's already cached, it just passes through.
+            if len(tr.shape) < 4:
+                tr = self._reshape_vals(tr)
+            return tr
+        return tr
+
+    def _reshape_vals(self, arr):
+        nz = self._num_zones + 2*self._num_ghost_zones
+        n_oct = arr.shape[0] / (nz**3.0)
+        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        return arr
+
+    _domain_ind = None
+
+    @property
+    def domain_ind(self):
+        if self._domain_ind is None:
+            di = self.oct_handler.domain_ind(self.mask, self.domain.domain_id)
+            self._domain_ind = di
+        return self._domain_ind
+
+    def deposit(self, positions, fields = None, method = None):
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        nvals = (self.domain_ind >= 0).sum() * 8
+        op = cls(nvals) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+                          self.domain.domain_id)
+        vals = op.finalize()
+        return self._reshape_vals(vals)
+
+    def select(self, selector):
+        if id(selector) == self._last_selector_id:
+            return self._last_mask
+        self._last_mask = self.oct_handler.domain_mask(
+                self.mask, self.domain.domain_id)
+        if self._last_mask.sum() == 0: return None
+        self._last_selector_id = id(selector)
+        return self._last_mask
+
+    def count(self, selector):
+        if id(selector) == self._last_selector_id:
+            if self._last_mask is None: return 0
+            return self._last_mask.sum()
+        self.select(selector)
+        return self.count(selector)
+
+    def count_particles(self, selector, x, y, z):
+        # We don't cache the selector results
+        count = selector.count_points(x,y,z)
+        return count
+
+    def select_particles(self, selector, x, y, z):
+        mask = selector.select_points(x,y,z)
+        return mask

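One note on _reshape_vals in octree_subset.py above: with _num_zones = 2 and no ghost zones, a flat per-cell array becomes a (2, 2, 2, n_oct) block in Fortran order, so the last axis indexes octs.  A short NumPy check with toy values:

    import numpy as np

    nz, n_oct = 2, 3
    flat = np.arange(nz ** 3 * n_oct, dtype="float64")
    # Same reshape as _reshape_vals: cell axes first, oct axis last.
    vals = flat.reshape((nz, nz, nz, n_oct), order="F")
    print(vals.shape)                       # (2, 2, 2, 3)
    print(vals[..., 0].ravel(order="F"))    # the first oct's eight cells: 0..7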
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -59,6 +59,7 @@
     particle_types = ("all",)
     geometry = "cartesian"
     coordinates = None
+    max_level = 99
 
     class __metaclass__(type):
         def __init__(cls, name, b, d):

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -97,7 +97,7 @@
           display_field = False)
 
 def _Ones(field, data):
-    return np.ones(data.shape, dtype='float64')
+    return np.ones(data.ires.size, dtype='float64')
 add_field("Ones", function=_Ones,
           projection_conversion="unitary",
           display_field = False)

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.data_objects.field_info_container import \
@@ -171,8 +173,16 @@
         # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         """
@@ -314,7 +324,8 @@
         self.conversion_factors = cf
 
         for ax in 'xyz':
-            self.conversion_factors["%s-velocity" % ax] = 1.0
+            self.conversion_factors["%s-velocity" % ax] = cf["Velocity"]
+            self.conversion_factors["particle_velocity_%s" % ax] = cf["Velocity"]
         for pt in particle_fields:
             if pt not in self.conversion_factors.keys():
                 self.conversion_factors[pt] = 1.0
@@ -433,43 +444,10 @@
                 return False
         return False
 
-
-class ARTDomainSubset(object):
+class ARTDomainSubset(OctreeSubset):
     def __init__(self, domain, mask, cell_count, domain_level):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
+        super(ARTDomainSubset, self).__init__(domain, mask, cell_count)
         self.domain_level = domain_level
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        base_dx = 1.0/self.domain.pf.domain_dimensions
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:, i] = base_dx[i] / dds
-        return widths
 
     def fill_root(self, content, ftfields):
         """

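The _chunk_spatial implementations added in this changeset (here and in the RAMSES and SPH frontends) all follow the same shape: walk the current chunk's subsets, optionally swap in ghost-zone versions, skip empty subsets, and yield one spatial chunk per survivor.  A stripped-down sketch of that generator pattern using stub objects (StubSubset stands in for a domain subset; the 3-tuple stands in for YTDataChunk):

    class StubSubset(object):
        def __init__(self, cell_count):
            self.cell_count = cell_count

    def chunk_spatial(subsets, ngz=0):
        for og in subsets:
            # With ngz > 0 the real code swaps in og.retrieve_ghost_zones(...).
            g = og
            size = og.cell_count
            if size == 0:
                continue          # empty subsets never become chunks
            yield ("spatial", [g], size)

    subsets = [StubSubset(8), StubSubset(0), StubSubset(64)]
    print([size for _, _, size in chunk_spatial(subsets)])   # [8, 64]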
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -49,19 +49,6 @@
     add_art_field(f, function=NullFunc, take_log=True,
                   validators=[ValidateDataField(f)])
 
-for f in particle_fields:
-    add_art_field(f, function=NullFunc, take_log=True,
-                  validators=[ValidateDataField(f)],
-                  particle_type=True)
-add_art_field("particle_mass", function=NullFunc, take_log=True,
-              validators=[ValidateDataField(f)],
-              particle_type=True,
-              convert_function=lambda x: x.convert("particle_mass"))
-add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
-              validators=[ValidateDataField(f)],
-              particle_type=True,
-              convert_function=lambda x: x.convert("particle_mass"))
-
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
@@ -213,6 +200,24 @@
 ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
 # Particle fields
+for f in particle_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)],
+                  particle_type=True)
+for ax in "xyz":
+    add_art_field("particle_velocity_%s" % ax, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)],
+                  particle_type=True,
+                  convert_function=lambda x: x.convert("particle_velocity_%s" % ax))
+add_art_field("particle_mass", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+
 def _particle_age(field, data):
     tr = data["particle_creation_time"]
     return data.pf.current_time - tr

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -140,7 +140,7 @@
                     temp[-nstars:] = data
                     tr[field] = temp
                     del data
-                tr[field] = tr[field][mask]
+                tr[field] = tr[field][mask].astype('f8')
                 ftype_old = ftype
                 fields_read.append(field)
         if tr == {}:
@@ -330,32 +330,57 @@
     f.seek(pos)
     return unitary_center, fl, iocts, nLevel, root_level
 
+def get_ranges(skip, count, field, words=6, real_size=4, np_per_page=4096**2, 
+                  num_pages=1):
+    #translate a particle index range into file position ranges
+    ranges = []
+    arr_size = np_per_page * real_size
+    page_size = words * np_per_page * real_size
+    idxa, idxb = 0, 0
+    posa, posb = 0, 0
+    left = count
+    for page in range(num_pages):
+        idxb += np_per_page
+        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
+            posb += arr_size
+            if i == field or fname == field:
+                if skip < np_per_page and count > 0:
+                    left_in_page = np_per_page - skip
+                    this_count = min(left_in_page, count)
+                    count -= this_count
+                    start = posa + skip * real_size
+                    end = posa + this_count * real_size
+                    ranges.append((start, this_count))
+                    skip = 0
+                    assert end <= posb
+                else:
+                    skip -= np_per_page
+            posa += arr_size
+        idxa += np_per_page
+    assert count == 0
+    return ranges
 
-def read_particles(file, Nrow, idxa=None, idxb=None, field=None):
+
+def read_particles(file, Nrow, idxa, idxb, field):
     words = 6  # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4  # for file_particle_data; not always true?
-    np_per_page = Nrow**2  # defined in ART a_setup.h
+    np_per_page = Nrow**2  # defined in ART a_setup.h, # of particles/page
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
     data = np.array([], 'f4')
     fh = open(file, 'r')
-    totalp = idxb-idxa
-    left = totalp
-    for page in range(num_pages):
-        for i, fname in enumerate(['x', 'y', 'z', 'vx', 'vy', 'vz']):
-            if i == field or fname == field:
-                if idxa is not None:
-                    fh.seek(real_size*idxa, 1)
-                    count = min(np_per_page, left)
-                    temp = np.fromfile(fh, count=count, dtype='>f4')
-                    pageleft = np_per_page-count-idxa
-                    fh.seek(real_size*pageleft, 1)
-                    left -= count
-                else:
-                    count = np_per_page
-                    temp = np.fromfile(fh, count=count, dtype='>f4')
-                data = np.concatenate((data, temp))
-            else:
-                fh.seek(4*np_per_page, 1)
+    skip, count = idxa, idxb - idxa
+    kwargs = dict(words=words, real_size=real_size, 
+                  np_per_page=np_per_page, num_pages=num_pages)
+    ranges = get_ranges(skip, count, field, **kwargs)
+    data = None
+    for seek, this_count in ranges:
+        fh.seek(seek)
+        temp = np.fromfile(fh, count=this_count, dtype='>f4')
+        if data is None:
+            data = temp
+        else:
+            data = np.concatenate((data, temp))
+    fh.close()
     return data
 
 

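A usage note on get_ranges above: it converts a (skip, count) particle window for one field into (byte offset, particle count) pairs within the page-structured particle file.  Assuming the function is importable from this module, a toy call with deliberately tiny pages (4 particles per page, 2 pages) looks like:

    from yt.frontends.art.io import get_ranges

    # Read particles 2..6 of the 'x' field: two left in page 0, three in page 1.
    ranges = get_ranges(2, 5, 'x', words=6, real_size=4,
                        np_per_page=4, num_pages=2)
    print(ranges)   # [(8, 2), (96, 3)]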
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -35,6 +35,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 
 from .definitions import ramses_header
 from yt.utilities.definitions import \
@@ -252,43 +254,7 @@
         self.select(selector)
         return self.count(selector)
 
-class RAMSESDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
-        self.mask = mask
-        self.domain = domain
-        self.oct_handler = domain.pf.h.oct_handler
-        self.cell_count = cell_count
-        level_counts = self.oct_handler.count_levels(
-            self.domain.pf.max_level, self.domain.domain_id, mask)
-        assert(level_counts.sum() == cell_count)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count,
-                                        self.level_counts.copy())
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.select_ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count,
-                                     self.level_counts.copy())
+class RAMSESDomainSubset(OctreeSubset):
 
     def fill(self, content, fields):
         # Here we get a copy of the file, which we skip through and read the
@@ -389,8 +355,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -40,6 +40,8 @@
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.octree_subset import \
+    OctreeSubset
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \
@@ -70,40 +72,8 @@
     def _calculate_offsets(self, fields):
         pass
 
-class ParticleDomainSubset(object):
-    def __init__(self, domain, mask, count):
-        self.domain = domain
-        self.mask = mask
-        self.cell_count = count
-        self.oct_handler = domain.pf.h.oct_handler
-        level_counts = self.oct_handler.count_levels(
-            99, self.domain.domain_id, mask)
-        level_counts[1:] = level_counts[:-1]
-        level_counts[0] = 0
-        self.level_counts = np.add.accumulate(level_counts)
-
-    def select_icoords(self, dobj):
-        return self.oct_handler.icoords(self.domain.domain_id, self.mask,
-                                        self.cell_count)
-
-    def select_fcoords(self, dobj):
-        return self.oct_handler.fcoords(self.domain.domain_id, self.mask,
-                                        self.cell_count)
-
-    def select_fwidth(self, dobj):
-        # Recall domain_dimensions is the number of cells, not octs
-        base_dx = (self.domain.pf.domain_width /
-                   self.domain.pf.domain_dimensions)
-        widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
-        for i in range(3):
-            widths[:,i] = base_dx[i] / dds
-        return widths
-
-    def select_ires(self, dobj):
-        return self.oct_handler.ires(self.domain.domain_id, self.mask,
-                                     self.cell_count)
-
+class ParticleDomainSubset(OctreeSubset):
+    pass
 
 class ParticleGeometryHandler(OctreeGeometryHandler):
 
@@ -126,7 +96,7 @@
         total_particles = sum(sum(d.total_particles.values())
                               for d in self.domains)
         self.oct_handler = ParticleOctreeContainer(
-            self.parameter_file.domain_dimensions,
+            self.parameter_file.domain_dimensions/2,
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         self.oct_handler.n_ref = 64
@@ -170,8 +140,16 @@
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
-    def _chunk_spatial(self, dobj, ngz):
-        raise NotImplementedError
+    def _chunk_spatial(self, dobj, ngz, sort = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            size = og.cell_count
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -216,6 +194,7 @@
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
         self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
         self.current_redshift = hvals["Redshift"]
         self.omega_lambda = hvals["OmegaLambda"]
         self.omega_matter = hvals["Omega0"]
@@ -317,6 +296,7 @@
         self.domain_left_edge = np.zeros(3, "float64")
         self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
 
@@ -371,10 +351,27 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, data_style="tipsy",
-                 root_dimensions = 64):
+                 root_dimensions = 64, endian = ">",
+                 field_dtypes = None,
+                 domain_left_edge = None,
+                 domain_right_edge = None):
+        self.endian = endian
         self._root_dimensions = root_dimensions
         # Set up the template for domain files
         self.storage_filename = None
+        if domain_left_edge is None:
+            domain_left_edge = np.zeros(3, "float64") - 0.5
+        if domain_right_edge is None:
+            domain_right_edge = np.zeros(3, "float64") + 0.5
+
+        self.domain_left_edge = np.array(domain_left_edge, dtype="float64")
+        self.domain_right_edge = np.array(domain_right_edge, dtype="float64")
+
+        # My understanding is that dtypes are set on a field by field basis,
+        # not on a (particle type, field) basis
+        if field_dtypes is None: field_dtypes = {}
+        self._field_dtypes = field_dtypes
+
         super(TipsyStaticOutput, self).__init__(filename, data_style)
 
     def __repr__(self):
@@ -393,7 +390,7 @@
         # in the GADGET-2 user guide.
 
         f = open(self.parameter_filename, "rb")
-        hh = ">" + "".join(["%s" % (b) for a,b in self._header_spec])
+        hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
         hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
                      struct.unpack(hh, f.read(struct.calcsize(hh))))])
         self._header_offset = f.tell()
@@ -408,9 +405,11 @@
         # This may not be correct.
         self.current_time = hvals["time"]
 
-        self.domain_left_edge = np.zeros(3, "float64") - 0.5
-        self.domain_right_edge = np.ones(3, "float64") + 0.5
+        # NOTE: These are now set in the main initializer.
+        #self.domain_left_edge = np.zeros(3, "float64") - 0.5
+        #self.domain_right_edge = np.ones(3, "float64") + 0.5
         self.domain_dimensions = np.ones(3, "int32") * self._root_dimensions
+        self.periodicity = (True, True, True)
 
         self.cosmological_simulation = 1
 

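The new TipsyStaticOutput keywords make the endianness, per-field dtypes, and domain edges explicit.  A hedged usage sketch (the path is a placeholder and needs a real Tipsy snapshot; the keyword values are illustrative, not recommendations):

    import numpy as np
    from yt.frontends.sph.data_structures import TipsyStaticOutput

    pf = TipsyStaticOutput("my_tipsy_snapshot",                # placeholder path
                           endian=">",                          # big-endian file
                           field_dtypes={"Coordinates": "d"},   # illustrative override
                           domain_left_edge=np.array([-0.5] * 3),
                           domain_right_edge=np.array([0.5] * 3))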
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -372,6 +372,7 @@
         return rv
 
     def _initialize_octree(self, domain, octree):
+        pf = domain.pf
         with open(domain.domain_filename, "rb") as f:
             f.seek(domain.pf._header_offset)
             for ptype in self._ptypes:
@@ -391,6 +392,11 @@
                             pos[:,1].min(), pos[:,1].max())
                 mylog.debug("Spanning: %0.3e .. %0.3e in z",
                             pos[:,2].min(), pos[:,2].max())
+                if np.any(pos.min(axis=0) < pf.domain_left_edge) or \
+                   np.any(pos.max(axis=0) > pf.domain_right_edge):
+                    raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+                                           pf.domain_left_edge,
+                                           pf.domain_right_edge)
                 del pp
                 octree.add(pos, domain.domain_id)
 
@@ -412,10 +418,12 @@
         for ptype, field in self._fields:
             pfields = []
             if tp[ptype] == 0: continue
+            dtbase = domain.pf._field_dtypes.get(field, 'f')
+            ff = "%s%s" % (domain.pf.endian, dtbase)
             if field in _vector_fields:
-                dt = (field, [('x', '>f'), ('y', '>f'), ('z', '>f')])
+                dt = (field, [('x', ff), ('y', ff), ('z', ff)])
             else:
-                dt = (field, '>f')
+                dt = (field, ff)
             pds.setdefault(ptype, []).append(dt)
             field_list.append((ptype, field))
         for ptype in pds:

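The io.py change above stops hard-coding '>f' and instead builds the record dtype from the dataset's endianness plus a per-field base type.  The same construction in plain NumPy, with illustrative field names:

    import numpy as np

    endian = ">"                        # pf.endian
    dtbase = "f"                        # pf._field_dtypes.get(field, 'f')
    ff = "%s%s" % (endian, dtbase)      # ">f"

    vec_dt = np.dtype([("Coordinates", [("x", ff), ("y", ff), ("z", ff)])])
    scal_dt = np.dtype([("Mass", ff)])
    print(vec_dt.itemsize)    # 12
    print(scal_dt.itemsize)   # 4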
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/frontends/sph/smoothing_kernel.pyx
--- a/yt/frontends/sph/smoothing_kernel.pyx
+++ b/yt/frontends/sph/smoothing_kernel.pyx
@@ -53,21 +53,28 @@
     for p in range(ngas):
         kernel_sum[p] = 0.0
         skip = 0
+        # Find the range of root grid cells covered by the kernel
         for i in range(3):
             pos[i] = ppos[p, i]
+            # Get particle root grid integer index
             ind[i] = <int>((pos[i] - left_edge[i]) / dds[i])
+            # Number of root grid cells the smoothing length spans, plus one
+            half_len = <int>(hsml[p]/dds[i]) + 1
+            # Left and right integer indices of the smoothing range
+            # (if the smoothing length is small, both may fall in the same cell)
             ib0[i] = ind[i] - half_len
             ib1[i] = ind[i] + half_len
             #pos[i] = ppos[p, i] - left_edge[i]
             #ind[i] = <int>(pos[i] / dds[i])
             #ib0[i] = <int>((pos[i] - hsml[i]) / dds[i]) - 1
             #ib1[i] = <int>((pos[i] + hsml[i]) / dds[i]) + 1
+            # Skip if entirely outside the root grid
             if ib0[i] >= dims[i] or ib1[i] < 0:
                 skip = 1
             ib0[i] = iclip(ib0[i], 0, dims[i] - 1)
             ib1[i] = iclip(ib1[i], 0, dims[i] - 1)
         if skip == 1: continue
+        # Having found the kernel shape, calculate the kernel weight
         for i from ib0[0] <= i <= ib1[0]:
             idist[0] = (ind[0] - i) * (ind[0] - i) * sdds[0]
             for j from ib0[1] <= j <= ib1[1]:
@@ -75,10 +82,14 @@
                 for k from ib0[2] <= k <= ib1[2]:
                     idist[2] = (ind[2] - k) * (ind[2] - k) * sdds[2]
                     dist = idist[0] + idist[1] + idist[2]
+                    # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / hsml[p]
+                    # The kernel is 3D but we store its elements in a flat 1D array
                     gi = ((i * dims[1] + j) * dims[2]) + k
                     pdist[gi] = sph_kernel(dist)
+                    # Save sum to normalize later
                     kernel_sum[p] += pdist[gi]
+        # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
             for j from ib0[1] <= j <= ib1[1]:
                 for k from ib0[2] <= k <= ib1[2]:

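The comments added to smoothing_kernel.pyx describe a two-pass scheme: accumulate each particle's kernel weights over the cells it covers, then divide by the kernel sum when depositing so the total is conserved.  A 1D NumPy sketch of that normalize-by-kernel-sum idea, using a made-up compact kernel rather than yt's sph_kernel:

    import numpy as np

    def toy_kernel(q):
        # Stand-in for sph_kernel: linear, compactly supported on q < 1.
        return np.clip(1.0 - q, 0.0, None)

    # One particle at x = 0.52 with smoothing length 0.3 on a 10-cell grid.
    edges = np.linspace(0.0, 1.0, 11)
    centers = 0.5 * (edges[:-1] + edges[1:])
    q = np.abs(centers - 0.52) / 0.3

    weights = toy_kernel(q)
    weights /= weights.sum()      # normalize so the deposit is conserved
    grid = weights * 2.5          # deposit a particle "mass" of 2.5
    print(grid.sum())             # 2.5, up to round-off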
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/geometry/fake_octree.pyx
--- /dev/null
+++ b/yt/geometry/fake_octree.pyx
@@ -0,0 +1,90 @@
+"""
+Make a fake octree, deposit particle at every leaf
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from libc.stdlib cimport malloc, free, rand, RAND_MAX
+cimport numpy as np
+import numpy as np
+cimport cython
+
+from oct_container cimport Oct, RAMSESOctreeContainer
+
+# Create a balanced octree by a random walk that recursively
+# subdivides
+def create_fake_octree(RAMSESOctreeContainer oct_handler,
+                       long max_noct,
+                       long max_level,
+                       np.ndarray[np.int32_t, ndim=1] ndd,
+                       np.ndarray[np.float64_t, ndim=1] dle,
+                       np.ndarray[np.float64_t, ndim=1] dre,
+                       float fsubdivide):
+    cdef int[3] dd #hold the octant index
+    cdef int[3] ind #hold the octant index
+    cdef long i
+    cdef long cur_leaf = 0
+    cdef np.ndarray[np.uint8_t, ndim=2] mask
+    for i in range(3):
+        ind[i] = 0
+        dd[i] = ndd[i]
+    oct_handler.allocate_domains([max_noct])
+    parent = oct_handler.next_root(1, ind)
+    parent.domain = 1
+    cur_leaf = 8 #we've added one parent...
+    mask = np.ones((max_noct,8),dtype='uint8')
+    while oct_handler.domains[0].n_assigned < max_noct:
+        print "root: nocts ", oct_handler.domains[0].n_assigned
+        cur_leaf = subdivide(oct_handler, parent, ind, dd, cur_leaf, 0,
+                             max_noct, max_level, fsubdivide, mask)
+    return cur_leaf
+                             
+
+cdef long subdivide(RAMSESOctreeContainer oct_handler, 
+                    Oct *parent,
+                    int ind[3], int dd[3], 
+                    long cur_leaf, long cur_level, 
+                    long max_noct, long max_level, float fsubdivide,
+                    np.ndarray[np.uint8_t, ndim=2] mask):
+    print "child", parent.file_ind, ind[0], ind[1], ind[2], cur_leaf, cur_level
+    cdef int ddr[3]
+    cdef long i,j,k
+    cdef float rf #random float from 0-1
+    if cur_level >= max_level: 
+        return cur_leaf
+    if oct_handler.domains[0].n_assigned >= max_noct:
+        return cur_leaf
+    for i in range(3):
+        ind[i] = <int> ((rand() * 1.0 / RAND_MAX) * dd[i])
+        ddr[i] = 2
+    rf = rand() * 1.0 / RAND_MAX
+    if rf > fsubdivide:
+        if parent.children[ind[0]][ind[1]][ind[2]] == NULL:
+            cur_leaf += 7 
+        oct = oct_handler.next_child(1, ind, parent)
+        oct.domain = 1
+        cur_leaf = subdivide(oct_handler, oct, ind, ddr, cur_leaf, 
+                             cur_level + 1, max_noct, max_level, 
+                             fsubdivide, mask)
+    return cur_leaf

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -30,8 +30,12 @@
 
 cdef struct Oct
 cdef struct Oct:
-    np.int64_t ind          # index
-    np.int64_t local_ind
+    np.int64_t file_ind     # index with respect to the order in which it was
+                            # added
+    np.int64_t domain_ind   # index within the global set of domains
+                            # note that moving to a local index will require
+                            # moving to split-up masks, which is part of a
+                            # bigger refactor
     np.int64_t domain       # (opt) addl int index
     np.int64_t pos[3]       # position in ints
     np.int8_t level
@@ -39,6 +43,10 @@
     Oct *children[2][2][2]
     Oct *parent
 
+cdef struct OctInfo:
+    np.float64_t left_edge[3]
+    np.float64_t dds[3]
+
 cdef struct OctAllocationContainer
 cdef struct OctAllocationContainer:
     np.int64_t n
@@ -54,16 +62,12 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public int nocts
     cdef public int max_domain
-    cdef Oct* get(self, ppos)
+    cdef Oct* get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
     cdef void neighbors(self, Oct *, Oct **)
     cdef void oct_bounds(self, Oct *, np.float64_t *, np.float64_t *)
-
-cdef class ARTIOOctreeContainer(OctreeContainer):
-    cdef OctAllocationContainer **domains
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3])
-    cdef Oct *next_free_oct( self, int curdom )
-    cdef int valid_domain_oct(self, int curdom, Oct *parent)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, int curlevel, double pp[3])
+    # This function must return the offset from global-to-local domains; i.e.,
+    # OctAllocationContainer.offset if such a thing exists.
+    cdef np.int64_t get_domain_offset(self, int domain_id)
 
 cdef class RAMSESOctreeContainer(OctreeContainer):
     cdef OctAllocationContainer **domains

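On the Oct struct rename in oct_container.pxd: file_ind is the position of the oct in the order its own file (domain) added it, while domain_ind is its position in the single global list.  An illustrative Python mirror of that bookkeeping (a namedtuple standing in for the C struct):

    from collections import namedtuple

    Oct = namedtuple("Oct", ["file_ind", "domain_ind", "domain"])

    oct_list, per_file = [], {}
    for domain in [1, 1, 2, 1, 2]:
        file_ind = per_file.get(domain, 0)   # order within its own file
        per_file[domain] = file_ind + 1
        domain_ind = len(oct_list)           # order in the global list
        oct_list.append(Oct(file_ind, domain_ind, domain))

    print(oct_list[3])   # Oct(file_ind=2, domain_ind=3, domain=1)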
diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -56,8 +56,8 @@
     for n in range(n_octs):
         oct = &n_cont.my_octs[n]
         oct.parent = NULL
-        oct.ind = oct.domain = -1
-        oct.local_ind = n + n_cont.offset
+        oct.file_ind = oct.domain = -1
+        oct.domain_ind = n + n_cont.offset
         oct.level = -1
         for i in range(2):
             for j in range(2):
@@ -130,7 +130,7 @@
         while cur != NULL:
             for i in range(cur.n_assigned):
                 this = &cur.my_octs[i]
-                yield (this.ind, this.local_ind, this.domain)
+                yield (this.file_ind, this.domain_ind, this.domain)
             cur = cur.next
 
     cdef void oct_bounds(self, Oct *o, np.float64_t *corner, np.float64_t *size):
@@ -139,10 +139,13 @@
             size[i] = (self.DRE[i] - self.DLE[i]) / (self.nn[i] << o.level)
             corner[i] = o.pos[i] * size[i] + self.DLE[i]
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return 0
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef Oct *get(self, ppos):
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL):
         #Given a floating point position, retrieve the most
         #refined oct at that time
         cdef np.int64_t ind[3]
@@ -150,21 +153,34 @@
         cdef Oct *cur
         cdef int i
         for i in range(3):
-            pp[i] = ppos[i] - self.DLE[i]
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
-            cp[i] = (ind[i] + 0.5) * dds[i]
-        cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
-        while cur.children[0][0][0] != NULL:
+            ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
+            cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
+        next = self.root_mesh[ind[0]][ind[1]][ind[2]]
+        # We want to stop recursing when there's nowhere else to go
+        while next != NULL:
+            cur = next
             for i in range(3):
                 dds[i] = dds[i] / 2.0
-                if cp[i] > pp[i]:
+                if cp[i] > ppos[i]:
                     ind[i] = 0
                     cp[i] -= dds[i] / 2.0
                 else:
                     ind[i] = 1
                     cp[i] += dds[i]/2.0
-            cur = cur.children[ind[0]][ind[1]][ind[2]]
+            next = cur.children[ind[0]][ind[1]][ind[2]]
+        if oinfo == NULL: return cur
+        for i in range(3):
+            # This will happen *after* we quit out, so we need to back out the
+            # last change to cp
+            if ind[i] == 1:
+                cp[i] -= dds[i]/2.0 # Now centered
+            else:
+                cp[i] += dds[i]/2.0
+            # We don't need to change dds[i] as it has been halved from the
+            # oct width, thus making it already the cell width
+            oinfo.dds[i] = dds[i] # Cell width
+            oinfo.left_edge[i] = cp[i] - dds[i] # Center minus dds
         return cur
 
     @cython.boundscheck(False)
@@ -186,7 +202,40 @@
                 cur = cur.next
             o = &cur.my_octs[oi - cur.offset]
             for i in range(8):
-                count[o.domain - 1] += mask[o.local_ind,i]
+                count[o.domain - 1] += mask[o.domain_ind,i]
+        return count
+
+    @cython.boundscheck(True)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def count_leaves(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
+        # Modified to work when not all octs are assigned
+        cdef int i, j, k, ii
+        cdef np.int64_t oi
+        # pos here is CELL center, not OCT center.
+        cdef np.float64_t pos[3]
+        cdef int n = mask.shape[0]
+        cdef np.ndarray[np.int64_t, ndim=1] count
+        count = np.zeros(self.max_domain, 'int64')
+        # 
+        cur = self.cont
+        for oi in range(n):
+            if oi - cur.offset >= cur.n_assigned:
+                cur = cur.next
+                if cur == NULL:
+                    break
+            o = &cur.my_octs[oi - cur.offset]
+            # skip if unassigned
+            if o == NULL:
+                continue
+            if o.domain == -1: 
+                continue
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if o.children[i][j][k] == NULL:
+                            ii = ((k*2)+j)*2+i
+                            count[o.domain - 1] += mask[o.domain_ind,ii]
         return count
 
     @cython.boundscheck(False)
@@ -260,14 +309,17 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def get_neighbor_boundaries(self, ppos):
+    def get_neighbor_boundaries(self, oppos):
+        cdef int i, ii
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
         cdef np.ndarray[np.float64_t, ndim=2] bounds
         cdef np.float64_t corner[3], size[3]
         bounds = np.zeros((27,6), dtype="float64")
-        cdef int i, ii
         tnp = 0
         for i in range(27):
             self.oct_bounds(neighbors[i], corner, size)
@@ -276,330 +328,11 @@
                 bounds[i, 3+ii] = size[ii]
         return bounds
 
-cdef class ARTIOOctreeContainer(OctreeContainer):
+cdef class RAMSESOctreeContainer(OctreeContainer):
 
-    def allocate_domains(self, domain_counts):
-        cdef int count, i
-        cdef OctAllocationContainer *cur = self.cont
-        assert(cur == NULL)
-        self.max_domain = len(domain_counts) # 1-indexed
-        self.domains = <OctAllocationContainer **> malloc(
-            sizeof(OctAllocationContainer *) * len(domain_counts))
-        for i, count in enumerate(domain_counts):
-            cur = allocate_octs(count, cur)
-            if self.cont == NULL: self.cont = cur
-            self.domains[i] = cur
-        
-    def __dealloc__(self):
-        # This gets called BEFORE the superclass deallocation.  But, both get
-        # called.
-        if self.domains != NULL: free(self.domains)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count(self, np.ndarray[np.uint8_t, ndim=1, cast=True] mask,
-                     split = False):
-        cdef int n = mask.shape[0]
-        cdef int i, dom
-        cdef OctAllocationContainer *cur
-        cdef np.ndarray[np.int64_t, ndim=1] count
-        count = np.zeros(self.max_domain, 'int64')
-        # This is the idiom for iterating over many containers.
-        cur = self.cont
-        for i in range(n):
-            if i - cur.offset >= cur.n: cur = cur.next
-            if mask[i] == 1:
-                count[cur.my_octs[i - cur.offset].domain - 1] += 1
-        return count
-
-    def check(self, int curdom):
-        cdef int dind, pi
-        cdef Oct oct
-        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
-        cdef int nbad = 0
-        for pi in range(cont.n_assigned):
-            oct = cont.my_octs[pi]
-            for i in range(2):
-                for j in range(2):
-                    for k in range(2):
-                        if oct.children[i][j][k] != NULL and \
-                           oct.children[i][j][k].level != oct.level + 1:
-                            if curdom == 61:
-                                print pi, oct.children[i][j][k].level,
-                                print oct.level
-                            nbad += 1
-        print "DOMAIN % 3i HAS % 9i BAD OCTS (%s / %s / %s)" % (curdom, nbad, 
-            cont.n - cont.n_assigned, cont.n_assigned, cont.n)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *next_free_oct( self, int curdom ) :
-        cdef OctAllocationContainer *cont
-        cdef Oct *next_oct
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            print "Error, invalid domain or unallocated domains"
-            raise RuntimeError
-        
-        cont = self.domains[curdom - 1]
-        if cont.n_assigned >= cont.n :
-            print "Error, ran out of octs in domain curdom"
-            raise RuntimeError
-
-        self.nocts += 1
-        next_oct = &cont.my_octs[cont.n_assigned]
-        cont.n_assigned += 1
-        return next_oct
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef int valid_domain_oct(self, int curdom, Oct *parent) :
-        cdef OctAllocationContainer *cont
-
-        if curdom < 1 or curdom > self.max_domain or self.domains == NULL  :
-            raise RuntimeError
-        cont = self.domains[curdom - 1]
-
-        if parent == NULL or parent < &cont.my_octs[0] or \
-                parent > &cont.my_octs[cont.n_assigned] :
-            return 0
-        else :
-            return 1
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *get_root_oct(self, np.float64_t ppos[3]):
-        cdef np.int64_t ind[3]
-        cdef np.float64_t dds
-        cdef int i
-        for i in range(3):
-            dds = (self.DRE[i] - self.DLE[i])/self.nn[i]
-            ind[i] = <np.int64_t> floor((ppos[i]-self.DLE[i])/dds)
-        return self.root_mesh[ind[0]][ind[1]][ind[2]]
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef Oct *add_oct(self, int curdom, Oct *parent, 
-                    int curlevel, np.float64_t pp[3]):
-
-        cdef int level, i, ind[3]
-        cdef Oct *cur, *next_oct
-        cdef np.int64_t pos[3]
-        cdef np.float64_t dds
-
-        if curlevel < 0 :
-            raise RuntimeError
-        for i in range(3):
-            if pp[i] < self.DLE[i] or pp[i] > self.DRE[i] :
-                raise RuntimeError
-            dds = (self.DRE[i] - self.DLE[i])/(<np.int64_t>self.nn[i])
-            pos[i] = <np.int64_t> floor((pp[i]-self.DLE[i])*<np.float64_t>(1<<curlevel)/dds)
-
-        if curlevel == 0 :
-            cur = NULL
-        elif parent == NULL :
-            cur = self.get_root_oct(pp)
-            assert( cur != NULL )
-
-            # Now we find the location we want
-            for level in range(1,curlevel):
-                # At every level, find the cell this oct lives inside
-                for i in range(3) :
-                    if pos[i] < (2*cur.pos[i]+1)<<(curlevel-level) :
-                        ind[i] = 0
-                    else :
-                        ind[i] = 1
-                cur = cur.children[ind[0]][ind[1]][ind[2]]
-                if cur == NULL:
-                    # in ART we don't allocate down to curlevel 
-                    # if parent doesn't exist
-                    print "Error, no oct exists at that level"
-                    raise RuntimeError
-        else :
-            if not self.valid_domain_oct(curdom,parent) or \
-                    parent.level != curlevel - 1:
-                raise RuntimeError
-            cur = parent
- 
-        next_oct = self.next_free_oct( curdom )
-        if cur == NULL :
-            self.root_mesh[pos[0]][pos[1]][pos[2]] = next_oct
-        else :
-            for i in range(3) :
-                if pos[i] < 2*cur.pos[i]+1 :
-                    ind[i] = 0
-                else :
-                    ind[i] = 1
-            if cur.level != curlevel - 1 or  \
-                    cur.children[ind[0]][ind[1]][ind[2]] != NULL :
-                print "Error in add_oct: child already filled!"
-                raise RuntimeError
-
-            cur.children[ind[0]][ind[1]][ind[2]] = next_oct
-        for i in range(3) :
-            next_oct.pos[i] = pos[i]
-        next_oct.domain = curdom
-        next_oct.parent = cur
-        next_oct.ind = 1
-        next_oct.level = curlevel
-        return next_oct
-
-    # ii:mask/art ; ci=ramses loop backward (k<-fast, j ,i<-slow) 
-    # ii=0 000 art 000 ci 000 
-    # ii=1 100 art 100 ci 001 
-    # ii=2 010 art 010 ci 010 
-    # ii=3 110 art 110 ci 011
-    # ii=4 001 art 001 ci 100
-    # ii=5 101 art 011 ci 101
-    # ii=6 011 art 011 ci 110
-    # ii=7 111 art 111 ci 111
-    # keep coords ints so multiply by pow(2,1) when increasing level.
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def icoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii, level
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="int64")
-        ci=0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        coords[ci, 0] = (o.pos[0] << 1) + i
-                        coords[ci, 1] = (o.pos[1] << 1) + j
-                        coords[ci, 2] = (o.pos[2] << 1) + k
-                        ci += 1
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def ires(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        n = mask.shape[0]
-        cdef np.ndarray[np.int64_t, ndim=1] levels
-        levels = np.empty(cell_count, dtype="int64")
-        ci = 0
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[oi + cur.offset, i] == 0: continue
-                levels[ci] = o.level
-                ci +=1
-        return levels
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def count_levels(self, int max_level, int domain_id,
-                     np.ndarray[np.uint8_t, ndim=2, cast=True] mask):
-        cdef np.ndarray[np.int64_t, ndim=1] level_count
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef int oi, i
-        level_count = np.zeros(max_level+1, 'int64')
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
-                level_count[o.level] += 1
-        return level_count
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fcoords(self, int domain_id,
-                np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count,
-                np.ndarray[np.int64_t, ndim=1] level_counts):
-        # Wham, bam, it's a scam
-        cdef np.int64_t i, j, k, oi, ci, n, ii
-        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
-        cdef Oct *o
-        cdef np.float64_t pos[3]
-        cdef np.float64_t base_dx[3], dx[3]
-        n = mask.shape[0]
-        cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((cell_count, 3), dtype="float64")
-        ci =0 
-        for i in range(3):
-            # This is the base_dx, but not the base distance from the center
-            # position.  Note that the positions will also all be offset by
-            # dx/2.0.  This is also for *oct grids*, not cells.
-            base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        for oi in range(cur.n):
-            o = &cur.my_octs[oi]
-            for i in range(3):
-                # This gives the *grid* width for this level
-                dx[i] = base_dx[i] / (1 << o.level)
-                # o.pos is the *grid* index, so pos[i] is the center of the
-                # first cell in the grid
-                pos[i] = self.DLE[i] + o.pos[i]*dx[i] + dx[i]/4.0
-                dx[i] = dx[i] / 2.0 # This is now the *offset* 
-            for k in range(2):
-                for j in range(2):
-                    for i in range(2):
-                        ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        coords[ci, 0] = pos[0] + dx[0] * i
-                        coords[ci, 1] = pos[1] + dx[1] * j
-                        coords[ci, 2] = pos[2] + dx[2] * k
-                        ci +=1 
-        return coords
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def fill_mask(self, int domain, dest_fields, source_fields,
-                   np.ndarray[np.uint8_t, ndim=2, cast=True] mask, int offset):
-        cdef np.ndarray[np.float32_t, ndim=1] source
-        cdef np.ndarray[np.float64_t, ndim=1] dest
-        cdef OctAllocationContainer *dom = self.domains[domain - 1]
-        cdef Oct *o
-        cdef int n
-        cdef int i, j, k, ii
-        cdef int local_pos, local_filled
-        cdef np.float64_t val
-        for key in dest_fields:
-            local_filled = 0
-            dest = dest_fields[key]
-            source = source_fields[key]
-            # snl: an alternative to filling level 0 yt-octs is to produce a 
-            # mapping between the mask and the source read order
-            for n in range(dom.n):
-                o = &dom.my_octs[n]
-                for k in range(2):
-                    for j in range(2):
-                        for i in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.local_ind*8+ii]
-                            # print 'oct_container.pyx:sourcemasked',o.level,local_filled, o.local_ind*8+ii, source[o.local_ind*8+ii]
-                            local_filled += 1
-        return local_filled
-
-cdef class RAMSESOctreeContainer(OctreeContainer):
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        cdef OctAllocationContainer *cont = self.domains[domain_id - 1]
+        return cont.offset
 
     cdef Oct* next_root(self, int domain_id, int ind[3]):
         cdef Oct *next = self.root_mesh[ind[0]][ind[1]][ind[2]]
@@ -666,7 +399,77 @@
                 count[cur.my_octs[i - cur.offset].domain - 1] += 1
         return count
 
-    def check(self, int curdom):
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n,  use
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
+        return m2 # NOTE: This is uint8_t
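As a rough guide to what the new domain_and above computes: it copies the (n_octs, 8) per-cell selector mask through unchanged for octs that belong to the requested domain, leaving every other row zero. A NumPy sketch of the equivalent operation (the helper name and the oct_domains array are illustrative, not part of yt):

    import numpy as np

    def domain_and_sketch(mask, oct_domains, domain_id):
        # mask: (n_octs, 8) uint8 selector mask over every oct's 8 cells
        # oct_domains: (n_octs,) owning domain of each oct
        m2 = np.zeros_like(mask, dtype="uint8")
        mine = oct_domains == domain_id
        m2[mine] = mask[mine]   # keep the full shape, zero out other domains
        return m2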
+
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(cur.n_assigned):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.domain_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
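In other words, domain_mask keeps only the octs of this domain that have at least one selected cell, and returns their 8-cell masks reshaped into (2, 2, 2, n_selected_octs) blocks, with ii unpacked as ((k*2)+j)*2+i. A hedged NumPy sketch with illustrative names:

    import numpy as np

    def domain_mask_sketch(mask, oct_domains, domain_id):
        # mask: (n_octs, 8) uint8; cell ii is laid out as ((k*2)+j)*2+i
        mine = (oct_domains == domain_id) & mask.astype(bool).any(axis=1)
        sub = mask[mine].astype(bool)             # (nm, 8)
        # reshape ii -> (k, j, i), then move the oct axis last: (i, j, k, nm)
        return sub.reshape(-1, 2, 2, 2).transpose(3, 2, 1, 0)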
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef OctAllocationContainer *cur = self.domains[domain_id - 1]
+        cdef Oct *o
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(cur.n, 'int64') - 1
+        nm = 0
+        for oi in range(cur.n):
+            o = &cur.my_octs[oi]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            if use == 1:
+                ind[o.domain_ind - cur.offset] = nm
+            nm += use
+        return ind
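domain_ind builds a compaction map: each oct of the domain gets -1 if none of its cells are selected, otherwise its running position among the selected octs (the particle-octree variant further below documents the intended use). Roughly, in NumPy terms (sketch only):

    import numpy as np

    def domain_ind_sketch(mask):
        # mask: (n_domain_octs, 8) mask restricted to this domain's octs
        use = mask.astype(bool).any(axis=1)       # octs with any selected cell
        ind = np.full(mask.shape[0], -1, dtype="int64")
        ind[use] = np.arange(use.sum())           # compact index over selected octs
        return ind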
+
+    def check(self, int curdom, int print_all = 0):
         cdef int dind, pi
         cdef Oct oct
         cdef OctAllocationContainer *cont = self.domains[curdom - 1]
@@ -675,6 +478,9 @@
         cdef int unassigned = 0
         for pi in range(cont.n_assigned):
             oct = cont.my_octs[pi]
+            if print_all==1:
+                print pi, oct.level, oct.domain,
+                print oct.pos[0],oct.pos[1],oct.pos[2]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
@@ -691,6 +497,33 @@
         print "DOMAIN % 3i HAS % 9i MISSED OCTS" % (curdom, nmissed)
         print "DOMAIN % 3i HAS % 9i UNASSIGNED OCTS" % (curdom, unassigned)
 
+    def check_refinement(self, int curdom):
+        cdef int pi, i, j, k, some_refined, some_unrefined
+        cdef Oct *oct
+        cdef int bad = 0
+        cdef OctAllocationContainer *cont = self.domains[curdom - 1]
+        for pi in range(cont.n_assigned):
+            oct = &cont.my_octs[pi]
+            some_unrefined = 0
+            some_refined = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        if oct.children[i][j][k] == NULL:
+                            some_unrefined = 1
+                        else:
+                            some_refined = 1
+            if some_unrefined == some_refined == 1:
+                #print "BAD", oct.file_ind, oct.domain_ind
+                bad += 1
+                if curdom == 10 or curdom == 72:
+                    for i in range(2):
+                        for j in range(2):
+                            for k in range(2):
+                                print (oct.children[i][j][k] == NULL),
+                    print
+        print "BAD TOTAL", curdom, bad, cont.n_assigned
+
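check_refinement appears to be a debugging helper: an oct counts as bad when some of its eight children are allocated and others are NULL, i.e. it is only partially refined. A minimal Python sketch of the same test, assuming the children are held in a nested 2x2x2 structure with None for missing children:

    def is_partially_refined(children):
        # children[i][j][k] is a child oct or None
        flags = [children[i][j][k] is not None
                 for i in range(2) for j in range(2) for k in range(2)]
        return any(flags) and not all(flags)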
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -739,7 +572,7 @@
             # Now we should be at the right level
             cur.domain = curdom
             if local == 1:
-                cur.ind = p
+                cur.file_ind = p
             cur.level = curlevel
         return cont.n_assigned - initial
 
@@ -757,18 +590,18 @@
         n = mask.shape[0]
         cdef np.ndarray[np.int64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="int64")
+        ci = 0
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(2):
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = (o.pos[0] << 1) + i
                         coords[ci, 1] = (o.pos[1] << 1) + j
                         coords[ci, 2] = (o.pos[2] << 1) + k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
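This hunk (and the matching ones below for ires and fcoords) drops the per-level bucketing through level_counts in favour of a single running counter, so selected cells are now written out in oct-traversal order. The integer coordinate of a cell is still the oct position doubled plus the cell offset; a Python sketch of the loop, with illustrative names:

    import numpy as np

    def icoords_sketch(oct_pos, cell_mask):
        # oct_pos: (n_octs, 3) integer oct positions at each oct's own level
        # cell_mask: (n_octs, 8) with cell index ii = ((k*2)+j)*2+i
        out = []
        for p, m in zip(oct_pos, cell_mask):
            for k in range(2):
                for j in range(2):
                    for i in range(2):
                        if not m[(k * 2 + j) * 2 + i]:
                            continue
                        out.append(((p[0] << 1) + i,
                                    (p[1] << 1) + j,
                                    (p[2] << 1) + k))
        return np.array(out, dtype="int64")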
 
     @cython.boundscheck(False)
@@ -790,9 +623,8 @@
             o = &cur.my_octs[oi]
             for i in range(8):
                 if mask[oi + cur.offset, i] == 0: continue
-                ci = level_counts[o.level]
                 levels[ci] = o.level
-                level_counts[o.level] += 1
+                ci += 1
         return levels
 
     @cython.boundscheck(False)
@@ -808,7 +640,7 @@
         for oi in range(cur.n_assigned):
             o = &cur.my_octs[oi]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -833,6 +665,7 @@
             # position.  Note that the positions will also all be offset by
             # dx/2.0.  This is also for *oct grids*, not cells.
             base_dx[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
+        ci = 0
         for oi in range(cur.n):
             o = &cur.my_octs[oi]
             for i in range(3):
@@ -846,12 +679,11 @@
                 for j in range(2):
                     for k in range(2):
                         ii = ((k*2)+j)*2+i
-                        if mask[o.local_ind, ii] == 0: continue
-                        ci = level_counts[o.level]
+                        if mask[o.domain_ind, ii] == 0: continue
                         coords[ci, 0] = pos[0] + dx[0] * i
                         coords[ci, 1] = pos[1] + dx[1] * j
                         coords[ci, 2] = pos[2] + dx[2] * k
-                        level_counts[o.level] += 1
+                        ci += 1
         return coords
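The arithmetic in fcoords follows the comments above: along each axis, an oct at level L has width dx = base_dx / 2**L, its first cell center sits at DLE + pos*dx + dx/4, and the neighbouring cell is offset by dx/2. A small per-axis sketch with hypothetical numbers:

    def cell_center(DLE, DRE, nn, level, oct_pos, cell_offset):
        # cell_offset is 0 or 1 along this axis
        base_dx = (DRE - DLE) / nn             # width of a root oct
        dx = base_dx / (1 << level)            # width of an oct at this level
        first = DLE + oct_pos * dx + dx / 4.0  # center of the oct's first cell
        return first + (dx / 2.0) * cell_offset

    # e.g. cell_center(0.0, 1.0, 2, 0, 0, 0) == 0.125
    #      cell_center(0.0, 1.0, 2, 0, 0, 1) == 0.375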
 
     @cython.boundscheck(False)
@@ -873,20 +705,17 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                if o.level != level: continue
-                for i in range(2):
-                    for j in range(2):
-                        for k in range(2):
-                            ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
-                            dest[local_filled + offset] = source[o.ind, ii]
-                            local_filled += 1
+                for ii in range(8):
+                    # We iterate and check here to keep our counts consistent
+                    # when filling different levels.
+                    if mask[o.domain_ind, ii] == 0: continue
+                    if o.level == level: 
+                        dest[local_filled] = source[o.file_ind, ii]
+                    local_filled += 1
         return local_filled
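The rewritten fill_level walks all eight cells of every masked oct and advances local_filled whether or not the oct is at the requested level, only copying from source when it is; that keeps destination offsets consistent when the caller fills one level at a time. A hedged sketch of the control flow:

    def fill_level_sketch(level, dest, source, octs, mask):
        # octs: iterable of (oct_level, file_ind, domain_ind) tuples (illustrative)
        filled = 0
        for oct_level, file_ind, domain_ind in octs:
            for ii in range(8):
                if not mask[domain_ind, ii]:
                    continue
                if oct_level == level:
                    dest[filled] = source[file_ind, ii]
                filled += 1   # counted even when the level does not match
        return filled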
 
+cdef class ARTOctreeContainer(RAMSESOctreeContainer):
 
-
-cdef class ARTOctreeContainer(RAMSESOctreeContainer):
-    #this class is specifically for the NMSU ART
     @cython.boundscheck(True)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -910,7 +739,7 @@
             source = source_fields[key]
             for n in range(dom.n):
                 o = &dom.my_octs[n]
-                index = o.ind-subchunk_offset
+                index = o.file_ind-subchunk_offset
                 if o.level != level: continue
                 if index < 0: continue
                 if index >= subchunk_max: 
@@ -921,7 +750,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             dest[local_filled + offset] = \
                                 source[index,ii]
                             local_filled += 1
@@ -961,7 +790,7 @@
                     for j in range(2):
                         for k in range(2):
                             ii = ((k*2)+j)*2+i
-                            if mask[o.local_ind, ii] == 0: continue
+                            if mask[o.domain_ind, ii] == 0: continue
                             ox = (o.pos[0] << 1) + i
                             oy = (o.pos[1] << 1) + j
                             oz = (o.pos[2] << 1) + k
@@ -1036,12 +865,23 @@
                 free(o.sd.pos)
         free(o)
 
+    def __iter__(self):
+        #Get the next oct, will traverse domains
+        #Note that oct containers can be sorted 
+        #so that consecutive octs are on the same domain
+        cdef int oi
+        cdef Oct *o
+        for oi in range(self.nocts):
+            o = self.oct_list[oi]
+            yield (o.file_ind, o.domain_ind, o.domain)
+
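The new __iter__ yields one (file_ind, domain_ind, domain) tuple per oct in oct_list order. As a usage illustration only (the octree variable and the counting are hypothetical, not part of this changeset):

    from collections import Counter

    counts = Counter()
    for file_ind, domain_ind, domain in octree:   # octree: a particle octree container
        counts[domain] += 1
    # counts[d] is now the number of octs assigned to domain d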
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def icoords(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the integer positions of the cells
         #Limited to this domain and within the mask
         #Positions are binary; aside from the root mesh
@@ -1070,7 +910,8 @@
     @cython.cdivision(True)
     def ires(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the 'resolution' of each cell; ie the level
         cdef np.ndarray[np.int64_t, ndim=1] res
         res = np.empty(cell_count, dtype="int64")
@@ -1090,7 +931,8 @@
     @cython.cdivision(True)
     def fcoords(self, int domain_id,
                 np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
-                np.int64_t cell_count):
+                np.int64_t cell_count,
+                np.ndarray[np.int64_t, ndim=1] level_counts):
         #Return the floating point unitary position of every cell
         cdef np.ndarray[np.float64_t, ndim=2] coords
         coords = np.empty((cell_count, 3), dtype="float64")
@@ -1141,6 +983,7 @@
         cdef int max_level = 0
         self.oct_list = <Oct**> malloc(sizeof(Oct*)*self.nocts)
         cdef np.int64_t i = 0
+        cdef np.int64_t dom_ind
         cdef ParticleArrays *c = self.first_sd
         while c != NULL:
             self.oct_list[i] = c.oct
@@ -1159,13 +1002,20 @@
         self.dom_offsets = <np.int64_t *>malloc(sizeof(np.int64_t) *
                                                 (self.max_domain + 3))
         self.dom_offsets[0] = 0
+        dom_ind = 0
         for i in range(self.nocts):
-            self.oct_list[i].local_ind = i
+            self.oct_list[i].domain_ind = i
+            self.oct_list[i].file_ind = dom_ind
+            dom_ind += 1
             if self.oct_list[i].domain > cur_dom:
                 cur_dom = self.oct_list[i].domain
                 self.dom_offsets[cur_dom + 1] = i
+                dom_ind = 0
         self.dom_offsets[cur_dom + 2] = self.nocts
 
+    cdef np.int64_t get_domain_offset(self, int domain_id):
+        return self.dom_offsets[domain_id + 1]
+
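This hunk sets domain_ind to each oct's global position in the sorted oct_list and records in dom_offsets where each domain's run of octs begins, so get_domain_offset is a single lookup. One way to build equivalent offsets from a sorted list of per-oct domain ids (sketch only, assuming the same +1 indexing convention):

    import numpy as np

    def build_dom_offsets(oct_domains_sorted, max_domain):
        # oct_domains_sorted: per-oct domain ids in ascending order
        doms = np.asarray(oct_domains_sorted)
        offsets = np.zeros(max_domain + 3, dtype="int64")
        for d in range(max_domain + 1):
            offsets[d + 1] = np.searchsorted(doms, d)  # first oct of domain d
        offsets[max_domain + 2] = len(doms)
        return offsets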
     cdef Oct* allocate_oct(self):
         #Allocate the memory, set to NULL or -1
         #We reserve space for n_ref particles, but keep
@@ -1175,8 +1025,8 @@
         cdef ParticleArrays *sd = <ParticleArrays*> \
             malloc(sizeof(ParticleArrays))
         cdef int i, j, k
-        my_oct.ind = my_oct.domain = -1
-        my_oct.local_ind = self.nocts - 1
+        my_oct.file_ind = my_oct.domain = -1
+        my_oct.domain_ind = self.nocts - 1
         my_oct.pos[0] = my_oct.pos[1] = my_oct.pos[2] = -1
         my_oct.level = -1
         my_oct.sd = sd
@@ -1227,7 +1077,7 @@
         for oi in range(ndo):
             o = self.oct_list[oi + doff]
             for i in range(8):
-                if mask[o.local_ind, i] == 0: continue
+                if mask[o.domain_ind, i] == 0: continue
                 level_count[o.level] += 1
         return level_count
 
@@ -1250,7 +1100,7 @@
                 #IND Corresponding integer index on the root octs
                 #CP Center  point of that oct
                 pp[i] = pos[p, i]
-                dds[i] = (self.DRE[i] + self.DLE[i])/self.nn[i]
+                dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
                 ind[i] = <np.int64_t> ((pp[i] - self.DLE[i])/dds[i])
                 cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             cur = self.root_mesh[ind[0]][ind[1]][ind[2]]
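Note the one-character fix in this hunk: the root cell width is (DRE - DLE)/nn, not (DRE + DLE)/nn, so the root-mesh index of a position p along each axis is floor((p - DLE)/dds). A quick standalone check, with illustrative names:

    import numpy as np

    def root_index(p, DLE, DRE, nn):
        # p, DLE, DRE: (3,) floats; nn: (3,) root mesh dimensions
        dds = (np.asarray(DRE) - np.asarray(DLE)) / np.asarray(nn)
        return np.floor((np.asarray(p) - np.asarray(DLE)) / dds).astype("int64")

    # root_index([0.6, 0.1, 0.9], [0.0] * 3, [1.0] * 3, [2] * 3) -> [1, 0, 1]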
@@ -1377,12 +1227,15 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_neighbor_particles(self, ppos):
+    def count_neighbor_particles(self, oppos):
         #How many particles are in my neighborhood
+        cdef int i, ni, dl, tnp
+        cdef np.float64_t ppos[3]
+        for i in range(3):
+            ppos[i] = oppos[i]
         cdef Oct *main = self.get(ppos)
         cdef Oct* neighbors[27]
         self.neighbors(main, neighbors)
-        cdef int i, ni, dl, tnp
         tnp = 0
         for i in range(27):
             if neighbors[i].sd != NULL:
@@ -1409,4 +1262,83 @@
                 count[o.domain] += mask[oi,i]
         return count
 
+    def domain_and(self, np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                   int domain_id):
+        cdef np.int64_t i, oi, n, use
+        cdef Oct *o
+        cdef np.ndarray[np.uint8_t, ndim=2] m2 = \
+                np.zeros((mask.shape[0], 8), 'uint8')
+        n = mask.shape[0]
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                m2[o.domain_ind, i] = mask[o.domain_ind, i]
+        return m2
 
+    def domain_mask(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # What distinguishes this one from domain_and is that we have a mask,
+        # which covers the whole domain, but our output will only be of a much
+        # smaller subset of octs that belong to a given domain *and* the mask.
+        # Note also that typically when something calls domain_and, they will 
+        # use a logical_any along the oct axis.  Here we don't do that.
+        # Note also that we change the shape of the returned array.
+        cdef np.int64_t i, j, k, oi, n, nm, use
+        cdef Oct *o
+        n = mask.shape[0]
+        nm = 0
+        # This could perhaps be faster if we 
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            nm += use
+        cdef np.ndarray[np.uint8_t, ndim=4] m2 = \
+                np.zeros((2, 2, 2, nm), 'uint8')
+        nm = 0
+        for oi in range(n):
+            o = self.oct_list[oi]
+            if o.domain != domain_id: continue
+            use = 0
+            for i in range(2):
+                for j in range(2):
+                    for k in range(2):
+                        ii = ((k*2)+j)*2+i
+                        if mask[o.domain_ind, ii] == 0: continue
+                        use = m2[i, j, k, nm] = 1
+            nm += use
+        return m2.astype("bool")
+
+    def domain_ind(self,
+                    # mask is the base selector's *global* mask
+                    np.ndarray[np.uint8_t, ndim=2, cast=True] mask,
+                    int domain_id):
+        # Here we once again do something similar to the other functions.  We
+        # need a set of indices into the final reduced, masked values.  The
+        # indices will be domain.n long, and will be of type int64.  This way,
+        # we can get the Oct through a .get() call, then use Oct.file_ind as an
+        # index into this newly created array, then finally use the returned
+        # index into the domain subset array for deposition.
+        cdef np.int64_t i, j, k, oi, noct, n, nm, use, offset
+        cdef Oct *o
+        # For particle octrees, domain 0 is special and means non-leaf nodes.
+        offset = self.dom_offsets[domain_id + 1]
+        noct = self.dom_offsets[domain_id + 2] - offset
+        cdef np.ndarray[np.int64_t, ndim=1] ind = np.zeros(noct, 'int64')
+        nm = 0
+        for oi in range(noct):
+            ind[oi] = -1
+            o = self.oct_list[oi + offset]
+            use = 0
+            for i in range(8):
+                if mask[o.domain_ind, i] == 1: use = 1
+            if use == 1:
+                ind[oi] = nm
+            nm += use
+        return ind

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/geometry/oct_geometry_handler.py
--- a/yt/geometry/oct_geometry_handler.py
+++ b/yt/geometry/oct_geometry_handler.py
@@ -54,7 +54,7 @@
         Returns (in code units) the smallest cell size in the simulation.
         """
         return (self.parameter_file.domain_width /
-                (2**self.max_level)).min()
+                (2**(self.max_level+1))).min()
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]
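The extra factor of two in get_smallest_dx presumably reflects that max_level counts oct levels while cells are half an oct wide, so the finest cell width is domain_width / 2**(max_level + 1) rather than / 2**max_level. With hypothetical numbers:

    domain_width = 1.0
    max_level = 5
    oct_dx  = domain_width / 2 ** max_level        # 0.03125
    cell_dx = domain_width / 2 ** (max_level + 1)  # 0.015625, what get_smallest_dx now returns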

diff -r b20f76ccd3c34bac9c187272593f9f49b58e7795 -r 1c607a2db7281ee3db313a707b62805b53cfad73 yt/geometry/particle_deposit.pxd
--- /dev/null
+++ b/yt/geometry/particle_deposit.pxd
@@ -0,0 +1,47 @@
+"""
+Particle Deposition onto Octs
+
+Author: Christopher Moody <chris.e.moody at gmail.com>
+Affiliation: UC Santa Cruz
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2013 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport cython
+
+from fp_utils cimport *
+from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
+
+cdef extern from "alloca.h":
+    void *alloca(int)
+
+cdef inline int gind(int i, int j, int k, int dims[3]):
+    return ((k*dims[1])+j)*dims[0]+i
+
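For reference, the gind inline above flattens an (i, j, k) cell index with i varying fastest, i.e. Fortran order over shape (dims[0], dims[1], dims[2]). A quick check of the convention in plain Python:

    import numpy as np

    def gind(i, j, k, dims):
        # same arithmetic as the cdef inline above
        return ((k * dims[1]) + j) * dims[0] + i

    dims = (4, 3, 2)
    assert gind(1, 2, 1, dims) == np.ravel_multi_index((1, 2, 1), dims, order="F")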
+cdef class ParticleDepositOperation:
+    # We assume each will allocate and define their own temporary storage
+    cdef np.int64_t nvals
+    cdef void process(self, int dim[3], np.float64_t left_edge[3],
+                      np.float64_t dds[3], np.int64_t offset,
+                      np.float64_t ppos[3], np.float64_t *fields)

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
