[yt-svn] commit/yt-3.0: 5 new changesets

commits-noreply at bitbucket.org
Thu Oct 3 17:05:53 PDT 2013


5 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/df7341e79b12/
Changeset:   df7341e79b12
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-02 22:52:35
Summary:     Adding a "save octree" function.
Affected #:  4 files

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -74,7 +74,8 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data)
+                        OctVisitorData *data,
+                        int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -157,9 +157,11 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
-        vc = self.partial_coverage
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
+        if vc == -1:
+            vc = self.partial_coverage
         data.global_index = -1
         data.level = 0
         cdef np.float64_t pos[3], dds[3]
@@ -431,6 +433,24 @@
             coords[:,i] += self.DLE[i]
         return coords
 
+    def save_octree(self):
+        # Get the header
+        header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
+                      left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
+                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]))
+        if self.partial_coverage == 1:
+            raise NotImplementedError
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        # domain_id = -1 here, because we want *every* oct
+        cdef OctVisitorData data
+        self.setup_data(&data, -1)
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+        data.array = <void *> ref_mask.data
+        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        header['octree'] = ref_mask
+        return header
+
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -698,6 +718,9 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
         self.fill_func = oct_visitors.fill_file_indices_rind
 
+    def save_octree(self):
+        raise NotImplementedError
+
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
@@ -734,12 +757,14 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
         cdef np.int64_t key, ukey
         data.global_index = -1
         data.level = 0
-        vc = self.partial_coverage
+        if vc == -1:
+            vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -58,6 +58,7 @@
 cdef oct_visitor_function fill_file_indices_oind
 cdef oct_visitor_function fill_file_indices_rind
 cdef oct_visitor_function count_by_domain
+cdef oct_visitor_function store_octree
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.

diff -r 59d6af464edc0cb7d6322e930999bf715e54766f -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -173,3 +173,14 @@
     # NOTE: We do this for every *cell*.
     arr = <np.int64_t *> data.array
     arr[o.domain - 1] += 1
+
+cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef np.uint8_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.uint8_t *> data.array
+        if o.children == NULL:
+            arr[data.index] = 0
+        if o.children != NULL:
+            arr[data.index] = 1
+        data.index += 1
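
For context, a minimal sketch of how the new save_octree method could be exercised, assuming an
octree populated the way the particle-octree tests do it; the domain edges, particle count, and
n_ref values below are illustrative, not part of this changeset:

    import numpy as np
    from yt.geometry.particle_oct_container import ParticleOctreeContainer
    from yt.geometry.oct_container import _ORDER_MAX
    from yt.utilities.lib.geometry_utils import get_morton_indices

    DLE = np.array([0.0, 0.0, 0.0])   # hypothetical domain left edge
    DRE = np.array([1.0, 1.0, 1.0])   # hypothetical domain right edge
    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
    octree.n_ref = 32
    pos = np.random.uniform(DLE, DRE, size=(4096, 3))
    # Convert positions to integer coordinates at the deepest level, then to Morton indices.
    dx = (DRE - DLE) / (1 << _ORDER_MAX)
    morton = get_morton_indices(np.floor((pos - DLE) / dx).astype("uint64"))
    morton.sort()
    octree.add(morton)
    octree.finalize()

    header = octree.save_octree()
    # header carries "dims", "left_edge" and "right_edge" for the root mesh, plus "octree":
    # a uint8 array with one entry per oct in visit order, 0 for a leaf, 1 for a refined oct.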


https://bitbucket.org/yt_analysis/yt-3.0/commits/a8b4e6fd1a49/
Changeset:   a8b4e6fd1a49
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-02 23:42:46
Summary:     Adding a load_octree command along with tests.
Affected #:  4 files

diff -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 -r a8b4e6fd1a4925c5cb918f1e075204d1f89810d3 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -115,6 +115,58 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    @classmethod
+    def load_octree(cls, header):
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = header['octree']
+        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'], header['right_edge'])
+        # NOTE: We do not allow domain/file indices to be specified.
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        obj.setup_data(&data, -1)
+        obj.allocate_domains([ref_mask.shape[0]])
+        cdef int i, j, k, n
+        data.global_index = -1
+        data.level = 0
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (obj.DRE[i] - obj.DLE[i]) / obj.nn[i]
+        # Pos is the center of the octs
+        cdef OctAllocationContainer *cur = obj.domains[0]
+        cdef Oct *o
+        cdef void *p[3]
+        p[0] = ref_mask.data
+        p[1] = <void *> cur.my_octs
+        p[2] = <void *> &cur.n_assigned
+        data.array = p
+        pos[0] = obj.DLE[0] + dds[0]/2.0
+        for i in range(obj.nn[0]):
+            pos[1] = obj.DLE[1] + dds[1]/2.0
+            for j in range(obj.nn[1]):
+                pos[2] = obj.DLE[2] + dds[2]/2.0
+                for k in range(obj.nn[2]):
+                    if obj.root_mesh[i][j][k] != NULL:
+                        raise RuntimeError
+                    o = &cur.my_octs[cur.n_assigned]
+                    o.domain_ind = o.file_ind = 0
+                    o.domain = 1
+                    obj.root_mesh[i][j][k] = o
+                    cur.n_assigned += 1
+                    data.pos[0] = i
+                    data.pos[1] = j
+                    data.pos[2] = k
+                    selector.recursively_visit_octs(
+                        obj.root_mesh[i][j][k],
+                        pos, dds, 0, oct_visitors.load_octree,
+                        &data, 1)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+        obj.nocts = cur.n_assigned
+        assert(obj.nocts == ref_mask.size)
+        return obj
+
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
         cdef int i
         data.index = 0
@@ -718,6 +770,10 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
         self.fill_func = oct_visitors.fill_file_indices_rind
 
+    @classmethod
+    def load_octree(self, header):
+        raise NotImplementedError
+
     def save_octree(self):
         raise NotImplementedError
 

diff -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 -r a8b4e6fd1a4925c5cb918f1e075204d1f89810d3 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -38,7 +38,6 @@
                    # To calculate nzones, 1 << (oref * 3)
     np.int32_t nz
                             
-
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
 
@@ -59,6 +58,7 @@
 cdef oct_visitor_function fill_file_indices_rind
 cdef oct_visitor_function count_by_domain
 cdef oct_visitor_function store_octree
+cdef oct_visitor_function load_octree
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.

diff -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 -r a8b4e6fd1a4925c5cb918f1e075204d1f89810d3 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -18,6 +18,7 @@
 cimport numpy
 import numpy
 from fp_utils cimport *
+from libc.stdlib cimport malloc, free
 
 # Now some visitor functions
 
@@ -184,3 +185,24 @@
         if o.children != NULL:
             arr[data.index] = 1
         data.index += 1
+
+cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef void **p = <void **> data.array
+    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
+    cdef Oct* octs = <Oct*> p[1]
+    cdef np.int64_t *nocts = <np.int64_t*> p[2]
+    cdef int i
+   
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        if arr[data.index] == 0:
+            o.children = NULL
+        if arr[data.index] == 1:
+            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                o.children[i] = &octs[nocts[0]]
+                o.children[i].file_ind = nocts[0]
+                o.children[i].domain_ind = nocts[0]
+                o.children[i].domain = 1
+                nocts[0] += 1
+        data.index += 1

diff -r df7341e79b12ccf7affe14c2fb9f218d05b49ee1 -r a8b4e6fd1a4925c5cb918f1e075204d1f89810d3 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,12 +1,14 @@
 from yt.testing import *
 import numpy as np
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer, \
     ParticleRegions
 from yt.geometry.oct_container import _ORDER_MAX
 from yt.utilities.lib.geometry_utils import get_morton_indices
 from yt.frontends.stream.api import load_particles
-from yt.geometry.selection_routines import RegionSelector
+from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
 import yt.data_objects.api
 import time, os
 
@@ -42,6 +44,34 @@
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
         yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
+def test_save_load_octree():
+    np.random.seed(int(0x4d3d3d3))
+    pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
+    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
+    octree.n_ref = 32
+    for i in range(3):
+        np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
+    # Convert to integers
+    pos = np.floor((pos - DLE)/dx).astype("uint64")
+    morton = get_morton_indices(pos)
+    morton.sort()
+    octree.add(morton)
+    octree.finalize()
+    saved = octree.save_octree()
+    loaded = OctreeContainer.load_octree(saved)
+    always = AlwaysSelector(None)
+    ir1 = octree.ires(always)
+    ir2 = loaded.ires(always)
+    yield assert_equal, ir1, ir2
+
+    fc1 = octree.fcoords(always)
+    fc2 = loaded.fcoords(always)
+    yield assert_equal, fc1, fc2
+
+    fw1 = octree.fwidth(always)
+    fw2 = loaded.fwidth(always)
+    yield assert_equal, fw1, fw2
+
 def test_particle_octree_counts():
     np.random.seed(int(0x4d3d3d3))
     # Eight times as many!
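
The round-trip test above relies on the encoding that store_octree and load_octree share: each
oct contributes one uint8 entry, written on its first visit in the depth-first traversal, with
0 marking a leaf and 1 marking an oct whose eight child subtrees follow. A small illustrative
check of that convention, assuming the pre-order layout just described (the walk helper below is
hypothetical, not part of the changeset):

    import numpy as np

    def walk(mask, pos=0):
        # Consume one depth-first subtree starting at mask[pos]; return the index just past it.
        if mask[pos] == 0:          # leaf oct: a single entry
            return pos + 1
        pos += 1                    # refined oct: its eight child subtrees follow immediately
        for _ in range(8):
            pos = walk(mask, pos)
        return pos

    # One root oct refined once into eight leaves: nine entries in total.
    mask = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0], dtype="uint8")
    assert walk(mask) == mask.size  # a well-formed mask is consumed exactly once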


https://bitbucket.org/yt_analysis/yt-3.0/commits/60801522170f/
Changeset:   60801522170f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-03 00:09:53
Summary:     Starting a load_octree function.
Affected #:  2 files

diff -r a8b4e6fd1a4925c5cb918f1e075204d1f89810d3 -r 60801522170fc78d8a33091ce427164a4aad48b5 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,10 @@
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+from yt.geometry.oct_geometry_handler import \
+    OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
 from yt.geometry.unstructured_mesh_handler import \
@@ -973,3 +977,315 @@
 
     return spf
 
+class StreamOctreeSubset(OctreeSubset):
+  domain_id = 1
+  _num_zones = 2
+
+  def __init__(self, base_region, pf, oct_handler):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = oct_handler
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+
+class StreamOctreeHandler(OctreeGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamParticleGeometryHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _initialize_oct_handler(self):
+        self.oct_handler = None
+
+class StreamOctreeStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamOctreeHandler
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_octree"
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            data_files = getattr(dobj, "data_files", None)
+            if data_files is None:
+                data_files = [self.data_files[i] for i in
+                              self.regions.identify_data_files(dobj.selector)]
+            base_region = getattr(dobj, "base_region", dobj)
+            oref = self.parameter_file.over_refine_factor
+            subset = [ParticleOctreeSubset(base_region, data_files, 
+                        self.parameter_file, over_refine_factor = oref)]
+            dobj._chunk_info = subset
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        # We actually do not really use the data files except as input to the
+        # ParticleOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g])
+
+    def _chunk_io(self, dobj, cache = True):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+def load_octree(data, sim_unit_to_cm, bbox=None,
+                      sim_time=0.0, periodicity=(True, True, True),
+                      n_ref = 64, over_refine_factor = 1):
+    r"""Load a set of particles into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
+
+    This should allow a collection of particle data to be loaded directly into
+    yt and analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        Particles positions must be named "particle_position_x",
+        "particle_position_y", "particle_position_z".
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+    n_ref : int
+        The number of particles that result in refining an oct used for
+        indexing the particles.
+
+    Examples
+    --------
+
+    >>> pos = [np.random.random(128*128*128) for i in range(3)]
+    >>> data = dict(particle_position_x = pos[0],
+    ...             particle_position_y = pos[1],
+    ...             particle_position_z = pos[2])
+    >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
+    >>> pf = load_particles(data, 3.08e24, bbox=bbox)
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'stream_file':data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "ParticleData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamParticlesStaticOutput(handler)
+    spf.n_ref = n_ref
+    spf.over_refine_factor = over_refine_factor
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
+def hexahedral_connectivity(xgrid, ygrid, zgrid):
+    nx = len(xgrid)
+    ny = len(ygrid)
+    nz = len(zgrid)
+    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
+                    dtype=np.float64, count = nx*ny*nz*3)
+    coords.shape = (nx*ny*nz, 3)
+    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                    dtype=np.int64, count = 8*3)
+    cis.shape = (8, 3)
+    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
+                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+    off = cis + cycle[:, np.newaxis]
+    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+    return coords, connectivity
+
+class StreamHexahedralMesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
+class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        coords = self.stream_handler.fields.pop('coordinates')
+        connec = self.stream_handler.fields.pop('connectivity')
+        self.meshes = [StreamHexahedralMesh(0,
+          self.hierarchy_filename, connec, coords, self)]
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamHexahedralStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamHexahedralHierarchy
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_hexahedral"
+
+def load_hexahedral_mesh(data, connectivity, coordinates,
+                         sim_unit_to_cm, bbox=None,
+                         sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a hexahedral mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a semistructured grid of data to be loaded directly into
+    yt and analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        There must only be one.
+    connectivity : array_like
+        This should be of size (N,8) where N is the number of zones.
+    coordinates : array_like
+        This should be of size (M,3) where M is the number of vertices
+        indicated in the connectivity matrix.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "HexahedralMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamHexahedralStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+

diff -r a8b4e6fd1a4925c5cb918f1e075204d1f89810d3 -r 60801522170fc78d8a33091ce427164a4aad48b5 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -228,3 +228,26 @@
                         ds = self.fields[g.mesh_id][fname]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
+
+class IOHandlerStreamOctree(BaseIOHandler):
+    _data_style = "stream_octree"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        ind = {}
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        for field in fields:
+            rv[field] = np.empty(size, dtype="float64")
+            ind[field] = 0
+        for chunk in chunks:
+            for subset in chunk.objs:
+                for field in fields:
+                    content = self.fields[subset.domain_id].get(field, None)
+                    ind[field] += subset.select(selector, content, rv[field],
+                                                ind[field])
+        return tr
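
As an aside, the hexahedral helpers carried along in this diff pair naturally:
hexahedral_connectivity builds the (coords, connectivity) arrays that load_hexahedral_mesh
expects. A hypothetical end-to-end call, with grid sizes and the field name chosen only for
illustration:

    import numpy as np
    from yt.frontends.stream.data_structures import hexahedral_connectivity
    from yt.frontends.stream.api import load_hexahedral_mesh

    xgrid = np.linspace(0.0, 1.0, 5)   # cell-edge coordinates along each axis
    ygrid = np.linspace(0.0, 1.0, 5)
    zgrid = np.linspace(0.0, 1.0, 5)
    coords, conn = hexahedral_connectivity(xgrid, ygrid, zgrid)
    # coords is (M, 3) vertex positions; conn is (N, 8) vertex indices, one row per zone.
    data = {"density": np.random.random(conn.shape[0])}
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    pf = load_hexahedral_mesh(data, conn, coords, 3.08e24, bbox=bbox)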


https://bitbucket.org/yt_analysis/yt-3.0/commits/1978931dd90b/
Changeset:   1978931dd90b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-03 04:22:43
Summary:     Add a load_octree function.
Affected #:  6 files

diff -r 60801522170fc78d8a33091ce427164a4aad48b5 -r 1978931dd90bc9c4e68cd211f12f35b8bca23b12 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -22,6 +22,7 @@
       load_amr_grids, \
       load_particles, \
       load_hexahedral_mesh, \
+      load_octree, \
       refine_amr
 
 from .fields import \

diff -r 60801522170fc78d8a33091ce427164a4aad48b5 -r 1978931dd90bc9c4e68cd211f12f35b8bca23b12 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -21,8 +21,14 @@
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
 from yt.config import ytcfg
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
 from yt.data_objects.octree_subset import \
@@ -31,6 +37,8 @@
     OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
            UnstructuredGeometryHandler
 from yt.data_objects.static_output import \
@@ -978,10 +986,11 @@
     return spf
 
 class StreamOctreeSubset(OctreeSubset):
-  domain_id = 1
-  _num_zones = 2
+    domain_id = 1
+    _domain_offset = 1
+    _num_zones = 2
 
-  def __init__(self, base_region, pf, oct_handler):
+    def __init__(self, base_region, pf, oct_handler):
         self.field_data = YTFieldData()
         self.field_parameters = {}
         self.pf = pf
@@ -994,11 +1003,27 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
+    def fill(self, content, dest, selector, offset):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        levels[:] = 0
+        dest.update((field, np.empty(cell_count, dtype="float64"))
+                    for field in content)
+        # Make references ...
+        count = oct_handler.fill_level(0, levels, cell_inds, file_inds, 
+                                       dest, content, offset)
+        return count
+
 class StreamOctreeHandler(OctreeGeometryHandler):
 
     def __init__(self, pf, data_style = None):
         self.stream_handler = pf.stream_handler
-        super(StreamParticleGeometryHandler, self).__init__(pf, data_style)
+        self.data_style = data_style
+        super(StreamOctreeHandler, self).__init__(pf, data_style)
 
     def _setup_data_io(self):
         if self.stream_handler.io is not None:
@@ -1007,24 +1032,17 @@
             self.io = io_registry[self.data_style](self.stream_handler)
 
     def _initialize_oct_handler(self):
-        self.oct_handler = None
-
-class StreamOctreeStaticOutput(StreamStaticOutput):
-    _hierarchy_class = StreamOctreeHandler
-    _fieldinfo_fallback = StreamFieldInfo
-    _fieldinfo_known = KnownStreamFields
-    _data_style = "stream_octree"
+        header = dict(dims = self.pf.domain_dimensions/2,
+                      left_edge = self.pf.domain_left_edge,
+                      right_edge = self.pf.domain_right_edge,
+                      octree = self.pf.octree_mask)
+        self.oct_handler = OctreeContainer.load_octree(header)
 
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
-            data_files = getattr(dobj, "data_files", None)
-            if data_files is None:
-                data_files = [self.data_files[i] for i in
-                              self.regions.identify_data_files(dobj.selector)]
             base_region = getattr(dobj, "base_region", dobj)
-            oref = self.parameter_file.over_refine_factor
-            subset = [ParticleOctreeSubset(base_region, data_files, 
-                        self.parameter_file, over_refine_factor = oref)]
+            subset = [StreamOctreeSubset(base_region, self.parameter_file,
+                                         self.oct_handler)]
             dobj._chunk_info = subset
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
@@ -1051,28 +1069,38 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
 
-def load_octree(data, sim_unit_to_cm, bbox=None,
-                      sim_time=0.0, periodicity=(True, True, True),
-                      n_ref = 64, over_refine_factor = 1):
-    r"""Load a set of particles into yt as a
-    :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(StreamOctreeHandler, self)._setup_classes(dd)
 
-    This should allow a collection of particle data to be loaded directly into
-    yt and analyzed as would any others.  This comes with several caveats:
-        * Units will be incorrect unless the data has already been converted to
-          cgs.
-        * Some functions may behave oddly, and parallelism will be
-          disappointing or non-existent in most cases.
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamOctreeStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamOctreeHandler
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_octree"
+
+def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
+                bbox=None, sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load an octree mask into yt.
+
+    Octrees can be saved out by calling save_octree on an OctreeContainer.
+    This enables them to be loaded back in.
 
     This will initialize an Octree of data.  Note that fluid fields will not
     work yet, or possibly ever.
     
     Parameters
     ----------
+    octree_mask : np.ndarray[uint8_t]
+        This is a depth-first refinement mask for an Octree.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
     data : dict
-        This is a dict of numpy arrays, where the keys are the field names.
-        Particles positions must be named "particle_position_x",
-        "particle_position_y", "particle_position_z".
+        A dictionary of 1D arrays.  Note that these must of the size of the
+        number of "False" values in the ``octree_mask``.
     sim_unit_to_cm : float
         Conversion factor from simulation units to centimeters
     bbox : array_like (xdim:zdim, LE:RE), optional
@@ -1082,23 +1110,10 @@
     periodicity : tuple of booleans
         Determines whether the data will be treated as periodic along
         each axis
-    n_ref : int
-        The number of particles that result in refining an oct used for
-        indexing the particles.
-
-    Examples
-    --------
-
-    >>> pos = [np.random.random(128*128*128) for i in range(3)]
-    >>> data = dict(particle_position_x = pos[0],
-    ...             particle_position_y = pos[1],
-    ...             particle_position_z = pos[2])
-    >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
-    >>> pf = load_particles(data, 3.08e24, bbox=bbox)
 
     """
 
-    domain_dimensions = np.ones(3, "int32") * 2
+    domain_dimensions = np.array(domain_dimensions)
     nprocs = 1
     if bbox is None:
         bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
@@ -1110,7 +1125,7 @@
     
     particle_types = set_particle_types(data)
     
-    sfh.update({'stream_file':data})
+    sfh.update({0:data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge
     grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
@@ -1129,7 +1144,7 @@
         periodicity=periodicity
     )
 
-    handler.name = "ParticleData"
+    handler.name = "OctreeData"
     handler.domain_left_edge = domain_left_edge
     handler.domain_right_edge = domain_right_edge
     handler.refine_by = 2
@@ -1138,9 +1153,8 @@
     handler.simulation_time = sim_time
     handler.cosmology_simulation = 0
 
-    spf = StreamParticlesStaticOutput(handler)
-    spf.n_ref = n_ref
-    spf.over_refine_factor = over_refine_factor
+    spf = StreamOctreeStaticOutput(handler)
+    spf.octree_mask = octree_mask
     spf.units["cm"] = sim_unit_to_cm
     spf.units['1'] = 1.0
     spf.units["unitary"] = 1.0

diff -r 60801522170fc78d8a33091ce427164a4aad48b5 -r 1978931dd90bc9c4e68cd211f12f35b8bca23b12 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -238,16 +238,13 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
-        ind = {}
+        ind = 0
         chunks = list(chunks)
         assert(len(chunks) == 1)
-        for field in fields:
-            rv[field] = np.empty(size, dtype="float64")
-            ind[field] = 0
         for chunk in chunks:
+            assert(len(chunk.objs) == 1)
             for subset in chunk.objs:
-                for field in fields:
-                    content = self.fields[subset.domain_id].get(field, None)
-                    ind[field] += subset.select(selector, content, rv[field],
-                                                ind[field])
-        return tr
+                field_vals = self.fields[subset.domain_id -
+                                    subset._domain_offset]
+                subset.fill(field_vals, rv, selector, ind)
+        return rv

diff -r 60801522170fc78d8a33091ce427164a4aad48b5 -r 1978931dd90bc9c4e68cd211f12f35b8bca23b12 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -135,10 +135,12 @@
         # Pos is the center of the octs
         cdef OctAllocationContainer *cur = obj.domains[0]
         cdef Oct *o
-        cdef void *p[3]
+        cdef void *p[4]
+        cdef np.int64_t nfinest = 0
         p[0] = ref_mask.data
         p[1] = <void *> cur.my_octs
         p[2] = <void *> &cur.n_assigned
+        p[3] = <void *> &nfinest
         data.array = p
         pos[0] = obj.DLE[0] + dds[0]/2.0
         for i in range(obj.nn[0]):
@@ -715,19 +717,21 @@
                    np.ndarray[np.uint8_t, ndim=1] levels,
                    np.ndarray[np.uint8_t, ndim=1] cell_inds,
                    np.ndarray[np.int64_t, ndim=1] file_inds,
-                   dest_fields, source_fields):
+                   dest_fields, source_fields,
+                   np.int64_t offset = 0):
         cdef np.ndarray[np.float64_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n
         cdef int i, di
-        cdef int local_pos, local_filled
+        cdef np.int64_t local_pos, local_filled = 0
         cdef np.float64_t val
         for key in dest_fields:
             dest = dest_fields[key]
             source = source_fields[key]
             for i in range(levels.shape[0]):
                 if levels[i] != level: continue
-                dest[i] = source[file_inds[i], cell_inds[i]]
+                dest[i + offset] = source[file_inds[i], cell_inds[i]]
+                local_filled += 1
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)

diff -r 60801522170fc78d8a33091ce427164a4aad48b5 -r 1978931dd90bc9c4e68cd211f12f35b8bca23b12 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -191,18 +191,22 @@
     cdef np.uint8_t *arr = <np.uint8_t *> p[0]
     cdef Oct* octs = <Oct*> p[1]
     cdef np.int64_t *nocts = <np.int64_t*> p[2]
+    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
     cdef int i
    
     if data.last != o.domain_ind:
         data.last = o.domain_ind
         if arr[data.index] == 0:
             o.children = NULL
+            o.file_ind = nfinest[0]
+            o.domain = 1
+            nfinest[0] += 1
         if arr[data.index] == 1:
             o.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 o.children[i] = &octs[nocts[0]]
-                o.children[i].file_ind = nocts[0]
                 o.children[i].domain_ind = nocts[0]
-                o.children[i].domain = 1
+                o.children[i].file_ind = -1
+                o.children[i].domain = -1
                 nocts[0] += 1
         data.index += 1

diff -r 60801522170fc78d8a33091ce427164a4aad48b5 -r 1978931dd90bc9c4e68cd211f12f35b8bca23b12 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh
+    load_particles, load_hexahedral_mesh, load_octree
 
 from yt.frontends.sph.api import \
     OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \
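
The final signature can be read straight off the docstring: a depth-first uint8 refinement mask,
the domain dimensions (halved internally to get the root mesh), a dict of 1D field arrays, and the
unit conversion. A hypothetical call with the smallest interesting mask, a single refined root oct
followed by its eight leaf children; the field name and values are illustrative, and the array
sizing follows the docstring's rule of one value per zero entry in the mask:

    import numpy as np
    from yt.frontends.stream.api import load_octree

    octree_mask = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0], dtype="uint8")
    data = {"density": np.random.random(8)}   # one value per unrefined oct, per the docstring
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    pf = load_octree(octree_mask, [2, 2, 2], data, 3.08e24, bbox=bbox)
    # domain_dimensions of [2, 2, 2] become a 1x1x1 root mesh in _initialize_oct_handler,
    # so the nine mask entries cover the root oct plus its eight children.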


https://bitbucket.org/yt_analysis/yt-3.0/commits/73a7cf95d67d/
Changeset:   73a7cf95d67d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-04 02:05:48
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #109)

Add a load_octree command
Affected #:  9 files

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -22,6 +22,7 @@
       load_amr_grids, \
       load_particles, \
       load_hexahedral_mesh, \
+      load_octree, \
       refine_amr
 
 from .fields import \

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -21,12 +21,24 @@
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
 from yt.config import ytcfg
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.geometry.geometry_handler import \
+    YTDataChunk
 from yt.geometry.grid_geometry_handler import \
     GridGeometryHandler
+from yt.data_objects.octree_subset import \
+    OctreeSubset
+from yt.geometry.oct_geometry_handler import \
+    OctreeGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
            UnstructuredGeometryHandler
 from yt.data_objects.static_output import \
@@ -973,3 +985,321 @@
 
     return spf
 
+class StreamOctreeSubset(OctreeSubset):
+    domain_id = 1
+    _domain_offset = 1
+    _num_zones = 2
+
+    def __init__(self, base_region, pf, oct_handler):
+        self.field_data = YTFieldData()
+        self.field_parameters = {}
+        self.pf = pf
+        self.hierarchy = self.pf.hierarchy
+        self.oct_handler = oct_handler
+        self._last_mask = None
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+        self.base_region = base_region
+        self.base_selector = base_region.selector
+
+    def fill(self, content, dest, selector, offset):
+        # Here we get a copy of the file, which we skip through and read the
+        # bits we want.
+        oct_handler = self.oct_handler
+        cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
+        levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
+            selector, self.domain_id, cell_count)
+        levels[:] = 0
+        dest.update((field, np.empty(cell_count, dtype="float64"))
+                    for field in content)
+        # Make references ...
+        count = oct_handler.fill_level(0, levels, cell_inds, file_inds, 
+                                       dest, content, offset)
+        return count
+
+class StreamOctreeHandler(OctreeGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        self.data_style = data_style
+        super(StreamOctreeHandler, self).__init__(pf, data_style)
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _initialize_oct_handler(self):
+        header = dict(dims = self.pf.domain_dimensions/2,
+                      left_edge = self.pf.domain_left_edge,
+                      right_edge = self.pf.domain_right_edge,
+                      octree = self.pf.octree_mask)
+        self.oct_handler = OctreeContainer.load_octree(header)
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            base_region = getattr(dobj, "base_region", dobj)
+            subset = [StreamOctreeSubset(base_region, self.parameter_file,
+                                         self.oct_handler)]
+            dobj._chunk_info = subset
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        # We actually do not really use the data files except as input to the
+        # ParticleOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g])
+
+    def _chunk_io(self, dobj, cache = True):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(StreamOctreeHandler, self)._setup_classes(dd)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamOctreeStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamOctreeHandler
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_octree"
+
+def load_octree(octree_mask, domain_dimensions, data, sim_unit_to_cm,
+                bbox=None, sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load an octree mask into yt.
+
+    Octrees can be saved out by calling save_octree on an OctreeContainer.
+    This enables them to be loaded back in.
+
+    This will initialize an Octree of data.  Note that fluid fields will not
+    work yet, or possibly ever.
+    
+    Parameters
+    ----------
+    octree_mask : np.ndarray[uint8_t]
+        This is a depth-first refinement mask for an Octree.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    data : dict
+        A dictionary of 1D arrays.  Note that these must of the size of the
+        number of "False" values in the ``octree_mask``.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({0:data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "OctreeData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamOctreeStaticOutput(handler)
+    spf.octree_mask = octree_mask
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+
+def hexahedral_connectivity(xgrid, ygrid, zgrid):
+    nx = len(xgrid)
+    ny = len(ygrid)
+    nz = len(zgrid)
+    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
+                    dtype=np.float64, count = nx*ny*nz*3)
+    coords.shape = (nx*ny*nz, 3)
+    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                    dtype=np.int64, count = 8*3)
+    cis.shape = (8, 3)
+    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
+                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+    off = cis + cycle[:, np.newaxis]
+    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+    return coords, connectivity
+
+class StreamHexahedralMesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
+class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        coords = self.stream_handler.fields.pop('coordinates')
+        connec = self.stream_handler.fields.pop('connectivity')
+        self.meshes = [StreamHexahedralMesh(0,
+          self.hierarchy_filename, connec, coords, self)]
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamHexahedralStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamHexahedralHierarchy
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_hexahedral"
+
+def load_hexahedral_mesh(data, connectivity, coordinates,
+                         sim_unit_to_cm, bbox=None,
+                         sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a hexahedral mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a semistructured grid of data to be loaded directly into
+    yt and analyzed as would any others.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        There must only be one.
+    connectivity : array_like
+        This should be of size (N,8) where N is the number of zones.
+    coordinates : array_like
+        This should be of size (M,3) where M is the number of vertices
+        indicated in the connectivity matrix.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "HexahedralMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamHexahedralStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -228,3 +228,23 @@
                         ds = self.fields[g.mesh_id][fname]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
+
+class IOHandlerStreamOctree(BaseIOHandler):
+    _data_style = "stream_octree"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        ind = 0
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        for chunk in chunks:
+            assert(len(chunk.objs) == 1)
+            for subset in chunk.objs:
+                field_vals = self.fields[subset.domain_id -
+                                    subset._domain_offset]
+                subset.fill(field_vals, rv, selector, ind)
+        return rv

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -74,7 +74,8 @@
     cdef np.int64_t get_domain_offset(self, int domain_id)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data)
+                        OctVisitorData *data,
+                        int vc = ?)
     cdef Oct *next_root(self, int domain_id, int ind[3])
     cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent)
     cdef void setup_data(self, OctVisitorData *data, int domain_id = ?)

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -115,6 +115,60 @@
                 for k in range(self.nn[2]):
                     self.root_mesh[i][j][k] = NULL
 
+    @classmethod
+    def load_octree(cls, header):
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = header['octree']
+        cdef OctreeContainer obj = cls(header['dims'], header['left_edge'], header['right_edge'])
+        # NOTE: We do not allow domain/file indices to be specified.
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        cdef OctVisitorData data
+        obj.setup_data(&data, -1)
+        obj.allocate_domains([ref_mask.shape[0]])
+        cdef int i, j, k, n
+        data.global_index = -1
+        data.level = 0
+        cdef np.float64_t pos[3], dds[3]
+        # This dds is the oct-width
+        for i in range(3):
+            dds[i] = (obj.DRE[i] - obj.DLE[i]) / obj.nn[i]
+        # Pos is the center of the octs
+        cdef OctAllocationContainer *cur = obj.domains[0]
+        cdef Oct *o
+        cdef void *p[4]
+        cdef np.int64_t nfinest = 0
+        p[0] = ref_mask.data
+        p[1] = <void *> cur.my_octs
+        p[2] = <void *> &cur.n_assigned
+        p[3] = <void *> &nfinest
+        data.array = p
+        pos[0] = obj.DLE[0] + dds[0]/2.0
+        for i in range(obj.nn[0]):
+            pos[1] = obj.DLE[1] + dds[1]/2.0
+            for j in range(obj.nn[1]):
+                pos[2] = obj.DLE[2] + dds[2]/2.0
+                for k in range(obj.nn[2]):
+                    if obj.root_mesh[i][j][k] != NULL:
+                        raise RuntimeError
+                    o = &cur.my_octs[cur.n_assigned]
+                    o.domain_ind = o.file_ind = 0
+                    o.domain = 1
+                    obj.root_mesh[i][j][k] = o
+                    cur.n_assigned += 1
+                    data.pos[0] = i
+                    data.pos[1] = j
+                    data.pos[2] = k
+                    selector.recursively_visit_octs(
+                        obj.root_mesh[i][j][k],
+                        pos, dds, 0, oct_visitors.load_octree,
+                        &data, 1)
+                    pos[2] += dds[2]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+        obj.nocts = cur.n_assigned
+        assert(obj.nocts == ref_mask.size)
+        return obj
+
     cdef void setup_data(self, OctVisitorData *data, int domain_id = -1):
         cdef int i
         data.index = 0
@@ -157,9 +211,11 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
-        vc = self.partial_coverage
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
+        if vc == -1:
+            vc = self.partial_coverage
         data.global_index = -1
         data.level = 0
         cdef np.float64_t pos[3], dds[3]
@@ -431,6 +487,24 @@
             coords[:,i] += self.DLE[i]
         return coords
 
+    def save_octree(self):
+        # Get the header
+        header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
+                      left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
+                      right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]))
+        if self.partial_coverage == 1:
+            raise NotImplementedError
+        cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
+        # domain_id = -1 here, because we want *every* oct
+        cdef OctVisitorData data
+        self.setup_data(&data, -1)
+        cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
+        ref_mask = np.zeros(self.nocts, dtype="uint8") - 1
+        data.array = <void *> ref_mask.data
+        self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        header['octree'] = ref_mask
+        return header
+
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -647,19 +721,21 @@
                    np.ndarray[np.uint8_t, ndim=1] levels,
                    np.ndarray[np.uint8_t, ndim=1] cell_inds,
                    np.ndarray[np.int64_t, ndim=1] file_inds,
-                   dest_fields, source_fields):
+                   dest_fields, source_fields,
+                   np.int64_t offset = 0):
         cdef np.ndarray[np.float64_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n
         cdef int i, di
-        cdef int local_pos, local_filled
+        cdef np.int64_t local_pos, local_filled = 0
         cdef np.float64_t val
         for key in dest_fields:
             dest = dest_fields[key]
             source = source_fields[key]
             for i in range(levels.shape[0]):
                 if levels[i] != level: continue
-                dest[i] = source[file_inds[i], cell_inds[i]]
+                dest[i + offset] = source[file_inds[i], cell_inds[i]]
+                local_filled += 1
 
     def finalize(self):
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
@@ -702,6 +778,13 @@
             self.DRE[i] = domain_right_edge[i] #num_grid
         self.fill_func = oct_visitors.fill_file_indices_rind
 
+    @classmethod
+    def load_octree(self, header):
+        raise NotImplementedError
+
+    def save_octree(self):
+        raise NotImplementedError
+
     cdef int get_root(self, int ind[3], Oct **o):
         o[0] = NULL
         cdef int i
@@ -738,12 +821,14 @@
     @cython.cdivision(True)
     cdef void visit_all_octs(self, SelectorObject selector,
                         oct_visitor_function *func,
-                        OctVisitorData *data):
-        cdef int i, j, k, n, vc
+                        OctVisitorData *data,
+                        int vc = -1):
+        cdef int i, j, k, n
         cdef np.int64_t key, ukey
         data.global_index = -1
         data.level = 0
-        vc = self.partial_coverage
+        if vc == -1:
+            vc = self.partial_coverage
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):
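
[Editor's note on the oct_container.pyx changes: taken together, save_octree() returns a small header dict -- dims, left_edge, right_edge, plus an 'octree' uint8 mask holding one byte per oct in visit order, 0 for a leaf and 1 for a refined oct -- and the new load_octree() classmethod rebuilds a plain OctreeContainer from such a header. A hedged round-trip sketch; the hand-built header (a single unrefined root oct) is my own minimal example, not something exercised in this changeset:

    import numpy as np
    from yt.geometry.oct_container import OctreeContainer

    # Minimal header in the save_octree() format: one root oct, not refined,
    # so the mask is a single 0 byte.
    header = dict(dims=(1, 1, 1),
                  left_edge=(0.0, 0.0, 0.0),
                  right_edge=(1.0, 1.0, 1.0),
                  octree=np.array([0], dtype="uint8"))

    loaded = OctreeContainer.load_octree(header)
    roundtrip = loaded.save_octree()
    assert roundtrip["octree"].size == header["octree"].size

For a real mesh, the new test_save_load_octree further down performs the same round trip starting from a ParticleOctreeContainer.]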

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -38,7 +38,6 @@
                    # To calculate nzones, 1 << (oref * 3)
     np.int32_t nz
                             
-
 ctypedef void oct_visitor_function(Oct *, OctVisitorData *visitor,
                                    np.uint8_t selected)
 
@@ -58,6 +57,8 @@
 cdef oct_visitor_function fill_file_indices_oind
 cdef oct_visitor_function fill_file_indices_rind
 cdef oct_visitor_function count_by_domain
+cdef oct_visitor_function store_octree
+cdef oct_visitor_function load_octree
 
 cdef inline int cind(int i, int j, int k):
     # THIS ONLY WORKS FOR CHILDREN.  It is not general for zones.

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -18,6 +18,7 @@
 cimport numpy
 import numpy
 from fp_utils cimport *
+from libc.stdlib cimport malloc, free
 
 # Now some visitor functions
 
@@ -173,3 +174,39 @@
     # NOTE: We do this for every *cell*.
     arr = <np.int64_t *> data.array
     arr[o.domain - 1] += 1
+
+cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef np.uint8_t *arr
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        arr = <np.uint8_t *> data.array
+        if o.children == NULL:
+            arr[data.index] = 0
+        if o.children != NULL:
+            arr[data.index] = 1
+        data.index += 1
+
+cdef void load_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
+    cdef void **p = <void **> data.array
+    cdef np.uint8_t *arr = <np.uint8_t *> p[0]
+    cdef Oct* octs = <Oct*> p[1]
+    cdef np.int64_t *nocts = <np.int64_t*> p[2]
+    cdef np.int64_t *nfinest = <np.int64_t*> p[3]
+    cdef int i
+   
+    if data.last != o.domain_ind:
+        data.last = o.domain_ind
+        if arr[data.index] == 0:
+            o.children = NULL
+            o.file_ind = nfinest[0]
+            o.domain = 1
+            nfinest[0] += 1
+        if arr[data.index] == 1:
+            o.children = <Oct **> malloc(sizeof(Oct *) * 8)
+            for i in range(8):
+                o.children[i] = &octs[nocts[0]]
+                o.children[i].domain_ind = nocts[0]
+                o.children[i].file_ind = -1
+                o.children[i].domain = -1
+                nocts[0] += 1
+        data.index += 1
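
[Editor's note on the two visitors above: they are the two halves of that mask format. store_octree writes one byte per oct (1 if it has children, 0 if not), and load_octree consumes the same stream, handing out eight children from the preallocated oct array whenever it reads a 1. A pure-Python analogy of the encode/decode idea -- not the actual Cython visitors, and the real byte order is whatever recursively_visit_octs produces -- looks roughly like:

    def store(node, mask):
        # node is either None (leaf) or a list of 8 child nodes
        if node is None:
            mask.append(0)
        else:
            mask.append(1)
            for child in node:
                store(child, mask)

    def load(mask, pos=0):
        # Rebuild the nested structure from the flat 0/1 mask.
        if mask[pos] == 0:
            return None, pos + 1
        children = []
        pos += 1
        for _ in range(8):
            child, pos = load(mask, pos)
            children.append(child)
        return children, pos

    tree = [None] * 8
    tree[3] = [None] * 8          # refine one child one level deeper
    mask = []
    store(tree, mask)             # mask == [1, 0, 0, 0, 1] + [0]*8 + [0]*4
    rebuilt, _ = load(mask)
    assert rebuilt == tree
]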

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,12 +1,14 @@
 from yt.testing import *
 import numpy as np
+from yt.geometry.oct_container import \
+    OctreeContainer
 from yt.geometry.particle_oct_container import \
     ParticleOctreeContainer, \
     ParticleRegions
 from yt.geometry.oct_container import _ORDER_MAX
 from yt.utilities.lib.geometry_utils import get_morton_indices
 from yt.frontends.stream.api import load_particles
-from yt.geometry.selection_routines import RegionSelector
+from yt.geometry.selection_routines import RegionSelector, AlwaysSelector
 import yt.data_objects.api
 import time, os
 
@@ -42,6 +44,34 @@
         #    level_count += octree.count_levels(total_count.size-1, dom, mask)
         yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
 
+def test_save_load_octree():
+    np.random.seed(int(0x4d3d3d3))
+    pos = np.random.normal(0.5, scale=0.05, size=(NPART,3)) * (DRE-DLE) + DLE
+    octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE)
+    octree.n_ref = 32
+    for i in range(3):
+        np.clip(pos[:,i], DLE[i], DRE[i], pos[:,i])
+    # Convert to integers
+    pos = np.floor((pos - DLE)/dx).astype("uint64")
+    morton = get_morton_indices(pos)
+    morton.sort()
+    octree.add(morton)
+    octree.finalize()
+    saved = octree.save_octree()
+    loaded = OctreeContainer.load_octree(saved)
+    always = AlwaysSelector(None)
+    ir1 = octree.ires(always)
+    ir2 = loaded.ires(always)
+    yield assert_equal, ir1, ir2
+
+    fc1 = octree.fcoords(always)
+    fc2 = loaded.fcoords(always)
+    yield assert_equal, fc1, fc2
+
+    fw1 = octree.fwidth(always)
+    fw2 = loaded.fwidth(always)
+    yield assert_equal, fw1, fw2
+
 def test_particle_octree_counts():
     np.random.seed(int(0x4d3d3d3))
     # Eight times as many!
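
[Editor's note on the new test: test_save_load_octree is a yield-style nose test like its neighbours. It builds a ParticleOctreeContainer from Morton-sorted particle positions, round-trips it through save_octree()/load_octree(), and checks that ires/fcoords/fwidth agree. Assuming an importable yt checkout, the yielded checks can also be driven by hand, e.g.:

    from yt.geometry.tests.test_particle_octree import test_save_load_octree

    # Each yield is (assert_func, actual, expected); run the checks directly.
    for check, actual, expected in test_save_load_octree():
        check(actual, expected)
]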

diff -r 92d56a1fd5dec9a365da0b0cd0d3f4480f3a7d55 -r 73a7cf95d67d589de7b672580bd98852a2f005e5 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh
+    load_particles, load_hexahedral_mesh, load_octree
 
 from yt.frontends.sph.api import \
     OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \
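
[Editor's note: with this hunk, load_octree joins the other load_* convenience functions re-exported from yt.mods, so user scripts pick it up the usual way without importing the stream frontend directly. A hedged usage sketch; only the import path is taken from this changeset, and load_octree's actual argument list lives in yt/frontends/stream/data_structures.py, not shown here:

    from yt.mods import load_octree
]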

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
