[yt-svn] commit/yt: MatthewTurk: Merged in MatthewTurk/yt/yt-3.0 (pull request #854)

commits-noreply at bitbucket.org
Sat May 3 07:59:19 PDT 2014


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/9a0500caa575/
Changeset:   9a0500caa575
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 16:59:11
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #854)

SDF Frontend
Affected #:  18 files

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,8 +6,6 @@
 from libc.stdlib cimport malloc, free
 import sys
 
-
-
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
@@ -15,6 +13,10 @@
         np.int64_t id
         float pos[6]
 
+cdef import from "rockstar.h":
+    particle *global_particles "p"
+    void rockstar_cleanup()
+
 cdef import from "fof.h":
     struct fof:
         np.int64_t num_p
@@ -23,13 +25,34 @@
 cdef import from "halo.h":
     struct halo:
         np.int64_t id
-        float pos[6], corevel[3], bulkvel[3]
+        float pos[6]
+        float corevel[3]
+        float bulkvel[3]
         float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float J[3]
+        float energy, spin
+        float alt_m[4]
+        float Xoff, Voff, b_to_a, c_to_a
+        float A[3]
         float bullock_spin, kin_to_pot
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
 
+ctypedef packed struct haloflat:
+    np.int64_t id
+    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float corevel_x, corevel_y, corevel_z
+    float bulkvel_x, bulkvel_y, bulkvel_z
+    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+    float J1, J2, J3
+    float energy, spin
+    float alt_m1, alt_m2, alt_m3, alt_m4
+    float Xoff, Voff, b_to_a, c_to_a
+    float A1, A2, A3
+    float bullock_spin, kin_to_pot
+    np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+    float min_pos_err, min_vel_err, min_bulkvel_err
+
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
@@ -38,6 +61,9 @@
     halo *halos
     np.int64_t num_halos
     void calc_mass_definition() nogil
+    void free_particle_copies() nogil
+    void alloc_particle_copies(np.int64_t total_copies) nogil
+    void free_halos() nogil
 
 # For outputting halos, rockstar style
 
@@ -48,6 +74,7 @@
 
 cdef import from "config.h":
     void setup_config() nogil
+    void output_config(char *fn) nogil
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
@@ -197,45 +224,87 @@
     def output_halos(self):
         output_halos(0, 0, 0, NULL) 
 
+    def return_halos(self):
+        cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
+        rv = np.asarray(haloview).copy()
+        rockstar_cleanup()
+        free_halos()
+        return rv
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
                                 np.ndarray[np.float64_t, ndim=2] pos,
-                                np.ndarray[np.float64_t, ndim=2] vel,
-                                np.ndarray[np.int64_t, ndim=1] fof_tags,
-                                np.int64_t nfof,
-                                np.int64_t npart_max):
+                                np.ndarray[np.float64_t, ndim=2] vel):
 
         # Define fof object
 
         # Find number of particles
-        cdef np.int64_t i, j
-        cdef np.int64_t num_particles = pid.shape[0]
+        cdef np.int64_t i, j, k, ind, offset
+        cdef np.int64_t num_particles = pind.shape[0]
+        global global_particles
 
         # Allocate space for correct number of particles
-        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
         cdef fof fof_obj
-        fof_obj.particles = particles
 
-        cdef np.int64_t last_fof_tag = 1
-        cdef np.int64_t k = 0
-        for i in range(num_particles):
-            if fof_tags[i] < 0:
+        cdef np.int64_t max_count = 0
+        cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
+        fof_obj.num_p = 0
+        j = 0
+        # We're going to do one iteration to get the most frequent value.
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Don't count the null group
+            if local_tag == -1: continue
+            if local_tag != last_fof_tag:
+                if j > max_count:
+                    max_count = j
+                last_fof_tag = local_tag
+                j = 1
+            else:
+                j += 1
+        if j > max_count:
+            max_count = j
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
+        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
+        j = 0
+        cdef int counter = 0, ndone = 0
+        cdef np.ndarray[np.int64_t, ndim=1] pcounts 
+        pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64")
+        cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
+        free_halos()
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Skip this one -- it means no group.
+            if local_tag == -1:
                 continue
-            if fof_tags[i] != last_fof_tag:
-                last_fof_tag = fof_tags[i]
-                if k > 16:
-                    fof_obj.num_p = k
-                    find_subs(&fof_obj)
-                k = 0
-            particles[k].id = pid[i]
-
-            # fill in locations & velocities
-            for j in range(3):
-                particles[k].pos[j] = pos[i,j]
-                particles[k].pos[j+3] = vel[i,j]
-            k += 1
-        free(particles)
-
-
-
+            if i == pind.shape[0] - 1:
+                next_tag = local_tag + 1
+            else:
+                next_tag = fof_tags[pind[i+1]]
+            for k in range(3):
+                fof_obj.particles[j].pos[k] = pos[ind,k]
+                fof_obj.particles[j].pos[k+3] = vel[ind,k]
+            fof_obj.particles[j].id = j
+            fof_obj.num_p += 1
+            j += 1
+            # Now we check if we're the last one
+            if local_tag != next_tag:
+                pcounts[ndone] = fof_obj.num_p
+                counter += 1
+                ndone += 1
+                if counter == frac:
+                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
+                        (100.0 * ndone)/pcounts.size,
+                        fof_obj.particles[0].pos[2],
+                        halos[num_halos - 1].pos[2])
+                    counter = 0
+                global_particles = &fof_obj.particles[0]
+                find_subs(&fof_obj)
+                # Now we reset
+                fof_obj.num_p = j = 0
+        free(fof_obj.particles)
+        return pcounts
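
The rewritten make_rockstar_fof above works in two passes over particle indices pre-sorted by FOF tag: the first pass finds the largest group so a single particle buffer of that size can be reused, the second fills the buffer group by group and hands each group to find_subs. A minimal pure-Python sketch of the first pass (illustrative only, not code from this changeset), assuming pind is already sorted by tag and -1 marks ungrouped particles:

    def largest_group_size(pind, fof_tags):
        # First pass only: size of the biggest contiguous run of equal tags.
        max_count = count = 0
        last_tag = -2          # sentinel distinct from any real tag and from -1
        for ind in pind:
            tag = fof_tags[ind]
            if tag == -1:      # null group, skipped just as in the Cython above
                continue
            if tag != last_tag:
                max_count = max(max_count, count)
                last_tag, count = tag, 1
            else:
                count += 1
        return max(max_count, count)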

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -204,7 +204,7 @@
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf.length_unit.in_units("Mpccm/h")
-    conv[3] = conv[4] = conv[5] = 1e-5
+    conv[3] = conv[4] = conv[5] = pf.velocity_unit.in_units("km/s")
     left_edge[0] = pf.domain_left_edge[0]
     left_edge[1] = pf.domain_left_edge[1]
     left_edge[2] = pf.domain_left_edge[2]
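
The hard-coded 1e-5 that this hunk removes is simply the cm/s-to-km/s factor; deriving conv[3:6] from pf.velocity_unit reproduces the same number for cgs code velocities while staying correct for datasets whose velocity unit is not cm/s. A trivial sketch of the arithmetic being replaced:

    # 1 km = 1e5 cm, so velocities stored in cm/s were scaled by 1e-5 to get km/s.
    cm_per_km = 1.0e5
    print(1.0 / cm_per_km)   # 1e-05, the factor previously hard-coded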

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -21,6 +21,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
@@ -28,6 +30,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -29,6 +29,7 @@
     'moab',
     #'pluto',
     'ramses',
+    'sdf',
     'sph',
     'stream',
 ]

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/__init__.py
--- /dev/null
+++ b/yt/frontends/sdf/__init__.py
@@ -0,0 +1,15 @@
+"""
+__init__ for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/api.py
--- /dev/null
+++ b/yt/frontends/sdf/api.py
@@ -0,0 +1,24 @@
+"""
+API for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      SDFDataset
+
+from .io import \
+      IOHandlerSDF
+
+from .fields import \
+      SDFFieldInfo

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/data_structures.py
--- /dev/null
+++ b/yt/frontends/sdf/data_structures.py
@@ -0,0 +1,139 @@
+"""
+Data structures for a generic SDF frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+import types
+
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.utilities.physical_constants import \
+    G, \
+    cm_per_kpc, \
+    mass_sun_cgs
+from yt.utilities.cosmology import Cosmology
+from .fields import \
+    SDFFieldInfo
+from .io import \
+    IOHandlerSDF, \
+    SDFRead,\
+    SDFIndex
+
+class SDFFile(ParticleFile):
+    pass
+
+class SDFDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = SDFFile
+    _field_info_class = SDFFieldInfo
+    _particle_mass_name = None
+    _particle_coordinates_name = None
+    _particle_velocity_name = None
+    _sindex = None
+
+    def __init__(self, filename, dataset_type = "sdf_particles",
+                 n_ref = 64, over_refine_factor = 1,
+                 bounding_box = None,
+                 sdf_header = None,
+                 idx_filename = None,
+                 idx_header = None,
+                 idx_level = 9):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        if bounding_box is not None:
+            bbox = np.array(bounding_box, dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
+            self.domain_left_edge = bbox[:,0]
+            self.domain_right_edge = bbox[:,1]
+        else:
+            self.domain_left_edge = self.domain_right_edge = None
+        self.sdf_header = sdf_header
+        self.idx_filename = idx_filename
+        self.idx_header = idx_header
+        self.idx_level = idx_level
+        super(SDFDataset, self).__init__(filename, dataset_type)
+
+    def _parse_parameter_file(self):
+        self.sdf_container = SDFRead(self.parameter_filename,
+                                     header=self.sdf_header)
+        # Reference
+        self.parameters = self.sdf_container.parameters
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        if None in (self.domain_left_edge, self.domain_right_edge):
+            R0 = self.parameters['R0']
+            self.domain_left_edge = np.array([
+              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_right_edge = np.array([
+              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_left_edge *= self.parameters.get("a", 1.0)
+            self.domain_right_edge *= self.parameters.get("a", 1.0)
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.cosmological_simulation = 1
+
+        self.current_redshift = self.parameters.get("redshift", 0.0)
+        self.omega_lambda = self.parameters["Omega0_lambda"]
+        self.omega_matter = self.parameters["Omega0_m"]
+        self.hubble_constant = self.parameters["h_100"]
+        # Now we calculate our time based on the cosmology.
+        cosmo = Cosmology(self.hubble_constant,
+                          self.omega_matter, self.omega_lambda)
+        self.current_time = cosmo.hubble_time(self.current_redshift)
+        mylog.info("Calculating time to be %0.3e seconds", self.current_time)
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+    @property
+    def sindex(self):
+        if self._sindex is None:
+            if self.idx_filename is not None:
+                indexdata = SDFRead(self.idx_filename,
+                                    header=self.idx_header)
+                self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
+            else:
+                raise RuntimeError("SDF index0 file not supplied in load.")
+        return self._sindex
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "kpc")
+        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
+        self.time_unit = self.quan(1.0, "Gyr")
+        self.mass_unit = self.quan(1e10, "Msun")
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        if not os.path.isfile(args[0]): return False
+        with open(args[0], "r") as f:
+            line = f.readline().strip()
+            return line == "# SDF 1.0"
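
A short usage sketch of the new frontend, with hypothetical file names that are not part of this changeset, constructing SDFDataset directly with the keyword arguments defined above:

    from yt.frontends.sdf.api import SDFDataset

    # "output.sdf" / "output.sdf.midx" are placeholder names; idx_filename and
    # idx_header are only needed when indexed (sindex) access is wanted.
    ds = SDFDataset("output.sdf",
                    idx_filename="output.sdf.midx",
                    idx_level=9)
    print(ds.parameters["R0"])
    print(ds.domain_left_edge, ds.domain_right_edge)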

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/fields.py
--- /dev/null
+++ b/yt/frontends/sdf/fields.py
@@ -0,0 +1,47 @@
+"""
+SDF-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+
+from yt.funcs import *
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+from yt.config import ytcfg
+from yt.utilities.physical_constants import mh
+from yt.fields.species_fields import \
+    add_species_field_by_fraction, \
+    add_species_field_by_density, \
+    setup_species_fields
+
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
+
+class SDFFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+
+    known_particle_fields = (
+        ("mass", ("code_mass", ["particle_mass"], None)),
+        ("x", ("code_length", ["particle_position_x"], None)),
+        ("y", ("code_length", ["particle_position_y"], None)),
+        ("z", ("code_length", ["particle_position_z"], None)),
+        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
+        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
+        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
+        ("ident", ("", ["particle_index"], None)),
+    )

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/io.py
--- /dev/null
+++ b/yt/frontends/sdf/io.py
@@ -0,0 +1,564 @@
+"""
+SDF data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import glob
+import h5py
+import numpy as np
+from yt.funcs import *
+from yt.utilities.exceptions import *
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.fortran_utils import read_record
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+CHUNKSIZE = 32**3
+
+class IOHandlerSDF(BaseIOHandler):
+    _dataset_type = "sdf_particles"
+
+    @property
+    def _handle(self):
+        return self.pf.sdf_container
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'], self._handle['y'], self._handle['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x']
+                y = self._handle['y']
+                z = self._handle['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = self._handle[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        x, y, z = (self._handle[ax] for ax in 'xyz')
+        pcount = x.size
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(CHUNKSIZE, pcount - ind)
+            pos = np.empty((npart, 3), dtype=x.dtype)
+            pos[:,0] = x[ind:ind+npart]
+            pos[:,1] = y[ind:ind+npart]
+            pos[:,2] = z[ind:ind+npart]
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += CHUNKSIZE
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'dark_matter': self._handle['x'].size}
+
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
+import re
+import os
+
+_types = {
+    'int': 'int32',
+    'int64_t': 'int64',
+    'float': 'float32',
+    'double': 'float64',
+    'unsigned int': 'I',
+    'unsigned char': 'B',
+}
+
+def get_type(vtype, len=None):
+    try:
+        t = _types[vtype]
+        if len is not None:
+            t = np.dtype((t, len))
+        else:
+            t = np.dtype(t)
+    except KeyError:
+        t = eval("np."+vtype)
+    return t
+
+def lstrip(text_list):
+    return [t.strip() for t in text_list]
+
+def get_struct_vars(line):
+    spl = lstrip(line.split(";"))
+    multiv = lstrip(spl[0].split(","))
+    ret = lstrip(multiv[0].split())
+    ctype = ret[0]
+    vnames = [ret[-1]] + multiv[1:]
+    vnames = [v.strip() for v in vnames]
+    for vtype in ret[1:-1]:
+        ctype += ' ' + vtype
+    num = None
+    if len(vnames) == 1:
+        if '[' in vnames[0]:
+            num = int(vnames[0].split('[')[-1].strip(']'))
+            #num = int(re.sub("\D", "", vnames[0]))
+    ctype = get_type(ctype, len=num)
+    return ctype, vnames
+
+class DataStruct(object):
+    """docstring for DataStruct"""
+
+    _offset = 0
+
+    def __init__(self, dtypes, num, filename):
+        self.filename = filename
+        self.dtype = np.dtype(dtypes)
+        self.size = num
+        self.itemsize = self.dtype.itemsize
+        self.data = {}
+        self.handle = None
+
+    def set_offset(self, offset):
+        self._offset = offset
+        if self.size == -1:
+            file_size = os.path.getsize(self.filename)
+            file_size -= offset
+            self.size = float(file_size) / self.itemsize
+            assert(int(self.size) == self.size)
+
+    def build_memmap(self):
+        assert(self.size != -1)
+        self.handle = np.memmap(self.filename, dtype=self.dtype,
+                        mode='r', shape=self.size, offset=self._offset)
+        for k in self.dtype.names:
+            self.data[k] = self.handle[k]
+
+class SDFRead(dict):
+
+    """docstring for SDFRead"""
+
+    _eof = 'SDF-EOH'
+
+    def __init__(self, filename, header=None):
+        self.filename = filename
+        if header is None:
+            header = filename
+        self.header = header
+        self.parameters = {}
+        self.structs = []
+        self.comments = []
+        self.parse_header()
+        self.set_offsets()
+        self.load_memmaps()
+
+    def parse_header(self):
+        """docstring for parse_header"""
+        # Pre-process
+        ascfile = open(self.header, 'r')
+        while True:
+            l = ascfile.readline()
+            if self._eof in l: break
+
+            self.parse_line(l, ascfile)
+
+        hoff = ascfile.tell()
+        ascfile.close()
+        if self.header != self.filename:
+            hoff = 0
+        self.parameters['header_offset'] = hoff
+
+    def parse_line(self, line, ascfile):
+        """Parse a line of sdf"""
+
+
+        if 'struct' in line:
+            self.parse_struct(line, ascfile)
+            return
+
+        if "#" in line:
+            self.comments.append(line)
+            return
+
+        spl = lstrip(line.split("="))
+        vtype, vname = lstrip(spl[0].split())
+        vname = vname.strip("[]")
+        vval = spl[-1].strip(";")
+        if vtype == 'parameter':
+            self.parameters[vname] = vval
+            return
+        elif vtype == "char":
+            vtype = "str"
+
+        try:
+            vval = eval("np."+vtype+"(%s)" % vval)
+        except AttributeError:
+            vval = eval("np."+_types[vtype]+"(%s)" % vval)
+
+        self.parameters[vname] = vval
+
+    def parse_struct(self, line, ascfile):
+        assert 'struct' in line
+
+        str_types = []
+        comments = []
+        str_lines = []
+        l = ascfile.readline()
+        while "}" not in l:
+            vtype, vnames = get_struct_vars(l)
+            for v in vnames:
+                str_types.append((v, vtype))
+            l = ascfile.readline()
+        num = l.strip("}[]")
+        num = num.strip("\;\\\n]")
+        if len(num) == 0:
+            # We need to compute the number of records.  The DataStruct will
+            # handle this.
+            num = '-1'
+        num = int(num)
+        struct = DataStruct(str_types, num, self.filename)
+        self.structs.append(struct)
+        return
+
+    def set_offsets(self):
+        running_off = self.parameters['header_offset']
+        for struct in self.structs:
+            struct.set_offset(running_off)
+            running_off += struct.size * struct.itemsize
+        return
+
+    def load_memmaps(self):
+        for struct in self.structs:
+            struct.build_memmap()
+            self.update(struct.data)
+
+
+class SDFIndex(object):
+
+    """docstring for SDFIndex
+
+    This provides an index mechanism into the full SDF Dataset.
+
+    Most useful class methods:
+        get_cell_data(level, cell_iarr, fields)
+        iter_bbox_data(left, right, fields)
+        iter_ibbox_data(left, right, fields)
+
+    """
+    def __init__(self, sdfdata, indexdata, level=9):
+        super(SDFIndex, self).__init__()
+        self.sdfdata = sdfdata
+        self.indexdata = indexdata
+        self.level = level
+        self.rmin = None
+        self.rmax = None
+        self.domain_width = None
+        self.domain_buffer = 0
+        self.domain_dims = 0
+        self.domain_active_dims = 0
+        self.masks = {
+            "p" : int("011"*level, 2),
+            "t" : int("101"*level, 2),
+            "r" : int("110"*level, 2),
+            "z" : int("011"*level, 2),
+            "y" : int("101"*level, 2),
+            "x" : int("110"*level, 2),
+            2 : int("011"*level, 2),
+            1 : int("101"*level, 2),
+            0 : int("110"*level, 2),
+        }
+        self.dim_slices = {
+            "p" : slice(0, None, 3),
+            "t" : slice(1, None, 3),
+            "r" : slice(2, None, 3),
+            "z" : slice(0, None, 3),
+            "y" : slice(1, None, 3),
+            "x" : slice(2, None, 3),
+            2 : slice(0, None, 3),
+            1 : slice(1, None, 3),
+            0 : slice(2, None, 3),
+        }
+        self.set_bounds()
+
+    def set_bounds(self):
+        r_0 = self.sdfdata.parameters['R0']
+        DW = 2.0 * r_0
+
+        self.rmin = np.zeros(3)
+        self.rmax = np.zeros(3)
+        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
+        if sorted_rtp:
+            self.rmin[:] = [0.0, 0.0, -np.pi]
+            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
+        else:
+            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
+            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
+            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
+            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
+            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
+            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
+
+        #/* expand root for non-power-of-two */
+        expand_root = 0.0
+        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
+        if ic_Nmesh != 0:
+            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
+            if (f2 != ic_Nmesh):
+                expand_root = 1.0*f2/ic_Nmesh - 1.0;
+            print 'Expanding: ', f2, ic_Nmesh, expand_root
+        self.rmin *= 1.0 + expand_root
+        self.rmax *= 1.0 + expand_root
+        self.domain_width = self.rmax - self.rmin
+        self.domain_dims = 1 << self.level
+        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
+        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
+        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
+
+    def get_key(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
+        rep1 = np.binary_repr(i1, width=self.level)
+        rep2 = np.binary_repr(i2, width=self.level)
+        rep3 = np.binary_repr(i3, width=self.level)
+        inter = np.zeros(self.level*3, dtype='c')
+        inter[self.dim_slices[0]] = rep1
+        inter[self.dim_slices[1]] = rep2
+        inter[self.dim_slices[2]] = rep3
+        return int(inter.tostring(), 2)
+
+    def get_key_ijk(self, i1, i2, i3, level=None):
+        return self.get_key(np.array([i1, i2, i3]), level=level)
+
+    def get_slice_key(self, ind, dim='r'):
+        slb = np.binary_repr(ind, width=self.level)
+        expanded = np.array([0]*self.level*3, dtype='c')
+        expanded[self.dim_slices[dim]] = slb
+        return int(expanded.tostring(), 2)
+
+    def get_slice_chunks(self, slice_dim, slice_index):
+        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
+        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox_slow(self, ileft, iright):
+        """
+        Given left and right indices, return a mask and
+        set of offsets+lengths into the sdf data.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+        ileft = np.array(ileft)
+        iright = np.array(iright)
+        for i in range(3):
+            left_key = self.get_slice_key(ileft[i], dim=i)
+            right_key= self.get_slice_key(iright[i], dim=i)
+            dim_inds = (self.indexdata['index'] & ~self.masks[i])
+            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
+            del dim_inds
+
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox(self, ileft, iright):
+        """
+        Given left and right indices, return a mask and
+        set of offsets+lengths into the sdf data.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+
+        print 'Getting data from ileft to iright:',  ileft, iright
+
+        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+                           ileft[1]:iright[1]+1,
+                           ileft[2]:iright[2]+1]
+
+        X = X.ravel()
+        Y = Y.ravel()
+        Z = Z.ravel()
+        # Correct For periodicity
+        X[X < self.domain_buffer] += self.domain_active_dims
+        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Y[Y < self.domain_buffer] += self.domain_active_dims
+        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Z[Z < self.domain_buffer] += self.domain_active_dims
+        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+
+        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
+
+        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        indices = indices[indices < self.indexdata['index'].shape[0]]
+        return indices
+
+    def get_bbox(self, left, right):
+        """
+        Given left and right indices, return a mask and
+        set of offsets+lengths into the sdf data.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+
+        return self.get_ibbox(ileft, iright)
+
+    def get_data(self, chunk, fields):
+        data = {}
+        for field in fields:
+            data[field] = self.sdfdata[field][chunk]
+        return data
+
+    def iter_data(self, inds, fields):
+        num_inds = len(inds)
+        num_reads = 0
+        print 'Reading %i chunks' % num_inds
+        i = 0
+        while (i < num_inds):
+            ind = inds[i]
+            base = self.indexdata['base'][ind]
+            length = self.indexdata['len'][ind]
+            # Concatenate aligned reads
+            nexti = i+1
+            combined = 0
+            while nexti < len(inds):
+                nextind = inds[nexti]
+                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
+                if base + length == self.indexdata['base'][nextind]:
+                    length += self.indexdata['len'][nextind]
+                    i += 1
+                    nexti += 1
+                    combined += 1
+                else:
+                    break
+
+            chunk = slice(base, base+length)
+            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            num_reads += 1
+            data = self.get_data(chunk, fields)
+            yield data
+            del data
+            i += 1
+        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
+
+    def iter_bbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_bbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def iter_ibbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_ibbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def get_contiguous_chunk(self, left_key, right_key, fields):
+        max_key = self.indexdata['index'][-1]
+        if left_key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
+        base = self.indexdata['base'][left_key]
+        right_key = min(right_key, self.indexdata['index'][-1])
+        length = self.indexdata['base'][right_key] + \
+            self.indexdata['len'][right_key] - base
+        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        return self.get_data(slice(base, base + length), fields)
+
+    def iter_slice_data(self, slice_dim, slice_index, fields):
+        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
+        for off, l in zip(offsets, lengths):
+            data = {}
+            chunk = slice(off, off+l)
+            for field in fields:
+                data[field] = self.sdfdata[field][chunk]
+            yield data
+            del data
+
+    def get_key_bounds(self, level, cell_iarr):
+        """
+        Get index keys for index file supplied.
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+
+        Returns:
+            lmax_lk, lmax_rk
+        """
+        shift = self.level-level
+        level_buff = 0
+        level_lk = self.get_key(cell_iarr + level_buff)
+        level_rk = self.get_key(cell_iarr + level_buff) + 1
+        lmax_lk = (level_lk << shift*3)
+        lmax_rk = (((level_rk) << shift*3) -1)
+        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
+        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
+        return lmax_lk, lmax_rk
+
+    def get_cell_data(self, level, cell_iarr, fields):
+        """
+        Get data from requested cell
+
+        This uses the raw cell index, and doesn't account for periodicity or
+        an expanded domain (non-power of 2).
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+        fields: list
+            Requested fields
+
+        Returns:
+            cell_data: dict
+                Dictionary of field_name, field_data
+        """
+        cell_iarr = np.array(cell_iarr)
+        lk, rk =self.get_key_bounds(level, cell_iarr)
+        return self.get_contiguous_chunk(lk, rk, fields)
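
For orientation, a hedged sketch (hypothetical file names) of the two helper classes above: SDFRead parses the ASCII header and memory-maps the binary struct blocks, exposing columns dict-style, while SDFIndex batches reads of the index cells overlapping a bounding box:

    import numpy as np
    from yt.frontends.sdf.io import SDFRead, SDFIndex

    data = SDFRead("output.sdf")           # header parameters end up in .parameters
    print(data.parameters["R0"])
    print(data["x"][:10])                  # struct columns behave like dict entries

    index = SDFIndex(data, SDFRead("output.sdf.midx"), level=9)
    left = np.array([-1.0, -1.0, -1.0])
    right = np.array([1.0, 1.0, 1.0])
    for chunk in index.iter_bbox_data(left, right, ["x", "y", "z"]):
        print(chunk["x"].size)             # one dict of arrays per batched read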

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/setup.py
--- /dev/null
+++ b/yt/frontends/sdf/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+import glob
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sdf', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -22,6 +22,7 @@
 #    config.add_subpackage("artio2")
     config.add_subpackage("pluto")
     config.add_subpackage("ramses")
+    config.add_subpackage("sdf")
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -116,7 +116,7 @@
 except ImportError:
     pass
 
-def get_memory_usage():
+def get_memory_usage(subtract_share = False):
     """
     Returning resident size in megabytes
     """
@@ -130,6 +130,7 @@
         return -1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
+    if subtract_share: resident -= share
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
@@ -681,7 +682,7 @@
     return s
 
 @contextlib.contextmanager
-def memory_checker(interval = 15):
+def memory_checker(interval = 15, dest = None):
     r"""This is a context manager that monitors memory usage.
 
     Parameters
@@ -699,6 +700,8 @@
     ...     del arr
     """
     import threading
+    if dest is None:
+        dest = sys.stdout
     class MemoryChecker(threading.Thread):
         def __init__(self, event, interval):
             self.event = event
@@ -707,7 +710,7 @@
 
         def run(self):
             while not self.event.wait(self.interval):
-                print "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
+                print >> dest, "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
 
     e = threading.Event()
     mem_check = MemoryChecker(e, interval)
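
The new dest keyword on memory_checker only redirects where the periodic MEMORY lines are printed; a small sketch of its use:

    import sys
    import numpy as np
    from yt.funcs import memory_checker

    # Report resident memory every 5 seconds to stderr instead of stdout.
    with memory_checker(interval=5, dest=sys.stderr):
        arr = np.zeros((1024, 1024, 128), dtype="float64")
        del arr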

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -67,7 +67,8 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int num_domains
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?,
+                  int max_level = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors,
                          Oct *o)

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -264,24 +264,25 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
-                  ):
+                  int max_level = 99):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3], level
+        cdef int ind32[3]
         cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
         cdef int i
         cur = next = NULL
-        level = -1
+        cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
-        self.get_root(ind, &next)
+            ind32[i] = ind[i]
+        self.get_root(ind32, &next)
         # We want to stop recursing when there's nowhere else to go
-        while next != NULL:
+        while next != NULL and level <= max_level:
             level += 1
             for i in range(3):
                 ipos[i] = (ipos[i] << 1) + ind[i]
@@ -345,7 +346,8 @@
         # grow a stack of parent Octs.
         # Note that in the first iteration, we will just find the up-to-27
         # neighbors, including the main oct.
-        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef np.int64_t i, j, k, n, level, ii, nfound = 0, dlevel
+        cdef int ind[3]
         cdef OctList *olist, *my_list
         my_list = olist = NULL
         cdef Oct *cand
@@ -355,6 +357,7 @@
         # ndim is the oct dimensions of the level, not the cell dimensions.
         for i in range(3):
             ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            # Here we adjust for oi.dds meaning *cell* width.
             ndim[i] = (ndim[i] >> self.oref)
         my_list = olist = OctList_append(NULL, o)
         for i in range(3):
@@ -379,9 +382,10 @@
                     # correctly, but we might.
                     if cand == NULL: continue
                     for level in range(1, oi.level+1):
+                        dlevel = oi.level - level
                         if cand.children == NULL: break
                         for n in range(3):
-                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                            ind[n] = (npos[n] >> dlevel) & 1
                         ii = cind(ind[0],ind[1],ind[2])
                         if cand.children[ii] == NULL: break
                         cand = cand.children[ii]
@@ -908,6 +912,7 @@
                 domain_left_edge, domain_right_edge, partial_coverage,
                  over_refine)
         self.fill_func = oct_visitors.fill_file_indices_rind
+
 cdef OctList *OctList_subneighbor_find(OctList *olist, Oct *top,
                                        int i, int j, int k):
     if top.children == NULL: return olist
@@ -920,7 +925,7 @@
     # For now, we assume we will not be doing this along all three zeros,
     # because that would be pretty tricky.
     if i == j == k == 0: return olist
-    cdef int n[3], ind[3], off[3][2], ii, ij, ik, ci
+    cdef np.int64_t n[3], ind[3], off[3][2], ii, ij, ik, ci
     ind[0] = 1 - i
     ind[1] = 1 - j
     ind[2] = 1 - k

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -36,6 +36,9 @@
     np.int64_t pn       # Particle number
     np.float64_t r2     # radius**2
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
 cdef inline np.float64_t r2dist(np.float64_t ppos[3],
                                 np.float64_t cpos[3],
                                 np.float64_t DW[3],

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -28,8 +28,9 @@
 from .amr_kdtools cimport _find_node, Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
+import sys
 
-cdef ContourID *contour_create(np.int64_t contour_id,
+cdef inline ContourID *contour_create(np.int64_t contour_id,
                                ContourID *prev = NULL):
     node = <ContourID *> malloc(sizeof(ContourID))
     #print "Creating contour with id", contour_id
@@ -40,12 +41,12 @@
     if prev != NULL: prev.next = node
     return node
 
-cdef void contour_delete(ContourID *node):
+cdef inline void contour_delete(ContourID *node):
     if node.prev != NULL: node.prev.next = node.next
     if node.next != NULL: node.next.prev = node.prev
     free(node)
 
-cdef ContourID *contour_find(ContourID *node):
+cdef inline ContourID *contour_find(ContourID *node):
     cdef ContourID *temp, *root
     root = node
     # First we find the root
@@ -61,7 +62,7 @@
         node = temp
     return root
 
-cdef void contour_union(ContourID *node1, ContourID *node2):
+cdef inline void contour_union(ContourID *node1, ContourID *node2):
     node1 = contour_find(node1)
     node2 = contour_find(node2)
     if node1.contour_id < node2.contour_id:
@@ -69,7 +70,7 @@
     elif node2.contour_id < node1.contour_id:
         node1.parent = node2
 
-cdef int candidate_contains(CandidateContour *first,
+cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
                             np.int64_t join_id = -1):
     while first != NULL:
@@ -78,7 +79,7 @@
         first = first.next
     return 0
 
-cdef CandidateContour *candidate_add(CandidateContour *first,
+cdef inline CandidateContour *candidate_add(CandidateContour *first,
                                      np.int64_t contour_id,
                                      np.int64_t join_id = -1):
     cdef CandidateContour *node
@@ -617,7 +618,7 @@
 
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
-    cdef np.float64_t DW[3]
+    cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
 
     def __init__(self, linking_length):
@@ -625,6 +626,7 @@
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
@@ -633,7 +635,7 @@
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
                                 int domain_id = -1, int domain_offset = 0,
                                 periodicity = (True, True, True),
-                                minimum_count = 8):
+                                int minimum_count = 8):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -641,22 +643,27 @@
         cdef ContourID *c0, *c1
         cdef np.int64_t moff = octree.get_domain_offset(domain_id + domain_offset)
         cdef np.int64_t i, j, k, n, nneighbors, pind0, offset
+        cdef int counter = 0
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
-        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        pdoms = np.zeros(positions.shape[0], dtype="int64")
+        pdoms -= 1
         cdef np.int64_t *pdom = <np.int64_t*> pdoms.data
         # First we allocate our container
         cdef ContourID **container = <ContourID**> malloc(
             sizeof(ContourID*) * positions.shape[0])
         for i in range(3):
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+            self.DLE[i] = octree.DLE[i]
+            self.DRE[i] = octree.DRE[i]
             self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
+            counter += 1
             container[i] = NULL
             for j in range(3):
                 pos[j] = positions[i, j]
-            oct = octree.get(pos)
+            oct = octree.get(pos, NULL)
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             offset = oct.domain_ind - moff
@@ -670,14 +677,18 @@
             offset = pdoms[pind[i]]
             if doff[offset] < 0:
                 doff[offset] = i
+        del pdoms
         cdef int nsize = 27
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
-        cdef int counter = 0
+        counter = 0
+        cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
+        print >> sys.stderr, "Will be outputting every", frac
+        cdef int inside, skip_early
         for i in range(doff.shape[0]):
+            if counter >= frac:
+                counter = 0
+                print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             counter += 1
-            if counter == 10000:
-                counter = 0
-                #print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             # Any particles found for this oct?
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -703,7 +714,8 @@
                     break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
-            # Now we look at each particle.
+            # We might know that all our internal particles are linked.
+            # Otherwise, we look at each particle.
             for j in range(pcount[i]):
                 # Note that this offset is the particle index
                 pind0 = pind[doff[i] + j]
@@ -721,28 +733,32 @@
                                         offset, pind0, 
                                         doff[i] + j)
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
-        contour_ids = -1 * np.ones(positions.shape[0], dtype="int64")
+        contour_ids = np.ones(positions.shape[0], dtype="int64")
+        contour_ids *= -1
         # Sort on our particle IDs.
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 c0 = contour_find(c1)
-                contour_ids[pind[poffset]] = c0.contour_id
+                contour_ids[offset] = c0.contour_id
                 c0.count += 1
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
+                offset = pind[offset]
                 if c0.count < minimum_count:
-                    contour_ids[pind[poffset]] = -1
+                    contour_ids[offset] = -1
         free(container)
+        del pind
         return contour_ids
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
@@ -753,7 +769,8 @@
                                    np.int64_t pind0,
                                    np.int64_t poffset):
         # Now we look at each particle and evaluate it
-        cdef np.float64_t pos0[3], pos1[3], d
+        cdef np.float64_t pos0[3], pos1[3], edges[2][3]
+        cdef int link
         cdef ContourID *c0, *c1
         cdef np.int64_t pind1
         cdef int i, j, k
@@ -765,23 +782,62 @@
             self.last = c0
             if self.first == NULL:
                 self.first = c0
-        c0 = contour_find(c0)
-        container[pind0] = c0
+        c0 = container[pind0] = contour_find(c0)
         for i in range(3):
+            # We make a very conservative guess here about the edges.
             pos0[i] = positions[pind0*3 + i]
+            edges[0][i] = pos0[i] - self.linking_length*1.01
+            edges[1][i] = pos0[i] + self.linking_length*1.01
+            if edges[0][i] < self.DLE[i] or edges[0][i] > self.DRE[i]:
+                # We skip this one, since we're close to the boundary
+                edges[0][i] = -1e30
+                edges[1][i] = 1e30
+        # Lets set up some bounds for the particles.  Maybe we can get away
+        # with reducing our number of calls to r2dist_early.
         for i in range(pcount):
             pind1 = pind[noffset + i]
             if pind1 == pind0: continue
+            c1 = container[pind1]
+            if c1 != NULL and c1.contour_id == c0.contour_id:
+                # Already linked.
+                continue
             for j in range(3):
                 pos1[j] = positions[pind1*3 + j]
-            d = r2dist(pos0, pos1, self.DW, self.periodicity)
-            if d > self.linking_length2:
-                continue
-            c1 = container[pind1]
+            link = r2dist_early(pos0, pos1, self.DW, self.periodicity,
+                                self.linking_length2, edges)
+            if link == 0: continue
             if c1 == NULL:
-                container[pind1] = c1 = contour_create(
-                    noffset + i, self.last)
-            contour_union(c0, c1)
-            c0 = c1 = contour_find(c0)
-            container[pind1] = c0
-            container[pind0] = c0
+                container[pind1] = c0
+            elif c0.contour_id != c1.contour_id:
+                contour_union(c0, c1)
+                c0 = container[pind1] = container[pind0] = contour_find(c0)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline int r2dist_early(np.float64_t ppos[3],
+                             np.float64_t cpos[3],
+                             np.float64_t DW[3],
+                             bint periodicity[3],
+                             np.float64_t max_r2,
+                             np.float64_t edges[2][3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    cdef int inside = 0
+    for i in range(3):
+        if cpos[i] < edges[0][i]:
+            return 0
+        if cpos[i] > edges[1][i]:
+            return 0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if r2 > max_r2: return 0
+    return 1
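
For readability, an illustrative pure-Python transcription of the early-exit test above (the committed code is the Cython version): a candidate is rejected as soon as it lies outside the padded bounding box or its running squared distance exceeds the linking length squared.

    def r2dist_early(ppos, cpos, DW, periodicity, max_r2, edges):
        # Cheap axis-aligned bounding-box rejection first.
        for i in range(3):
            if cpos[i] < edges[0][i] or cpos[i] > edges[1][i]:
                return False
        r2 = 0.0
        for i in range(3):
            DR = ppos[i] - cpos[i]
            if periodicity[i]:
                # Minimum-image convention for periodic dimensions.
                if DR > DW[i] / 2.0:
                    DR -= DW[i]
                elif DR < -DW[i] / 2.0:
                    DR += DW[i]
            r2 += DR * DR
            if r2 > max_r2:    # bail out as soon as the bound is exceeded
                return False
        return True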

Repository URL: https://bitbucket.org/yt_analysis/yt/
