[yt-svn] commit/yt: 39 new changesets

commits-noreply at bitbucket.org
Sat May 3 07:59:19 PDT 2014


39 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/ac4edc3ac26b/
Changeset:   ac4edc3ac26b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 18:45:02
Summary:     Initial import of a data_structures.py file for SDF.
Affected #:  1 file

diff -r 7bd133d91d3934b8f4617f73b01cd102fb23af8b -r ac4edc3ac26bddfe6a8fe0bb5a811315ad108ae3 yt/frontends/sdf/data_structures.py
--- /dev/null
+++ b/yt/frontends/sdf/data_structures.py
@@ -0,0 +1,45 @@
+"""
+Data structures for a generic SDF frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+import types
+
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.utilities.physical_constants import \
+    G, \
+    cm_per_kpc, \
+    mass_sun_cgs
+from yt.utilities.cosmology import Cosmology
+from .fields import \
+    SDFFieldInfo
+
+class SDFFile(ParticleFile):
+    def __init__(self, pf, io, filename, file_id):
+        pass
+
+class SDFParticleDataset(Dataset):
+    pass


https://bitbucket.org/yt_analysis/yt/commits/812e162d0252/
Changeset:   812e162d0252
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 18:46:42
Summary:     Adding empty IO file
Affected #:  1 file

diff -r ac4edc3ac26bddfe6a8fe0bb5a811315ad108ae3 -r 812e162d02526171907f8482d32f4587724e0cbe yt/frontends/sdf/io.py
--- /dev/null
+++ b/yt/frontends/sdf/io.py
@@ -0,0 +1,51 @@
+"""
+SDF data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import glob
+import h5py
+import numpy as np
+from yt.funcs import *
+from yt.utilities.exceptions import *
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.fortran_utils import read_record
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+
+class IOHandlerSDF(BaseIOHandler):
+    _dataset_type = "SDF"
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        pass
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        pass
+
+    def _initialize_index(self, data_file, regions):
+        pass
+
+    def _count_particles(self, data_file):
+        pass
+
+    def _identify_fields(self, data_file):
+        return fields, {}
+


https://bitbucket.org/yt_analysis/yt/commits/59b3f3470666/
Changeset:   59b3f3470666
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-24 18:53:42
Summary:     Adding SDFRead and DataStruct to sdf/io.py. Needs to be linked up with actual io routines.
Affected #:  1 file

diff -r 812e162d02526171907f8482d32f4587724e0cbe -r 59b3f347066647b5dac90ef6a567b25c7daa19d8 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -49,3 +49,154 @@
     def _identify_fields(self, data_file):
         return fields, {}
 
+import re
+import os
+
+types = {
+    'int': 'int32',
+    'int64_t': 'int64',
+    'float': 'float32',
+    'double': 'float64',
+    'unsigned int': 'I',
+    'unsigned char': 'B',
+}
+
+def get_type(vtype, len=None):
+    try:
+        t = types[vtype]
+        if len is not None:
+            t = np.dtype((t, len))
+        else:
+            t = np.dtype(t)
+    except KeyError:
+        t = eval("np."+vtype)
+    print 'Interpreting as type: ', t
+    return t
+
+def lstrip(text_list):
+    return [t.strip() for t in text_list]
+
+
+class DataStruct(object):
+    """docstring for DataStruct"""
+
+    _offset = 0
+
+    def __init__(self, dtypes, num, filename):
+        self.filename = filename
+        self.dtype = np.dtype(dtypes)
+        self.size = num
+        self.itemsize = self.dtype.itemsize
+        self.data = {}
+        self.handle = None
+
+    def set_offset(self, offset):
+        self._offset = offset
+        if self.size == -1:
+            file_size = os.path.getsize(self.filename) 
+            file_size -= offset
+            self.size = float(file_size) / self.itemsize
+            assert(int(self.size) == self.size)
+
+    def build_memmap(self):
+        assert(self.size != -1)
+        self.handle = np.memmap(self.filename, dtype=self.dtype,
+                        mode='r', shape=self.size, offset=self._offset)
+        for k in self.dtype.names:
+            self.data[k] = self.handle[k]
+
+class SDFRead(dict):
+
+    """docstring for SDFRead"""
+
+    _eof = 'SDF-EOH'
+
+    def __init__(self, filename):
+        self.filename = filename
+        self.parameters = {}
+        self.structs = []
+        self.comments = []
+        self.parse_header()
+        self.set_offsets()
+        self.load_memmaps()
+
+    def parse_header(self):
+        """docstring for parse_header"""
+        # Pre-process
+        ascfile = open(self.filename, 'r')
+        while True:
+            l = ascfile.readline()
+            if self._eof in l: break
+
+            self.parse_line(l, ascfile)
+
+        hoff = ascfile.tell()
+        ascfile.close()
+        self.parameters['header_offset'] = hoff
+
+    def parse_line(self, line, ascfile):
+        """Parse a line of sdf"""
+
+
+        if 'struct' in line:
+            self.parse_struct(line, ascfile)
+            return
+
+        if "#" in line:
+            self.comments.append(line)
+            return
+
+        spl = lstrip(line.split("="))
+        vtype, vname = lstrip(spl[0].split())
+        vname = vname.strip("[]")
+        vval = spl[-1].strip(";")
+        if vtype == 'parameter':
+            self.parameters[vname] = vval
+            return
+        elif vtype == "char":
+            vtype = "str"
+
+        try:
+            vval = eval("np."+vtype+"(%s)" % vval)
+        except AttributeError:
+            vval = eval("np."+types[vtype]+"(%s)" % vval)
+
+        self.parameters[vname] = vval
+
+    def parse_struct(self, line, ascfile):
+        assert 'struct' in line
+
+        str_types = []
+        comments = []
+        str_lines = []
+        l = ascfile.readline()
+        while "}" not in l:
+            vtype, vnames = get_struct_vars(l)
+            for v in vnames:
+                str_types.append((v, vtype))
+            l = ascfile.readline()
+        num = l.strip("}[]")
+        num = num.strip("\;\\\n]")
+        if len(num) == 0:
+            # We need to compute the number of records.  The DataStruct will
+            # handle this.
+            num = '-1'
+        num = int(num)
+        struct = DataStruct(str_types, num, self.filename)
+        self.structs.append(struct)
+        return
+
+    def set_offsets(self):
+        running_off = self.parameters['header_offset']
+        for struct in self.structs:
+            struct.set_offset(running_off)
+            running_off += struct.size * struct.itemsize
+        return
+
+    def load_memmaps(self):
+        for struct in self.structs:
+            struct.build_memmap()
+            self.update(struct.data)
+
+
+
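For orientation, here is a minimal usage sketch of the SDFRead/DataStruct pair added above (the file name and column name are hypothetical): the header is plain ASCII up to the SDF-EOH sentinel, and the bytes after it are fixed-size binary records described by the struct declarations, exposed as lazily read numpy memmaps.

    from yt.frontends.sdf.io import SDFRead

    sdf = SDFRead("example.sdf")              # hypothetical path
    print sdf.parameters["header_offset"]     # byte offset where the binary section begins
    x = sdf["x"]                              # one struct member, backed by a numpy memmap
    print x.dtype, x.size                     # data is only read from disk on access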


https://bitbucket.org/yt_analysis/yt/commits/fb4eaf4444fb/
Changeset:   fb4eaf4444fb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 19:00:59
Summary:     Adding Sam's get_struct_vars function.
Affected #:  1 file

diff -r 59b3f347066647b5dac90ef6a567b25c7daa19d8 -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -31,6 +31,11 @@
 class IOHandlerSDF(BaseIOHandler):
     _dataset_type = "SDF"
 
+    def __init__(self, *args, **kwargs):
+        super(IOHandlerSDF, self).__init__(*args, **kwargs)
+        # Now we create our sdf file reader
+        self.sdf_handle = SDFRead(self.pf.sdf_handle)
+
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
@@ -52,7 +57,7 @@
 import re
 import os
 
-types = {
+_types = {
     'int': 'int32',
     'int64_t': 'int64',
     'float': 'float32',
@@ -63,19 +68,34 @@
 
 def get_type(vtype, len=None):
     try:
-        t = types[vtype]
+        t = _types[vtype]
         if len is not None:
             t = np.dtype((t, len))
         else:
             t = np.dtype(t)
     except KeyError:
         t = eval("np."+vtype)
-    print 'Interpreting as type: ', t
     return t
 
 def lstrip(text_list):
     return [t.strip() for t in text_list]
 
+def get_struct_vars(line):
+    spl = lstrip(line.split(";"))
+    multiv = lstrip(spl[0].split(","))
+    ret = lstrip(multiv[0].split())
+    ctype = ret[0]
+    vnames = [ret[-1]] + multiv[1:]
+    vnames = [v.strip() for v in vnames]
+    for vtype in ret[1:-1]:
+        ctype += ' ' + vtype
+    num = None
+    if len(vnames) == 1:
+        if '[' in vnames[0]:
+            num = int(vnames[0].split('[')[-1].strip(']'))
+            #num = int(re.sub("\D", "", vnames[0]))
+    ctype = get_type(ctype, len=num)
+    return ctype, vnames
 
 class DataStruct(object):
     """docstring for DataStruct"""
@@ -159,7 +179,7 @@
         try:
             vval = eval("np."+vtype+"(%s)" % vval)
         except AttributeError:
-            vval = eval("np."+types[vtype]+"(%s)" % vval)
+            vval = eval("np."+_types[vtype]+"(%s)" % vval)
 
         self.parameters[vname] = vval
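As a quick illustration (the input line is made up), get_struct_vars turns one struct member declaration into a numpy dtype plus the list of member names; parse_struct then collects those (name, dtype) pairs into the record dtype handed to DataStruct:

    from yt.frontends.sdf.io import get_struct_vars

    ctype, vnames = get_struct_vars("float x, y, z;\n")
    # ctype  -> dtype('float32'), via the _types lookup table
    # vnames -> ['x', 'y', 'z']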
 


https://bitbucket.org/yt_analysis/yt/commits/3f1b23e6eeec/
Changeset:   3f1b23e6eeec
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 22:25:20
Summary:     Adding basic SDF file loading.
Affected #:  8 files

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -29,6 +29,7 @@
     'moab',
     #'pluto',
     'ramses',
+    'sdf',
     'sph',
     'stream',
 ]

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/sdf/__init__.py
--- /dev/null
+++ b/yt/frontends/sdf/__init__.py
@@ -0,0 +1,15 @@
+"""
+API for yt.frontends.gadget
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/sdf/api.py
--- /dev/null
+++ b/yt/frontends/sdf/api.py
@@ -0,0 +1,24 @@
+"""
+API for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      SDFDataset
+
+from .io import \
+      IOHandlerSDF
+
+from .fields import \
+      SDFFieldInfo

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -36,10 +36,78 @@
 from yt.utilities.cosmology import Cosmology
 from .fields import \
     SDFFieldInfo
+from .io import \
+    IOHandlerSDF, \
+    SDFRead
 
 class SDFFile(ParticleFile):
     def __init__(self, pf, io, filename, file_id):
         pass
 
-class SDFParticleDataset(Dataset):
-    pass
+class SDFDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = SDFFile
+    _field_info_class = SDFFieldInfo
+    _particle_mass_name = None
+    _particle_coordinates_name = None
+    _particle_velocity_name = None
+
+    def __init__(self, filename, dataset_type = "sdf_particles",
+                 n_ref = 64, over_refine_factor = 1,
+                 bounding_box = None):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        if bounding_box is not None:
+            bbox = np.array(bounding_box, dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
+            self.domain_left_edge = bbox[:,0]
+            self.domain_right_edge = bbox[:,1]
+        else:
+            self.domain_left_edge = self.domain_right_edge = None
+        super(SDFDataset, self).__init__(filename, dataset_type)
+
+    def _parse_parameter_file(self):
+        self.sdf_container = SDFRead(self.parameter_filename)
+        # Reference
+        self.parameters = self.sdf_container.parameters
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        if None in (self.domain_left_edge, self.domain_right_edge):
+            self.domain_left_edge = np.array([
+              -self.parameters["R%s" % ax] for ax in 'xyz'])
+            self.domain_right_edge = np.array([
+              +self.parameters["R%s" % ax] for ax in 'xyz'])
+            self.domain_left_edge *= self.parameters["a"]
+            self.domain_right_edge *= self.parameters["a"]
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.cosmological_simulation = 1
+
+        self.current_redshift = self.parameters["redshift"]
+        self.omega_lambda = self.parameters["Omega0_lambda"]
+        self.omega_matter = self.parameters["Omega0_m"]
+        self.hubble_constant = self.parameters["hubble"]
+        # Now we calculate our time based on the cosmology.
+        cosmo = Cosmology(self.hubble_constant,
+                          self.omega_matter, self.omega_lambda)
+        self.current_time = cosmo.hubble_time(self.current_redshift)
+        mylog.info("Calculating time to be %0.3e seconds", self.current_time)
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "kpc")
+        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
+        self.time_unit = self.quan(1.0, "Gyr")
+        self.mass_unit = self.quan(1e10, "Msun")
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        with open(args[0], "r") as f:
+            line = f.readline().strip()
+            return line == "# SDF 1.0"

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/sdf/definitions.py
--- /dev/null
+++ b/yt/frontends/sdf/definitions.py
@@ -0,0 +1,58 @@
+
+gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
+ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
+                 "PartType4", "PartType5")
+
+gadget_header_specs = dict(
+    default      = (('Npart', 6, 'i'),
+                    ('Massarr', 6, 'd'),
+                    ('Time', 1, 'd'),
+                    ('Redshift', 1, 'd'),
+                    ('FlagSfr', 1, 'i'),
+                    ('FlagFeedback', 1, 'i'),
+                    ('Nall', 6, 'i'),
+                    ('FlagCooling', 1, 'i'),
+                    ('NumFiles', 1, 'i'),
+                    ('BoxSize', 1, 'd'),
+                    ('Omega0', 1, 'd'),
+                    ('OmegaLambda', 1, 'd'),
+                    ('HubbleParam', 1, 'd'),
+                    ('FlagAge', 1, 'i'),
+                    ('FlagMEtals', 1, 'i'),
+                    ('NallHW', 6, 'i'),
+                    ('unused', 16, 'i')),
+    pad32       = (('empty',  32, 'c'),),
+    pad64       = (('empty',  64, 'c'),),
+    pad128      = (('empty', 128, 'c'),),
+    pad256      = (('empty', 256, 'c'),),
+)
+
+gadget_ptype_specs = dict(
+    default = ( "Gas",
+                "Halo",
+                "Disk",
+                "Bulge",
+                "Stars",
+                "Bndry" )
+)
+
+gadget_field_specs = dict(
+    default = ( "Coordinates",
+                "Velocities",
+                "ParticleIDs",
+                "Mass",
+                ("InternalEnergy", "Gas"),
+                ("Density", "Gas"),
+                ("SmoothingLength", "Gas"),
+    ),
+    agora_unlv = ( "Coordinates",
+                   "Velocities",
+                   "ParticleIDs",
+                   "Mass",
+                   ("InternalEnergy", "Gas"),
+                   ("Density", "Gas"),
+                   ("Electron_Number_Density", "Gas"),
+                   ("HI_NumberDensity", "Gas"),
+                   ("SmoothingLength", "Gas"),
+    )
+)

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/sdf/fields.py
--- /dev/null
+++ b/yt/frontends/sdf/fields.py
@@ -0,0 +1,54 @@
+"""
+SDF-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+
+from yt.funcs import *
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+from yt.config import ytcfg
+from yt.utilities.physical_constants import mh
+from yt.fields.species_fields import \
+    add_species_field_by_fraction, \
+    add_species_field_by_density, \
+    setup_species_fields
+
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
+
+class SDFFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+
+    known_particle_fields = (
+        ("Mass", ("code_mass", ["particle_mass"], None)),
+        ("Masses", ("code_mass", ["particle_mass"], None)),
+        ("Coordinates", ("code_length", ["particle_position"], None)),
+        ("Velocity", ("code_velocity", ["particle_velocity"], None)),
+        ("Velocities", ("code_velocity", ["particle_velocity"], None)),
+        ("ParticleIDs", ("", ["particle_index"], None)),
+        ("InternalEnergy", ("", ["thermal_energy"], None)),
+        ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
+        ("Density", ("code_mass / code_length**3", ["density"], None)),
+        ("MaximumTemperature", ("K", [], None)),
+        ("Temperature", ("K", ["temperature"], None)),
+        ("Epsilon", ("code_length", [], None)),
+        ("Metals", ("code_metallicity", ["metallicity"], None)),
+        ("Phi", ("code_length", [], None)),
+        ("FormationTime", ("code_time", ["creation_time"], None)),
+    )

diff -r fb4eaf4444fbe9dc7df245e1c5c64472150f7752 -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 yt/frontends/sdf/setup.py
--- /dev/null
+++ b/yt/frontends/sdf/setup.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+import glob
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sph', parent_package, top_path)
+    config.add_extension("smoothing_kernel",
+        ["yt/frontends/sph/smoothing_kernel.pyx"],
+        include_dirs=["yt/frontends/sph/",
+                      "yt/geometry/",
+                      "yt/utilities/lib/"],
+        depends=glob.glob("yt/geometry/*.pxd"),
+        )
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config
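A minimal loading sketch for the dataset class added above, with a hypothetical file name; _is_valid keys on the literal first line of the file being "# SDF 1.0", and the domain is taken either from an explicit bounding_box or from the Rx/Ry/Rz header parameters scaled by the expansion factor a:

    from yt.frontends.sdf.api import SDFDataset

    ds = SDFDataset("example.sdf")     # hypothetical path; first line must read "# SDF 1.0"
    print ds.current_redshift, ds.hubble_constant
    print ds.domain_left_edge, ds.domain_right_edge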


https://bitbucket.org/yt_analysis/yt/commits/15c5af50de95/
Changeset:   15c5af50de95
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 23:02:00
Summary:     Implementing IO for SDF files.
Affected #:  3 files

diff -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 -r 15c5af50de9525c06253f224986c4719da9b66f8 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -41,8 +41,7 @@
     SDFRead
 
 class SDFFile(ParticleFile):
-    def __init__(self, pf, io, filename, file_id):
-        pass
+    pass
 
 class SDFDataset(Dataset):
     _index_class = ParticleIndex
@@ -99,6 +98,8 @@
                           self.omega_matter, self.omega_lambda)
         self.current_time = cosmo.hubble_time(self.current_redshift)
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
 
     def _set_code_unit_attributes(self):
         self.length_unit = self.quan(1.0, "kpc")

diff -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 -r 15c5af50de9525c06253f224986c4719da9b66f8 yt/frontends/sdf/fields.py
--- a/yt/frontends/sdf/fields.py
+++ b/yt/frontends/sdf/fields.py
@@ -36,19 +36,12 @@
     known_other_fields = ()
 
     known_particle_fields = (
-        ("Mass", ("code_mass", ["particle_mass"], None)),
-        ("Masses", ("code_mass", ["particle_mass"], None)),
-        ("Coordinates", ("code_length", ["particle_position"], None)),
-        ("Velocity", ("code_velocity", ["particle_velocity"], None)),
-        ("Velocities", ("code_velocity", ["particle_velocity"], None)),
-        ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("", ["thermal_energy"], None)),
-        ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
-        ("Density", ("code_mass / code_length**3", ["density"], None)),
-        ("MaximumTemperature", ("K", [], None)),
-        ("Temperature", ("K", ["temperature"], None)),
-        ("Epsilon", ("code_length", [], None)),
-        ("Metals", ("code_metallicity", ["metallicity"], None)),
-        ("Phi", ("code_length", [], None)),
-        ("FormationTime", ("code_time", ["creation_time"], None)),
+        ("mass", ("code_mass", ["particle_mass"], None)),
+        ("x", ("code_length", ["particle_position_x"], None)),
+        ("y", ("code_length", ["particle_position_y"], None)),
+        ("z", ("code_length", ["particle_position_z"], None)),
+        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
+        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
+        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
+        ("ident", ("", ["particle_identifier"], None)),
     )

diff -r 3f1b23e6eeec0e51fd42e3b6417e86132c44eb70 -r 15c5af50de9525c06253f224986c4719da9b66f8 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -27,31 +27,91 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
+CHUNKSIZE = 32**3
 
 class IOHandlerSDF(BaseIOHandler):
-    _dataset_type = "SDF"
+    _dataset_type = "sdf_particles"
 
-    def __init__(self, *args, **kwargs):
-        super(IOHandlerSDF, self).__init__(*args, **kwargs)
-        # Now we create our sdf file reader
-        self.sdf_handle = SDFRead(self.pf.sdf_handle)
+    @property
+    def _handle(self):
+        return self.pf.sdf_container
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
 
     def _read_particle_coords(self, chunks, ptf):
-        pass
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'].astype("float64"),
+                self._handle['y'].astype("float64"),
+                self._handle['z'].astype("float64"))
 
     def _read_particle_fields(self, chunks, ptf, selector):
-        pass
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x'].astype("float64")
+                y = self._handle['y'].astype("float64")
+                z = self._handle['z'].astype("float64")
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = self._handle[field][mask].astype("float64")
+                    yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):
-        pass
+        x, y, z = (self._handle[ax] for ax in 'xyz')
+        pcount = x.size
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        pos = np.empty((CHUNKSIZE, 3), dtype=x.dtype)
+        while ind < pcount:
+            npart = min(CHUNKSIZE, pcount - ind)
+            pos[:npart,0] = x[ind:ind+npart]
+            pos[:npart,1] = y[ind:ind+npart]
+            pos[:npart,2] = z[ind:ind+npart]
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:npart,0], pos[:npart,1], pos[:npart,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += CHUNKSIZE
+        return morton
 
     def _count_particles(self, data_file):
-        pass
+        return {'dark_matter': self._handle['x'].size}
 
     def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
         return fields, {}
 
 import re
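The index pass above deliberately walks the (memmapped) x/y/z columns in blocks of CHUNKSIZE = 32**3 particles, so only one small positions buffer is resident at a time. A stripped-down sketch of that access pattern (yt's compute_morton call and domain-overflow checks elided):

    import numpy as np

    CHUNKSIZE = 32**3

    def iter_position_chunks(x, y, z):
        # x, y, z may be memmaps; slicing reads only the bytes each chunk needs
        ind, pcount = 0, x.size
        while ind < pcount:
            npart = min(CHUNKSIZE, pcount - ind)
            pos = np.empty((npart, 3), dtype=x.dtype)
            pos[:, 0] = x[ind:ind + npart]
            pos[:, 1] = y[ind:ind + npart]
            pos[:, 2] = z[ind:ind + npart]
            yield ind, pos
            ind += npart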


https://bitbucket.org/yt_analysis/yt/commits/7f40ed0773aa/
Changeset:   7f40ed0773aa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 23:06:01
Summary:     Merging with 32-bit
Affected #:  15 files

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -379,9 +379,9 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -249,11 +249,11 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask
 
 class ParticleOctreeSubset(OctreeSubset):

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -171,9 +171,9 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -74,7 +74,7 @@
         pbool, idxa, idxb = _determine_field_size(pf, ftype, self.ls, ptmax)
         pstr = 'particle_position_%s'
         x,y,z = [self._get_field((ftype, pstr % ax)) for ax in 'xyz']
-        mask = selector.select_points(x, y, z)
+        mask = selector.select_points(x, y, z, 0.0)
         if self.caching:
             self.masks[key] = mask
             return self.masks[key]

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/artio/io.py
--- a/yt/frontends/artio/io.py
+++ b/yt/frontends/artio/io.py
@@ -64,7 +64,7 @@
                 for ptype, field_list in sorted(ptf.items()):
                     x, y, z = (np.asarray(rv[ptype][pn % ax], dtype="=f8")
                                for ax in 'XYZ')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = np.asarray(rv[ptype][field], "=f8")

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -104,7 +104,7 @@
                             r"particle_position_%s")
                     x, y, z = (np.asarray(pds.get(pn % ax).value, dtype="=f8")
                                for ax in 'xyz')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = np.asarray(pds.get(field).value, "=f8")

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -93,7 +93,7 @@
                 x = p_fields[start:end, px]
                 y = p_fields[start:end, py]
                 z = p_fields[start:end, pz]
-                mask = selector.select_points(x, y, z)
+                mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:
                     fi = self._particle_fields[field]

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/halo_catalogs/halo_catalog/io.py
--- a/yt/frontends/halo_catalogs/halo_catalog/io.py
+++ b/yt/frontends/halo_catalogs/halo_catalog/io.py
@@ -68,7 +68,7 @@
                     x = f['particle_position_x'].value.astype("float64")
                     y = f['particle_position_y'].value.astype("float64")
                     z = f['particle_position_z'].value.astype("float64")
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/halo_catalogs/rockstar/io.py
--- a/yt/frontends/halo_catalogs/rockstar/io.py
+++ b/yt/frontends/halo_catalogs/rockstar/io.py
@@ -74,7 +74,7 @@
                     x = halos['particle_position_x'].astype("float64")
                     y = halos['particle_position_y'].astype("float64")
                     z = halos['particle_position_z'].astype("float64")
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     del x, y, z
                     if mask is None: continue
                     for field in field_list:

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -77,7 +77,7 @@
                 for ptype, field_list in sorted(ptf.items()):
                     x, y, z = (np.asarray(rv[ptype, pn % ax], "=f8")
                                for ax in 'xyz')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     for field in field_list:
                         data = np.asarray(rv.pop((ptype, field))[mask], "=f8")
                         yield (ptype, field), data

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -99,7 +99,7 @@
                 g = f["/%s" % ptype]
                 coords = g["Coordinates"][:].astype("float64")
                 mask = selector.select_points(
-                            coords[:,0], coords[:,1], coords[:,2])
+                            coords[:,0], coords[:,1], coords[:,2], 0.0)
                 del coords
                 if mask is None: continue
                 for field in field_list:
@@ -281,7 +281,7 @@
                 pos = self._read_field_from_file(f,
                             tp[ptype], "Coordinates")
                 mask = selector.select_points(
-                    pos[:,0], pos[:,1], pos[:,2])
+                    pos[:,0], pos[:,1], pos[:,2], 0.0)
                 del pos
                 if mask is None: continue
                 for field in field_list:
@@ -534,7 +534,7 @@
                 mask = selector.select_points(
                     p["Coordinates"]['x'].astype("float64"),
                     p["Coordinates"]['y'].astype("float64"),
-                    p["Coordinates"]['z'].astype("float64"))
+                    p["Coordinates"]['z'].astype("float64"), 0.0)
                 if mask is None: continue
                 tf = self._fill_fields(field_list, p, mask, data_file)
                 for field in field_list:
@@ -751,7 +751,7 @@
                 c = np.frombuffer(s, dtype="float64")
                 c.shape = (c.shape[0]/3.0, 3)
                 mask = selector.select_points(
-                            c[:,0], c[:,1], c[:,2])
+                            c[:,0], c[:,1], c[:,2], 0.0)
                 del c
                 if mask is None: continue
                 for field in field_list:

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -85,7 +85,7 @@
                 for ptype, field_list in sorted(ptf.items()):
                     x, y, z  = (gf[ptype, "particle_position_%s" % ax]
                                 for ax in 'xyz')
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = np.asarray(gf[ptype, field])
@@ -127,7 +127,7 @@
             for ptype, field_list in sorted(ptf.items()):
                 x, y, z = (f[ptype, "particle_position_%s" % ax]
                            for ax in 'xyz')
-                mask = selector.select_points(x, y, z)
+                mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:
                     data = f[ptype, field][mask]

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -455,10 +455,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def count_points(self, np.ndarray[np.float64_t, ndim=1] x,
-                           np.ndarray[np.float64_t, ndim=1] y,
-                           np.ndarray[np.float64_t, ndim=1] z,
-                           np.float64_t radius = 0.0):
+    def count_points(self, np.ndarray[anyfloat, ndim=1] x,
+                           np.ndarray[anyfloat, ndim=1] y,
+                           np.ndarray[anyfloat, ndim=1] z,
+                           np.float64_t radius):
         cdef int count = 0
         cdef int i
         cdef np.float64_t pos[3]
@@ -483,10 +483,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def select_points(self, np.ndarray[np.float64_t, ndim=1] x,
-                            np.ndarray[np.float64_t, ndim=1] y,
-                            np.ndarray[np.float64_t, ndim=1] z,
-                            np.float64_t radius = 0.0):
+    def select_points(self, np.ndarray[anyfloat, ndim=1] x,
+                            np.ndarray[anyfloat, ndim=1] y,
+                            np.ndarray[anyfloat, ndim=1] z,
+                            np.float64_t radius):
         cdef int count = 0
         cdef int i
         cdef np.float64_t pos[3]

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -237,7 +237,9 @@
 
     __array_priority__ = 2.0
 
-    def __new__(cls, input_array, input_units=None, registry=None, dtype=np.float64):
+    def __new__(cls, input_array, input_units=None, registry=None, dtype=None):
+        if dtype is None:
+            dtype = getattr(input_array, 'dtype', np.float64)
         if input_array is NotImplemented:
             return input_array
         if registry is None and isinstance(input_units, basestring):

diff -r 15c5af50de9525c06253f224986c4719da9b66f8 -r 7f40ed0773aa9b161bdc665f1af2d02417020552 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -146,7 +146,7 @@
             # Here, ptype_map means which particles contribute to a given type.
             # And ptf is the actual fields from disk to read.
             for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
-                psize[ptype] += selector.count_points(x, y, z)
+                psize[ptype] += selector.count_points(x, y, z, 0.0)
             self._last_selector_counts = dict(**psize)
             self._last_selector_id = hash(selector)
         # Now we allocate
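The recurring change threaded through this merge is to the particle selector API: count_points and select_points now take the point radius as an explicit fourth argument (it previously defaulted to 0.0), and the Cython signatures accept float32 or float64 coordinate arrays via the anyfloat fused type. Call sites across the frontends therefore become:

    # x, y, z are the coordinate arrays already in hand in each reader;
    # the trailing 0.0 is the per-point radius that used to be a default.
    mask = selector.select_points(x, y, z, 0.0)
    count = selector.count_points(x, y, z, 0.0)

Accepting float32 directly is what lets the SDF reader hand its memmapped columns to the selector without upcasting in the following changeset.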


https://bitbucket.org/yt_analysis/yt/commits/9eebd797f295/
Changeset:   9eebd797f295
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 23:06:47
Summary:     Dropping back to 32 bits for SDF, if needed.
Affected #:  1 file

diff -r 7f40ed0773aa9b161bdc665f1af2d02417020552 -r 9eebd797f2956a6d8c837e99a2c68d5f6d873f15 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -51,9 +51,7 @@
         for data_file in data_files:
             pcount = self._handle['x'].size
             yield "dark_matter", (
-                self._handle['x'].astype("float64"),
-                self._handle['y'].astype("float64"),
-                self._handle['z'].astype("float64"))
+                self._handle['x'], self._handle['y'], self._handle['z'])
 
     def _read_particle_fields(self, chunks, ptf, selector):
         chunks = list(chunks)
@@ -67,9 +65,9 @@
         for data_file in data_files:
             pcount = self._handle['x'].size
             for ptype, field_list in sorted(ptf.items()):
-                x = self._handle['x'].astype("float64")
-                y = self._handle['y'].astype("float64")
-                z = self._handle['z'].astype("float64")
+                x = self._handle['x']
+                y = self._handle['y']
+                z = self._handle['z']
                 mask = selector.select_points(x, y, z, 0.0)
                 del x, y, z
                 if mask is None: continue
@@ -78,7 +76,7 @@
                         data = np.ones(mask.sum(), dtype="float64")
                         data *= self.pf.parameters["particle_mass"]
                     else:
-                        data = self._handle[field][mask].astype("float64")
+                        data = self._handle[field][mask]
                     yield (ptype, field), data
 
     def _initialize_index(self, data_file, regions):


https://bitbucket.org/yt_analysis/yt/commits/30d6f32ff3fc/
Changeset:   30d6f32ff3fc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 23:21:46
Summary:     Fixing setup.py for SDF
Affected #:  1 file

diff -r 9eebd797f2956a6d8c837e99a2c68d5f6d873f15 -r 30d6f32ff3fc0bd642fbb3763295743c7933ea68 yt/frontends/sdf/setup.py
--- a/yt/frontends/sdf/setup.py
+++ b/yt/frontends/sdf/setup.py
@@ -7,14 +7,7 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('sph', parent_package, top_path)
-    config.add_extension("smoothing_kernel",
-        ["yt/frontends/sph/smoothing_kernel.pyx"],
-        include_dirs=["yt/frontends/sph/",
-                      "yt/geometry/",
-                      "yt/utilities/lib/"],
-        depends=glob.glob("yt/geometry/*.pxd"),
-        )
+    config = Configuration('sdf', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


https://bitbucket.org/yt_analysis/yt/commits/2a6802614794/
Changeset:   2a6802614794
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-24 23:23:15
Summary:     Removing a bunch of unnecessary code.
Affected #:  2 files

diff -r 30d6f32ff3fc0bd642fbb3763295743c7933ea68 -r 2a68026147940acb6dacba1b32d42f82dd32ac52 yt/frontends/sdf/__init__.py
--- a/yt/frontends/sdf/__init__.py
+++ b/yt/frontends/sdf/__init__.py
@@ -1,5 +1,5 @@
 """
-API for yt.frontends.gadget
+__init__ for yt.frontends.sdf
 
 
 

diff -r 30d6f32ff3fc0bd642fbb3763295743c7933ea68 -r 2a68026147940acb6dacba1b32d42f82dd32ac52 yt/frontends/sdf/definitions.py
--- a/yt/frontends/sdf/definitions.py
+++ b/yt/frontends/sdf/definitions.py
@@ -1,58 +0,0 @@
-
-gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
-ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
-                 "PartType4", "PartType5")
-
-gadget_header_specs = dict(
-    default      = (('Npart', 6, 'i'),
-                    ('Massarr', 6, 'd'),
-                    ('Time', 1, 'd'),
-                    ('Redshift', 1, 'd'),
-                    ('FlagSfr', 1, 'i'),
-                    ('FlagFeedback', 1, 'i'),
-                    ('Nall', 6, 'i'),
-                    ('FlagCooling', 1, 'i'),
-                    ('NumFiles', 1, 'i'),
-                    ('BoxSize', 1, 'd'),
-                    ('Omega0', 1, 'd'),
-                    ('OmegaLambda', 1, 'd'),
-                    ('HubbleParam', 1, 'd'),
-                    ('FlagAge', 1, 'i'),
-                    ('FlagMEtals', 1, 'i'),
-                    ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i')),
-    pad32       = (('empty',  32, 'c'),),
-    pad64       = (('empty',  64, 'c'),),
-    pad128      = (('empty', 128, 'c'),),
-    pad256      = (('empty', 256, 'c'),),
-)
-
-gadget_ptype_specs = dict(
-    default = ( "Gas",
-                "Halo",
-                "Disk",
-                "Bulge",
-                "Stars",
-                "Bndry" )
-)
-
-gadget_field_specs = dict(
-    default = ( "Coordinates",
-                "Velocities",
-                "ParticleIDs",
-                "Mass",
-                ("InternalEnergy", "Gas"),
-                ("Density", "Gas"),
-                ("SmoothingLength", "Gas"),
-    ),
-    agora_unlv = ( "Coordinates",
-                   "Velocities",
-                   "ParticleIDs",
-                   "Mass",
-                   ("InternalEnergy", "Gas"),
-                   ("Density", "Gas"),
-                   ("Electron_Number_Density", "Gas"),
-                   ("HI_NumberDensity", "Gas"),
-                   ("SmoothingLength", "Gas"),
-    )
-)


https://bitbucket.org/yt_analysis/yt/commits/57385164bb13/
Changeset:   57385164bb13
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-25 01:08:56
Summary:     A few fixes for 32/64 bit overflows for a big, deep hierarchy.
Affected #:  2 files

diff -r 2a68026147940acb6dacba1b32d42f82dd32ac52 -r 57385164bb13b6aaafc2d8033a5cd1424400529f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -267,19 +267,20 @@
                   ):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3], level
+        cdef int ind32[3]
         cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
         cdef int i
         cur = next = NULL
-        level = -1
+        cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
-        self.get_root(ind, &next)
+            ind32[i] = ind[i]
+        self.get_root(ind32, &next)
         # We want to stop recursing when there's nowhere else to go
         while next != NULL:
             level += 1
@@ -345,7 +346,8 @@
         # grow a stack of parent Octs.
         # Note that in the first iteration, we will just find the up-to-27
         # neighbors, including the main oct.
-        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef np.int64_t i, j, k, n, level, ii, nfound = 0, dlevel
+        cdef int ind[3]
         cdef OctList *olist, *my_list
         my_list = olist = NULL
         cdef Oct *cand
@@ -379,9 +381,10 @@
                     # correctly, but we might.
                     if cand == NULL: continue
                     for level in range(1, oi.level+1):
+                        dlevel = oi.level - level
                         if cand.children == NULL: break
                         for n in range(3):
-                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                            ind[n] = (npos[n] >> dlevel) & 1
                         ii = cind(ind[0],ind[1],ind[2])
                         if cand.children[ii] == NULL: break
                         cand = cand.children[ii]
@@ -920,7 +923,7 @@
     # For now, we assume we will not be doing this along all three zeros,
     # because that would be pretty tricky.
     if i == j == k == 0: return olist
-    cdef int n[3], ind[3], off[3][2], ii, ij, ik, ci
+    cdef np.int64_t n[3], ind[3], off[3][2], ii, ij, ik, ci
     ind[0] = 1 - i
     ind[1] = 1 - j
     ind[2] = 1 - k

diff -r 2a68026147940acb6dacba1b32d42f82dd32ac52 -r 57385164bb13b6aaafc2d8033a5cd1424400529f yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -625,6 +625,7 @@
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
@@ -633,7 +634,7 @@
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
                                 int domain_id = -1, int domain_offset = 0,
                                 periodicity = (True, True, True),
-                                minimum_count = 8):
+                                int minimum_count = 8):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -641,6 +642,7 @@
         cdef ContourID *c0, *c1
         cdef np.int64_t moff = octree.get_domain_offset(domain_id + domain_offset)
         cdef np.int64_t i, j, k, n, nneighbors, pind0, offset
+        cdef int counter = 0
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
@@ -653,6 +655,7 @@
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
             self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
+            counter += 1
             container[i] = NULL
             for j in range(3):
                 pos[j] = positions[i, j]
@@ -672,12 +675,12 @@
                 doff[offset] = i
         cdef int nsize = 27
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
-        cdef int counter = 0
+        counter = 0
         for i in range(doff.shape[0]):
             counter += 1
-            if counter == 10000:
+            if counter >= 100000 or counter == 0:
                 counter = 0
-                #print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
+                print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             # Any particles found for this oct?
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -729,7 +732,8 @@
                 poffset = doff[i] + j
                 c1 = container[poffset]
                 c0 = contour_find(c1)
-                contour_ids[pind[poffset]] = c0.contour_id
+                offset = ipind[poffset]
+                contour_ids[offset] = c0.contour_id
                 c0.count += 1
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
@@ -738,11 +742,13 @@
                 c1 = container[poffset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
+                offset = ipind[poffset]
                 if c0.count < minimum_count:
-                    contour_ids[pind[poffset]] = -1
+                    contour_ids[offset] = -1
         free(container)
         return contour_ids
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
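The widening from int to np.int64_t above is the substance of this fix: for a large, deep octree, indices that are built up by per-level shifts can exceed what a signed 32-bit int holds, so loop counters and position indices are promoted to 64 bits. A trivial numpy illustration of the range difference (not yt code):

    import numpy as np

    print np.iinfo(np.int32).max     # 2147483647 -- the 32-bit ceiling
    print np.int64(1) << 40          # 1099511627776, comfortably within 64 bits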


https://bitbucket.org/yt_analysis/yt/commits/cdc5e67dfee7/
Changeset:   cdc5e67dfee7
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-25 01:59:24
Summary:     Add sdf to frontends/setup, and fix which hubble constant to grab.
Affected #:  2 files

diff -r 57385164bb13b6aaafc2d8033a5cd1424400529f -r cdc5e67dfee70b7f18bb3163f0ae0f4668ce3f5c yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -92,7 +92,7 @@
         self.current_redshift = self.parameters["redshift"]
         self.omega_lambda = self.parameters["Omega0_lambda"]
         self.omega_matter = self.parameters["Omega0_m"]
-        self.hubble_constant = self.parameters["hubble"]
+        self.hubble_constant = self.parameters["h_100"]
         # Now we calculate our time based on the cosmology.
         cosmo = Cosmology(self.hubble_constant,
                           self.omega_matter, self.omega_lambda)

diff -r 57385164bb13b6aaafc2d8033a5cd1424400529f -r cdc5e67dfee70b7f18bb3163f0ae0f4668ce3f5c yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -22,6 +22,7 @@
 #    config.add_subpackage("artio2")
     config.add_subpackage("pluto")
     config.add_subpackage("ramses")
+    config.add_subpackage("sdf")
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")


https://bitbucket.org/yt_analysis/yt/commits/06ca215f8085/
Changeset:   06ca215f8085
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-25 04:00:51
Summary:     Minor fixes for sdf. Rockstar interface should detect velocity units.
Affected #:  2 files

diff -r cdc5e67dfee70b7f18bb3163f0ae0f4668ce3f5c -r 06ca215f80856efd9cc80feaca184d1a71d7847b yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -204,7 +204,7 @@
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf.length_unit.in_units("Mpccm/h")
-    conv[3] = conv[4] = conv[5] = 1e-5
+    conv[3] = conv[4] = conv[5] = pf.velocity_unit.in_units("km/s")
     left_edge[0] = pf.domain_left_edge[0]
     left_edge[1] = pf.domain_left_edge[1]
     left_edge[2] = pf.domain_left_edge[2]

diff -r cdc5e67dfee70b7f18bb3163f0ae0f4668ce3f5c -r 06ca215f80856efd9cc80feaca184d1a71d7847b yt/frontends/sdf/fields.py
--- a/yt/frontends/sdf/fields.py
+++ b/yt/frontends/sdf/fields.py
@@ -43,5 +43,5 @@
         ("vx", ("code_velocity", ["particle_velocity_x"], None)),
         ("vy", ("code_velocity", ["particle_velocity_y"], None)),
         ("vz", ("code_velocity", ["particle_velocity_z"], None)),
-        ("ident", ("", ["particle_identifier"], None)),
+        ("ident", ("", ["particle_index"], None)),
     )
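On the Rockstar side of this changeset, the hard-coded factor 1e-5 presumably assumed velocities stored in cm/s (1 cm/s = 1e-5 km/s); deriving the factor from the dataset's velocity_unit lets each frontend's own units drive the conversion. In effect:

    conv_v = pf.velocity_unit.in_units("km/s")   # replaces the fixed 1e-5 cm/s -> km/s factor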


https://bitbucket.org/yt_analysis/yt/commits/39c495c7a0dc/
Changeset:   39c495c7a0dc
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-25 12:47:12
Summary:     Adding early-termination to FOF.
Affected #:  4 files

diff -r 06ca215f80856efd9cc80feaca184d1a71d7847b -r 39c495c7a0dc5c0e8756f943597ba8bda03f69ea yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -67,7 +67,8 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int num_domains
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?,
+                  int max_level = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors,
                          Oct *o)

diff -r 06ca215f80856efd9cc80feaca184d1a71d7847b -r 39c495c7a0dc5c0e8756f943597ba8bda03f69ea yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -264,7 +264,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
-                  ):
+                  int max_level = 99):
         #Given a floating point position, retrieve the most
         #refined oct at that time
         cdef int ind32[3]
@@ -282,7 +282,7 @@
             ind32[i] = ind[i]
         self.get_root(ind32, &next)
         # We want to stop recursing when there's nowhere else to go
-        while next != NULL:
+        while next != NULL and level <= max_level:
             level += 1
             for i in range(3):
                 ipos[i] = (ipos[i] << 1) + ind[i]
@@ -357,6 +357,7 @@
         # ndim is the oct dimensions of the level, not the cell dimensions.
         for i in range(3):
             ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            # Here we adjust for oi.dds meaning *cell* width.
             ndim[i] = (ndim[i] >> self.oref)
         my_list = olist = OctList_append(NULL, o)
         for i in range(3):
@@ -911,6 +912,7 @@
                 domain_left_edge, domain_right_edge, partial_coverage,
                  over_refine)
         self.fill_func = oct_visitors.fill_file_indices_rind
+
 cdef OctList *OctList_subneighbor_find(OctList *olist, Oct *top,
                                        int i, int j, int k):
     if top.children == NULL: return olist

diff -r 06ca215f80856efd9cc80feaca184d1a71d7847b -r 39c495c7a0dc5c0e8756f943597ba8bda03f69ea yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -36,6 +36,9 @@
     np.int64_t pn       # Particle number
     np.float64_t r2     # radius**2
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
 cdef inline np.float64_t r2dist(np.float64_t ppos[3],
                                 np.float64_t cpos[3],
                                 np.float64_t DW[3],

diff -r 06ca215f80856efd9cc80feaca184d1a71d7847b -r 39c495c7a0dc5c0e8756f943597ba8bda03f69ea yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -29,7 +29,7 @@
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
 
-cdef ContourID *contour_create(np.int64_t contour_id,
+cdef inline ContourID *contour_create(np.int64_t contour_id,
                                ContourID *prev = NULL):
     node = <ContourID *> malloc(sizeof(ContourID))
     #print "Creating contour with id", contour_id
@@ -40,12 +40,12 @@
     if prev != NULL: prev.next = node
     return node
 
-cdef void contour_delete(ContourID *node):
+cdef inline void contour_delete(ContourID *node):
     if node.prev != NULL: node.prev.next = node.next
     if node.next != NULL: node.next.prev = node.prev
     free(node)
 
-cdef ContourID *contour_find(ContourID *node):
+cdef inline ContourID *contour_find(ContourID *node):
     cdef ContourID *temp, *root
     root = node
     # First we find the root
@@ -61,7 +61,7 @@
         node = temp
     return root
 
-cdef void contour_union(ContourID *node1, ContourID *node2):
+cdef inline void contour_union(ContourID *node1, ContourID *node2):
     node1 = contour_find(node1)
     node2 = contour_find(node2)
     if node1.contour_id < node2.contour_id:
@@ -69,7 +69,7 @@
     elif node2.contour_id < node1.contour_id:
         node1.parent = node2
 
-cdef int candidate_contains(CandidateContour *first,
+cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
                             np.int64_t join_id = -1):
     while first != NULL:
@@ -78,7 +78,7 @@
         first = first.next
     return 0
 
-cdef CandidateContour *candidate_add(CandidateContour *first,
+cdef inline CandidateContour *candidate_add(CandidateContour *first,
                                      np.int64_t contour_id,
                                      np.int64_t join_id = -1):
     cdef CandidateContour *node
@@ -659,7 +659,7 @@
             container[i] = NULL
             for j in range(3):
                 pos[j] = positions[i, j]
-            oct = octree.get(pos)
+            oct = octree.get(pos, NULL)
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             offset = oct.domain_ind - moff
@@ -676,11 +676,13 @@
         cdef int nsize = 27
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
         counter = 0
+        cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
+        cdef int inside, skip_early
         for i in range(doff.shape[0]):
-            counter += 1
-            if counter >= 100000 or counter == 0:
+            if counter >= frac:
                 counter = 0
                 print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
+            counter += 1
             # Any particles found for this oct?
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -706,13 +708,23 @@
                     break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
-            # Now we look at each particle.
+            # We might know that all our internal particles are linked.
+            # Otherwise, we look at each particle.
+            inside = 0
+            for j in range(3):
+                if oi.dds[j] * (1 << octree.oref) > self.linking_length:
+                    inside += 1
             for j in range(pcount[i]):
                 # Note that this offset is the particle index
                 pind0 = pind[doff[i] + j]
                 # Look at each neighboring oct
                 for k in range(nneighbors):
                     if nind[k] == -1: continue
+                    # If all internal particles are inside the linking length
+                    # and we are on the self-Oct, we supply an early skip.
+                    skip_early = 0
+                    if inside == 3 and nind[k] == i:
+                        skip_early = 1
                     offset = doff[nind[k]]
                     if offset < 0: continue
                     # NOTE: doff[i] will not monotonically increase.  So we
@@ -722,7 +734,7 @@
                                         fpos, ipind,
                                         pcount[nind[k]], 
                                         offset, pind0, 
-                                        doff[i] + j)
+                                        doff[i] + j, skip_early)
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
         contour_ids = -1 * np.ones(positions.shape[0], dtype="int64")
         # Sort on our particle IDs.
@@ -757,9 +769,11 @@
                                    np.int64_t pcount, 
                                    np.int64_t noffset,
                                    np.int64_t pind0,
-                                   np.int64_t poffset):
+                                   np.int64_t poffset,
+                                   int skip_early):
         # Now we look at each particle and evaluate it
-        cdef np.float64_t pos0[3], pos1[3], d
+        cdef np.float64_t pos0[3], pos1[3], edges[2][3]
+        cdef int link
         cdef ContourID *c0, *c1
         cdef np.int64_t pind1
         cdef int i, j, k
@@ -771,23 +785,61 @@
             self.last = c0
             if self.first == NULL:
                 self.first = c0
-        c0 = contour_find(c0)
-        container[pind0] = c0
+        if skip_early == 1:
+            for i in range(pcount):
+                pind1 = pind[noffset + i]
+                container[pind1] = c0
+            return
         for i in range(3):
             pos0[i] = positions[pind0*3 + i]
+            edges[0][i] = pos0[i] - self.linking_length
+            edges[1][i] = pos0[i] + self.linking_length
+        # Lets set up some bounds for the particles.  Maybe we can get away
+        # with reducing our number of calls to r2dist_early.
         for i in range(pcount):
             pind1 = pind[noffset + i]
             if pind1 == pind0: continue
+            c1 = container[pind1]
+            if c1 != NULL and c1.contour_id == c0.contour_id:
+                # Already linked.
+                continue
             for j in range(3):
                 pos1[j] = positions[pind1*3 + j]
-            d = r2dist(pos0, pos1, self.DW, self.periodicity)
-            if d > self.linking_length2:
-                continue
-            c1 = container[pind1]
+            link = r2dist_early(pos0, pos1, self.DW, self.periodicity,
+                                self.linking_length2, edges)
+            if link == 0: continue
             if c1 == NULL:
-                container[pind1] = c1 = contour_create(
-                    noffset + i, self.last)
-            contour_union(c0, c1)
-            c0 = c1 = contour_find(c0)
-            container[pind1] = c0
-            container[pind0] = c0
+                container[pind1] = c0
+            elif c0.contour_id != c1.contour_id:
+                contour_union(c0, c1)
+                c0 = container[pind1] = container[pind0] = contour_find(c0)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline int r2dist_early(np.float64_t ppos[3],
+                             np.float64_t cpos[3],
+                             np.float64_t DW[3],
+                             bint periodicity[3],
+                             np.float64_t max_r2,
+                             np.float64_t edges[2][3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    cdef int inside = 0
+    for i in range(3):
+        if cpos[i] < edges[0][i]:
+            return 0
+        if cpos[i] > edges[1][i]:
+            return 0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if r2 > max_r2: return 0
+    return 1

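The early termination added here rests on the new r2dist_early helper: before paying for the full periodic distance, each candidate particle is tested against a precomputed bounding box around the source particle (edges), and the accumulating squared distance is abandoned as soon as it exceeds the squared linking length. A pure-Python sketch of the same test, for illustration only (argument names mirror the Cython signature):

    def r2dist_early(ppos, cpos, DW, periodicity, max_r2, edges):
        # Cheap rejection first: candidates outside the bounding box
        # around ppos cannot be within the linking length.
        for i in range(3):
            if cpos[i] < edges[0][i] or cpos[i] > edges[1][i]:
                return 0
        r2 = 0.0
        for i in range(3):
            dr = ppos[i] - cpos[i]
            if periodicity[i]:
                # Minimum-image convention for periodic domains.
                if dr > DW[i] / 2.0:
                    dr -= DW[i]
                elif dr < -DW[i] / 2.0:
                    dr += DW[i]
            r2 += dr * dr
            if r2 > max_r2:
                # Early termination: no need to finish the sum.
                return 0
        return 1

Note that the box test happens before the periodic wrap, which is why changeset 8735b46b1f5e below widens the box to the whole domain for particles near the boundary.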

https://bitbucket.org/yt_analysis/yt/commits/555c1d3c5dee/
Changeset:   555c1d3c5dee
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-25 13:30:35
Summary:     Removing some of the early-termination logic.
Affected #:  1 file

diff -r 39c495c7a0dc5c0e8756f943597ba8bda03f69ea -r 555c1d3c5dee70d0b53c787406343025f149b73a yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -710,21 +710,12 @@
             free(neighbors)
             # We might know that all our internal particles are linked.
             # Otherwise, we look at each particle.
-            inside = 0
-            for j in range(3):
-                if oi.dds[j] * (1 << octree.oref) > self.linking_length:
-                    inside += 1
             for j in range(pcount[i]):
                 # Note that this offset is the particle index
                 pind0 = pind[doff[i] + j]
                 # Look at each neighboring oct
                 for k in range(nneighbors):
                     if nind[k] == -1: continue
-                    # If all internal particles are inside the linking length
-                    # and we are on the self-Oct, we supply an early skip.
-                    skip_early = 0
-                    if inside == 3 and nind[k] == i:
-                        skip_early = 1
                     offset = doff[nind[k]]
                     if offset < 0: continue
                     # NOTE: doff[i] will not monotonically increase.  So we
@@ -734,7 +725,7 @@
                                         fpos, ipind,
                                         pcount[nind[k]], 
                                         offset, pind0, 
-                                        doff[i] + j, skip_early)
+                                        doff[i] + j)
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
         contour_ids = -1 * np.ones(positions.shape[0], dtype="int64")
         # Sort on our particle IDs.
@@ -769,8 +760,7 @@
                                    np.int64_t pcount, 
                                    np.int64_t noffset,
                                    np.int64_t pind0,
-                                   np.int64_t poffset,
-                                   int skip_early):
+                                   np.int64_t poffset):
         # Now we look at each particle and evaluate it
         cdef np.float64_t pos0[3], pos1[3], edges[2][3]
         cdef int link
@@ -785,15 +775,12 @@
             self.last = c0
             if self.first == NULL:
                 self.first = c0
-        if skip_early == 1:
-            for i in range(pcount):
-                pind1 = pind[noffset + i]
-                container[pind1] = c0
-            return
+        c0 = container[pind0] = contour_find(c0)
         for i in range(3):
+            # We make a very conservative guess here about the edges.
             pos0[i] = positions[pind0*3 + i]
-            edges[0][i] = pos0[i] - self.linking_length
-            edges[1][i] = pos0[i] + self.linking_length
+            edges[0][i] = pos0[i] - self.linking_length/2.0
+            edges[1][i] = pos0[i] + self.linking_length/2.0
         # Lets set up some bounds for the particles.  Maybe we can get away
         # with reducing our number of calls to r2dist_early.
         for i in range(pcount):


https://bitbucket.org/yt_analysis/yt/commits/8735b46b1f5e/
Changeset:   8735b46b1f5e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-25 13:56:40
Summary:     Disable edge checking near boundaries.
Affected #:  1 file

diff -r 555c1d3c5dee70d0b53c787406343025f149b73a -r 8735b46b1f5e9aec14ac8f1826794a9f565e3d5d yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -617,7 +617,7 @@
 
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
-    cdef np.float64_t DW[3]
+    cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
 
     def __init__(self, linking_length):
@@ -653,6 +653,8 @@
             sizeof(ContourID*) * positions.shape[0])
         for i in range(3):
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+            self.DLE[i] = octree.DLE[i]
+            self.DRE[i] = octree.DRE[i]
             self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
             counter += 1
@@ -779,8 +781,12 @@
         for i in range(3):
             # We make a very conservative guess here about the edges.
             pos0[i] = positions[pind0*3 + i]
-            edges[0][i] = pos0[i] - self.linking_length/2.0
-            edges[1][i] = pos0[i] + self.linking_length/2.0
+            edges[0][i] = pos0[i] - self.linking_length*1.01
+            edges[1][i] = pos0[i] + self.linking_length*1.01
+            if edges[0][i] < self.DLE[i] or edges[0][i] > self.DRE[i]:
+                # We skip this one, since we're close to the boundary
+                edges[0][i] = -1e30
+                edges[1][i] = 1e30
         # Lets set up some bounds for the particles.  Maybe we can get away
         # with reducing our number of calls to r2dist_early.
         for i in range(pcount):


https://bitbucket.org/yt_analysis/yt/commits/1d801f0ff437/
Changeset:   1d801f0ff437
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-25 20:18:49
Summary:     Realloc plays havoc with pointers!
Affected #:  1 file

diff -r 8735b46b1f5e9aec14ac8f1826794a9f565e3d5d -r 1d801f0ff437bb3cc80be09db2bd89be0abb0d94 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -213,9 +213,8 @@
         cdef np.int64_t num_particles = pid.shape[0]
 
         # Allocate space for correct number of particles
-        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
         cdef fof fof_obj
-        fof_obj.particles = particles
+        fof_obj.particles = <particle*> malloc(npart_max * sizeof(particle))
 
         cdef np.int64_t last_fof_tag = 1
         cdef np.int64_t k = 0
@@ -228,14 +227,14 @@
                     fof_obj.num_p = k
                     find_subs(&fof_obj)
                 k = 0
-            particles[k].id = pid[i]
+            fof_obj.particles[k].id = k
 
             # fill in locations & velocities
             for j in range(3):
-                particles[k].pos[j] = pos[i,j]
-                particles[k].pos[j+3] = vel[i,j]
+                fof_obj.particles[k].pos[j] = pos[i,j]
+                fof_obj.particles[k].pos[j+3] = vel[i,j]
             k += 1
-        free(particles)
+        free(fof_obj.particles)
 
 
 


https://bitbucket.org/yt_analysis/yt/commits/56ff9bac46e3/
Changeset:   56ff9bac46e3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-26 23:01:25
Summary:     Adding nicer options to memory checkers.
Affected #:  1 file

diff -r 1d801f0ff437bb3cc80be09db2bd89be0abb0d94 -r 56ff9bac46e34a0a927b7a390dbe576f4cef7995 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -117,7 +117,7 @@
 except ImportError:
     pass
 
-def get_memory_usage():
+def get_memory_usage(subtract_share = False):
     """
     Returning resident size in megabytes
     """
@@ -131,6 +131,7 @@
         return -1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
+    if subtract_share: resident -= share
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
@@ -682,7 +683,7 @@
     return s
 
 @contextlib.contextmanager
-def memory_checker(interval = 15):
+def memory_checker(interval = 15, dest = None):
     r"""This is a context manager that monitors memory usage.
 
     Parameters
@@ -700,6 +701,8 @@
     ...     del arr
     """
     import threading
+    if dest is None:
+        dest = sys.stdout
     class MemoryChecker(threading.Thread):
         def __init__(self, event, interval):
             self.event = event
@@ -708,7 +711,7 @@
 
         def run(self):
             while not self.event.wait(self.interval):
-                print "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
+                print >> dest, "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
 
     e = threading.Event()
     mem_check = MemoryChecker(e, interval)

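The new dest keyword lets the periodic MEMORY report go somewhere other than stdout. A hedged usage sketch (the log filename and array size are placeholders):

    import numpy as np
    from yt.funcs import memory_checker

    with open("fof_memory.log", "w") as log:
        with memory_checker(interval=5, dest=log):
            arr = np.zeros((1024, 1024, 64), dtype="float64")
            # ... expensive work here ...
            del arr
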

https://bitbucket.org/yt_analysis/yt/commits/e0fafb8b11d1/
Changeset:   e0fafb8b11d1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-26 23:01:41
Summary:     Reduce memory overhead in the FOF routine.
Affected #:  1 file

diff -r 56ff9bac46e34a0a927b7a390dbe576f4cef7995 -r e0fafb8b11d142c8783ee2ac9100a31331b4435b yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -28,6 +28,7 @@
 from .amr_kdtools cimport _find_node, Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
+import sys
 
 cdef inline ContourID *contour_create(np.int64_t contour_id,
                                ContourID *prev = NULL):
@@ -646,7 +647,8 @@
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
-        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        pdoms = np.zeros(positions.shape[0], dtype="int64")
+        pdoms -= 1
         cdef np.int64_t *pdom = <np.int64_t*> pdoms.data
         # First we allocate our container
         cdef ContourID **container = <ContourID**> malloc(
@@ -675,15 +677,17 @@
             offset = pdoms[pind[i]]
             if doff[offset] < 0:
                 doff[offset] = i
+        del pdoms
         cdef int nsize = 27
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
         counter = 0
         cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
+        print >> sys.stderr, "Will be outputting every", frac
         cdef int inside, skip_early
         for i in range(doff.shape[0]):
             if counter >= frac:
                 counter = 0
-                print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
+                print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             counter += 1
             # Any particles found for this oct?
             if doff[i] < 0: continue
@@ -729,7 +733,8 @@
                                         offset, pind0, 
                                         doff[i] + j)
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
-        contour_ids = -1 * np.ones(positions.shape[0], dtype="int64")
+        contour_ids = np.ones(positions.shape[0], dtype="int64")
+        contour_ids *= -1
         # Sort on our particle IDs.
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
@@ -751,6 +756,7 @@
                 if c0.count < minimum_count:
                     contour_ids[offset] = -1
         free(container)
+        del pind
         return contour_ids
 
     @cython.cdivision(True)


https://bitbucket.org/yt_analysis/yt/commits/01be1a28435d/
Changeset:   01be1a28435d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-27 01:16:04
Summary:     We now calculate the maximum FOF count on the fly.
Affected #:  2 files

diff -r e0fafb8b11d142c8783ee2ac9100a31331b4435b -r 01be1a28435dc03fc2b766b6da18a786805fd0a2 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -199,42 +199,60 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
                                 np.ndarray[np.float64_t, ndim=2] pos,
-                                np.ndarray[np.float64_t, ndim=2] vel,
-                                np.ndarray[np.int64_t, ndim=1] fof_tags,
-                                np.int64_t nfof,
-                                np.int64_t npart_max):
+                                np.ndarray[np.float64_t, ndim=2] vel):
 
         # Define fof object
 
         # Find number of particles
-        cdef np.int64_t i, j
-        cdef np.int64_t num_particles = pid.shape[0]
+        cdef np.int64_t i, j, k, ind
+        cdef np.int64_t num_particles = pind.shape[0]
 
         # Allocate space for correct number of particles
         cdef fof fof_obj
-        fof_obj.particles = <particle*> malloc(npart_max * sizeof(particle))
 
-        cdef np.int64_t last_fof_tag = 1
-        cdef np.int64_t k = 0
-        for i in range(num_particles):
-            if fof_tags[i] < 0:
+        cdef np.int64_t max_count = 0
+        cdef np.int64_t local_tag, last_fof_tag = -1
+        fof_obj.num_p = 0
+        last_fof_tag
+        j = 0
+        # We're going to do one iteration to get the most frequent value.
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Don't count the null group
+            if local_tag == -1: continue
+            if local_tag != last_fof_tag:
+                if j > max_count:
+                    max_count = j
+                last_fof_tag = local_tag
+                j = 1
+            else:
+                j += 1
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
+        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
+        j = 0
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Skip this one -- it means no group.
+            if local_tag == -1:
                 continue
-            if fof_tags[i] != last_fof_tag:
-                last_fof_tag = fof_tags[i]
-                if k > 16:
-                    fof_obj.num_p = k
-                    find_subs(&fof_obj)
-                k = 0
-            fof_obj.particles[k].id = k
-
-            # fill in locations & velocities
-            for j in range(3):
-                fof_obj.particles[k].pos[j] = pos[i,j]
-                fof_obj.particles[k].pos[j+3] = vel[i,j]
-            k += 1
+            for k in range(3):
+                fof_obj.particles[j].pos[k] = pos[ind,k]
+                fof_obj.particles[j].pos[k+3] = vel[ind,k]
+            fof_obj.particles[k].id = -1
+            fof_obj.num_p += 1
+            j += 1
+            # Now we check if we're the last one
+            if i == pind.shape[0] or fof_tags[pind[i+1]] != local_tag:
+                #print >> sys.stderr, \
+                #    "Finding subs on %s particles from %s out of %s" % (
+                #    fof_obj.num_p, i, pind.shape[0])
+                find_subs(&fof_obj)
+                # Now we reset
+                fof_obj.num_p = 0
+                j = 0
         free(fof_obj.particles)
-
-
-

diff -r e0fafb8b11d142c8783ee2ac9100a31331b4435b -r 01be1a28435dc03fc2b766b6da18a786805fd0a2 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -21,6 +21,7 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         define_macros = [("THREADSAFE", "__thread")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
@@ -28,6 +29,7 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         define_macros = [("THREADSAFE", "__thread")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])

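The sizing pass above walks the particles in FOF-tag order (via pind) and tracks the longest run of identical tags, so that a single buffer of exactly that size can be allocated rather than a worst-case npart_max buffer. A NumPy sketch of the same computation, for illustration (the helper name is made up; the null group tag of -1 is skipped as in the Cython loop):

    import numpy as np

    def max_group_size(fof_tags, pind):
        tags = fof_tags[pind]          # tags in grouped (sorted) order
        tags = tags[tags != -1]        # drop the null group
        if tags.size == 0:
            return 0
        # Positions where the tag changes mark group boundaries.
        changes = np.where(np.diff(tags) != 0)[0] + 1
        starts = np.concatenate(([0], changes))
        ends = np.concatenate((changes, [tags.size]))
        return int((ends - starts).max())

The last group is included here by construction; changeset 3f9e2601b34b below fixes the same corner case in the running-count version.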

https://bitbucket.org/yt_analysis/yt/commits/d3a29ece2ea8/
Changeset:   d3a29ece2ea8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-27 01:18:43
Summary:     Fixing k/j index.
Affected #:  1 file

diff -r 01be1a28435dc03fc2b766b6da18a786805fd0a2 -r d3a29ece2ea8c456c2c4761de04fbe4bd5155f5f yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -243,7 +243,7 @@
             for k in range(3):
                 fof_obj.particles[j].pos[k] = pos[ind,k]
                 fof_obj.particles[j].pos[k+3] = vel[ind,k]
-            fof_obj.particles[k].id = -1
+            fof_obj.particles[j].id = -1
             fof_obj.num_p += 1
             j += 1
             # Now we check if we're the last one


https://bitbucket.org/yt_analysis/yt/commits/1969a32eb322/
Changeset:   1969a32eb322
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-27 01:41:44
Summary:     Be extra careful with checking tags.
Affected #:  1 file

diff -r d3a29ece2ea8c456c2c4761de04fbe4bd5155f5f -r 1969a32eb3221de50a2e5b7f0d6d6fb309af84a3 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -240,6 +240,11 @@
             # Skip this one -- it means no group.
             if local_tag == -1:
                 continue
+            if i == pind.shape[0] - 1:
+                next_tag = local_tag + 1
+            else:
+                offset = pind[i+1]
+                next_tag = fof_tags[offset]
             for k in range(3):
                 fof_obj.particles[j].pos[k] = pos[ind,k]
                 fof_obj.particles[j].pos[k+3] = vel[ind,k]
@@ -247,12 +252,11 @@
             fof_obj.num_p += 1
             j += 1
             # Now we check if we're the last one
-            if i == pind.shape[0] or fof_tags[pind[i+1]] != local_tag:
+            if local_tag != next_tag:
                 #print >> sys.stderr, \
                 #    "Finding subs on %s particles from %s out of %s" % (
                 #    fof_obj.num_p, i, pind.shape[0])
                 find_subs(&fof_obj)
                 # Now we reset
-                fof_obj.num_p = 0
-                j = 0
+                fof_obj.num_p = j = 0
         free(fof_obj.particles)


https://bitbucket.org/yt_analysis/yt/commits/eb811185d337/
Changeset:   eb811185d337
Branch:      yt-3.0
User:        mswarren
Date:        2014-04-27 06:11:43
Summary:     Fix bounds check
Affected #:  1 file

diff -r 1969a32eb3221de50a2e5b7f0d6d6fb309af84a3 -r eb811185d337389c88b798ccbb2a24678f5ca20a yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -84,12 +84,12 @@
         pcount = x.size
         morton = np.empty(pcount, dtype='uint64')
         ind = 0
-        pos = np.empty((CHUNKSIZE, 3), dtype=x.dtype)
         while ind < pcount:
             npart = min(CHUNKSIZE, pcount - ind)
-            pos[:npart,0] = x[ind:ind+npart]
-            pos[:npart,1] = y[ind:ind+npart]
-            pos[:npart,2] = z[ind:ind+npart]
+            pos = np.empty((npart, 3), dtype=x.dtype)
+            pos[:,0] = x[ind:ind+npart]
+            pos[:,1] = y[ind:ind+npart]
+            pos[:,2] = z[ind:ind+npart]
             if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
                np.any(pos.max(axis=0) > self.pf.domain_right_edge):
                 raise YTDomainOverflow(pos.min(axis=0),
@@ -98,7 +98,7 @@
                                        self.pf.domain_right_edge)
             regions.add_data_file(pos, data_file.file_id)
             morton[ind:ind+npart] = compute_morton(
-                pos[:npart,0], pos[:npart,1], pos[:npart,2],
+                pos[:,0], pos[:,1], pos[:,2],
                 data_file.pf.domain_left_edge,
                 data_file.pf.domain_right_edge)
             ind += CHUNKSIZE

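The bug fixed here came from preallocating one (CHUNKSIZE, 3) buffer and filling only its first npart rows: on the final, short chunk the domain check (pos.min/pos.max over the whole buffer) also saw stale rows left over from the previous chunk. Allocating a fresh (npart, 3) array per chunk keeps the check honest. A minimal sketch of the corrected pattern (the generator name is made up):

    import numpy as np

    CHUNKSIZE = 32 ** 3

    def iter_position_chunks(x, y, z):
        ind = 0
        while ind < x.size:
            npart = min(CHUNKSIZE, x.size - ind)
            # A fresh buffer per chunk: every row holds valid data.
            pos = np.empty((npart, 3), dtype=x.dtype)
            pos[:, 0] = x[ind:ind + npart]
            pos[:, 1] = y[ind:ind + npart]
            pos[:, 2] = z[ind:ind + npart]
            yield pos
            ind += npart
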

https://bitbucket.org/yt_analysis/yt/commits/3f9e2601b34b/
Changeset:   3f9e2601b34b
Branch:      yt-3.0
User:        mswarren
Date:        2014-04-27 07:12:36
Summary:     max_count was wrong when the largest group was the last run
Affected #:  1 file

diff -r eb811185d337389c88b798ccbb2a24678f5ca20a -r 3f9e2601b34b8ff10755a323a6e1c39d437e81b7 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -231,6 +231,8 @@
                 j = 1
             else:
                 j += 1
+        if j > max_count:
+            max_count = j
         #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0


https://bitbucket.org/yt_analysis/yt/commits/2a2833e6c189/
Changeset:   2a2833e6c189
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-27 23:09:50
Summary:     Fixes for Rockstar particle allocation.
Affected #:  2 files

diff -r 3f9e2601b34b8ff10755a323a6e1c39d437e81b7 -r 2a2833e6c1891ce305618821f4dbda109031f1c0 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -30,6 +30,22 @@
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
 
+ctypedef packed struct haloflat:
+    np.int64_t id
+    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float corevel_x, corevel_y, corevel_z
+    float bulkvel_x, bulkvel_y, bulkvel_z
+    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+    float J1, J2, J3
+    float energy, spin
+    float alt_m1, alt_m2, alt_m3, alt_m4
+    float Xoff, Voff, b_to_a, c_to_a
+    float A1, A2, A3
+    float bullock_spin, kin_to_pot
+    np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+    float min_pos_err, min_vel_err, min_bulkvel_err
+    float padding ########## REMOVE THIS IF NEED BE
+
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
@@ -38,6 +54,8 @@
     halo *halos
     np.int64_t num_halos
     void calc_mass_definition() nogil
+    void free_particle_copies() nogil
+    void free_halos() nogil
 
 # For outputting halos, rockstar style
 
@@ -207,14 +225,14 @@
         # Define fof object
 
         # Find number of particles
-        cdef np.int64_t i, j, k, ind
+        cdef np.int64_t i, j, k, ind, offset
         cdef np.int64_t num_particles = pind.shape[0]
 
         # Allocate space for correct number of particles
         cdef fof fof_obj
 
         cdef np.int64_t max_count = 0
-        cdef np.int64_t local_tag, last_fof_tag = -1
+        cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
         fof_obj.num_p = 0
         last_fof_tag
         j = 0
@@ -236,6 +254,12 @@
         #print >> sys.stderr, "Most frequent occurrance: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0
+        cdef int counter = 0, ndone = 0
+        cdef np.ndarray[np.int64_t, ndim=1] pcounts 
+        pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64")
+        cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
+        free_halos()
+        print "Rockstar started with num_halos = ", num_halos, pcounts.size
         for i in range(pind.shape[0]):
             ind = pind[i]
             local_tag = fof_tags[ind]
@@ -258,7 +282,18 @@
                 #print >> sys.stderr, \
                 #    "Finding subs on %s particles from %s out of %s" % (
                 #    fof_obj.num_p, i, pind.shape[0])
+                pcounts[ndone] = fof_obj.num_p
+                counter += 1
+                ndone += 1
+                if counter == frac:
+                    print >> sys.stderr, "R*-ing % 5.1f%% done" % (
+                        (100.0 * ndone)/pcounts.size)
+                    counter = 0
+                p = fof_obj.particles
+                free_particle_copies()
                 find_subs(&fof_obj)
                 # Now we reset
                 fof_obj.num_p = j = 0
         free(fof_obj.particles)
+        print "Rockstar ended with num_halos = ", num_halos
+        return pcounts

diff -r 3f9e2601b34b8ff10755a323a6e1c39d437e81b7 -r 2a2833e6c1891ce305618821f4dbda109031f1c0 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -21,7 +21,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
-                         define_macros = [("THREADSAFE", "__thread")],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
@@ -29,7 +30,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
-                         define_macros = [("THREADSAFE", "__thread")],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])


https://bitbucket.org/yt_analysis/yt/commits/db692d4c7267/
Changeset:   db692d4c7267
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-27 23:13:50
Summary:     Adding a few comments and simplifying how halos are returned.
Affected #:  1 file

diff -r 2a2833e6c1891ce305618821f4dbda109031f1c0 -r db692d4c72671fef498981042c1421215dac1dd0 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -215,6 +215,10 @@
     def output_halos(self):
         output_halos(0, 0, 0, NULL) 
 
+    def return_halos(self):
+        cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
+        return np.asarray(haloview)
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
@@ -259,7 +263,6 @@
         pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64")
         cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
         free_halos()
-        print "Rockstar started with num_halos = ", num_halos, pcounts.size
         for i in range(pind.shape[0]):
             ind = pind[i]
             local_tag = fof_tags[ind]
@@ -290,10 +293,11 @@
                         (100.0 * ndone)/pcounts.size)
                     counter = 0
                 p = fof_obj.particles
+                # R* computes offsets, so we need to free and whatnot every
+                # iteration, since we're not copying.
                 free_particle_copies()
                 find_subs(&fof_obj)
                 # Now we reset
                 fof_obj.num_p = j = 0
         free(fof_obj.particles)
-        print "Rockstar ended with num_halos = ", num_halos
         return pcounts


https://bitbucket.org/yt_analysis/yt/commits/32b97e36c50e/
Changeset:   32b97e36c50e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-28 03:58:54
Summary:     This should enable R* to work with aliased memory locations.
Affected #:  1 file

diff -r db692d4c72671fef498981042c1421215dac1dd0 -r 32b97e36c50e649211ababc41134f257fde89f6b yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -23,9 +23,15 @@
 cdef import from "halo.h":
     struct halo:
         np.int64_t id
-        float pos[6], corevel[3], bulkvel[3]
+        float pos[6]
+        float corevel[3]
+        float bulkvel[3]
         float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float J[3]
+        float energy, spin
+        float alt_m[4]
+        float Xoff, Voff, b_to_a, c_to_a
+        float A[3]
         float bullock_spin, kin_to_pot
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
@@ -44,7 +50,6 @@
     float bullock_spin, kin_to_pot
     np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
     float min_pos_err, min_vel_err, min_bulkvel_err
-    float padding ########## REMOVE THIS IF NEED BE
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
@@ -277,7 +282,7 @@
             for k in range(3):
                 fof_obj.particles[j].pos[k] = pos[ind,k]
                 fof_obj.particles[j].pos[k+3] = vel[ind,k]
-            fof_obj.particles[j].id = -1
+            fof_obj.particles[j].id = j
             fof_obj.num_p += 1
             j += 1
             # Now we check if we're the last one
@@ -289,13 +294,14 @@
                 counter += 1
                 ndone += 1
                 if counter == frac:
-                    print >> sys.stderr, "R*-ing % 5.1f%% done" % (
-                        (100.0 * ndone)/pcounts.size)
+                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f)" % (
+                        (100.0 * ndone)/pcounts.size,
+                        fof_obj.particles[0].pos[2])
                     counter = 0
-                p = fof_obj.particles
                 # R* computes offsets, so we need to free and whatnot every
                 # iteration, since we're not copying.
                 free_particle_copies()
+                p = fof_obj.particles
                 find_subs(&fof_obj)
                 # Now we reset
                 fof_obj.num_p = j = 0


https://bitbucket.org/yt_analysis/yt/commits/717f89bb3f50/
Changeset:   717f89bb3f50
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-28 14:42:53
Summary:     Turn off debug output, fix copy allocation.
Affected #:  1 file

diff -r 32b97e36c50e649211ababc41134f257fde89f6b -r 717f89bb3f50030199e6aecf3d7496e4729132be yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -60,6 +60,7 @@
     np.int64_t num_halos
     void calc_mass_definition() nogil
     void free_particle_copies() nogil
+    void alloc_particle_copies(np.int64_t total_copies) nogil
     void free_halos() nogil
 
 # For outputing halos, rockstar style
@@ -262,6 +263,7 @@
             max_count = j
         #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
+        alloc_particle_copies(max_count)
         j = 0
         cdef int counter = 0, ndone = 0
         cdef np.ndarray[np.int64_t, ndim=1] pcounts 
@@ -294,14 +296,15 @@
                 counter += 1
                 ndone += 1
                 if counter == frac:
-                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f)" % (
-                        (100.0 * ndone)/pcounts.size,
-                        fof_obj.particles[0].pos[2])
+                    #print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
+                    #    (100.0 * ndone)/pcounts.size,
+                    #    fof_obj.particles[0].pos[2],
+                    #    halos[num_halos - 1].pos[3])
                     counter = 0
                 # R* computes offsets, so we need to free and whatnot every
                 # iteration, since we're not copying.
-                free_particle_copies()
-                p = fof_obj.particles
+                #free_particle_copies()
+                p = &fof_obj.particles[0]
                 find_subs(&fof_obj)
                 # Now we reset
                 fof_obj.num_p = j = 0


https://bitbucket.org/yt_analysis/yt/commits/6252b1841755/
Changeset:   6252b1841755
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-28 15:37:28
Summary:     Switch to valloc, which fixes problems from memcpy. Increase the SDF chunk size.
Affected #:  2 files

diff -r 717f89bb3f50030199e6aecf3d7496e4729132be -r 6252b1841755f95d072c8d59c3589ec2543e48a1 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,7 +6,8 @@
 from libc.stdlib cimport malloc, free
 import sys
 
-
+cdef import from "malloc.h":
+    void *valloc(size_t size) nogil
 
 # Importing relevant rockstar data types particle, fof halo, halo
 
@@ -262,8 +263,7 @@
         if j > max_count:
             max_count = j
         #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
-        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
-        alloc_particle_copies(max_count)
+        fof_obj.particles = <particle*> valloc(max_count * sizeof(particle))
         j = 0
         cdef int counter = 0, ndone = 0
         cdef np.ndarray[np.int64_t, ndim=1] pcounts 
@@ -289,21 +289,15 @@
             j += 1
             # Now we check if we're the last one
             if local_tag != next_tag:
-                #print >> sys.stderr, \
-                #    "Finding subs on %s particles from %s out of %s" % (
-                #    fof_obj.num_p, i, pind.shape[0])
                 pcounts[ndone] = fof_obj.num_p
                 counter += 1
                 ndone += 1
                 if counter == frac:
-                    #print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
-                    #    (100.0 * ndone)/pcounts.size,
-                    #    fof_obj.particles[0].pos[2],
-                    #    halos[num_halos - 1].pos[3])
+                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
+                        (100.0 * ndone)/pcounts.size,
+                        fof_obj.particles[0].pos[2],
+                        halos[num_halos - 1].pos[3])
                     counter = 0
-                # R* computes offsets, so we need to free and whatnot every
-                # iteration, since we're not copying.
-                #free_particle_copies()
                 p = &fof_obj.particles[0]
                 find_subs(&fof_obj)
                 # Now we reset

diff -r 717f89bb3f50030199e6aecf3d7496e4729132be -r 6252b1841755f95d072c8d59c3589ec2543e48a1 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -27,7 +27,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
-CHUNKSIZE = 32**3
+CHUNKSIZE = 256**3
 
 class IOHandlerSDF(BaseIOHandler):
     _dataset_type = "sdf_particles"


https://bitbucket.org/yt_analysis/yt/commits/f3421ee1dc69/
Changeset:   f3421ee1dc69
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-28 19:19:17
Summary:     Switch back to malloc and set global particle values.
Affected #:  1 file

diff -r 6252b1841755f95d072c8d59c3589ec2543e48a1 -r f3421ee1dc69b21eb0fa0bdea748a497a47be35f yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,9 +6,6 @@
 from libc.stdlib cimport malloc, free
 import sys
 
-cdef import from "malloc.h":
-    void *valloc(size_t size) nogil
-
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
@@ -16,6 +13,9 @@
         np.int64_t id
         float pos[6]
 
+cdef import from "rockstar.h":
+    particle *global_particles "p"
+
 cdef import from "fof.h":
     struct fof:
         np.int64_t num_p
@@ -238,6 +238,7 @@
         # Find number of particles
         cdef np.int64_t i, j, k, ind, offset
         cdef np.int64_t num_particles = pind.shape[0]
+        global global_particles
 
         # Allocate space for correct number of particles
         cdef fof fof_obj
@@ -263,7 +264,7 @@
         if j > max_count:
             max_count = j
         #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
-        fof_obj.particles = <particle*> valloc(max_count * sizeof(particle))
+        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0
         cdef int counter = 0, ndone = 0
         cdef np.ndarray[np.int64_t, ndim=1] pcounts 
@@ -298,7 +299,7 @@
                         fof_obj.particles[0].pos[2],
                         halos[num_halos - 1].pos[3])
                     counter = 0
-                p = &fof_obj.particles[0]
+                global_particles = &fof_obj.particles[0]
                 find_subs(&fof_obj)
                 # Now we reset
                 fof_obj.num_p = j = 0


https://bitbucket.org/yt_analysis/yt/commits/39ed6f6a9ff1/
Changeset:   39ed6f6a9ff1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-28 21:40:49
Summary:     Fix FOF assignment offsets.
Affected #:  1 file

diff -r f3421ee1dc69b21eb0fa0bdea748a497a47be35f -r 39ed6f6a9ff1b93c03bb2b9192a67e450c7c7f28 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -739,20 +739,19 @@
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 c0 = contour_find(c1)
-                offset = ipind[poffset]
                 contour_ids[offset] = c0.contour_id
                 c0.count += 1
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
-                offset = ipind[poffset]
+                offset = pind[offset]
                 if c0.count < minimum_count:
                     contour_ids[offset] = -1
         free(container)


https://bitbucket.org/yt_analysis/yt/commits/5780eb170d19/
Changeset:   5780eb170d19
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-28 22:54:14
Summary:     Add cleanup functions for R* find_subs interface.
Affected #:  1 file

diff -r 39ed6f6a9ff1b93c03bb2b9192a67e450c7c7f28 -r 5780eb170d194e0947bfda085a4243fa34099d51 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -15,6 +15,7 @@
 
 cdef import from "rockstar.h":
     particle *global_particles "p"
+    void rockstar_cleanup()
 
 cdef import from "fof.h":
     struct fof:
@@ -73,6 +74,7 @@
 
 cdef import from "config.h":
     void setup_config() nogil
+    void output_config(char *fn) nogil
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
@@ -224,7 +226,10 @@
 
     def return_halos(self):
         cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
-        return np.asarray(haloview)
+        rv = np.asarray(haloview).copy()
+        rockstar_cleanup()
+        free_halos()
+        return rv
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -246,7 +251,6 @@
         cdef np.int64_t max_count = 0
         cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
         fof_obj.num_p = 0
-        last_fof_tag
         j = 0
         # We're going to do one iteration to get the most frequent value.
         for i in range(pind.shape[0]):
@@ -280,8 +284,7 @@
             if i == pind.shape[0] - 1:
                 next_tag = local_tag + 1
             else:
-                offset = pind[i+1]
-                next_tag = fof_tags[offset]
+                next_tag = fof_tags[pind[i+1]]
             for k in range(3):
                 fof_obj.particles[j].pos[k] = pos[ind,k]
                 fof_obj.particles[j].pos[k+3] = vel[ind,k]
@@ -297,7 +300,7 @@
                     print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
                         (100.0 * ndone)/pcounts.size,
                         fof_obj.particles[0].pos[2],
-                        halos[num_halos - 1].pos[3])
+                        halos[num_halos - 1].pos[2])
                     counter = 0
                 global_particles = &fof_obj.particles[0]
                 find_subs(&fof_obj)


https://bitbucket.org/yt_analysis/yt/commits/efd46c4f20a7/
Changeset:   efd46c4f20a7
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-29 00:56:52
Summary:     Adding SDFIndex from pyslice.

Now a .sindex attribute hangs off the dataset. It is used together with an idx0
file to provide offsets/lengths into the full SDF dataset, which will allow
spatially aware, chunked I/O. Currently .sindex is instantiated on first request,
as a property; it needs to know the location and level of the idx0 file.

Additionally, I've added the ability to specify the SDF header from an alternate
file in the load command.
Affected #:  2 files
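
Together with the diff below, the new keywords would be used roughly as follows; the filenames are placeholders, and this assumes the load command forwards these keywords to SDFDataset:

    import yt

    ds = yt.load("halos.sdf",
                 sdf_header="halos.hdr",       # alternate SDF header (optional)
                 idx_filename="halos.idx0",    # idx0 file holding offsets/lengths
                 idx_header="halos.idx0.hdr",  # alternate header for the index file
                 idx_level=9)
    si = ds.sindex    # SDFIndex, built on first access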

diff -r 5780eb170d194e0947bfda085a4243fa34099d51 -r efd46c4f20a754b23508bbe6196c4e282b3f7c63 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -38,7 +38,8 @@
     SDFFieldInfo
 from .io import \
     IOHandlerSDF, \
-    SDFRead
+    SDFRead,\
+    SDFIndex
 
 class SDFFile(ParticleFile):
     pass
@@ -50,10 +51,15 @@
     _particle_mass_name = None
     _particle_coordinates_name = None
     _particle_velocity_name = None
+    _sindex = None
 
     def __init__(self, filename, dataset_type = "sdf_particles",
                  n_ref = 64, over_refine_factor = 1,
-                 bounding_box = None):
+                 bounding_box = None,
+                 sdf_header = None,
+                 idx_filename = None,
+                 idx_header = None,
+                 idx_level = 9):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
@@ -64,10 +70,15 @@
             self.domain_right_edge = bbox[:,1]
         else:
             self.domain_left_edge = self.domain_right_edge = None
+        self.sdf_header = sdf_header
+        self.idx_filename = idx_filename
+        self.idx_header = idx_header
+        self.idx_level = idx_level
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
-        self.sdf_container = SDFRead(self.parameter_filename)
+        self.sdf_container = SDFRead(self.parameter_filename,
+                                     header=self.sdf_header)
         # Reference
         self.parameters = self.sdf_container.parameters
         self.dimensionality = 3
@@ -101,6 +112,18 @@
         self.filename_template = self.parameter_filename
         self.file_count = 1
 
+    @property
+    def sindex(self):
+        if self._sindex is None:
+            if self.idx_filename is not None:
+                indexdata = SDFRead(self.idx_filename,
+                                    header=self.idx_header)
+                self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
+            else:
+                raise RuntimeError("SDF index0 file not supplied in load.")
+        return self._sindex
+
     def _set_code_unit_attributes(self):
         self.length_unit = self.quan(1.0, "kpc")
         self.velocity_unit = self.quan(1.0, "kpc/Gyr")

diff -r 5780eb170d194e0947bfda085a4243fa34099d51 -r efd46c4f20a754b23508bbe6196c4e282b3f7c63 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -171,7 +171,7 @@
     def set_offset(self, offset):
         self._offset = offset
         if self.size == -1:
-            file_size = os.path.getsize(self.filename) 
+            file_size = os.path.getsize(self.filename)
             file_size -= offset
             self.size = float(file_size) / self.itemsize
             assert(int(self.size) == self.size)
@@ -189,8 +189,11 @@
 
     _eof = 'SDF-EOH'
 
-    def __init__(self, filename):
+    def __init__(self, filename, header=None):
         self.filename = filename
+        if header is None:
+            header = filename
+        self.header = header
         self.parameters = {}
         self.structs = []
         self.comments = []
@@ -201,7 +204,7 @@
     def parse_header(self):
         """docstring for parse_header"""
         # Pre-process
-        ascfile = open(self.filename, 'r')
+        ascfile = open(self.header, 'r')
         while True:
             l = ascfile.readline()
             if self._eof in l: break
@@ -210,6 +213,8 @@
 
         hoff = ascfile.tell()
         ascfile.close()
+        if self.header != self.filename:
+            hoff = 0
         self.parameters['header_offset'] = hoff
 
     def parse_line(self, line, ascfile):
@@ -277,4 +282,288 @@
             self.update(struct.data)
 
 
+class SDFIndex(object):
 
+    """docstring for SDFIndex
+
+    This provides an index mechanism into the full SDF Dataset.
+
+    Most useful class methods:
+        get_cell_data(level, cell_iarr, fields)
+        iter_bbox_data(left, right, fields)
+
+    """
+    def __init__(self, sdfdata, indexdata, level=9):
+        super(SDFIndex, self).__init__()
+        self.sdfdata = sdfdata
+        self.indexdata = indexdata
+        self.level = level
+        self.rmin = None
+        self.rmax = None
+        self.domain_width = None
+        self.domain_buffer = 0
+        self.domain_dims = 0
+        self.domain_active_dims = 0
+        self.masks = {
+            "r" : int("011"*9, 2),
+            "t" : int("101"*9, 2),
+            "p" : int("110"*9, 2),
+            "x" : int("011"*9, 2),
+            "y" : int("101"*9, 2),
+            "z" : int("110"*9, 2),
+            0 : int("011"*9, 2),
+            1 : int("101"*9, 2),
+            2 : int("110"*9, 2),
+        }
+        self.dim_slices = {
+            "r" : slice(0, None, 3),
+            "t" : slice(1, None, 3),
+            "p" : slice(2, None, 3),
+            "x" : slice(0, None, 3),
+            "y" : slice(1, None, 3),
+            "z" : slice(2, None, 3),
+            0 : slice(0, None, 3),
+            1 : slice(1, None, 3),
+            2 : slice(2, None, 3),
+        }
+        self.set_bounds()
+
+    def set_bounds(self):
+        r_0 = self.sdfdata.parameters['R0']
+        DW = 2.0 * r_0
+
+        self.rmin = np.zeros(3)
+        self.rmax = np.zeros(3)
+        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
+        if sorted_rtp:
+            self.rmin[:] = [0.0, 0.0, -np.pi]
+            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
+        else:
+            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
+            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
+            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
+            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
+            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
+            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
+
+        # Expand the root domain for a non-power-of-two ic_Nmesh.
+        expand_root = 0.0
+        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
+        if ic_Nmesh != 0:
+            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
+            if (f2 != ic_Nmesh):
+                expand_root = 1.0*f2/ic_Nmesh - 1.0
+            print 'Expanding: ', f2, ic_Nmesh, expand_root
+        self.rmin *= 1.0 + expand_root
+        self.rmax *= 1.0 + expand_root
+        self.domain_width = self.rmax - self.rmin
+        self.domain_dims = 1 << self.level
+        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
+        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
+        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
+
+    def get_key_ijk(self, i1, i2, i3):
+        rep1 = np.binary_repr(i1, width=9)
+        rep2 = np.binary_repr(i2, width=9)
+        rep3 = np.binary_repr(i3, width=9)
+        inter = np.zeros(27, dtype='c')
+        inter[::3] = rep1
+        inter[1::3] = rep2
+        inter[2::3] = rep3
+        return int(inter.tostring(), 2)
+
+    def get_key(self, iarr, level=9):
+        i1, i2, i3 = iarr
+        rep1 = np.binary_repr(i1, width=9)
+        rep2 = np.binary_repr(i2, width=9)
+        rep3 = np.binary_repr(i3, width=9)
+        inter = np.zeros(27, dtype='c')
+        inter[::3] = rep1
+        inter[1::3] = rep2
+        inter[2::3] = rep3
+        return int(inter.tostring(), 2)
+
+    def get_slice_key(self, ind, dim='r'):
+        slb = np.binary_repr(ind, width=9)
+        expanded = np.array([0]*27, dtype='c')
+        expanded[self.dim_slices[dim]] = slb
+        return int(expanded.tostring(), 2)
+
+    def get_slice_chunks(self, slice_dim, slice_index):
+        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
+        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox_slow(self, ileft, iright):
+        """
+        Given left and right cell indices, return a mask and the
+        set of offsets+lengths into the sdf data.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+        ileft = np.array(ileft)
+        iright = np.array(iright)
+        for i in range(3):
+            left_key = self.get_slice_key(ileft[i], dim=i)
+            right_key= self.get_slice_key(iright[i], dim=i)
+            dim_inds = (self.indexdata['index'] & ~self.masks[i])
+            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
+            del dim_inds
+
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox(self, ileft, iright):
+        """
+        Given left and right cell indices, return the index keys
+        covering that region, wrapping for periodicity.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+
+        print 'Getting data from ileft to iright:',  ileft, iright
+
+        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+                           ileft[1]:iright[1]+1,
+                           ileft[2]:iright[2]+1]
+
+        X = X.ravel()
+        Y = Y.ravel()
+        Z = Z.ravel()
+        # Correct for periodicity
+        X[X < self.domain_buffer] += self.domain_active_dims
+        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Y[Y < self.domain_buffer] += self.domain_active_dims
+        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Z[Z < self.domain_buffer] += self.domain_active_dims
+        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+
+        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
+
+        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        indices = indices[indices < self.indexdata['index'].shape[0]]
+        return indices
+
+    def get_bbox(self, left, right):
+        """
+        Given left and right physical positions, convert them to cell
+        indices and return the index keys covering that region.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+
+        return self.get_ibbox(ileft, iright)
+
+    def get_data(self, chunk, fields):
+        data = {}
+        for field in fields:
+            data[field] = self.sdfdata[field][chunk]
+        return data
+
+    def iter_data(self, inds, fields):
+        num_inds = len(inds)
+        num_reads = 0
+        print 'Reading %i chunks' % num_inds
+        i = 0
+        while (i < num_inds):
+            ind = inds[i]
+            base = self.indexdata['base'][ind]
+            length = self.indexdata['len'][ind]
+            # Concatenate aligned reads
+            nexti = i+1
+            combined = 0
+            while nexti < len(inds):
+                nextind = inds[nexti]
+                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
+                if base + length == self.indexdata['base'][nextind]:
+                    length += self.indexdata['len'][nextind]
+                    i += 1
+                    nexti += 1
+                    combined += 1
+                else:
+                    break
+
+            chunk = slice(base, base+length)
+            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            num_reads += 1
+            data = self.get_data(chunk, fields)
+            yield data
+            del data
+            i += 1
+        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
+
+    def iter_bbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_bbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def iter_ibbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_ibbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def get_contiguous_chunk(self, left_key, right_key, fields):
+        max_key = self.indexdata['index'][-1]
+        if left_key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
+        base = self.indexdata['base'][left_key]
+        right_key = min(right_key, self.indexdata['index'][-1])
+        length = self.indexdata['base'][right_key] + \
+            self.indexdata['len'][right_key] - base
+        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        return self.get_data(slice(base, base + length), fields)
+
+    def iter_slice_data(self, slice_dim, slice_index, fields):
+        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
+        for off, l in zip(offsets, lengths):
+            data = {}
+            chunk = slice(off, off+l)
+            for field in fields:
+                data[field] = self.sdfdata[field][chunk]
+            yield data
+            del data
+
+    def get_key_bounds(self, level, cell_iarr):
+        """
+        Return the index-level key bounds for the requested cell.
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+
+        Returns:
+            lmax_lk, lmax_rk
+        """
+        shift = self.level-level
+        level_buff = 0
+        level_lk = self.get_key(cell_iarr + level_buff)
+        level_rk = self.get_key(cell_iarr + level_buff) + 1
+        lmax_lk = (level_lk << shift*3)
+        lmax_rk = (((level_rk) << shift*3) -1)
+        #print "Level ", level, np.binary_repr(level_lk, width=27), np.binary_repr(level_rk, width=27)
+        #print "Level ", self.level, np.binary_repr(lmax_lk, width=27), np.binary_repr(lmax_rk, width=27)
+        return lmax_lk, lmax_rk
+
+    def get_cell_data(self, level, cell_iarr, fields):
+        """
+        Get data from requested cell
+
+        This uses the raw cell index, and doesn't account for periodicity or
+        an expanded domain (non-power of 2).
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+        fields: list
+            Requested fields
+
+        Returns:
+            cell_data: dict
+                Dictionary of field_name, field_data
+        """
+        cell_iarr = np.array(cell_iarr)
+        lk, rk = self.get_key_bounds(level, cell_iarr)
+        return self.get_contiguous_chunk(lk, rk, fields)


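The index keys used throughout SDFIndex are built by bit-interleaving the three level-bit cell indices into a single 3*level-bit integer, so spatially nearby cells tend to share key prefixes.  A small standalone sketch of the same packing as get_key at level=9 (this mirrors the ordering added here; a later changeset below remaps the axes):

    import numpy as np

    def interleave(i1, i2, i3, level=9):
        # Pack three level-bit integers into one 3*level-bit key,
        # one axis per slot of each 3-bit group.
        rep1 = np.binary_repr(i1, width=level)
        rep2 = np.binary_repr(i2, width=level)
        rep3 = np.binary_repr(i3, width=level)
        inter = np.zeros(3 * level, dtype='c')
        inter[0::3] = rep1
        inter[1::3] = rep2
        inter[2::3] = rep3
        return int(inter.tostring(), 2)

    interleave(1, 0, 0)   # -> 4: i1 occupies the highest slot of the lowest 3-bit group
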
https://bitbucket.org/yt_analysis/yt/commits/61ca83d7dd6a/
Changeset:   61ca83d7dd6a
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-29 03:47:04
Summary:     A few fixes to the SDF frontend when Rx, etc. are missing. Also make guesses at redshift/scale factor.
Affected #:  2 files

diff -r efd46c4f20a754b23508bbe6196c4e282b3f7c63 -r 61ca83d7dd6a0a6eba558c10551af313bb50e8cb yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -87,12 +87,13 @@
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
         if None in (self.domain_left_edge, self.domain_right_edge):
+            R0 = self.parameters['R0']
             self.domain_left_edge = np.array([
-              -self.parameters["R%s" % ax] for ax in 'xyz'])
+              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
             self.domain_right_edge = np.array([
-              +self.parameters["R%s" % ax] for ax in 'xyz'])
-            self.domain_left_edge *= self.parameters["a"]
-            self.domain_right_edge *= self.parameters["a"]
+              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_left_edge *= self.parameters.get("a", 1.0)
+            self.domain_right_edge *= self.parameters.get("a", 1.0)
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
@@ -100,7 +101,7 @@
 
         self.cosmological_simulation = 1
 
-        self.current_redshift = self.parameters["redshift"]
+        self.current_redshift = self.parameters.get("redshift", 0.0)
         self.omega_lambda = self.parameters["Omega0_lambda"]
         self.omega_matter = self.parameters["Omega0_m"]
         self.hubble_constant = self.parameters["h_100"]

diff -r efd46c4f20a754b23508bbe6196c4e282b3f7c63 -r 61ca83d7dd6a0a6eba558c10551af313bb50e8cb yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -306,15 +306,15 @@
         self.domain_dims = 0
         self.domain_active_dims = 0
         self.masks = {
-            "r" : int("011"*9, 2),
-            "t" : int("101"*9, 2),
-            "p" : int("110"*9, 2),
-            "x" : int("011"*9, 2),
-            "y" : int("101"*9, 2),
-            "z" : int("110"*9, 2),
-            0 : int("011"*9, 2),
-            1 : int("101"*9, 2),
-            2 : int("110"*9, 2),
+            "r" : int("011"*level, 2),
+            "t" : int("101"*level, 2),
+            "p" : int("110"*level, 2),
+            "x" : int("011"*level, 2),
+            "y" : int("101"*level, 2),
+            "z" : int("110"*level, 2),
+            0 : int("011"*level, 2),
+            1 : int("101"*level, 2),
+            2 : int("110"*level, 2),
         }
         self. dim_slices = {
             "r" : slice(0, None, 3),
@@ -364,29 +364,31 @@
         print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
 
     def get_key_ijk(self, i1, i2, i3):
-        rep1 = np.binary_repr(i1, width=9)
-        rep2 = np.binary_repr(i2, width=9)
-        rep3 = np.binary_repr(i3, width=9)
-        inter = np.zeros(27, dtype='c')
+        rep1 = np.binary_repr(i1, width=self.level)
+        rep2 = np.binary_repr(i2, width=self.level)
+        rep3 = np.binary_repr(i3, width=self.level)
+        inter = np.zeros(self.level*3, dtype='c')
         inter[::3] = rep1
         inter[1::3] = rep2
         inter[2::3] = rep3
         return int(inter.tostring(), 2)
 
-    def get_key(self, iarr, level=9):
+    def get_key(self, iarr, level=None):
+        if level is None:
+            level = self.level
         i1, i2, i3 = iarr
-        rep1 = np.binary_repr(i1, width=9)
-        rep2 = np.binary_repr(i2, width=9)
-        rep3 = np.binary_repr(i3, width=9)
-        inter = np.zeros(27, dtype='c')
+        rep1 = np.binary_repr(i1, width=self.level)
+        rep2 = np.binary_repr(i2, width=self.level)
+        rep3 = np.binary_repr(i3, width=self.level)
+        inter = np.zeros(self.level*3, dtype='c')
         inter[::3] = rep1
         inter[1::3] = rep2
         inter[2::3] = rep3
         return int(inter.tostring(), 2)
 
     def get_slice_key(self, ind, dim='r'):
-        slb = np.binary_repr(ind, width=9)
-        expanded = np.array([0]*27, dtype='c')
+        slb = np.binary_repr(ind, width=self.level)
+        expanded = np.array([0]*self.level*3, dtype='c')
         expanded[self.dim_slices[dim]] = slb
         return int(expanded.tostring(), 2)
 
@@ -543,8 +545,8 @@
         level_rk = self.get_key(cell_iarr + level_buff) + 1
         lmax_lk = (level_lk << shift*3)
         lmax_rk = (((level_rk) << shift*3) -1)
-        #print "Level ", level, np.binary_repr(level_lk, width=27), np.binary_repr(level_rk, width=27)
-        #print "Level ", self.level, np.binary_repr(lmax_lk, width=27), np.binary_repr(lmax_rk, width=27)
+        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
+        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
         return lmax_lk, lmax_rk
 
     def get_cell_data(self, level, cell_iarr, fields):

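The masks above generalize from the hard-coded level 9 to any index level: each mask has 1-bits in the interleaved slots belonging to the other two axes, so ``key & ~mask`` keeps only the requested axis's bits, which is how get_slice_chunks and get_ibbox_slow compare keys along a single dimension.  A minimal sketch with a small level (values illustrative):

    level = 4
    mask = int("011" * level, 2)       # 1-bits mark the slots of the other two axes
    key = 0b100100100100               # all of this key's bits sit in the remaining slot
    survivors = key & ~mask            # -> 0b100100100100: only that axis's bits remain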

https://bitbucket.org/yt_analysis/yt/commits/3521ce90a16a/
Changeset:   3521ce90a16a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-29 14:09:54
Summary:     Fix _is_valid for SDF.
Affected #:  1 file

diff -r 61ca83d7dd6a0a6eba558c10551af313bb50e8cb -r 3521ce90a16ad3e21da3db15f4822643e049bfed yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -133,6 +133,7 @@
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
+        if not os.path.isfile(args[0]): return False
         with open(args[0], "r") as f:
             line = f.readline().strip()
             return line == "# SDF 1.0"


https://bitbucket.org/yt_analysis/yt/commits/b51ac6e82937/
Changeset:   b51ac6e82937
Branch:      yt-3.0
User:        samskillman
Date:        2014-04-30 04:30:45
Summary:     Fixes for SDF io. Indices were backwards.
Affected #:  1 file

diff -r 61ca83d7dd6a0a6eba558c10551af313bb50e8cb -r b51ac6e82937bfe567dfd1989ef40838d64def07 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -306,26 +306,26 @@
         self.domain_dims = 0
         self.domain_active_dims = 0
         self.masks = {
-            "r" : int("011"*level, 2),
+            "p" : int("011"*level, 2),
             "t" : int("101"*level, 2),
-            "p" : int("110"*level, 2),
-            "x" : int("011"*level, 2),
+            "r" : int("110"*level, 2),
+            "z" : int("011"*level, 2),
             "y" : int("101"*level, 2),
-            "z" : int("110"*level, 2),
-            0 : int("011"*level, 2),
+            "x" : int("110"*level, 2),
+            2 : int("011"*level, 2),
             1 : int("101"*level, 2),
-            2 : int("110"*level, 2),
+            0 : int("110"*level, 2),
         }
-        self. dim_slices = {
-            "r" : slice(0, None, 3),
+        self.dim_slices = {
+            "p" : slice(0, None, 3),
             "t" : slice(1, None, 3),
-            "p" : slice(2, None, 3),
-            "x" : slice(0, None, 3),
+            "r" : slice(2, None, 3),
+            "z" : slice(0, None, 3),
             "y" : slice(1, None, 3),
-            "z" : slice(2, None, 3),
-            0 : slice(0, None, 3),
+            "x" : slice(2, None, 3),
+            2 : slice(0, None, 3),
             1 : slice(1, None, 3),
-            2 : slice(2, None, 3),
+            0 : slice(2, None, 3),
         }
         self.set_bounds()
 
@@ -363,16 +363,6 @@
         self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
         print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
 
-    def get_key_ijk(self, i1, i2, i3):
-        rep1 = np.binary_repr(i1, width=self.level)
-        rep2 = np.binary_repr(i2, width=self.level)
-        rep3 = np.binary_repr(i3, width=self.level)
-        inter = np.zeros(self.level*3, dtype='c')
-        inter[::3] = rep1
-        inter[1::3] = rep2
-        inter[2::3] = rep3
-        return int(inter.tostring(), 2)
-
     def get_key(self, iarr, level=None):
         if level is None:
             level = self.level
@@ -381,11 +371,14 @@
         rep2 = np.binary_repr(i2, width=self.level)
         rep3 = np.binary_repr(i3, width=self.level)
         inter = np.zeros(self.level*3, dtype='c')
-        inter[::3] = rep1
-        inter[1::3] = rep2
-        inter[2::3] = rep3
+        inter[self.dim_slices[0]] = rep1
+        inter[self.dim_slices[1]] = rep2
+        inter[self.dim_slices[2]] = rep3
         return int(inter.tostring(), 2)
 
+    def get_key_ijk(self, i1, i2, i3, level=None):
+        return self.get_key(np.array([i1, i2, i3]), level=level)
+
     def get_slice_key(self, ind, dim='r'):
         slb = np.binary_repr(ind, width=self.level)
         expanded = np.array([0]*self.level*3, dtype='c')

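With the remapped dim_slices, dimension 0 now fills the least-significant slot of each 3-bit group of the interleaved key (and dimension 2 the most-significant), matching the on-disk ordering this commit message refers to.  A quick check against the code above, assuming already-constructed SDF and index containers (sdfdata, indexdata) and numpy imported as np:

    idx = SDFIndex(sdfdata, indexdata, level=9)
    idx.get_key(np.array([1, 0, 0]))   # -> 1: a unit step in dimension 0 is the lowest bit
    idx.get_key(np.array([0, 0, 1]))   # -> 4: dimension 2 sits two bit positions higher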

https://bitbucket.org/yt_analysis/yt/commits/a3f1cb66018e/
Changeset:   a3f1cb66018e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-01 15:44:27
Summary:     Merging with Sam's fix
Affected #:  1 file

diff -r 3521ce90a16ad3e21da3db15f4822643e049bfed -r a3f1cb66018e445398a87eb86d072b41bae7b892 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -306,26 +306,26 @@
         self.domain_dims = 0
         self.domain_active_dims = 0
         self.masks = {
-            "r" : int("011"*level, 2),
+            "p" : int("011"*level, 2),
             "t" : int("101"*level, 2),
-            "p" : int("110"*level, 2),
-            "x" : int("011"*level, 2),
+            "r" : int("110"*level, 2),
+            "z" : int("011"*level, 2),
             "y" : int("101"*level, 2),
-            "z" : int("110"*level, 2),
-            0 : int("011"*level, 2),
+            "x" : int("110"*level, 2),
+            2 : int("011"*level, 2),
             1 : int("101"*level, 2),
-            2 : int("110"*level, 2),
+            0 : int("110"*level, 2),
         }
-        self. dim_slices = {
-            "r" : slice(0, None, 3),
+        self.dim_slices = {
+            "p" : slice(0, None, 3),
             "t" : slice(1, None, 3),
-            "p" : slice(2, None, 3),
-            "x" : slice(0, None, 3),
+            "r" : slice(2, None, 3),
+            "z" : slice(0, None, 3),
             "y" : slice(1, None, 3),
-            "z" : slice(2, None, 3),
-            0 : slice(0, None, 3),
+            "x" : slice(2, None, 3),
+            2 : slice(0, None, 3),
             1 : slice(1, None, 3),
-            2 : slice(2, None, 3),
+            0 : slice(2, None, 3),
         }
         self.set_bounds()
 
@@ -363,16 +363,6 @@
         self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
         print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
 
-    def get_key_ijk(self, i1, i2, i3):
-        rep1 = np.binary_repr(i1, width=self.level)
-        rep2 = np.binary_repr(i2, width=self.level)
-        rep3 = np.binary_repr(i3, width=self.level)
-        inter = np.zeros(self.level*3, dtype='c')
-        inter[::3] = rep1
-        inter[1::3] = rep2
-        inter[2::3] = rep3
-        return int(inter.tostring(), 2)
-
     def get_key(self, iarr, level=None):
         if level is None:
             level = self.level
@@ -381,11 +371,14 @@
         rep2 = np.binary_repr(i2, width=self.level)
         rep3 = np.binary_repr(i3, width=self.level)
         inter = np.zeros(self.level*3, dtype='c')
-        inter[::3] = rep1
-        inter[1::3] = rep2
-        inter[2::3] = rep3
+        inter[self.dim_slices[0]] = rep1
+        inter[self.dim_slices[1]] = rep2
+        inter[self.dim_slices[2]] = rep3
         return int(inter.tostring(), 2)
 
+    def get_key_ijk(self, i1, i2, i3, level=None):
+        return self.get_key(np.array([i1, i2, i3]), level=level)
+
     def get_slice_key(self, ind, dim='r'):
         slb = np.binary_repr(ind, width=self.level)
         expanded = np.array([0]*self.level*3, dtype='c')


https://bitbucket.org/yt_analysis/yt/commits/6845be8a4823/
Changeset:   6845be8a4823
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-02 14:52:23
Summary:     Reduce chunksize for SDF
Affected #:  1 file

diff -r a3f1cb66018e445398a87eb86d072b41bae7b892 -r 6845be8a48237de67adfb78d6759326b47c0b39d yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -27,7 +27,7 @@
 from yt.utilities.lib.geometry_utils import compute_morton
 
 from yt.geometry.oct_container import _ORDER_MAX
-CHUNKSIZE = 256**3
+CHUNKSIZE = 32**3
 
 class IOHandlerSDF(BaseIOHandler):
     _dataset_type = "sdf_particles"


https://bitbucket.org/yt_analysis/yt/commits/9a0500caa575/
Changeset:   9a0500caa575
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-03 16:59:11
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #854)

SDF Frontend
Affected #:  18 files

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,8 +6,6 @@
 from libc.stdlib cimport malloc, free
 import sys
 
-
-
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
@@ -15,6 +13,10 @@
         np.int64_t id
         float pos[6]
 
+cdef import from "rockstar.h":
+    particle *global_particles "p"
+    void rockstar_cleanup()
+
 cdef import from "fof.h":
     struct fof:
         np.int64_t num_p
@@ -23,13 +25,34 @@
 cdef import from "halo.h":
     struct halo:
         np.int64_t id
-        float pos[6], corevel[3], bulkvel[3]
+        float pos[6]
+        float corevel[3]
+        float bulkvel[3]
         float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float J[3]
+        float energy, spin
+        float alt_m[4]
+        float Xoff, Voff, b_to_a, c_to_a
+        float A[3]
         float bullock_spin, kin_to_pot
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
 
+ctypedef packed struct haloflat:
+    np.int64_t id
+    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float corevel_x, corevel_y, corevel_z
+    float bulkvel_x, bulkvel_y, bulkvel_z
+    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+    float J1, J2, J3
+    float energy, spin
+    float alt_m1, alt_m2, alt_m3, alt_m4
+    float Xoff, Voff, b_to_a, c_to_a
+    float A1, A2, A3
+    float bullock_spin, kin_to_pot
+    np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+    float min_pos_err, min_vel_err, min_bulkvel_err
+
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
@@ -38,6 +61,9 @@
     halo *halos
     np.int64_t num_halos
     void calc_mass_definition() nogil
+    void free_particle_copies() nogil
+    void alloc_particle_copies(np.int64_t total_copies) nogil
+    void free_halos() nogil
 
 # For outputing halos, rockstar style
 
@@ -48,6 +74,7 @@
 
 cdef import from "config.h":
     void setup_config() nogil
+    void output_config(char *fn) nogil
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
@@ -197,45 +224,87 @@
     def output_halos(self):
         output_halos(0, 0, 0, NULL) 
 
+    def return_halos(self):
+        cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
+        rv = np.asarray(haloview).copy()
+        rockstar_cleanup()
+        free_halos()
+        return rv
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
                                 np.ndarray[np.float64_t, ndim=2] pos,
-                                np.ndarray[np.float64_t, ndim=2] vel,
-                                np.ndarray[np.int64_t, ndim=1] fof_tags,
-                                np.int64_t nfof,
-                                np.int64_t npart_max):
+                                np.ndarray[np.float64_t, ndim=2] vel):
 
         # Define fof object
 
         # Find number of particles
-        cdef np.int64_t i, j
-        cdef np.int64_t num_particles = pid.shape[0]
+        cdef np.int64_t i, j, k, ind, offset
+        cdef np.int64_t num_particles = pind.shape[0]
+        global global_particles
 
         # Allocate space for correct number of particles
-        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
         cdef fof fof_obj
-        fof_obj.particles = particles
 
-        cdef np.int64_t last_fof_tag = 1
-        cdef np.int64_t k = 0
-        for i in range(num_particles):
-            if fof_tags[i] < 0:
+        cdef np.int64_t max_count = 0
+        cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
+        fof_obj.num_p = 0
+        j = 0
+        # We're going to do one iteration to get the most frequent value.
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Don't count the null group
+            if local_tag == -1: continue
+            if local_tag != last_fof_tag:
+                if j > max_count:
+                    max_count = j
+                last_fof_tag = local_tag
+                j = 1
+            else:
+                j += 1
+        if j > max_count:
+            max_count = j
+        #print >> sys.stderr, "Most frequent occurrance: %s" % max_count
+        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
+        j = 0
+        cdef int counter = 0, ndone = 0
+        cdef np.ndarray[np.int64_t, ndim=1] pcounts 
+        pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64")
+        cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
+        free_halos()
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Skip this one -- it means no group.
+            if local_tag == -1:
                 continue
-            if fof_tags[i] != last_fof_tag:
-                last_fof_tag = fof_tags[i]
-                if k > 16:
-                    fof_obj.num_p = k
-                    find_subs(&fof_obj)
-                k = 0
-            particles[k].id = pid[i]
-
-            # fill in locations & velocities
-            for j in range(3):
-                particles[k].pos[j] = pos[i,j]
-                particles[k].pos[j+3] = vel[i,j]
-            k += 1
-        free(particles)
-
-
-
+            if i == pind.shape[0] - 1:
+                next_tag = local_tag + 1
+            else:
+                next_tag = fof_tags[pind[i+1]]
+            for k in range(3):
+                fof_obj.particles[j].pos[k] = pos[ind,k]
+                fof_obj.particles[j].pos[k+3] = vel[ind,k]
+            fof_obj.particles[j].id = j
+            fof_obj.num_p += 1
+            j += 1
+            # Now we check if we're the last one
+            if local_tag != next_tag:
+                pcounts[ndone] = fof_obj.num_p
+                counter += 1
+                ndone += 1
+                if counter == frac:
+                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
+                        (100.0 * ndone)/pcounts.size,
+                        fof_obj.particles[0].pos[2],
+                        halos[num_halos - 1].pos[2])
+                    counter = 0
+                global_particles = &fof_obj.particles[0]
+                find_subs(&fof_obj)
+                # Now we reset
+                fof_obj.num_p = j = 0
+        free(fof_obj.particles)
+        return pcounts

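
The reworked make_rockstar_fof streams particles in FOF-tag order: a first pass finds the largest group so one particle buffer can be allocated and reused, and a second pass fills that buffer group by group, handing each completed group to find_subs when the tag changes.  A small pure-Python sketch of the same contiguous-grouping pattern (process_group is a hypothetical stand-in for the Rockstar call):

    def iter_fof_groups(pind, fof_tags, process_group):
        # pind orders the particles so that equal FOF tags are contiguous;
        # a tag of -1 means "no group" and is skipped, as in the Cython code.
        start = None
        n = len(pind)
        for i in range(n):
            tag = fof_tags[pind[i]]
            if tag == -1:
                continue
            if start is None:
                start = i
            next_tag = fof_tags[pind[i + 1]] if i + 1 < n else None
            if tag != next_tag:
                process_group(pind[start:i + 1])   # one contiguous FOF group
                start = None
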
diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -204,7 +204,7 @@
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf.length_unit.in_units("Mpccm/h")
-    conv[3] = conv[4] = conv[5] = 1e-5
+    conv[3] = conv[4] = conv[5] = pf.velocity_unit.in_units("km/s")
     left_edge[0] = pf.domain_left_edge[0]
     left_edge[1] = pf.domain_left_edge[1]
     left_edge[2] = pf.domain_left_edge[2]

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -21,6 +21,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
@@ -28,6 +30,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -29,6 +29,7 @@
     'moab',
     #'pluto',
     'ramses',
+    'sdf',
     'sph',
     'stream',
 ]

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/__init__.py
--- /dev/null
+++ b/yt/frontends/sdf/__init__.py
@@ -0,0 +1,15 @@
+"""
+__init__ for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/api.py
--- /dev/null
+++ b/yt/frontends/sdf/api.py
@@ -0,0 +1,24 @@
+"""
+API for yt.frontends.sdf
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      SDFDataset
+
+from .io import \
+      IOHandlerSDF
+
+from .fields import \
+      SDFFieldInfo

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/data_structures.py
--- /dev/null
+++ b/yt/frontends/sdf/data_structures.py
@@ -0,0 +1,139 @@
+"""
+Data structures for a generic SDF frontend
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as np
+import stat
+import weakref
+import struct
+import glob
+import time
+import os
+import types
+
+from yt.utilities.logger import ytLogger as mylog
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.data_objects.static_output import \
+    Dataset, ParticleFile
+from yt.utilities.physical_constants import \
+    G, \
+    cm_per_kpc, \
+    mass_sun_cgs
+from yt.utilities.cosmology import Cosmology
+from .fields import \
+    SDFFieldInfo
+from .io import \
+    IOHandlerSDF, \
+    SDFRead,\
+    SDFIndex
+
+class SDFFile(ParticleFile):
+    pass
+
+class SDFDataset(Dataset):
+    _index_class = ParticleIndex
+    _file_class = SDFFile
+    _field_info_class = SDFFieldInfo
+    _particle_mass_name = None
+    _particle_coordinates_name = None
+    _particle_velocity_name = None
+    _sindex = None
+
+    def __init__(self, filename, dataset_type = "sdf_particles",
+                 n_ref = 64, over_refine_factor = 1,
+                 bounding_box = None,
+                 sdf_header = None,
+                 idx_filename = None,
+                 idx_header = None,
+                 idx_level = 9):
+        self.n_ref = n_ref
+        self.over_refine_factor = over_refine_factor
+        if bounding_box is not None:
+            bbox = np.array(bounding_box, dtype="float64")
+            if bbox.shape == (2, 3):
+                bbox = bbox.transpose()
+            self.domain_left_edge = bbox[:,0]
+            self.domain_right_edge = bbox[:,1]
+        else:
+            self.domain_left_edge = self.domain_right_edge = None
+        self.sdf_header = sdf_header
+        self.idx_filename = idx_filename
+        self.idx_header = idx_header
+        self.idx_level = idx_level
+        super(SDFDataset, self).__init__(filename, dataset_type)
+
+    def _parse_parameter_file(self):
+        self.sdf_container = SDFRead(self.parameter_filename,
+                                     header=self.sdf_header)
+        # Reference
+        self.parameters = self.sdf_container.parameters
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+
+        if None in (self.domain_left_edge, self.domain_right_edge):
+            R0 = self.parameters['R0']
+            self.domain_left_edge = np.array([
+              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_right_edge = np.array([
+              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            self.domain_left_edge *= self.parameters.get("a", 1.0)
+            self.domain_right_edge *= self.parameters.get("a", 1.0)
+
+        nz = 1 << self.over_refine_factor
+        self.domain_dimensions = np.ones(3, "int32") * nz
+        self.periodicity = (True, True, True)
+
+        self.cosmological_simulation = 1
+
+        self.current_redshift = self.parameters.get("redshift", 0.0)
+        self.omega_lambda = self.parameters["Omega0_lambda"]
+        self.omega_matter = self.parameters["Omega0_m"]
+        self.hubble_constant = self.parameters["h_100"]
+        # Now we calculate our time based on the cosmology.
+        cosmo = Cosmology(self.hubble_constant,
+                          self.omega_matter, self.omega_lambda)
+        self.current_time = cosmo.hubble_time(self.current_redshift)
+        mylog.info("Calculating time to be %0.3e seconds", self.current_time)
+        self.filename_template = self.parameter_filename
+        self.file_count = 1
+
+    @property
+    def sindex(self):
+        if self._sindex is None:
+            if self.idx_filename is not None:
+                indexdata = SDFRead(self.idx_filename,
+                                    header=self.idx_header)
+                self._sindex = SDFIndex(self.sdf_container, indexdata,
+                                        level=self.idx_level)
+            else:
+                raise RuntimeError("SDF index0 file not supplied in load.")
+        return self._sindex
+
+    def _set_code_unit_attributes(self):
+        self.length_unit = self.quan(1.0, "kpc")
+        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
+        self.time_unit = self.quan(1.0, "Gyr")
+        self.mass_unit = self.quan(1e10, "Msun")
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        if not os.path.isfile(args[0]): return False
+        with open(args[0], "r") as f:
+            line = f.readline().strip()
+            return line == "# SDF 1.0"

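SDFDataset accepts bounding_box either as two rows of corner positions, shape (2, 3), or as one row per axis, shape (3, 2); a (2, 3) array is transposed so the left and right edges can always be read off the columns.  A standalone sketch of that normalization, mirroring the __init__ logic above:

    import numpy as np

    bounding_box = [[-16.0, -16.0, -16.0],   # left corner
                    [ 16.0,  16.0,  16.0]]   # right corner
    bbox = np.array(bounding_box, dtype="float64")
    if bbox.shape == (2, 3):
        bbox = bbox.transpose()
    domain_left_edge = bbox[:, 0]    # array([-16., -16., -16.])
    domain_right_edge = bbox[:, 1]   # array([ 16.,  16.,  16.])
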
diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/fields.py
--- /dev/null
+++ b/yt/frontends/sdf/fields.py
@@ -0,0 +1,47 @@
+"""
+SDF-specific fields
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+
+from yt.funcs import *
+
+from yt.fields.field_info_container import \
+    FieldInfoContainer
+
+from yt.config import ytcfg
+from yt.utilities.physical_constants import mh
+from yt.fields.species_fields import \
+    add_species_field_by_fraction, \
+    add_species_field_by_density, \
+    setup_species_fields
+
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
+
+class SDFFieldInfo(FieldInfoContainer):
+    known_other_fields = ()
+
+    known_particle_fields = (
+        ("mass", ("code_mass", ["particle_mass"], None)),
+        ("x", ("code_length", ["particle_position_x"], None)),
+        ("y", ("code_length", ["particle_position_y"], None)),
+        ("z", ("code_length", ["particle_position_z"], None)),
+        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
+        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
+        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
+        ("ident", ("", ["particle_index"], None)),
+    )

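Each entry in known_particle_fields maps an on-disk SDF column to (units, aliases, display name), so the raw columns x, vx, ident, and friends surface as the standard yt particle fields.  A hedged access sketch, assuming a dataset loaded as above ("dark_matter" is the particle type this frontend registers in io.py):

    ad = ds.all_data()
    ad["dark_matter", "particle_position_x"]   # alias for the on-disk "x" column
    ad["dark_matter", "particle_mass"]         # filled from "mass" (uniform particle_mass parameter)
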
diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/io.py
--- /dev/null
+++ b/yt/frontends/sdf/io.py
@@ -0,0 +1,564 @@
+"""
+SDF data-file handling function
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import glob
+import h5py
+import numpy as np
+from yt.funcs import *
+from yt.utilities.exceptions import *
+
+from yt.utilities.io_handler import \
+    BaseIOHandler
+
+from yt.utilities.fortran_utils import read_record
+from yt.utilities.lib.geometry_utils import compute_morton
+
+from yt.geometry.oct_container import _ORDER_MAX
+CHUNKSIZE = 32**3
+
+class IOHandlerSDF(BaseIOHandler):
+    _dataset_type = "sdf_particles"
+
+    @property
+    def _handle(self):
+        return self.pf.sdf_container
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        raise NotImplementedError
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'], self._handle['y'], self._handle['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x']
+                y = self._handle['y']
+                z = self._handle['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = self._handle[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        x, y, z = (self._handle[ax] for ax in 'xyz')
+        pcount = x.size
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+        while ind < pcount:
+            npart = min(CHUNKSIZE, pcount - ind)
+            pos = np.empty((npart, 3), dtype=x.dtype)
+            pos[:,0] = x[ind:ind+npart]
+            pos[:,1] = y[ind:ind+npart]
+            pos[:,2] = z[ind:ind+npart]
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, data_file.file_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += CHUNKSIZE
+        return morton
+
+    def _count_particles(self, data_file):
+        return {'dark_matter': self._handle['x'].size}
+
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
+import re
+import os
+
+_types = {
+    'int': 'int32',
+    'int64_t': 'int64',
+    'float': 'float32',
+    'double': 'float64',
+    'unsigned int': 'I',
+    'unsigned char': 'B',
+}
+
+def get_type(vtype, len=None):
+    try:
+        t = _types[vtype]
+        if len is not None:
+            t = np.dtype((t, len))
+        else:
+            t = np.dtype(t)
+    except KeyError:
+        t = eval("np."+vtype)
+    return t
+
+def lstrip(text_list):
+    return [t.strip() for t in text_list]
+
+def get_struct_vars(line):
+    spl = lstrip(line.split(";"))
+    multiv = lstrip(spl[0].split(","))
+    ret = lstrip(multiv[0].split())
+    ctype = ret[0]
+    vnames = [ret[-1]] + multiv[1:]
+    vnames = [v.strip() for v in vnames]
+    for vtype in ret[1:-1]:
+        ctype += ' ' + vtype
+    num = None
+    if len(vnames) == 1:
+        if '[' in vnames[0]:
+            num = int(vnames[0].split('[')[-1].strip(']'))
+            #num = int(re.sub("\D", "", vnames[0]))
+    ctype = get_type(ctype, len=num)
+    return ctype, vnames
+
+class DataStruct(object):
+    """docstring for DataStruct"""
+
+    _offset = 0
+
+    def __init__(self, dtypes, num, filename):
+        self.filename = filename
+        self.dtype = np.dtype(dtypes)
+        self.size = num
+        self.itemsize = self.dtype.itemsize
+        self.data = {}
+        self.handle = None
+
+    def set_offset(self, offset):
+        self._offset = offset
+        if self.size == -1:
+            file_size = os.path.getsize(self.filename)
+            file_size -= offset
+            self.size = float(file_size) / self.itemsize
+            assert(int(self.size) == self.size)
+
+    def build_memmap(self):
+        assert(self.size != -1)
+        self.handle = np.memmap(self.filename, dtype=self.dtype,
+                        mode='r', shape=self.size, offset=self._offset)
+        for k in self.dtype.names:
+            self.data[k] = self.handle[k]
+
+class SDFRead(dict):
+
+    """docstring for SDFRead"""
+
+    _eof = 'SDF-EOH'
+
+    def __init__(self, filename, header=None):
+        self.filename = filename
+        if header is None:
+            header = filename
+        self.header = header
+        self.parameters = {}
+        self.structs = []
+        self.comments = []
+        self.parse_header()
+        self.set_offsets()
+        self.load_memmaps()
+
+    def parse_header(self):
+        """docstring for parse_header"""
+        # Pre-process
+        ascfile = open(self.header, 'r')
+        while True:
+            l = ascfile.readline()
+            if self._eof in l: break
+
+            self.parse_line(l, ascfile)
+
+        hoff = ascfile.tell()
+        ascfile.close()
+        if self.header != self.filename:
+            hoff = 0
+        self.parameters['header_offset'] = hoff
+
+    def parse_line(self, line, ascfile):
+        """Parse a line of sdf"""
+
+
+        if 'struct' in line:
+            self.parse_struct(line, ascfile)
+            return
+
+        if "#" in line:
+            self.comments.append(line)
+            return
+
+        spl = lstrip(line.split("="))
+        vtype, vname = lstrip(spl[0].split())
+        vname = vname.strip("[]")
+        vval = spl[-1].strip(";")
+        if vtype == 'parameter':
+            self.parameters[vname] = vval
+            return
+        elif vtype == "char":
+            vtype = "str"
+
+        try:
+            vval = eval("np."+vtype+"(%s)" % vval)
+        except AttributeError:
+            vval = eval("np."+_types[vtype]+"(%s)" % vval)
+
+        self.parameters[vname] = vval
+
+    def parse_struct(self, line, ascfile):
+        assert 'struct' in line
+
+        str_types = []
+        comments = []
+        str_lines = []
+        l = ascfile.readline()
+        while "}" not in l:
+            vtype, vnames = get_struct_vars(l)
+            for v in vnames:
+                str_types.append((v, vtype))
+            l = ascfile.readline()
+        num = l.strip("}[]")
+        num = num.strip("\;\\\n]")
+        if len(num) == 0:
+            # We need to compute the number of records.  The DataStruct will
+            # handle this.
+            num = '-1'
+        num = int(num)
+        struct = DataStruct(str_types, num, self.filename)
+        self.structs.append(struct)
+        return
+
+    def set_offsets(self):
+        running_off = self.parameters['header_offset']
+        for struct in self.structs:
+            struct.set_offset(running_off)
+            running_off += struct.size * struct.itemsize
+        return
+
+    def load_memmaps(self):
+        for struct in self.structs:
+            struct.build_memmap()
+            self.update(struct.data)
+
+
+class SDFIndex(object):
+
+    """docstring for SDFIndex
+
+    This provides an index mechanism into the full SDF Dataset.
+
+    Most useful class methods:
+        get_cell_data(level, cell_iarr, fields)
+        iter_bbox_data(left, right, fields)
+        iter_ibbox_data(left, right, fields)
+
+    """
+    def __init__(self, sdfdata, indexdata, level=9):
+        super(SDFIndex, self).__init__()
+        self.sdfdata = sdfdata
+        self.indexdata = indexdata
+        self.level = level
+        self.rmin = None
+        self.rmax = None
+        self.domain_width = None
+        self.domain_buffer = 0
+        self.domain_dims = 0
+        self.domain_active_dims = 0
+        self.masks = {
+            "p" : int("011"*level, 2),
+            "t" : int("101"*level, 2),
+            "r" : int("110"*level, 2),
+            "z" : int("011"*level, 2),
+            "y" : int("101"*level, 2),
+            "x" : int("110"*level, 2),
+            2 : int("011"*level, 2),
+            1 : int("101"*level, 2),
+            0 : int("110"*level, 2),
+        }
+        self.dim_slices = {
+            "p" : slice(0, None, 3),
+            "t" : slice(1, None, 3),
+            "r" : slice(2, None, 3),
+            "z" : slice(0, None, 3),
+            "y" : slice(1, None, 3),
+            "x" : slice(2, None, 3),
+            2 : slice(0, None, 3),
+            1 : slice(1, None, 3),
+            0 : slice(2, None, 3),
+        }
+        self.set_bounds()
+
+    def set_bounds(self):
+        r_0 = self.sdfdata.parameters['R0']
+        DW = 2.0 * r_0
+
+        self.rmin = np.zeros(3)
+        self.rmax = np.zeros(3)
+        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
+        if sorted_rtp:
+            self.rmin[:] = [0.0, 0.0, -np.pi]
+            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
+        else:
+            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
+            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
+            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
+            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
+            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
+            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
+
+        # Expand the root domain for a non-power-of-two ic_Nmesh.
+        expand_root = 0.0
+        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
+        if ic_Nmesh != 0:
+            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
+            if (f2 != ic_Nmesh):
+                expand_root = 1.0*f2/ic_Nmesh - 1.0
+            print 'Expanding: ', f2, ic_Nmesh, expand_root
+        self.rmin *= 1.0 + expand_root
+        self.rmax *= 1.0 + expand_root
+        self.domain_width = self.rmax - self.rmin
+        self.domain_dims = 1 << self.level
+        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
+        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
+        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
+
+    def get_key(self, iarr, level=None):
+        if level is None:
+            level = self.level
+        i1, i2, i3 = iarr
+        rep1 = np.binary_repr(i1, width=self.level)
+        rep2 = np.binary_repr(i2, width=self.level)
+        rep3 = np.binary_repr(i3, width=self.level)
+        inter = np.zeros(self.level*3, dtype='c')
+        inter[self.dim_slices[0]] = rep1
+        inter[self.dim_slices[1]] = rep2
+        inter[self.dim_slices[2]] = rep3
+        return int(inter.tostring(), 2)
+
+    def get_key_ijk(self, i1, i2, i3, level=None):
+        return self.get_key(np.array([i1, i2, i3]), level=level)
+
+    def get_slice_key(self, ind, dim='r'):
+        slb = np.binary_repr(ind, width=self.level)
+        expanded = np.array([0]*self.level*3, dtype='c')
+        expanded[self.dim_slices[dim]] = slb
+        return int(expanded.tostring(), 2)
+
+    def get_slice_chunks(self, slice_dim, slice_index):
+        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
+        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox_slow(self, ileft, iright):
+        """
+        Given left and right cell indices, return a mask and the
+        set of offsets+lengths into the sdf data.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+        ileft = np.array(ileft)
+        iright = np.array(iright)
+        for i in range(3):
+            left_key = self.get_slice_key(ileft[i], dim=i)
+            right_key= self.get_slice_key(iright[i], dim=i)
+            dim_inds = (self.indexdata['index'] & ~self.masks[i])
+            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
+            del dim_inds
+
+        offsets = self.indexdata['base'][mask]
+        lengths = self.indexdata['len'][mask]
+        return mask, offsets, lengths
+
+    def get_ibbox(self, ileft, iright):
+        """
+        Given left and right cell indices, return the index keys
+        covering that region, wrapping for periodicity.
+        """
+        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
+
+        print 'Getting data from ileft to iright:',  ileft, iright
+
+        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
+                           ileft[1]:iright[1]+1,
+                           ileft[2]:iright[2]+1]
+
+        X = X.ravel()
+        Y = Y.ravel()
+        Z = Z.ravel()
+        # Correct for periodicity
+        X[X < self.domain_buffer] += self.domain_active_dims
+        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Y[Y < self.domain_buffer] += self.domain_active_dims
+        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+        Z[Z < self.domain_buffer] += self.domain_active_dims
+        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
+
+        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
+
+        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
+        indices = indices[indices < self.indexdata['index'].shape[0]]
+        return indices
+
+    def get_bbox(self, left, right):
+        """
+        Given left and right physical positions, convert them to cell
+        indices and return the index keys covering that region.
+        """
+        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
+        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
+
+        return self.get_ibbox(ileft, iright)
+
+    def get_data(self, chunk, fields):
+        data = {}
+        for field in fields:
+            data[field] = self.sdfdata[field][chunk]
+        return data
+
+    def iter_data(self, inds, fields):
+        num_inds = len(inds)
+        num_reads = 0
+        print 'Reading %i chunks' % num_inds
+        i = 0
+        while (i < num_inds):
+            ind = inds[i]
+            base = self.indexdata['base'][ind]
+            length = self.indexdata['len'][ind]
+            # Concatenate aligned reads
+            nexti = i+1
+            combined = 0
+            while nexti < len(inds):
+                nextind = inds[nexti]
+                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
+                if base + length == self.indexdata['base'][nextind]:
+                    length += self.indexdata['len'][nextind]
+                    i += 1
+                    nexti += 1
+                    combined += 1
+                else:
+                    break
+
+            chunk = slice(base, base+length)
+            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
+            num_reads += 1
+            data = self.get_data(chunk, fields)
+            yield data
+            del data
+            i += 1
+        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
+
+    def iter_bbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_bbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def iter_ibbox_data(self, left, right, fields):
+        print 'Loading region from ', left, 'to', right
+        inds = self.get_ibbox(left, right)
+        return self.iter_data(inds, fields)
+
+    def get_contiguous_chunk(self, left_key, right_key, fields):
+        max_key = self.indexdata['index'][-1]
+        if left_key > max_key:
+            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
+        base = self.indexdata['base'][left_key]
+        right_key = min(right_key, self.indexdata['index'][-1])
+        length = self.indexdata['base'][right_key] + \
+            self.indexdata['len'][right_key] - base
+        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
+        return self.get_data(slice(base, base + length), fields)
+
+    def iter_slice_data(self, slice_dim, slice_index, fields):
+        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
+        for off, l in zip(offsets, lengths):
+            data = {}
+            chunk = slice(off, off+l)
+            for field in fields:
+                data[field] = self.sdfdata[field][chunk]
+            yield data
+            del data
+
+    def get_key_bounds(self, level, cell_iarr):
+        """
+        Get the left and right index keys, at the finest level, bounding the requested cell.
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+
+        Returns:
+            lmax_lk, lmax_rk
+        """
+        shift = self.level-level
+        level_buff = 0
+        level_lk = self.get_key(cell_iarr + level_buff)
+        level_rk = self.get_key(cell_iarr + level_buff) + 1
+        lmax_lk = (level_lk << shift*3)
+        lmax_rk = (((level_rk) << shift*3) -1)
+        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
+        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
+        return lmax_lk, lmax_rk
+
+    def get_cell_data(self, level, cell_iarr, fields):
+        """
+        Get data from requested cell
+
+        This uses the raw cell index, and doesn't account for periodicity or
+        an expanded domain (non-power of 2).
+
+        level: int
+            Requested level
+        cell_iarr: array-like, length 3
+            Requested cell from given level.
+        fields: list
+            Requested fields
+
+        Returns:
+            cell_data: dict
+                Dictionary of field_name, field_data
+        """
+        cell_iarr = np.array(cell_iarr)
+        lk, rk = self.get_key_bounds(level, cell_iarr)
+        return self.get_contiguous_chunk(lk, rk, fields)
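
A minimal usage sketch of the index reader above (not part of this changeset):
assuming `midx` is an instance of the index class whose methods appear here,
already wrapped around open `sdfdata` and `indexdata` handles, and assuming
hypothetical field names 'x', 'y', 'z', a bounding-box read might look like
the following.  Note that iter_data() coalesces index entries whose byte
ranges are contiguous on disk, so the number of actual reads can be much
smaller than the number of index entries hit.

    import numpy as np

    left = np.array([0.25, 0.25, 0.25])     # placeholder coordinates
    right = np.array([0.75, 0.75, 0.75])
    fields = ['x', 'y', 'z']                # hypothetical field names

    total = 0
    for chunk in midx.iter_bbox_data(left, right, fields):
        # Each yielded chunk is a dict of field_name -> array covering one
        # contiguous slice of the on-disk particle data.
        total += chunk[fields[0]].size
    print 'Read %i particles from the region' % total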

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/sdf/setup.py
--- /dev/null
+++ b/yt/frontends/sdf/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+import glob
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('sdf', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -22,6 +22,7 @@
 #    config.add_subpackage("artio2")
     config.add_subpackage("pluto")
     config.add_subpackage("ramses")
+    config.add_subpackage("sdf")
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -116,7 +116,7 @@
 except ImportError:
     pass
 
-def get_memory_usage():
+def get_memory_usage(subtract_share = False):
     """
     Returning resident size in megabytes
     """
@@ -130,6 +130,7 @@
         return -1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
+    if subtract_share: resident -= share
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
@@ -681,7 +682,7 @@
     return s
 
 @contextlib.contextmanager
-def memory_checker(interval = 15):
+def memory_checker(interval = 15, dest = None):
     r"""This is a context manager that monitors memory usage.
 
     Parameters
@@ -699,6 +700,8 @@
     ...     del arr
     """
     import threading
+    if dest is None:
+        dest = sys.stdout
     class MemoryChecker(threading.Thread):
         def __init__(self, event, interval):
             self.event = event
@@ -707,7 +710,7 @@
 
         def run(self):
             while not self.event.wait(self.interval):
-                print "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
+                print >> dest, "MEMORY: %0.3e gb" % (get_memory_usage()/1024.)
 
     e = threading.Event()
     mem_check = MemoryChecker(e, interval)
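
A brief illustration of the two yt/funcs.py additions (this sketch is not part
of the diff): get_memory_usage() can now optionally subtract shared pages, and
memory_checker() can send its periodic report somewhere other than stdout, for
example a log file.

    import numpy as np
    from yt.funcs import get_memory_usage, memory_checker

    print get_memory_usage(subtract_share=True)   # resident minus shared, in MB

    with open("memory.log", "w") as log:
        with memory_checker(interval=5, dest=log):
            # "MEMORY: ..." lines are written to memory.log instead of stdout.
            arr = np.zeros((256, 256, 256), dtype="float64")
            del arr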

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -67,7 +67,8 @@
     cdef np.float64_t DLE[3], DRE[3]
     cdef public np.int64_t nocts
     cdef public int num_domains
-    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?)
+    cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = ?,
+                  int max_level = ?)
     cdef int get_root(self, int ind[3], Oct **o)
     cdef Oct **neighbors(self, OctInfo *oinfo, np.int64_t *nneighbors,
                          Oct *o)

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -264,24 +264,25 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef Oct *get(self, np.float64_t ppos[3], OctInfo *oinfo = NULL,
-                  ):
+                  int max_level = 99):
         #Given a floating point position, retrieve the most
         #refined oct at that time
-        cdef int ind[3], level
+        cdef int ind32[3]
         cdef np.int64_t ipos[3]
         cdef np.float64_t dds[3], cp[3], pp[3]
         cdef Oct *cur, *next
         cdef int i
         cur = next = NULL
-        level = -1
+        cdef np.int64_t ind[3], level = -1
         for i in range(3):
             dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
             ind[i] = <np.int64_t> ((ppos[i] - self.DLE[i])/dds[i])
             cp[i] = (ind[i] + 0.5) * dds[i] + self.DLE[i]
             ipos[i] = 0
-        self.get_root(ind, &next)
+            ind32[i] = ind[i]
+        self.get_root(ind32, &next)
         # We want to stop recursing when there's nowhere else to go
-        while next != NULL:
+        while next != NULL and level <= max_level:
             level += 1
             for i in range(3):
                 ipos[i] = (ipos[i] << 1) + ind[i]
@@ -345,7 +346,8 @@
         # grow a stack of parent Octs.
         # Note that in the first iteration, we will just find the up-to-27
         # neighbors, including the main oct.
-        cdef int i, j, k, n, level, ind[3], ii, nfound = 0
+        cdef np.int64_t i, j, k, n, level, ii, nfound = 0, dlevel
+        cdef int ind[3]
         cdef OctList *olist, *my_list
         my_list = olist = NULL
         cdef Oct *cand
@@ -355,6 +357,7 @@
         # ndim is the oct dimensions of the level, not the cell dimensions.
         for i in range(3):
             ndim[i] = <np.int64_t> ((self.DRE[i] - self.DLE[i]) / oi.dds[i])
+            # Here we adjust for oi.dds meaning *cell* width.
             ndim[i] = (ndim[i] >> self.oref)
         my_list = olist = OctList_append(NULL, o)
         for i in range(3):
@@ -379,9 +382,10 @@
                     # correctly, but we might.
                     if cand == NULL: continue
                     for level in range(1, oi.level+1):
+                        dlevel = oi.level - level
                         if cand.children == NULL: break
                         for n in range(3):
-                            ind[n] = (npos[n] >> (oi.level - (level))) & 1
+                            ind[n] = (npos[n] >> dlevel) & 1
                         ii = cind(ind[0],ind[1],ind[2])
                         if cand.children[ii] == NULL: break
                         cand = cand.children[ii]
@@ -908,6 +912,7 @@
                 domain_left_edge, domain_right_edge, partial_coverage,
                  over_refine)
         self.fill_func = oct_visitors.fill_file_indices_rind
+
 cdef OctList *OctList_subneighbor_find(OctList *olist, Oct *top,
                                        int i, int j, int k):
     if top.children == NULL: return olist
@@ -920,7 +925,7 @@
     # For now, we assume we will not be doing this along all three zeros,
     # because that would be pretty tricky.
     if i == j == k == 0: return olist
-    cdef int n[3], ind[3], off[3][2], ii, ij, ik, ci
+    cdef np.int64_t n[3], ind[3], off[3][2], ii, ij, ik, ci
     ind[0] = 1 - i
     ind[1] = 1 - j
     ind[2] = 1 - k
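
The signature change to get() above adds an optional max_level cap, so a
caller can ask for the oct containing a position without descending all the
way to the leaf octs.  A rough pure-Python sketch of the capped descent (the
committed version is the Cython method above; pick_child() here is purely
illustrative):

    def get_capped(root, pick_child, max_level=99):
        # Walk toward the most refined oct, but stop once max_level is reached.
        node, nxt, level = None, root, -1
        while nxt is not None and level <= max_level:
            level += 1
            node = nxt
            nxt = pick_child(node, level)   # child holding the position, or None
        return node, level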

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -36,6 +36,9 @@
     np.int64_t pn       # Particle number
     np.float64_t r2     # radius**2
 
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
 cdef inline np.float64_t r2dist(np.float64_t ppos[3],
                                 np.float64_t cpos[3],
                                 np.float64_t DW[3],

diff -r 869468b460381a8820464c5e63c7598303dc0ad1 -r 9a0500caa575676cb27e0d88550d473937388b70 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -28,8 +28,9 @@
 from .amr_kdtools cimport _find_node, Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
+import sys
 
-cdef ContourID *contour_create(np.int64_t contour_id,
+cdef inline ContourID *contour_create(np.int64_t contour_id,
                                ContourID *prev = NULL):
     node = <ContourID *> malloc(sizeof(ContourID))
     #print "Creating contour with id", contour_id
@@ -40,12 +41,12 @@
     if prev != NULL: prev.next = node
     return node
 
-cdef void contour_delete(ContourID *node):
+cdef inline void contour_delete(ContourID *node):
     if node.prev != NULL: node.prev.next = node.next
     if node.next != NULL: node.next.prev = node.prev
     free(node)
 
-cdef ContourID *contour_find(ContourID *node):
+cdef inline ContourID *contour_find(ContourID *node):
     cdef ContourID *temp, *root
     root = node
     # First we find the root
@@ -61,7 +62,7 @@
         node = temp
     return root
 
-cdef void contour_union(ContourID *node1, ContourID *node2):
+cdef inline void contour_union(ContourID *node1, ContourID *node2):
     node1 = contour_find(node1)
     node2 = contour_find(node2)
     if node1.contour_id < node2.contour_id:
@@ -69,7 +70,7 @@
     elif node2.contour_id < node1.contour_id:
         node1.parent = node2
 
-cdef int candidate_contains(CandidateContour *first,
+cdef inline int candidate_contains(CandidateContour *first,
                             np.int64_t contour_id,
                             np.int64_t join_id = -1):
     while first != NULL:
@@ -78,7 +79,7 @@
         first = first.next
     return 0
 
-cdef CandidateContour *candidate_add(CandidateContour *first,
+cdef inline CandidateContour *candidate_add(CandidateContour *first,
                                      np.int64_t contour_id,
                                      np.int64_t join_id = -1):
     cdef CandidateContour *node
@@ -617,7 +618,7 @@
 
 cdef class ParticleContourTree(ContourTree):
     cdef np.float64_t linking_length, linking_length2
-    cdef np.float64_t DW[3]
+    cdef np.float64_t DW[3], DLE[3], DRE[3]
     cdef bint periodicity[3]
 
     def __init__(self, linking_length):
@@ -625,6 +626,7 @@
         self.linking_length2 = linking_length * linking_length
         self.first = self.last = NULL
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def identify_contours(self, OctreeContainer octree,
@@ -633,7 +635,7 @@
                                 np.ndarray[np.int64_t, ndim=1] particle_ids,
                                 int domain_id = -1, int domain_offset = 0,
                                 periodicity = (True, True, True),
-                                minimum_count = 8):
+                                int minimum_count = 8):
         cdef np.ndarray[np.int64_t, ndim=1] pdoms, pcount, pind, doff
         cdef np.float64_t pos[3]
         cdef Oct *oct = NULL, **neighbors = NULL
@@ -641,22 +643,27 @@
         cdef ContourID *c0, *c1
         cdef np.int64_t moff = octree.get_domain_offset(domain_id + domain_offset)
         cdef np.int64_t i, j, k, n, nneighbors, pind0, offset
+        cdef int counter = 0
         pcount = np.zeros_like(dom_ind)
         doff = np.zeros_like(dom_ind) - 1
         # First, we find the oct for each particle.
-        pdoms = np.zeros(positions.shape[0], dtype="int64") - 1
+        pdoms = np.zeros(positions.shape[0], dtype="int64")
+        pdoms -= 1
         cdef np.int64_t *pdom = <np.int64_t*> pdoms.data
         # First we allocate our container
         cdef ContourID **container = <ContourID**> malloc(
             sizeof(ContourID*) * positions.shape[0])
         for i in range(3):
             self.DW[i] = (octree.DRE[i] - octree.DLE[i])
+            self.DLE[i] = octree.DLE[i]
+            self.DRE[i] = octree.DRE[i]
             self.periodicity[i] = periodicity[i]
         for i in range(positions.shape[0]):
+            counter += 1
             container[i] = NULL
             for j in range(3):
                 pos[j] = positions[i, j]
-            oct = octree.get(pos)
+            oct = octree.get(pos, NULL)
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             offset = oct.domain_ind - moff
@@ -670,14 +677,18 @@
             offset = pdoms[pind[i]]
             if doff[offset] < 0:
                 doff[offset] = i
+        del pdoms
         cdef int nsize = 27
         cdef np.int64_t *nind = <np.int64_t *> malloc(sizeof(np.int64_t)*nsize)
-        cdef int counter = 0
+        counter = 0
+        cdef np.int64_t frac = <np.int64_t> (doff.shape[0] / 20.0)
+        print >> sys.stderr, "Will be outputting every", frac
+        cdef int inside, skip_early
         for i in range(doff.shape[0]):
+            if counter >= frac:
+                counter = 0
+                print >> sys.stderr, "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             counter += 1
-            if counter == 10000:
-                counter = 0
-                #print "FOF-ing % 5.1f%% done" % ((100.0 * i)/doff.size)
             # Any particles found for this oct?
             if doff[i] < 0: continue
             offset = pind[doff[i]]
@@ -703,7 +714,8 @@
                     break
             # This is allocated by the neighbors function, so we deallocate it.
             free(neighbors)
-            # Now we look at each particle.
+            # We might know that all our internal particles are linked.
+            # Otherwise, we look at each particle.
             for j in range(pcount[i]):
                 # Note that this offset is the particle index
                 pind0 = pind[doff[i] + j]
@@ -721,28 +733,32 @@
                                         offset, pind0, 
                                         doff[i] + j)
         cdef np.ndarray[np.int64_t, ndim=1] contour_ids
-        contour_ids = -1 * np.ones(positions.shape[0], dtype="int64")
+        contour_ids = np.ones(positions.shape[0], dtype="int64")
+        contour_ids *= -1
         # Sort on our particle IDs.
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 c0 = contour_find(c1)
-                contour_ids[pind[poffset]] = c0.contour_id
+                contour_ids[offset] = c0.contour_id
                 c0.count += 1
         for i in range(doff.shape[0]):
             if doff[i] < 0: continue
             for j in range(pcount[i]):
-                poffset = doff[i] + j
-                c1 = container[poffset]
+                offset = pind[doff[i] + j]
+                c1 = container[offset]
                 if c1 == NULL: continue
                 c0 = contour_find(c1)
+                offset = pind[offset]
                 if c0.count < minimum_count:
-                    contour_ids[pind[poffset]] = -1
+                    contour_ids[offset] = -1
         free(container)
+        del pind
         return contour_ids
 
+    @cython.cdivision(True)
     @cython.boundscheck(False)
     @cython.wraparound(False)
     cdef void link_particles(self, ContourID **container, 
@@ -753,7 +769,8 @@
                                    np.int64_t pind0,
                                    np.int64_t poffset):
         # Now we look at each particle and evaluate it
-        cdef np.float64_t pos0[3], pos1[3], d
+        cdef np.float64_t pos0[3], pos1[3], edges[2][3]
+        cdef int link
         cdef ContourID *c0, *c1
         cdef np.int64_t pind1
         cdef int i, j, k
@@ -765,23 +782,62 @@
             self.last = c0
             if self.first == NULL:
                 self.first = c0
-        c0 = contour_find(c0)
-        container[pind0] = c0
+        c0 = container[pind0] = contour_find(c0)
         for i in range(3):
+            # We make a very conservative guess here about the edges.
             pos0[i] = positions[pind0*3 + i]
+            edges[0][i] = pos0[i] - self.linking_length*1.01
+            edges[1][i] = pos0[i] + self.linking_length*1.01
+            if edges[0][i] < self.DLE[i] or edges[0][i] > self.DRE[i]:
+                # We skip this one, since we're close to the boundary
+                edges[0][i] = -1e30
+                edges[1][i] = 1e30
+        # Lets set up some bounds for the particles.  Maybe we can get away
+        # with reducing our number of calls to r2dist_early.
         for i in range(pcount):
             pind1 = pind[noffset + i]
             if pind1 == pind0: continue
+            c1 = container[pind1]
+            if c1 != NULL and c1.contour_id == c0.contour_id:
+                # Already linked.
+                continue
             for j in range(3):
                 pos1[j] = positions[pind1*3 + j]
-            d = r2dist(pos0, pos1, self.DW, self.periodicity)
-            if d > self.linking_length2:
-                continue
-            c1 = container[pind1]
+            link = r2dist_early(pos0, pos1, self.DW, self.periodicity,
+                                self.linking_length2, edges)
+            if link == 0: continue
             if c1 == NULL:
-                container[pind1] = c1 = contour_create(
-                    noffset + i, self.last)
-            contour_union(c0, c1)
-            c0 = c1 = contour_find(c0)
-            container[pind1] = c0
-            container[pind0] = c0
+                container[pind1] = c0
+            elif c0.contour_id != c1.contour_id:
+                contour_union(c0, c1)
+                c0 = container[pind1] = container[pind0] = contour_find(c0)
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef inline int r2dist_early(np.float64_t ppos[3],
+                             np.float64_t cpos[3],
+                             np.float64_t DW[3],
+                             bint periodicity[3],
+                             np.float64_t max_r2,
+                             np.float64_t edges[2][3]):
+    cdef int i
+    cdef np.float64_t r2, DR
+    r2 = 0.0
+    cdef int inside = 0
+    for i in range(3):
+        if cpos[i] < edges[0][i]:
+            return 0
+        if cpos[i] > edges[1][i]:
+            return 0
+    for i in range(3):
+        DR = (ppos[i] - cpos[i])
+        if not periodicity[i]:
+            pass
+        elif (DR > DW[i]/2.0):
+            DR -= DW[i]
+        elif (DR < -DW[i]/2.0):
+            DR += DW[i]
+        r2 += DR * DR
+        if r2 > max_r2: return 0
+    return 1
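
The FOF rework above hinges on r2dist_early(): a candidate particle is first
rejected with a cheap bounding-box test against `edges` (a box padded by
roughly 1.01 linking lengths around the particle, widened to the whole domain
when it touches a boundary), and only then is the periodic squared distance
accumulated, bailing out as soon as it exceeds linking_length**2.  A
plain-Python rendering of the same logic (names are illustrative; the
committed version is the inlined Cython function above):

    def r2dist_early_py(ppos, cpos, DW, periodicity, max_r2, edges):
        # Cheap rejection: candidate outside the padded box around ppos.
        for i in range(3):
            if cpos[i] < edges[0][i] or cpos[i] > edges[1][i]:
                return False
        r2 = 0.0
        for i in range(3):
            dr = ppos[i] - cpos[i]
            if periodicity[i]:
                # Wrap the separation into the nearest periodic image.
                if dr > DW[i] / 2.0:
                    dr -= DW[i]
                elif dr < -DW[i] / 2.0:
                    dr += DW[i]
            r2 += dr * dr
            if r2 > max_r2:
                return False   # already farther apart than the linking length
        return True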

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


