[yt-svn] commit/yt: MatthewTurk: Merged in darkskysims/yt-dark/yt-3.0 (pull request #955)

commits-noreply at bitbucket.org
Thu Jul 17 06:16:42 PDT 2014


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/990c27c2dd3c/
Changeset:   990c27c2dd3c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-17 15:16:31
Summary:     Merged in darkskysims/yt-dark/yt-3.0 (pull request #955)

SDF frontend and Dark Sky Sims work
Affected #:  25 files

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -32,11 +32,15 @@
     PyArrayObject    *xpos, *ypos, *zpos;
     xpos=ypos=zpos=NULL;
     float link = 0.2;
+    float fPeriod[3] = {1.0, 1.0, 1.0};
+    int nMembers = 8;
 
     int i;
 
-    if (!PyArg_ParseTuple(args, "OOO|f",
-        &oxpos, &oypos, &ozpos, &link))
+    if (!PyArg_ParseTuple(args, "OOO|f(fff)i",
+        &oxpos, &oypos, &ozpos, &link,
+        &fPeriod[0], &fPeriod[1], &fPeriod[2],
+        &nMembers))
     return PyErr_Format(_FOFerror,
             "EnzoFOF: Invalid parameters.");
 
@@ -74,8 +78,8 @@
 
 	KDFOF kd;
 	int nBucket,j;
-	float fPeriod[3],fEps;
-	int nMembers,nGroup,bVerbose=1;
+	float fEps;
+	int nGroup,bVerbose=1;
 	int sec,usec;
 	
 	/* linking length */
@@ -83,9 +87,6 @@
 	fEps = link;
 	
 	nBucket = 16;
-	nMembers = 8;
-
-	for (j=0;j<3;++j) fPeriod[j] = 1.0;
 
     /* initialize the kd FOF structure */
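
The new parse format "OOO|f(fff)i" above makes the linking length, the
per-axis period, and the minimum group size optional arguments of the
binding. A minimal usage sketch, assuming the module exposes its entry
point as RunFOF (the name is not shown in this hunk):

import numpy as np
from yt.analysis_modules.halo_finding.fof.EnzoFOF import RunFOF

x, y, z = (np.random.random(1000).astype("float32") for _ in range(3))
# positional arrays, then the new optionals: linking length,
# (period_x, period_y, period_z), minimum members per group
groups = RunFOF(x, y, z, 0.2, (1.0, 1.0, 1.0), 8)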
 

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -32,30 +32,33 @@
         float pos[6]
         float corevel[3]
         float bulkvel[3]
-        float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+        float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
         float J[3]
         float energy, spin
         float alt_m[4]
         float Xoff, Voff, b_to_a, c_to_a
         float A[3]
-        float bullock_spin, kin_to_pot
+        float b_to_a2, c_to_a2
+        float A2[3]
+        float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-        float min_pos_err, min_vel_err, min_bulkvel_err
+        float min_pos_err, min_vel_err, min_bulkvel_err, _pad
 
-ctypedef packed struct haloflat:
+ctypedef struct haloflat:
     np.int64_t id
-    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float pos_x, pos_y, pos_z, vel_x, vel_y, vel_z
     float corevel_x, corevel_y, corevel_z
     float bulkvel_x, bulkvel_y, bulkvel_z
-    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-    float J1, J2, J3
+    float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
+    float Jx, Jy, Jz
     float energy, spin
     float alt_m1, alt_m2, alt_m3, alt_m4
     float Xoff, Voff, b_to_a, c_to_a
-    float A1, A2, A3
-    float bullock_spin, kin_to_pot
+    float Ax, Ay, Az
+    float b_to_a2, c_to_a2, A2x, A2y, A2z
+    float bullock_spin, kin_to_pot, m_pe_b, m_pe_d
     np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-    float min_pos_err, min_vel_err, min_bulkvel_err
+    float min_pos_err, min_vel_err, min_bulkvel_err, _pad
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
@@ -68,6 +71,10 @@
     void free_particle_copies() nogil
     void alloc_particle_copies(np.int64_t total_copies) nogil
     void free_halos() nogil
+    float max_halo_radius(halo *h) nogil
+
+# global in groupies.c
+cdef extern double particle_thresh_dens[5]
 
 # For outputting halos, rockstar style
 
@@ -80,6 +87,9 @@
     void setup_config() nogil
     void output_config(char *fn) nogil
 
+cdef import from "distance.h":
+    void init_cosmology() nogil
+
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
     # definition of their variables.
@@ -87,13 +97,21 @@
     np.float64_t PARTICLE_MASS
 
     char *MASS_DEFINITION
+    char *MASS_DEFINITION2
+    char *MASS_DEFINITION3
+    char *MASS_DEFINITION4
+    char *MASS_DEFINITION5
+    np.int64_t STRICT_SO_MASSES
     np.int64_t MIN_HALO_OUTPUT_SIZE
     np.float64_t FORCE_RES
+    np.float64_t FORCE_RES_PHYS_MAX
 
     np.float64_t SCALE_NOW
     np.float64_t h0
     np.float64_t Ol
     np.float64_t Om
+    np.float64_t W0
+    np.float64_t WA
 
     np.int64_t GADGET_ID_BYTES
     np.float64_t GADGET_MASS_CONVERSION
@@ -111,6 +129,7 @@
     char *INBASE
     char *FILENAME
     np.int64_t STARTING_SNAP
+    np.int64_t RESTART_SNAP
     np.int64_t NUM_SNAPS
     np.int64_t NUM_BLOCKS
     np.int64_t NUM_READERS
@@ -130,10 +149,13 @@
     np.int64_t FULL_PARTICLE_CHUNKS
     char *BGC2_SNAPNAMES
 
+    np.int64_t SHAPE_ITERATIONS
+    np.int64_t WEIGHTED_SHAPES
     np.int64_t BOUND_PROPS
     np.int64_t BOUND_OUT_TO_HALO_EDGE
     np.int64_t DO_MERGER_TREE_ONLY
     np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t EXACT_LL_CALC
     np.float64_t TRIM_OVERLAP
     np.float64_t ROUND_AFTER_TRIM
     np.int64_t LIGHTCONE
@@ -147,20 +169,20 @@
 
     np.int64_t SWAP_ENDIANNESS
     np.int64_t GADGET_VARIANT
+    np.int64_t ART_VARIANT
 
     np.float64_t FOF_FRACTION
     np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INITIAL_METRIC_SCALING
     np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
-    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
     np.int64_t TEMPORAL_HALO_FINDING
     np.int64_t MIN_HALO_PARTICLES
     np.float64_t UNBOUND_THRESHOLD
     np.int64_t ALT_NFW_METRIC
+    np.int64_t EXTRA_PROFILING
 
     np.int64_t TOTAL_PARTICLES
     np.float64_t BOX_SIZE
-    np.int64_t OUTPUT_HMAD
-    np.int64_t OUTPUT_PARTICLES
     np.int64_t OUTPUT_LEVELS
     np.float64_t DUMP_PARTICLES[3]
 
@@ -179,16 +201,20 @@
         self.pf = pf
 
     def setup_rockstar(self,
-                        particle_mass,
-                        int periodic = 1, force_res=None,
-                        int min_halo_size = 25, outbase = "None",
-                        callbacks = None):
+                       particle_mass,
+                       int periodic = 1, force_res = None,
+                       int min_halo_size = 25, outbase = "None",
+                       write_config = False,  exact_ll_calc = False,
+                       lightcone = False, lightcone_origin = [0,0,0],
+                       callbacks = None):
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
-        
+        global OUTPUT_FORMAT, EXTRA_PROFILING
+        global STRICT_SO_MASSES, EXACT_LL_CALC
+        global LIGHTCONE, LIGHTCONE_ORIGIN
 
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
@@ -197,7 +223,7 @@
         
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
-        OUTPUT_FORMAT = "ASCII"
+        OUTPUT_FORMAT = "BOTH"
         MIN_HALO_OUTPUT_SIZE=min_halo_size
         
         pf = self.pf
@@ -213,27 +239,95 @@
             #workaround is to make a new directory
             OUTBASE = outbase 
 
-
         PARTICLE_MASS = particle_mass.in_units('Msun/h')
         PERIODIC = periodic
         BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
 
+        if exact_ll_calc: EXACT_LL_CALC = 1
+        STRICT_SO_MASSES = 1    # presumably unused in our code path
+        EXTRA_PROFILING = 0
+
+        if lightcone:
+            LIGHTCONE = 1
+            LIGHTCONE_ORIGIN[0] = lightcone_origin[0]
+            LIGHTCONE_ORIGIN[1] = lightcone_origin[1]
+            LIGHTCONE_ORIGIN[2] = lightcone_origin[2]
+
         # Set up the configuration options
         setup_config()
 
         # Needs to be called so rockstar can use the particle mass parameter
         # to calculate virial quantities properly
+        init_cosmology()
         calc_mass_definition()
 
-    def output_halos(self):
-        output_halos(0, 0, 0, NULL) 
+        if write_config: output_config(NULL)
+
+    def particle_thresh_dens(self):
+        cdef np.ndarray d = np.array([particle_thresh_dens[0],
+                                      particle_thresh_dens[1],
+                                      particle_thresh_dens[2],
+                                      particle_thresh_dens[3],
+                                      particle_thresh_dens[4]],
+                                     dtype=np.float64)
+        return d
+
+    def assign_masses(self, h, np.ndarray[np.float32_t, ndim=1] r, float force_res, \
+                      double pmass, np.ndarray[np.float64_t, ndim=1] dens_thresh):
+        """Assign spherical overdensity masses to halos.  r must be sorted"""
+        cdef double total_mass = 0.0
+        cdef double m = 0.0
+        cdef double alt_m1 = 0.0
+        cdef double alt_m2 = 0.0
+        cdef double alt_m3 = 0.0
+        cdef double alt_m4 = 0.0
+        cdef double rr
+        cdef double cur_dens
+        for rr in r:
+            if rr < force_res: rr = force_res
+            total_mass += pmass
+            cur_dens = total_mass/(rr*rr*rr)
+            if cur_dens > dens_thresh[0]: m = total_mass
+            if cur_dens > dens_thresh[1]: alt_m1 = total_mass
+            if cur_dens > dens_thresh[2]: alt_m2 = total_mass
+            if cur_dens > dens_thresh[3]: alt_m3 = total_mass
+            if cur_dens > dens_thresh[4]: alt_m4 = total_mass
+            if cur_dens <= dens_thresh[1]:
+                break
+        h['m'] = m
+        h['alt_m1'] = alt_m1
+        h['alt_m2'] = alt_m2
+        h['alt_m3'] = alt_m3
+        h['alt_m4'] = alt_m4
+        # if cur_dens > dens_thresh[1]:
+            # This is usually a subhalo problem, and we don't know who is a subhalo
+            # print >> sys.stderr, "r too small in assign_masses, m200b will be wrong!"
+            # print >> sys.stderr, "edge_dens/dens_thresh[1] %.3f" % (cur_dens/dens_thresh[1])
+
+    def max_halo_radius(self, int i):
+        return max_halo_radius(&halos[i])
+
+    def output_halos(self, np.int64_t idoffset, np.ndarray[np.float32_t, ndim=2] bbox):
+        cdef float bounds[6]
+        if idoffset is None: idoffset = 0
+        if bbox is None:
+            output_halos(idoffset, 0, 0, NULL) 
+        else:
+            for i in range(3):
+                bounds[i] = bbox[i,0]
+                bounds[i+3] = bbox[i,1]
+            output_halos(idoffset, 0, 0, bounds) 
+
+    def output_config(self):
+        output_config(NULL) 
 
     def return_halos(self):
         cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
-        rv = np.asarray(haloview).copy()
+        return np.asarray(haloview)
+
+    def finish(self):
         rockstar_cleanup()
         free_halos()
-        return rv
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -242,6 +336,7 @@
                                 np.ndarray[anyfloat, ndim=2] pos,
                                 np.ndarray[anyfloat, ndim=2] vel):
 
+        verbose = False
         # Define fof object
 
         # Find number of particles
@@ -271,7 +366,7 @@
                 j += 1
         if j > max_count:
             max_count = j
-        #print >> sys.stderr, "Most frequent occurrance: %s" % max_count
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
         fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
         j = 0
         cdef int counter = 0, ndone = 0
@@ -300,7 +395,7 @@
                 pcounts[ndone] = fof_obj.num_p
                 counter += 1
                 ndone += 1
-                if counter == frac:
+                if verbose and counter == frac:
                     print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
                         (100.0 * ndone)/pcounts.size,
                         fof_obj.particles[0].pos[2],
@@ -311,4 +406,5 @@
                 # Now we reset
                 fof_obj.num_p = j = 0
         free(fof_obj.particles)
+        global_particles = NULL
         return pcounts
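
For reference, the spherical-overdensity loop in assign_masses above
boils down to this pure-Python sketch (same algorithm without the Cython
typing; r sorted ascending, dens_thresh the five density thresholds):

import numpy as np

def so_masses(r, pmass, force_res, dens_thresh):
    masses = np.zeros(5)                # m, alt_m1 .. alt_m4
    total_mass = 0.0
    for rr in r:
        rr = max(rr, force_res)         # soften below the force resolution
        total_mass += pmass
        cur_dens = total_mass / (rr * rr * rr)
        for i in range(5):
            if cur_dens > dens_thresh[i]:
                masses[i] = total_mass
        if cur_dens <= dens_thresh[1]:  # running density fell below
            break                       # dens_thresh[1]: stop, as above
    return masses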

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -328,8 +328,10 @@
     def morton(self):
         self.validate()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         morton = compute_morton(
             self[:,0], self[:,1], self[:,2],
             LE, RE)
@@ -340,8 +342,10 @@
         mi = self.morton
         mi.sort()
         eps = np.finfo(self.dtype).eps
-        LE = self.min(axis=0) - eps * self.uq
-        RE = self.max(axis=0) + eps * self.uq
+        LE = self.min(axis=0)
+        LE -= np.abs(LE) * eps
+        RE = self.max(axis=0)
+        RE += np.abs(RE) * eps
         octree = ParticleOctreeContainer(dims, LE, RE, 
             over_refine = over_refine_factor)
         octree.n_ref = n_ref
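
The epsilon change above is not cosmetic: for edges far from zero an
absolute machine epsilon is below the floating-point spacing and rounds
away, while a relative one survives. A quick demonstration:

import numpy as np

eps = np.finfo("float64").eps
LE = np.array([1.0e6])
print(LE - eps == LE)               # [ True]: absolute padding rounds away
print(LE - np.abs(LE) * eps == LE)  # [False]: relative padding survives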

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -110,7 +110,7 @@
                 data[field][ub] /= weight_data[field][ub]
                 std_data[field][ub] /= weight_data[field][ub]
             self[field] = data[field]
-            #self["%s_std" % field] = np.sqrt(std_data[field])
+            self["%s_std" % field] = np.sqrt(std_data[field])
         self["UsedBins"] = used
 
         if fractional:
@@ -852,7 +852,7 @@
         if self.weight_field is not None:
             weight_data = chunk[self.weight_field]
         else:
-            weight_data = np.ones(chunk.ires.size, dtype="float64")
+            weight_data = np.ones(filter.size, dtype="float64")
         weight_data = weight_data[filter]
         # So that we can pass these into
         return arr, weight_data, bin_fields
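
With the standard-deviation line restored in the first hunk, every
weighted profile field gains a companion "<field>_std" array. Assuming
the accumulator (not shown in this hunk) holds the weighted sum of
squared deviations, the stored quantity per bin b is

    \sigma_b = \sqrt{ \sum_{i \in b} w_i (x_i - \bar{x}_b)^2 / \sum_{i \in b} w_i }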

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/fields/field_functions.py
--- a/yt/fields/field_functions.py
+++ b/yt/fields/field_functions.py
@@ -34,7 +34,7 @@
             np.subtract(r, DW[i], rdw)
             np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
-        np.power(r, 2.0, r)
+        np.multiply(r, r, r)
         np.add(radius2, r, radius2)
         if data.pf.dimensionality < i+1:
             break
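
Squaring via np.multiply above avoids the general pow() kernel that
np.power uses for a float exponent, with no change in the result. A
minimal check (relative speed varies by NumPy build):

import numpy as np

r = np.random.random(10**6)
out = np.empty_like(r)
np.power(r, 2.0, out)     # general float exponentiation
np.multiply(r, r, out)    # same result from a single multiply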

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -90,6 +90,16 @@
                    "particle_position", "particle_velocity",
                    self)
         else:
+            # We need to check to make sure that there's a "known field" that
+            # overlaps with one of the vector fields.  For instance, if we are
+            # in the Stream frontend, and we have a set of scalar position
+            # fields, they will overlap with -- and be overridden by -- the
+            # "known" vector field that the frontend creates.  So the easiest
+            # thing to do is to simply remove the on-disk field (which doesn't
+            # exist) and replace it with a derived field.
+            if (ptype, "particle_position") in self and \
+                 self[ptype, "particle_position"]._function == NullFunc:
+                self.pop((ptype, "particle_position"))
             particle_vector_functions(ptype,
                     ["particle_position_%s" % ax for ax in 'xyz'],
                     ["particle_velocity_%s" % ax for ax in 'xyz'],

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -72,6 +72,7 @@
 
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
     orig = set(registry.keys())
+    ptype_dn = ptype.replace("_","\/").title()
     def particle_count(field, data):
         pos = data[ptype, coord_name]
         d = data.deposit(pos, method = "count")
@@ -81,7 +82,7 @@
     registry.add_field(("deposit", "%s_count" % ptype),
              function = particle_count,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Count}" % ptype)
+             display_name = "\\mathrm{%s Count}" % ptype_dn)
 
     def particle_mass(field, data):
         pos = data[ptype, coord_name]
@@ -92,7 +93,7 @@
     registry.add_field(("deposit", "%s_mass" % ptype),
              function = particle_mass,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Mass}" % ptype,
+             display_name = "\\mathrm{%s Mass}" % ptype_dn,
              units = "g")
              
     def particle_density(field, data):
@@ -108,7 +109,7 @@
     registry.add_field(("deposit", "%s_density" % ptype),
              function = particle_density,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s Density}" % ptype,
+             display_name = "\\mathrm{%s Density}" % ptype_dn,
              units = "g/cm**3")
 
     def particle_cic(field, data):
@@ -121,7 +122,7 @@
     registry.add_field(("deposit", "%s_cic" % ptype),
              function = particle_cic,
              validators = [ValidateSpatial()],
-             display_name = "\\mathrm{%s CIC Density}" % ptype,
+             display_name = "\\mathrm{%s CIC Density}" % ptype_dn,
              units = "g/cm**3")
 
     # Now some translation functions.
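
For reference, the ptype_dn transform introduced at the top of this hunk
turns a raw particle type name into a title-cased display name for the
LaTeX labels below:

ptype = "dark_matter"
ptype_dn = ptype.replace("_", "\/").title()
print(ptype_dn)                          # Dark\/Matter
print("\\mathrm{%s Count}" % ptype_dn)   # \mathrm{Dark\/Matter Count}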

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/halo_catalogs/rockstar/definitions.py
--- a/yt/frontends/halo_catalogs/rockstar/definitions.py
+++ b/yt/frontends/halo_catalogs/rockstar/definitions.py
@@ -30,7 +30,9 @@
     ("box_size", 1, "f"),
     ("particle_mass", 1, "f"),
     ("particle_type", 1, "q"),
-    ("unused", BINARY_HEADER_SIZE - 4*12 - 8*6, "c")
+    ("format_revision", 1, "i"),
+    ("version", 12, "c"),
+    ("unused", BINARY_HEADER_SIZE - 4*12 - 4 - 8*6 - 12, "c")
 )
 
 halo_dt = np.dtype([
@@ -38,17 +40,17 @@
     ('particle_position_x', np.float32),
     ('particle_position_y', np.float32),
     ('particle_position_z', np.float32),
-    ('particle_mposition_x', np.float32),
-    ('particle_mposition_y', np.float32),
-    ('particle_mposition_z', np.float32),
     ('particle_velocity_x', np.float32),
     ('particle_velocity_y', np.float32),
     ('particle_velocity_z', np.float32),
-    ('particle_bvelocity_x', np.float32),
-    ('particle_bvelocity_y', np.float32),
-    ('particle_bvelocity_z', np.float32),
+    ('particle_corevel_x', np.float32),
+    ('particle_corevel_y', np.float32),
+    ('particle_corevel_z', np.float32),
+    ('particle_bulkvel_x', np.float32),
+    ('particle_bulkvel_y', np.float32),
+    ('particle_bulkvel_z', np.float32),
     ('particle_mass', np.float32),
-    ('virial_radius', np.float32),
+    ('radius', np.float32),
     ('child_r', np.float32),
     ('vmax_r', np.float32),
     ('mgrav', np.float32),
@@ -57,9 +59,9 @@
     ('rs', np.float32),
     ('klypin_rs', np.float32),
     ('vrms', np.float32),
-    ('JX', np.float32),
-    ('JY', np.float32),
-    ('JZ', np.float32),
+    ('Jx', np.float32),
+    ('Jy', np.float32),
+    ('Jz', np.float32),
     ('energy', np.float32),
     ('spin', np.float32),
     ('alt_m1', np.float32),
@@ -73,8 +75,15 @@
     ('Ax', np.float32),
     ('Ay', np.float32),
     ('Az', np.float32),
+    ('b_to_a2', np.float32),
+    ('c_to_a2', np.float32),
+    ('A2x', np.float32),
+    ('A2y', np.float32),
+    ('A2z', np.float32),
     ('bullock_spin', np.float32),
     ('kin_to_pot', np.float32),
+    ('m_pe_b', np.float32),
+    ('m_pe_d', np.float32),
     ('num_p', np.int64),
     ('num_child_particles', np.int64),
     ('p_start', np.int64),
@@ -84,8 +93,7 @@
     ('min_pos_err', np.float32),
     ('min_vel_err', np.float32),
     ('min_bulkvel_err', np.float32),
-    ('padding2', np.float32),
-])
+], align=True)
 
 particle_dt = np.dtype([
     ('particle_identifier', np.int64),
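
The new align=True is what lets the explicit padding2 float go away:
numpy inserts the same padding a C compiler would, so halo_dt lines up
with rockstar's halo struct. A minimal illustration of the effect:

import numpy as np

packed  = np.dtype([('id', np.int64), ('m', np.float32)])
aligned = np.dtype([('id', np.int64), ('m', np.float32)], align=True)
print("%d %d" % (packed.itemsize, aligned.itemsize))  # 12 16: tail padding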

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/halo_catalogs/rockstar/fields.py
--- a/yt/frontends/halo_catalogs/rockstar/fields.py
+++ b/yt/frontends/halo_catalogs/rockstar/fields.py
@@ -40,17 +40,17 @@
         ("particle_position_x", (p_units, [], None)),
         ("particle_position_y", (p_units, [], None)),
         ("particle_position_z", (p_units, [], None)),
-        ("particle_mposition_x", (p_units, [], None)),
-        ("particle_mposition_y", (p_units, [], None)),
-        ("particle_mposition_z", (p_units, [], None)),
         ("particle_velocity_x", (v_units, [], None)),
         ("particle_velocity_y", (v_units, [], None)),
         ("particle_velocity_z", (v_units, [], None)),
-        ("particle_bvelocity_x", (v_units, [], None)),
-        ("particle_bvelocity_y", (v_units, [], None)),
-        ("particle_bvelocity_z", (v_units, [], None)),
-        ("particle_mass", (m_units, [], "Virial Mass")),
-        ("virial_radius", (r_units, [], "Virial Radius")),
+        ("particle_corevel_x", (v_units, [], None)),
+        ("particle_corevel_y", (v_units, [], None)),
+        ("particle_corevel_z", (v_units, [], None)),
+        ("particle_bulkvel_x", (v_units, [], None)),
+        ("particle_bulkvel_y", (v_units, [], None)),
+        ("particle_bulkvel_z", (v_units, [], None)),
+        ("particle_mass", (m_units, [], "Mass")),
+        ("virial_radius", (r_units, [], "Radius")),
         ("child_r", (r_units, [], None)),
         ("vmax_r", (v_units, [], None)),
     # These fields I don't have good definitions for yet.
@@ -60,9 +60,9 @@
     ('rs', (r_units, [], "R_s")),
     ('klypin_rs', (r_units, [], "Klypin R_s")),
     ('vrms', (v_units, [], "V_{rms}")),
-    ('JX', ("", [], "J_x")),
-    ('JY', ("", [], "J_y")),
-    ('JZ', ("", [], "J_z")),
+    ('Jx', ("", [], "J_x")),
+    ('Jy', ("", [], "J_y")),
+    ('Jz', ("", [], "J_z")),
     ('energy', ("", [], None)),
     ('spin', ("", [], "Spin Parameter")),
     ('alt_m1', (m_units, [], None)),
@@ -76,8 +76,15 @@
     ('Ax', ("", [], "A_x")),
     ('Ay', ("", [], "A_y")),
     ('Az', ("", [], "A_z")),
+    ('b_to_a2', ("", [], None)),
+    ('c_to_a2', ("", [], None)),
+    ('A2x', ("", [], "A2_x")),
+    ('A2y', ("", [], "A2_y")),
+    ('A2z', ("", [], "A2_z")),
     ('bullock_spin', ("", [], "Bullock Spin Parameter")),
     ('kin_to_pot', ("", [], "Kinetic to Potential")),
+    ('m_pe_b', ("", [], None)),
+    ('m_pe_d', ("", [], None)),
     ('num_p', ("", [], "Number of Particles")),
     ('num_child_particles', ("", [], "Number of Child Particles")),
     ('p_start', ("", [], None)),

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -29,17 +29,31 @@
     ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
-from yt.utilities.physical_constants import \
-    G, \
+from yt.utilities.physical_ratios import \
     cm_per_kpc, \
-    mass_sun_cgs
-from yt.utilities.cosmology import Cosmology
+    mass_sun_grams, \
+    sec_per_Gyr
 from .fields import \
     SDFFieldInfo
 from .io import \
-    IOHandlerSDF, \
+    IOHandlerSDF
+from yt.utilities.sdf import \
     SDFRead,\
-    SDFIndex
+    SDFIndex,\
+    HTTPSDFRead
+
+try:
+    import requests
+except ImportError:
+    requests = None
+
+
+
+# currently specified by units_2HOT == 2 in header
+# in future will read directly from file
+units_2HOT_v2_length = 3.08567802e21
+units_2HOT_v2_mass = 1.98892e43
+units_2HOT_v2_time = 3.1558149984e16
 
 class SDFFile(ParticleFile):
     pass
@@ -51,19 +65,24 @@
     _particle_mass_name = None
     _particle_coordinates_name = None
     _particle_velocity_name = None
-    _sindex = None
+    _midx = None
+    _skip_cache = True
+    _subspace = False
+
 
     def __init__(self, filename, dataset_type = "sdf_particles",
                  n_ref = 64, over_refine_factor = 1,
                  bounding_box = None,
                  sdf_header = None,
-                 idx_filename = None,
-                 idx_header = None,
-                 idx_level = 9):
+                 midx_filename = None,
+                 midx_header = None,
+                 midx_level = None,
+                 field_map = None):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         if bounding_box is not None:
-            bbox = np.array(bounding_box, dtype="float64")
+            self._subspace = True
+            bbox = np.array(bounding_box, dtype="float32")
             if bbox.shape == (2, 3):
                 bbox = bbox.transpose()
             self.domain_left_edge = bbox[:,0]
@@ -71,69 +90,117 @@
         else:
             self.domain_left_edge = self.domain_right_edge = None
         self.sdf_header = sdf_header
-        self.idx_filename = idx_filename
-        self.idx_header = idx_header
-        self.idx_level = idx_level
+        self.midx_filename = midx_filename
+        self.midx_header = midx_header
+        self.midx_level = midx_level
+        if field_map is None:
+            field_map = {}
+        self._field_map = field_map
+        prefix = ''
+        if self.midx_filename is not None:
+            prefix += 'midx_'
+        if filename.startswith("http"):
+            prefix += 'http_'
+        dataset_type = prefix + 'sdf_particles'
         super(SDFDataset, self).__init__(filename, dataset_type)
 
     def _parse_parameter_file(self):
-        self.sdf_container = SDFRead(self.parameter_filename,
-                                     header=self.sdf_header)
+        if self.parameter_filename.startswith("http"):
+            sdf_class = HTTPSDFRead
+        else:
+            sdf_class = SDFRead
+        self.sdf_container = sdf_class(self.parameter_filename,
+                                 header=self.sdf_header)
+
         # Reference
         self.parameters = self.sdf_container.parameters
         self.dimensionality = 3
         self.refine_by = 2
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        try:
+            self.unique_identifier = \
+                int(os.stat(self.parameter_filename)[stat.ST_CTIME])
+        except:
+            self.unique_identifier = time.time()
+
 
         if None in (self.domain_left_edge, self.domain_right_edge):
             R0 = self.parameters['R0']
-            self.domain_left_edge = np.array([
-              -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
-            self.domain_right_edge = np.array([
-              +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            if 'offset_center' in self.parameters and self.parameters['offset_center']:
+                self.domain_left_edge = np.array([0, 0, 0])
+                self.domain_right_edge = np.array([
+                 2.0 * self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+            else:
+                self.domain_left_edge = np.array([
+                    -self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
+                self.domain_right_edge = np.array([
+                    +self.parameters.get("R%s" % ax, R0) for ax in 'xyz'])
             self.domain_left_edge *= self.parameters.get("a", 1.0)
             self.domain_right_edge *= self.parameters.get("a", 1.0)
 
         nz = 1 << self.over_refine_factor
         self.domain_dimensions = np.ones(3, "int32") * nz
-        self.periodicity = (True, True, True)
+        if "do_periodic" in self.parameters and self.parameters["do_periodic"]:
+            self.periodicity = (True, True, True)
+        else:
+            self.periodicity = (False, False, False)
 
         self.cosmological_simulation = 1
 
         self.current_redshift = self.parameters.get("redshift", 0.0)
         self.omega_lambda = self.parameters["Omega0_lambda"]
         self.omega_matter = self.parameters["Omega0_m"]
+        if "Omega0_fld" in self.parameters:
+            self.omega_lambda += self.parameters["Omega0_fld"]
+        if "Omega0_r" in self.parameters:
+            # not correct, but most codes can't handle Omega0_r
+            self.omega_matter += self.parameters["Omega0_r"]
         self.hubble_constant = self.parameters["h_100"]
-        # Now we calculate our time based on the cosmology.
-        cosmo = Cosmology(self.hubble_constant,
-                          self.omega_matter, self.omega_lambda)
-        self.current_time = cosmo.hubble_time(self.current_redshift)
+        self.current_time = units_2HOT_v2_time * self.parameters.get("tpos", 0.0)
         mylog.info("Calculating time to be %0.3e seconds", self.current_time)
         self.filename_template = self.parameter_filename
         self.file_count = 1
 
     @property
-    def sindex(self):
-        if self._sindex is None:
-            if self.idx_filename is not None:
-                indexdata = SDFRead(self.idx_filename,
-                                    header=self.idx_header)
-                self._sindex = SDFIndex(self.sdf_container, indexdata, level=self.idx_level)
+    def midx(self):
+        if self._midx is None:
+            if self.midx_filename is not None:
+
+                if 'http' in self.midx_filename:
+                    sdf_class = HTTPSDFRead
+                else:
+                    sdf_class = SDFRead
+                indexdata = sdf_class(self.midx_filename, header=self.midx_header)
+                self._midx = SDFIndex(self.sdf_container, indexdata,
+                                        level=self.midx_level)
             else:
                 raise RuntimeError("SDF index0 file not supplied in load.")
-        else:
-            return self._sindex
+        return self._midx
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "kpc")
-        self.velocity_unit = self.quan(1.0, "kpc/Gyr")
-        self.time_unit = self.quan(1.0, "Gyr")
-        self.mass_unit = self.quan(1e10, "Msun")
+        self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
+        self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
+        self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
+        mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
+        if ' ' in mass_unit:
+            factor, unit = mass_unit.split(' ')
+        else:
+            factor = 1.0
+            unit = mass_unit
+        self.mass_unit = self.quan(float(factor), unit)
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        if not os.path.isfile(args[0]): return False
-        with open(args[0], "r") as f:
-            line = f.readline().strip()
-            return line == "# SDF 1.0"
+        sdf_header = kwargs.get('sdf_header', args[0])
+        print 'Parsing sdf_header: %s' % sdf_header
+        if sdf_header.startswith("http"):
+            if requests is None: return False
+            hreq = requests.get(sdf_header, stream=True)
+            if hreq.status_code != 200: return False
+            # Grab a whole 4k page.
+            line = hreq.iter_content(4096).next()
+        elif os.path.isfile(sdf_header):
+            with open(sdf_header, "r") as f:
+                line = f.read(10).strip()
+        else:
+            return False
+        return line.startswith("# SDF")
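
Taken together, these changes let an SDF dataset be loaded from a local
path or an http:// URL, with an optional midx index and bounding box. A
hedged sketch (the filenames here are placeholders, not from this commit):

import yt

ds = yt.load("ds_a_1.0000.sdf",            # placeholder; local or http://
             midx_filename="ds_a_1.0000.midx",
             bounding_box=[[-0.1, 0.1], [-0.1, 0.1], [-0.1, 0.1]])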

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sdf/fields.py
--- a/yt/frontends/sdf/fields.py
+++ b/yt/frontends/sdf/fields.py
@@ -35,13 +35,39 @@
 class SDFFieldInfo(FieldInfoContainer):
     known_other_fields = ()
 
-    known_particle_fields = (
-        ("mass", ("code_mass", ["particle_mass"], None)),
-        ("x", ("code_length", ["particle_position_x"], None)),
-        ("y", ("code_length", ["particle_position_y"], None)),
-        ("z", ("code_length", ["particle_position_z"], None)),
-        ("vx", ("code_velocity", ["particle_velocity_x"], None)),
-        ("vy", ("code_velocity", ["particle_velocity_y"], None)),
-        ("vz", ("code_velocity", ["particle_velocity_z"], None)),
-        ("ident", ("", ["particle_index"], None)),
-    )
+    known_particle_fields = ()
+    _mass_field = None
+
+    def __init__(self, pf, field_list):
+
+        if 'mass' in field_list:
+            self.known_particle_fields += (
+                ("mass", ("code_mass", ["particle_mass"], None)),)
+        possible_masses = ['mass', 'm200b', 'mvir']
+        mnf = 'mass'
+        for mn in possible_masses:
+            if mn in pf.sdf_container.keys():
+                mnf = self._mass_field = mn
+                break
+
+        idf = pf._field_map.get("particle_index", 'ident')
+        xf = pf._field_map.get("particle_position_x", 'x')
+        yf = pf._field_map.get("particle_position_y", 'y')
+        zf = pf._field_map.get("particle_position_z", 'z')
+        vxf = pf._field_map.get("particle_velocity_x", 'vx')
+        vyf = pf._field_map.get("particle_velocity_y", 'vy')
+        vzf = pf._field_map.get("particle_velocity_z", 'vz')
+
+        self.known_particle_fields = (
+            (idf, ('dimensionless', ['particle_index'], None)),
+            (xf,  ('code_length', ['particle_position_x'], None)),
+            (yf,  ('code_length', ['particle_position_y'], None)),
+            (zf,  ('code_length', ['particle_position_z'], None)),
+            (vxf, ('code_velocity', ['particle_velocity_x'], None)),
+            (vyf, ('code_velocity', ['particle_velocity_y'], None)),
+            (vzf, ('code_velocity', ['particle_velocity_z'], None)),
+            (mnf, ('code_mass', ['particle_mass'], None)),
+        )
+        super(SDFFieldInfo, self).__init__(pf, field_list)
+
+
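
The _field_map lookups above let a file whose columns use nonstandard
names be mapped onto yt's canonical particle fields at load time. A
hypothetical example (the column names are invented for illustration):

import yt

ds = yt.load("halos.sdf",                  # placeholder filename
             field_map={"particle_position_x": "x_cen",
                        "particle_position_y": "y_cen",
                        "particle_position_z": "z_cen"})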

diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sdf/io.py
--- a/yt/frontends/sdf/io.py
+++ b/yt/frontends/sdf/io.py
@@ -14,11 +14,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
-import h5py
 import numpy as np
 from yt.funcs import *
 from yt.utilities.exceptions import *
+from yt.units.yt_array import YTArray
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -82,6 +81,7 @@
     def _initialize_index(self, data_file, regions):
         x, y, z = (self._handle[ax] for ax in 'xyz')
         pcount = x.size
+
         morton = np.empty(pcount, dtype='uint64')
         ind = 0
         while ind < pcount:
@@ -90,12 +90,6 @@
             pos[:,0] = x[ind:ind+npart]
             pos[:,1] = y[ind:ind+npart]
             pos[:,2] = z[ind:ind+npart]
-            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
-               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
-                raise YTDomainOverflow(pos.min(axis=0),
-                                       pos.max(axis=0),
-                                       self.pf.domain_left_edge,
-                                       self.pf.domain_right_edge)
             regions.add_data_file(pos, data_file.file_id)
             morton[ind:ind+npart] = compute_morton(
                 pos[:,0], pos[:,1], pos[:,2],
@@ -104,461 +98,170 @@
             ind += CHUNKSIZE
         return morton
 
+    def _identify_fields(self, data_file):
+        fields = [("dark_matter", v) for v in self._handle.keys()]
+        fields.append(("dark_matter", "mass"))
+        return fields, {}
+
     def _count_particles(self, data_file):
-        return {'dark_matter': self._handle['x'].size}
+        pcount = self._handle['x'].size
+        if (pcount > 1e9):
+            mylog.warn("About to load %i particles into memory. " % (pcount) +
+                       "You may want to consider a midx-enabled load")
+        return {'dark_matter': pcount}
+
+
+class IOHandlerHTTPSDF(IOHandlerSDF):
+    _dataset_type = "http_sdf_particles"
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            yield "dark_matter", (
+                self._handle['x'][:pcount], self._handle['y'][:pcount], self._handle['z'][:pcount])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        data_files = set([])
+        assert(len(ptf) == 1)
+        assert(ptf.keys()[0] == "dark_matter")
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
+        assert(len(data_files) == 1)
+        for data_file in data_files:
+            pcount = self._handle['x'].size
+            for ptype, field_list in sorted(ptf.items()):
+                x = self._handle['x'][:pcount]
+                y = self._handle['y'][:pcount]
+                z = self._handle['z'][:pcount]
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        if self.pf.field_info._mass_field is None:
+                            pm = 1.0
+                            if 'particle_mass' in self.pf.parameters:
+                                pm = self.pf.parameters['particle_mass']
+                            else:
+                                raise RuntimeError
+                            data = pm * np.ones(mask.sum(), dtype="float64")
+                        else:
+                            data = self._handle[self.pf.field_info._mass_field][mask]
+                    else:
+                        data = self._handle[field][mask]
+                    yield (ptype, field), data
+
+    def _count_particles(self, data_file):
+        return {'dark_matter': self._handle['x'].http_array.shape}
+
+
+class IOHandlerSIndexSDF(IOHandlerSDF):
+    _dataset_type = "midx_sdf_particles"
+
+
+    def _read_particle_coords(self, chunks, ptf):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            yield "dark_matter", (
+                dd['x'], dd['y'], dd['z'])
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        required_fields = []
+        for ptype, field_list in sorted(ptf.items()):
+            for field in field_list:
+                if field == "mass": continue
+                required_fields.append(field)
+
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            required_fields):
+
+            for ptype, field_list in sorted(ptf.items()):
+                x = dd['x']
+                y = dd['y']
+                z = dd['z']
+                mask = selector.select_points(x, y, z, 0.0)
+                del x, y, z
+                if mask is None: continue
+                for field in field_list:
+                    if field == "mass":
+                        data = np.ones(mask.sum(), dtype="float64")
+                        data *= self.pf.parameters["particle_mass"]
+                    else:
+                        data = dd[field][mask]
+                    yield (ptype, field), data
+
+    def _initialize_index(self, data_file, regions):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount = 0
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x']):
+            pcount += dd['x'].size
+
+        morton = np.empty(pcount, dtype='uint64')
+        ind = 0
+
+        chunk_id = 0
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x','y','z']):
+            npart = dd['x'].size
+            pos = np.empty((npart, 3), dtype=dd['x'].dtype)
+            pos[:,0] = dd['x']
+            pos[:,1] = dd['y']
+            pos[:,2] = dd['z']
+            if np.any(pos.min(axis=0) < self.pf.domain_left_edge) or \
+               np.any(pos.max(axis=0) > self.pf.domain_right_edge):
+                raise YTDomainOverflow(pos.min(axis=0),
+                                       pos.max(axis=0),
+                                       self.pf.domain_left_edge,
+                                       self.pf.domain_right_edge)
+            regions.add_data_file(pos, chunk_id)
+            morton[ind:ind+npart] = compute_morton(
+                pos[:,0], pos[:,1], pos[:,2],
+                data_file.pf.domain_left_edge,
+                data_file.pf.domain_right_edge)
+            ind += npart
+        return morton
+
+    def _count_particles(self, data_file):
+        dle = self.pf.domain_left_edge.in_units("code_length").d
+        dre = self.pf.domain_right_edge.in_units("code_length").d
+        pcount_estimate = self.pf.midx.get_nparticles_bbox(dle, dre)
+        if pcount_estimate > 1e9:
+            mylog.warning("Filtering %i particles to find total."
+                          % pcount_estimate + \
+                          " You may want to reconsider your bounding box.")
+        pcount = 0
+        for dd in self.pf.midx.iter_bbox_data(
+            dle, dre,
+            ['x']):
+            pcount += dd['x'].size
+        return {'dark_matter': pcount}
 
     def _identify_fields(self, data_file):
         fields = [("dark_matter", v) for v in self._handle.keys()]
         fields.append(("dark_matter", "mass"))
         return fields, {}
 
-import re
-import os
 
-_types = {
-    'int': 'int32',
-    'int64_t': 'int64',
-    'float': 'float32',
-    'double': 'float64',
-    'unsigned int': 'I',
-    'unsigned char': 'B',
-}
+class IOHandlerSIndexHTTPSDF(IOHandlerSIndexSDF):
+    _dataset_type = "midx_http_sdf_particles"
 
-def get_type(vtype, len=None):
-    try:
-        t = _types[vtype]
-        if len is not None:
-            t = np.dtype((t, len))
-        else:
-            t = np.dtype(t)
-    except KeyError:
-        t = eval("np."+vtype)
-    return t
-
-def lstrip(text_list):
-    return [t.strip() for t in text_list]
-
-def get_struct_vars(line):
-    spl = lstrip(line.split(";"))
-    multiv = lstrip(spl[0].split(","))
-    ret = lstrip(multiv[0].split())
-    ctype = ret[0]
-    vnames = [ret[-1]] + multiv[1:]
-    vnames = [v.strip() for v in vnames]
-    for vtype in ret[1:-1]:
-        ctype += ' ' + vtype
-    num = None
-    if len(vnames) == 1:
-        if '[' in vnames[0]:
-            num = int(vnames[0].split('[')[-1].strip(']'))
-            #num = int(re.sub("\D", "", vnames[0]))
-    ctype = get_type(ctype, len=num)
-    return ctype, vnames
-
-class DataStruct(object):
-    """docstring for DataStruct"""
-
-    _offset = 0
-
-    def __init__(self, dtypes, num, filename):
-        self.filename = filename
-        self.dtype = np.dtype(dtypes)
-        self.size = num
-        self.itemsize = self.dtype.itemsize
-        self.data = {}
-        self.handle = None
-
-    def set_offset(self, offset):
-        self._offset = offset
-        if self.size == -1:
-            file_size = os.path.getsize(self.filename)
-            file_size -= offset
-            self.size = float(file_size) / self.itemsize
-            assert(int(self.size) == self.size)
-
-    def build_memmap(self):
-        assert(self.size != -1)
-        self.handle = np.memmap(self.filename, dtype=self.dtype,
-                        mode='r', shape=self.size, offset=self._offset)
-        for k in self.dtype.names:
-            self.data[k] = self.handle[k]
-
-class SDFRead(dict):
-
-    """docstring for SDFRead"""
-
-    _eof = 'SDF-EOH'
-
-    def __init__(self, filename, header=None):
-        self.filename = filename
-        if header is None:
-            header = filename
-        self.header = header
-        self.parameters = {}
-        self.structs = []
-        self.comments = []
-        self.parse_header()
-        self.set_offsets()
-        self.load_memmaps()
-
-    def parse_header(self):
-        """docstring for parse_header"""
-        # Pre-process
-        ascfile = open(self.header, 'r')
-        while True:
-            l = ascfile.readline()
-            if self._eof in l: break
-
-            self.parse_line(l, ascfile)
-
-        hoff = ascfile.tell()
-        ascfile.close()
-        if self.header != self.filename:
-            hoff = 0
-        self.parameters['header_offset'] = hoff
-
-    def parse_line(self, line, ascfile):
-        """Parse a line of sdf"""
-
-
-        if 'struct' in line:
-            self.parse_struct(line, ascfile)
-            return
-
-        if "#" in line:
-            self.comments.append(line)
-            return
-
-        spl = lstrip(line.split("="))
-        vtype, vname = lstrip(spl[0].split())
-        vname = vname.strip("[]")
-        vval = spl[-1].strip(";")
-        if vtype == 'parameter':
-            self.parameters[vname] = vval
-            return
-        elif vtype == "char":
-            vtype = "str"
-
-        try:
-            vval = eval("np."+vtype+"(%s)" % vval)
-        except AttributeError:
-            vval = eval("np."+_types[vtype]+"(%s)" % vval)
-
-        self.parameters[vname] = vval
-
-    def parse_struct(self, line, ascfile):
-        assert 'struct' in line
-
-        str_types = []
-        comments = []
-        str_lines = []
-        l = ascfile.readline()
-        while "}" not in l:
-            vtype, vnames = get_struct_vars(l)
-            for v in vnames:
-                str_types.append((v, vtype))
-            l = ascfile.readline()
-        num = l.strip("}[]")
-        num = num.strip("\;\\\n]")
-        if len(num) == 0:
-            # We need to compute the number of records.  The DataStruct will
-            # handle this.
-            num = '-1'
-        num = int(num)
-        struct = DataStruct(str_types, num, self.filename)
-        self.structs.append(struct)
-        return
-
-    def set_offsets(self):
-        running_off = self.parameters['header_offset']
-        for struct in self.structs:
-            struct.set_offset(running_off)
-            running_off += struct.size * struct.itemsize
-        return
-
-    def load_memmaps(self):
-        for struct in self.structs:
-            struct.build_memmap()
-            self.update(struct.data)
-
-
-class SDFIndex(object):
-
-    """docstring for SDFIndex
-
-    This provides an index mechanism into the full SDF Dataset.
-
-    Most useful class methods:
-        get_cell_data(level, cell_iarr, fields)
-        iter_bbox_data(left, right, fields)
-        iter_bbox_data(left, right, fields)
-
-    """
-    def __init__(self, sdfdata, indexdata, level=9):
-        super(SDFIndex, self).__init__()
-        self.sdfdata = sdfdata
-        self.indexdata = indexdata
-        self.level = level
-        self.rmin = None
-        self.rmax = None
-        self.domain_width = None
-        self.domain_buffer = 0
-        self.domain_dims = 0
-        self.domain_active_dims = 0
-        self.masks = {
-            "p" : int("011"*level, 2),
-            "t" : int("101"*level, 2),
-            "r" : int("110"*level, 2),
-            "z" : int("011"*level, 2),
-            "y" : int("101"*level, 2),
-            "x" : int("110"*level, 2),
-            2 : int("011"*level, 2),
-            1 : int("101"*level, 2),
-            0 : int("110"*level, 2),
-        }
-        self.dim_slices = {
-            "p" : slice(0, None, 3),
-            "t" : slice(1, None, 3),
-            "r" : slice(2, None, 3),
-            "z" : slice(0, None, 3),
-            "y" : slice(1, None, 3),
-            "x" : slice(2, None, 3),
-            2 : slice(0, None, 3),
-            1 : slice(1, None, 3),
-            0 : slice(2, None, 3),
-        }
-        self.set_bounds()
-
-    def set_bounds(self):
-        r_0 = self.sdfdata.parameters['R0']
-        DW = 2.0 * r_0
-
-        self.rmin = np.zeros(3)
-        self.rmax = np.zeros(3)
-        sorted_rtp = self.sdfdata.parameters.get("sorted_rtp", False)
-        if sorted_rtp:
-            self.rmin[:] = [0.0, 0.0, -np.pi]
-            self.rmax[:] = [r_0*1.01, 2*np.pi, np.pi]
-        else:
-            self.rmin[0] -= self.sdfdata.parameters.get('Rx', 0.0)
-            self.rmin[1] -= self.sdfdata.parameters.get('Ry', 0.0)
-            self.rmin[2] -= self.sdfdata.parameters.get('Rz', 0.0)
-            self.rmax[0] += self.sdfdata.parameters.get('Rx', r_0)
-            self.rmax[1] += self.sdfdata.parameters.get('Ry', r_0)
-            self.rmax[2] += self.sdfdata.parameters.get('Rz', r_0)
-
-        #/* expand root for non-power-of-two */
-        expand_root = 0.0
-        ic_Nmesh = self.sdfdata.parameters.get('ic_Nmesh',0)
-        if ic_Nmesh != 0:
-            f2 = 1<<int(np.log2(ic_Nmesh-1)+1)
-            if (f2 != ic_Nmesh):
-                expand_root = 1.0*f2/ic_Nmesh - 1.0;
-            print 'Expanding: ', f2, ic_Nmesh, expand_root
-        self.rmin *= 1.0 + expand_root
-        self.rmax *= 1.0 + expand_root
-        self.domain_width = self.rmax - self.rmin
-        self.domain_dims = 1 << self.level
-        self.domain_buffer = (self.domain_dims - int(self.domain_dims/(1.0 + expand_root)))/2
-        self.domain_active_dims = self.domain_dims - 2*self.domain_buffer
-        print 'Domain stuff:', self.domain_width, self.domain_dims, self.domain_active_dims
-
-    def get_key(self, iarr, level=None):
-        if level is None:
-            level = self.level
-        i1, i2, i3 = iarr
-        rep1 = np.binary_repr(i1, width=self.level)
-        rep2 = np.binary_repr(i2, width=self.level)
-        rep3 = np.binary_repr(i3, width=self.level)
-        inter = np.zeros(self.level*3, dtype='c')
-        inter[self.dim_slices[0]] = rep1
-        inter[self.dim_slices[1]] = rep2
-        inter[self.dim_slices[2]] = rep3
-        return int(inter.tostring(), 2)
-
-    def get_key_ijk(self, i1, i2, i3, level=None):
-        return self.get_key(np.array([i1, i2, i3]), level=level)
-
-    def get_slice_key(self, ind, dim='r'):
-        slb = np.binary_repr(ind, width=self.level)
-        expanded = np.array([0]*self.level*3, dtype='c')
-        expanded[self.dim_slices[dim]] = slb
-        return int(expanded.tostring(), 2)
-
-    def get_slice_chunks(self, slice_dim, slice_index):
-        sl_key = self.get_slice_key(slice_index, dim=slice_dim)
-        mask = (self.indexdata['index'] & ~self.masks[slice_dim]) == sl_key
-        offsets = self.indexdata['base'][mask]
-        lengths = self.indexdata['len'][mask]
-        return mask, offsets, lengths
-
-    def get_ibbox_slow(self, ileft, iright):
-        """
-        Given left and right indicies, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-        ileft = np.array(ileft)
-        iright = np.array(iright)
-        for i in range(3):
-            left_key = self.get_slice_key(ileft[i], dim=i)
-            right_key= self.get_slice_key(iright[i], dim=i)
-            dim_inds = (self.indexdata['index'] & ~self.masks[i])
-            mask *= (dim_inds >= left_key) * (dim_inds <= right_key)
-            del dim_inds
-
-        offsets = self.indexdata['base'][mask]
-        lengths = self.indexdata['len'][mask]
-        return mask, offsets, lengths
-
-    def get_ibbox(self, ileft, iright):
-        """
-        Given left and right indicies, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        mask = np.zeros(self.indexdata['index'].shape, dtype='bool')
-
-        print 'Getting data from ileft to iright:',  ileft, iright
-
-        X, Y, Z = np.mgrid[ileft[0]:iright[0]+1,
-                           ileft[1]:iright[1]+1,
-                           ileft[2]:iright[2]+1]
-
-        X = X.ravel()
-        Y = Y.ravel()
-        Z = Z.ravel()
-        # Correct For periodicity
-        X[X < self.domain_buffer] += self.domain_active_dims
-        X[X >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
-        Y[Y < self.domain_buffer] += self.domain_active_dims
-        Y[Y >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
-        Z[Z < self.domain_buffer] += self.domain_active_dims
-        Z[Z >= self.domain_dims -  self.domain_buffer] -= self.domain_active_dims
-
-        print 'periodic:',  X.min(), X.max(), Y.min(), Y.max(), Z.min(), Z.max()
-
-        indices = np.array([self.get_key_ijk(x, y, z) for x, y, z in zip(X, Y, Z)])
-        indices = indices[indices < self.indexdata['index'].shape[0]]
-        return indices
-
-    def get_bbox(self, left, right):
-        """
-        Given left and right indicies, return a mask and
-        set of offsets+lengths into the sdf data.
-        """
-        ileft = np.floor((left - self.rmin) / self.domain_width *  self.domain_dims)
-        iright = np.floor((right - self.rmin) / self.domain_width * self.domain_dims)
-
-        return self.get_ibbox(ileft, iright)
-
-    def get_data(self, chunk, fields):
-        data = {}
-        for field in fields:
-            data[field] = self.sdfdata[field][chunk]
-        return data
-
-    def iter_data(self, inds, fields):
-        num_inds = len(inds)
-        num_reads = 0
-        print 'Reading %i chunks' % num_inds
-        i = 0
-        while (i < num_inds):
-            ind = inds[i]
-            base = self.indexdata['base'][ind]
-            length = self.indexdata['len'][ind]
-            # Concatenate aligned reads
-            nexti = i+1
-            combined = 0
-            while nexti < len(inds):
-                nextind = inds[nexti]
-                #        print 'b: %i l: %i end: %i  next: %i' % ( base, length, base + length, self.indexdata['base'][nextind] )
-                if base + length == self.indexdata['base'][nextind]:
-                    length += self.indexdata['len'][nextind]
-                    i += 1
-                    nexti += 1
-                    combined += 1
-                else:
-                    break
-
-            chunk = slice(base, base+length)
-            print 'Reading chunk %i of length %i after catting %i' % (i, length, combined)
-            num_reads += 1
-            data = self.get_data(chunk, fields)
-            yield data
-            del data
-            i += 1
-        print 'Read %i chunks, batched into %i reads' % (num_inds, num_reads)
-
-    def iter_bbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
-        inds = self.get_bbox(left, right)
-        return self.iter_data(inds, fields)
-
-    def iter_ibbox_data(self, left, right, fields):
-        print 'Loading region from ', left, 'to', right
-        inds = self.get_ibbox(left, right)
-        return self.iter_data(inds, fields)
-
-    def get_contiguous_chunk(self, left_key, right_key, fields):
-        max_key = self.indexdata['index'][-1]
-        if left_key > max_key:
-            raise RuntimeError("Left key is too large. Key: %i Max Key: %i" % (left_key, max_key))
-        base = self.indexdata['base'][left_key]
-        right_key = min(right_key, self.indexdata['index'][-1])
-        length = self.indexdata['base'][right_key] + \
-            self.indexdata['len'][right_key] - base
-        print 'Getting contiguous chunk of size %i starting at %i' % (length, base)
-        return self.get_data(slice(base, base + length), fields)
-
-    def iter_slice_data(self, slice_dim, slice_index, fields):
-        mask, offsets, lengths = self.get_slice_chunks(slice_dim, slice_index)
-        for off, l in zip(offsets, lengths):
-            data = {}
-            chunk = slice(off, off+l)
-            for field in fields:
-                data[field] = self.sdfdata[field][chunk]
-            yield data
-            del data
-
-    def get_key_bounds(self, level, cell_iarr):
-        """
-        Get the left and right index-key bounds for the given cell.
-
-        level: int
-            Requested level
-        cell_iarr: array-like, length 3
-            Requested cell from given level.
-
-        Returns:
-            lmax_lk, lmax_rk
-        """
-        shift = self.level-level
-        level_buff = 0
-        level_lk = self.get_key(cell_iarr + level_buff)
-        level_rk = self.get_key(cell_iarr + level_buff) + 1
-        lmax_lk = (level_lk << shift*3)
-        lmax_rk = ((level_rk << shift*3) - 1)
-        #print "Level ", level, np.binary_repr(level_lk, width=self.level*3), np.binary_repr(level_rk, width=self.level*3)
-        #print "Level ", self.level, np.binary_repr(lmax_lk, width=self.level*3), np.binary_repr(lmax_rk, width=self.level*3)
-        return lmax_lk, lmax_rk
-
-    def get_cell_data(self, level, cell_iarr, fields):
-        """
-        Get data from requested cell
-
-        This uses the raw cell index, and doesn't account for periodicity or
-        an expanded domain (non-power of 2).
-
-        level: int
-            Requested level
-        cell_iarr: array-like, length 3
-            Requested cell from given level.
-        fields: list
-            Requested fields
-
-        Returns:
-            cell_data: dict
-                Dictionary of field_name, field_data
-        """
-        cell_iarr = np.array(cell_iarr)
-        lk, rk = self.get_key_bounds(level, cell_iarr)
-        return self.get_contiguous_chunk(lk, rk, fields)

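The SDFIndex code above batches its I/O by concatenating index entries whose extents are adjacent on disk, so many small reads collapse into a few large ones. A minimal standalone sketch of that coalescing pattern, using illustrative (base, length) pairs rather than a real SDF index:

def coalesce_reads(bases, lengths):
    """Merge touching (base, length) extents into larger single reads."""
    reads = []
    i = 0
    while i < len(bases):
        base, length = bases[i], lengths[i]
        # Keep extending while the next extent starts exactly where
        # the current combined read ends.
        while i + 1 < len(bases) and base + length == bases[i + 1]:
            length += lengths[i + 1]
            i += 1
        reads.append((base, length))
        i += 1
    return reads

# Three touching extents plus one disjoint extent become two reads.
assert coalesce_reads([0, 10, 20, 50], [10, 10, 10, 5]) == [(0, 30), (50, 5)]
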
diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -649,6 +649,9 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        if args[0].startswith("http://"):
+        if not args[0].startswith("http://"):
+            return False
+        hreq = requests.get(args[0] + "/yt_index.json")
+        if hreq.status_code == 200:
             return True
         return False

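The _is_valid change above inverts the scheme check so non-HTTP paths return False immediately, and then requires a reachable yt_index.json before claiming the dataset. A hedged sketch of that two-step probe; the function name and the try/except guard are additions here, not part of the committed code:

import requests

def looks_like_http_sdf(path):
    """Return True when `path` is an HTTP URL serving a yt_index.json."""
    if not path.startswith("http://"):
        return False        # fail fast: no network round trip for local paths
    try:
        return requests.get(path + "/yt_index.json").status_code == 200
    except requests.RequestException:
        return False        # unreachable host: treat as "not this frontend"
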
diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -54,6 +54,7 @@
     )
 
     known_particle_fields = (
+        ("particle_position", ("code_length", [], None)),
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),

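Registering a packed particle_position vector field lets stream datasets accept a single (N, 3) array in place of three per-axis arrays. A purely illustrative sketch; the load_particles import path and keywords below follow the yt-3.0-era API and are assumptions, with vector input shown only in principle:

import numpy as np
from yt.frontends.stream.api import load_particles

n = 1000
data = {
    "particle_position": np.random.random((n, 3)),  # one (N, 3) array
    "particle_mass": np.ones(n),
}
ds = load_particles(data, length_unit=1.0, mass_unit=1.0)
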
diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -96,6 +96,7 @@
 
 class StreamParticleIOHandler(BaseIOHandler):
 
+    _vector_fields = ("particle_position", "particle_velocity")
     _dataset_type = "stream_particles"
 
     def __init__(self, pf):
@@ -116,6 +117,19 @@
                               f[ptype, "particle_position_y"],
                               f[ptype, "particle_position_z"])
             
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        # This is allowed to over-estimate, and it probably will, because
+        # we count *all* of the particles rather than the individual types.
+        count = 0
+        psize = {}
+        for chunk in chunks:
+            for obj in chunk.objs:
+                count += selector.count_octs(obj.oct_handler, obj.domain_id)
+        for ptype in ptf:
+            psize[ptype] = self.pf.n_ref * count / float(obj.nz)
+        return psize
+
     def _read_particle_fields(self, chunks, ptf, selector):
         data_files = set([])
         for chunk in chunks:
@@ -124,8 +138,13 @@
         for data_file in sorted(data_files):
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
-                x, y, z = (f[ptype, "particle_position_%s" % ax]
-                           for ax in 'xyz')
+                if (ptype, "particle_position") in f:
+                    x = f[ptype, "particle_position"][:,0]
+                    y = f[ptype, "particle_position"][:,1]
+                    z = f[ptype, "particle_position"][:,2]
+                else:
+                    x, y, z = (f[ptype, "particle_position_%s" % ax]
+                               for ax in 'xyz')
                 mask = selector.select_points(x, y, z, 0.0)
                 if mask is None: continue
                 for field in field_list:

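The hunk above prefers the packed (N, 3) particle_position field when present and falls back to the per-axis components. A minimal sketch of that fallback, where the dict-of-arrays f stands in for the stream field container:

import numpy as np

def get_positions(f, ptype):
    """Return x, y, z arrays from a vector or per-component layout."""
    if (ptype, "particle_position") in f:
        pos = f[ptype, "particle_position"]      # packed (N, 3) array
        return pos[:, 0], pos[:, 1], pos[:, 2]
    return tuple(f[ptype, "particle_position_%s" % ax] for ax in "xyz")

f = {("io", "particle_position"): np.random.random((8, 3))}
x, y, z = get_positions(f, "io")
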
diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -18,6 +18,10 @@
 from oct_visitors cimport Oct, OctVisitorData, \
     oct_visitor_function
 
+ctypedef fused anyfloat:
+    np.float32_t
+    np.float64_t
+
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level
@@ -53,3 +57,13 @@
 cdef class OctreeSubsetSelector(SelectorObject):
     cdef SelectorObject base_selector
     cdef public np.int64_t domain_id
+
+cdef inline np.float64_t _periodic_dist(np.float64_t x1, np.float64_t x2,
+                                        np.float64_t dw, bint periodic) nogil:
+    cdef np.float64_t rel = x1 - x2
+    if not periodic: return rel
+    if rel > dw * 0.5:
+        rel -= dw
+    elif rel < -dw * 0.5:
+        rel += dw
+    return rel

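_periodic_dist implements the minimum-image convention: any displacement larger than half the domain width dw is wrapped to the nearer periodic image. A pure-Python illustration of the same rule:

def periodic_dist(x1, x2, dw, periodic=True):
    """1-D displacement of x1 from x2 under the minimum-image rule."""
    rel = x1 - x2
    if not periodic:
        return rel
    if rel > dw * 0.5:
        rel -= dw
    elif rel < -dw * 0.5:
        rel += dw
    return rel

# In a unit box, 0.25 and 0.875 are 0.375 apart through the boundary,
# not 0.625 apart directly.
assert periodic_dist(0.25, 0.875, 1.0) == 0.375
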
diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -36,10 +36,6 @@
     long int lrint(double x) nogil
     double fabs(double x) nogil
 
-ctypedef fused anyfloat:
-    np.float32_t
-    np.float64_t
-
 # These routines are separated into a couple different categories:
 #
 #   * Routines for identifying intersections of an object with a bounding box
@@ -335,10 +331,10 @@
         # domain_width is already in code units, and we assume what is fed in
         # is too.
         cdef np.float64_t rel = x1 - x2
-        if self.periodicity[d] :
-            if rel > self.domain_width[d]/2.0 :
+        if self.periodicity[d]:
+            if rel > self.domain_width[d] * 0.5:
                 rel -= self.domain_width[d]
-            elif rel < -self.domain_width[d]/2.0 :
+            elif rel < -self.domain_width[d] * 0.5:
                 rel += self.domain_width[d]
         return rel
 
@@ -491,11 +487,12 @@
         cdef int i
         cdef np.float64_t pos[3]
         cdef np.ndarray[np.uint8_t, ndim=1] mask 
-        mask = np.zeros(x.shape[0], dtype='uint8')
+        mask = np.empty(x.shape[0], dtype='uint8')
         _ensure_code(x)
         _ensure_code(y)
         _ensure_code(z)
 
+
         # this is to allow selectors to optimize the point vs
         # 0-radius sphere case.  These two may have different 
         # effects for 0-volume selectors, however (collision 
@@ -517,7 +514,7 @@
                     mask[i] = self.select_sphere(pos, radius)
                     count += mask[i]
         if count == 0: return None
-        return mask.astype("bool")
+        return mask.view("bool")
 
     def __hash__(self):
         return hash(self._hash_vals() + self._base_hash())
@@ -595,12 +592,27 @@
     cdef np.float64_t radius
     cdef np.float64_t radius2
     cdef np.float64_t center[3]
+    cdef np.float64_t bbox[3][2]
+    cdef bint check_box[3]
 
     def __init__(self, dobj):
         for i in range(3):
             self.center[i] = _ensure_code(dobj.center[i])
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
+        center = _ensure_code(dobj.center)
+        cdef np.float64_t mi = np.finfo("float64").min
+        cdef np.float64_t ma = np.finfo("float64").max
+        for i in range(3):
+            self.center[i] = center[i]
+            self.bbox[i][0] = self.center[i] - self.radius
+            self.bbox[i][1] = self.center[i] + self.radius
+            if self.bbox[i][0] < dobj.pf.domain_left_edge[i]:
+                self.check_box[i] = False
+            elif self.bbox[i][1] > dobj.pf.domain_right_edge[i]:
+                self.check_box[i] = False
+            else:
+                self.check_box[i] = True
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -620,10 +632,15 @@
         cdef int i
         cdef np.float64_t dist, dist2 = 0
         for i in range(3):
-            dist = self.difference(pos[i], self.center[i], i)
+            if self.check_box[i] and \
+              (pos[i] < self.bbox[i][0] or 
+               pos[i] > self.bbox[i][1]):
+                return 0
+            dist = _periodic_dist(pos[i], self.center[i], self.domain_width[i],
+                                  self.periodicity[i])
             dist2 += dist*dist
-        if dist2 <= self.radius2: return 1
-        return 0
+            if dist2 > self.radius2: return 0
+        return 1
    
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -649,16 +666,22 @@
             left_edge[1] <= self.center[1] <= right_edge[1] and
             left_edge[2] <= self.center[2] <= right_edge[2]):
             return 1
+        for i in range(3):
+            if not self.check_box[i]: continue
+            if right_edge[i] < self.bbox[i][0] or \
+               left_edge[i] > self.bbox[i][1]:
+                return 0
         # http://www.gamedev.net/topic/335465-is-this-the-simplest-sphere-aabb-collision-test/
         dist = 0
         for i in range(3):
+            # Early terminate
             box_center = (right_edge[i] + left_edge[i])/2.0
             relcenter = self.difference(box_center, self.center[i], i)
             edge = right_edge[i] - left_edge[i]
             closest = relcenter - fclip(relcenter, -edge/2.0, edge/2.0)
             dist += closest*closest
-        if dist <= self.radius2: return 1
-        return 0
+            if dist > self.radius2: return 0
+        return 1
 
     def _hash_vals(self):
         return (self.radius, self.radius2,

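The sphere selector changes above add two shortcuts: points are rejected outright along any axis where the sphere's bounding box does not cross a periodic boundary, and the squared distance is compared against radius**2 after each axis so far-away points exit early. A pure-Python sketch of the combined test; all names are illustrative, and dist1d stands in for the (possibly periodic) per-axis displacement:

def select_point(pos, center, radius2, bbox, check_box, dist1d):
    """Point-in-sphere test with box rejection and early exit."""
    dist2 = 0.0
    for i in range(3):
        # Box rejection is only valid when the box did not wrap
        # through a periodic boundary along this axis.
        if check_box[i] and not (bbox[i][0] <= pos[i] <= bbox[i][1]):
            return False
        d = dist1d(pos[i], center[i], i)
        dist2 += d * d
        if dist2 > radius2:
            return False    # early exit: already outside the sphere
    return True

# Non-periodic example: (0.5, 0.5, 0.5) lies inside the unit sphere
# at the origin (squared distance 0.75 < 1.0).
assert select_point((0.5, 0.5, 0.5), (0.0, 0.0, 0.0), 1.0,
                    bbox=[(-1.0, 1.0)] * 3, check_box=[True] * 3,
                    dist1d=lambda a, b, i: a - b)
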
diff -r 2bccbc8de4e2b93aed82fff99f0dafd9308e1691 -r 990c27c2dd3ca0be052231028ae4aa02e07ad380 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -116,12 +116,17 @@
     def _read_chunk_data(self, chunk, fields):
         return {}
 
+    def _count_particles_chunks(self, chunks, ptf, selector):
+        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
+        for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
+            psize[ptype] += selector.count_points(x, y, z, 0.0)
+        return dict(psize.items())
+
     def _read_particle_selection(self, chunks, selector, fields):
         rv = {}
         ind = {}
         # We first need a set of masks for each particle type
         ptf = defaultdict(list)        # ON-DISK TO READ
-        psize = defaultdict(lambda: 0) # COUNT PTYPES ON DISK
         fsize = defaultdict(lambda: 0) # COUNT RV
         field_maps = defaultdict(list) # ptypes -> fields
         chunks = list(chunks)
@@ -139,17 +144,10 @@
                 ptf[ftype].append(fname)
                 field_maps[field].append(field)
         # We can't hash chunks, but otherwise this is a neat idea.
-        if 0 and hash(selector) == self._last_selector_id and \
-           all(ptype in self._last_selector_counts for ptype in ptf):
-            psize.update(self._last_selector_counts)
-        else:
-            # Now we have our full listing.
-            # Here, ptype_map means which particles contribute to a given type.
-            # And ptf is the actual fields from disk to read.
-            for ptype, (x, y, z) in self._read_particle_coords(chunks, ptf):
-                psize[ptype] += selector.count_points(x, y, z, 0.0)
-            self._last_selector_counts = dict(**psize)
-            self._last_selector_id = hash(selector)
+        # Now we have our full listing.
+        # Here, ptype_map means which particles contribute to a given type.
+        # And ptf is the actual fields from disk to read.
+        psize = self._count_particles_chunks(chunks, ptf, selector)
         # Now we allocate
         # ptf, remember, is our mapping of what we want to read
         #for ptype in ptf:
@@ -175,6 +173,10 @@
                 #    field_f, my_ind, my_ind+vals.shape[0], field_r)
                 rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals
                 ind[field_f] += vals.shape[0]
+        # Now we need to truncate all our fields, since we allow for
+        # over-estimating.
+        for field_f in ind:
+            rv[field_f] = rv[field_f][:ind[field_f]]
         return rv
 
 class IOHandlerExtracted(BaseIOHandler):

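Because _count_particles_chunks may over-estimate, _read_particle_selection now allocates result arrays at the estimated size, fills them chunk by chunk, and truncates to the number of values actually written. A sketch of that over-allocate/truncate pattern, with illustrative array sizes:

import numpy as np

estimated = 1000          # counting pass result; may exceed the true count
rv = np.empty(estimated, dtype="float64")

ind = 0
for vals in [np.arange(100.0)] * 3:   # stand-in for per-chunk reads
    rv[ind:ind + vals.shape[0]] = vals
    ind += vals.shape[0]

rv = rv[:ind]             # truncate to what was actually written
assert rv.shape[0] == 300
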
This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list