[yt-svn] commit/yt: 6 new changesets

commits-noreply at bitbucket.org
Wed Mar 2 09:31:56 PST 2016


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/dc1707812808/
Changeset:   dc1707812808
Branch:      yt
User:        brittonsmith
Date:        2016-02-15 13:39:25+00:00
Summary:     Making sure to continue searching for member_id fields when they aren't found in the first file.
Affected #:  2 files

diff -r 816123b32fa4a71c537683bef8de93f795a57796 -r dc17078128087e433c44c20665a86b11c1424ffe yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -334,9 +334,10 @@
           dict([(ptype, False)
                 for ptype, pnum in self.particle_count.items()
                 if pnum > 0])
+        has_ids = False
 
         for data_file in self.data_files:
-            fl, sl, _units = self.io._identify_fields(data_file)
+            fl, sl, idl, _units = self.io._identify_fields(data_file)
             units.update(_units)
             field_list.extend([f for f in fl
                                if f not in field_list])
@@ -344,7 +345,8 @@
                                       if f not in scalar_field_list])
             for ptype in found_fields:
                 found_fields[ptype] |= data_file.total_particles[ptype]
-            if all(found_fields.values()): break
+            has_ids |= len(idl) > 0
+            if all(found_fields.values()) and has_ids: break
 
         self.field_list = field_list
         self.scalar_field_list = scalar_field_list

diff -r 816123b32fa4a71c537683bef8de93f795a57796 -r dc17078128087e433c44c20665a86b11c1424ffe yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -298,11 +298,10 @@
     def _identify_fields(self, data_file):
         fields = []
         scalar_fields = []
+        id_fields = []
         pcount = data_file.total_particles
-        if sum(pcount.values()) == 0: return fields, scalar_fields, {}
         with h5py.File(data_file.filename, "r") as f:
             for ptype in self.ds.particle_types_raw:
-                if data_file.total_particles[ptype] == 0: continue
                 fields.append((ptype, "particle_identifier"))
                 scalar_fields.append((ptype, "particle_identifier"))
                 my_fields, my_offset_fields = \
@@ -311,8 +310,9 @@
                 scalar_fields.extend(my_fields)
 
                 if "IDs" not in f: continue
-                fields.extend([(ptype, field) for field in f["IDs"]])
-        return fields, scalar_fields, {}
+                id_fields.extend([(ptype, field) for field in f["IDs"]])
+                fields.extend([(ptype, field) for field in f["IDs"]])
+        return fields, scalar_fields, id_fields, {}
 
 def subfind_field_list(fh, ptype, pcount):
     fields = []
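
Taken together, the two hunks change the detection loop to keep scanning
catalog files until every particle type has been seen and at least one file
has contributed ID fields. A minimal standalone sketch of that early-exit
pattern (the input shapes are hypothetical, not the frontend's actual API):

    def detect_fields(data_files):
        # each item: (field_list, id_field_list, counts_by_ptype), hypothetical
        field_list = []
        found = {}       # ptype -> any particles seen so far?
        has_ids = False
        for fl, idl, pcount in data_files:
            for f in fl:
                if f not in field_list:
                    field_list.append(f)
            for ptype, n in pcount.items():
                found[ptype] = found.get(ptype, False) or n > 0
            has_ids |= len(idl) > 0
            # don't stop early: member IDs may only appear in a later file
            if all(found.values()) and has_ids:
                break
        return field_list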


https://bitbucket.org/yt_analysis/yt/commits/c1bb2363cecb/
Changeset:   c1bb2363cecb
Branch:      yt
User:        brittonsmith
Date:        2016-02-15 14:23:58+00:00
Summary:     Skip file read if no particles.
Affected #:  1 file

diff -r dc17078128087e433c44c20665a86b11c1424ffe -r c1bb2363cecb093762dfb93b29f03218dc1b7a09 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -258,6 +258,7 @@
             start_index = dobj.field_data_start[i]
             end_index = dobj.field_data_end[i]
             pcount = end_index - start_index
+            if pcount == 0: continue
             field_end = field_start + end_index - start_index
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(member_fields.items()):
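
The one-line guard follows the usual h5py idiom: compute the slice size first
and skip opening the file when it is empty. Roughly (the chunk layout and the
"IDs/ID" dataset path are assumptions for illustration):

    import h5py

    def read_member_ids(chunks):
        # chunks: list of (filename, start, end) tuples, assumed layout
        out = []
        for filename, start, end in chunks:
            if end - start == 0:
                continue  # empty slice: skip opening this file entirely
            with h5py.File(filename, "r") as f:
                out.append(f["IDs/ID"][start:end])  # dataset path assumed
        return out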


https://bitbucket.org/yt_analysis/yt/commits/9478d43315ad/
Changeset:   9478d43315ad
Branch:      yt
User:        brittonsmith
Date:        2016-02-25 12:11:49+00:00
Summary:     Adding tests.
Affected #:  1 file

diff -r c1bb2363cecb093762dfb93b29f03218dc1b7a09 -r 9478d43315adc4160fbf7bc00793f8db67778b32 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -86,3 +86,29 @@
         # as the array of all masses.  This will test getting
         # scalar fields for halos correctly.
         yield assert_array_equal, ad[ptype, "particle_mass"], mass
+
+# fof/subhalo catalog with no member ids in first file
+g56 = "gadget_halos/data/groups_056/fof_subhalo_tab_056.0.hdf5"
+
+# This dataset has halos in one file and ids in another,
+# which can confuse the field detection.
+@requires_file(g56)
+def test_unbalanced_dataset():
+    ds = data_dir_load(g56)
+    halo = ds.halo("Group", 0)
+    my_ids = halo["member_ids"]
+    assert my_ids is not None
+
+# fof/subhalo catalog with a halo whose particles span multiple files
+g76 = "gadget_halos/data/groups_076/fof_subhalo_tab_076.0.hdf5"
+
+# This dataset has one halo with particles distributed over 3 files
+# with the 2nd file being empty.
+@requires_file(g76)
+def test_3file_halo():
+    ds = data_dir_load(g76)
+    # this halo's particles are distributed over 3 files with the
+    # middle file being empty
+    halo = ds.halo("Group", 6)
+    my_ids = halo["member_ids"]
+    assert my_ids is not None
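
The tests double as a usage sketch; with one of the datasets above, pulling
member IDs for a single halo looks roughly like:

    import yt

    ds = yt.load("gadget_halos/data/groups_056/fof_subhalo_tab_056.0.hdf5")
    halo = ds.halo("Group", 0)       # container for the first FoF group
    member_ids = halo["member_ids"]  # may be read from a later file in the set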


https://bitbucket.org/yt_analysis/yt/commits/9bf7d05d35f4/
Changeset:   9bf7d05d35f4
Branch:      yt
User:        brittonsmith
Date:        2016-02-25 12:12:49+00:00
Summary:     Merging.
Affected #:  34 files

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
 yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 clean.sh
--- a/clean.sh
+++ b/clean.sh
@@ -1,4 +1,1 @@
-find . -name "*.so" -exec rm -v {} \;
-find . -name "*.pyc" -exec rm -v {} \;
-find . -name "__config__.py" -exec rm -v {} \;
-rm -rvf build dist
+hg --config extensions.purge= purge --all yt

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -453,12 +453,19 @@
    @requires_ds(my_ds)
    def test_my_ds():
        ds = data_dir_load(my_ds)
+
        def create_image(filename_prefix):
            plt.plot([1, 2], [1, 2])
            plt.savefig(filename_prefix)
        test = GenericImageTest(ds, create_image, 12)
+
+       # this ensures the test has a unique key in the
+       # answer test storage file
+       test.prefix = "my_unique_name"
+
        # this ensures a nice test name in nose's output
        test_my_ds.__description__ = test.description
+
        yield test_my_ds
 
 Another good example of an image comparison test is the

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -434,8 +434,8 @@
 positions with time. For analysis and visualization, it is often useful to turn 
 these displacements on or off, and to be able to scale them arbitrarily to 
 emphasize certain features of the solution. To allow this, if ``yt`` detects 
-displacement fields in an Exodus II dataset (using the convention that they will
- be named ``disp_x``, ``disp_y``, etc...), it will add optionally add these to 
+displacement fields in an Exodus II dataset (using the convention that they will 
+be named ``disp_x``, ``disp_y``, etc...), it will optionally add these to 
 the mesh vertex positions for the purposes of visualization. Displacement fields 
 can be controlled when a dataset is loaded by passing in an optional dictionary 
 to the ``yt.load`` command. This feature is turned off by default, meaning that 
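
For reference, the dictionary mentioned here maps mesh names to a
(scale factor, offset) pair; an illustrative call, with the mesh name and
values as placeholders under that assumed convention:

    import yt

    # scale "connect2" displacements by 5x with no offset (illustrative values)
    ds = yt.load("MOOSE_sample_data/mps_out.e",
                 displacements={'connect2': (5.0, [0.0, 0.0, 0.0])})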

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 setup.py
--- a/setup.py
+++ b/setup.py
@@ -122,6 +122,11 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.bounding_volume_hierarchy",
+              ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              extra_compile_args=omp_args,
+              extra_link_args=omp_args,
+              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -13,7 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.utilities.data_point_utilities import FindBindingEnergy
+from yt.utilities.lib.misc_utilities import \
+    gravitational_binding_energy
 from yt.utilities.operator_registry import \
     OperatorRegistry
 from yt.utilities.physical_constants import \
@@ -64,11 +65,12 @@
              (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
 
     potential = clump.data.ds.quan(G *
-        FindBindingEnergy(clump["gas", "cell_mass"].in_cgs(),
-                          clump["index", "x"].in_cgs(),
-                          clump["index", "y"].in_cgs(),
-                          clump["index", "z"].in_cgs(),
-                          truncate, (kinetic / G).in_cgs()),
+        gravitational_binding_energy(
+            clump["gas", "cell_mass"].in_cgs(),
+            clump["index", "x"].in_cgs(),
+            clump["index", "y"].in_cgs(),
+            clump["index", "z"].in_cgs(),
+            truncate, (kinetic / G).in_cgs()),
         kinetic.in_cgs().units)
     
     if truncate and potential >= kinetic:
@@ -76,7 +78,7 @@
 
     if use_particles:
         potential += clump.data.ds.quan(G *
-            FindBindingEnergy(
+            gravitational_binding_energy(
                 clump["all", "particle_mass"].in_cgs(),
                 clump["all", "particle_position_x"].in_cgs(),
                 clump["all", "particle_position_y"].in_cgs(),

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -702,11 +702,11 @@
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
         # We allocate number of zones, not number of octs
-        op = cls(self.ActiveDimensions.prod(), kernel_name)
+        op = cls(self.ActiveDimensions, kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
-        return vals.reshape(self.ActiveDimensions, order="C")
+        return vals.copy(order="C")
 
     def write_to_gdf(self, gdf_path, fields, nprocs=1, field_units=None,
                      **kwargs):

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -327,12 +327,13 @@
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
         # We allocate number of zones, not number of octs
-        op = cls(self.ActiveDimensions.prod(), kernel_name)
+        # Everything inside this is fortran ordered, so we reverse it here.
+        op = cls(tuple(self.ActiveDimensions)[::-1], kernel_name)
         op.initialize()
         op.process_grid(self, positions, fields)
         vals = op.finalize()
         if vals is None: return
-        return vals.reshape(self.ActiveDimensions, order="C")
+        return vals.transpose() # Fortran-ordered, so transpose.
 
     def select_blocks(self, selector):
         mask = self._get_selector_mask(selector)
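
The reversed-dimensions-plus-transpose trick works because a Fortran-ordered
buffer indexed as [k, j, i] has the same memory layout as the C-ordered
[i, j, k] result; a quick NumPy check of that equivalence:

    import numpy as np

    dims = (2, 3, 4)
    # allocate with reversed shape, Fortran-ordered, as the deposit op now does
    buf = np.zeros(dims[::-1], dtype="float64", order="F")
    buf[3, 2, 1] = 7.0           # write at (k, j, i)
    out = buf.transpose()        # a view with shape dims
    assert out.shape == dims
    assert out[1, 2, 3] == 7.0   # same element, now addressed as (i, j, k)
    assert out.flags["C_CONTIGUOUS"]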

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -69,7 +69,11 @@
     _con_args = ('p',)
     def __init__(self, p, ds=None, field_parameters=None, data_source=None):
         super(YTPoint, self).__init__(ds, field_parameters, data_source)
-        self.p = p
+        if isinstance(p, YTArray):
+            # we pass p through ds.arr to ensure code units are attached
+            self.p = self.ds.arr(p)
+        else:
+            self.p = self.ds.arr(p, 'code_length')
 
 class YTOrthoRay(YTSelectionContainer1D):
     """

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -435,7 +435,9 @@
         if "all" not in self.particle_types:
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))
-            self.add_particle_union(pu)
+            nfields = self.add_particle_union(pu)
+            if nfields == 0:
+                mylog.debug("zero common fields: skipping particle union 'all'")
         self.field_info.setup_extra_union_fields()
         mylog.debug("Loading field plugins.")
         self.field_info.load_all_plugins()
@@ -487,9 +489,17 @@
     def add_particle_union(self, union):
         # No string lookups here, we need an actual union.
         f = self.particle_fields_by_type
-        fields = set_intersection([f[s] for s in union
-                                   if s in self.particle_types_raw
-                                   and len(f[s]) > 0])
+
+        # find fields common to all particle types in the union
+        fields = set_intersection(
+            [f[s] for s in union if s in self.particle_types_raw]
+        )
+
+        if len(fields) == 0:
+            # don't create this union if no fields are common to all
+            # particle types
+            return len(fields)
+
         for field in fields:
             units = set([])
             for s in union:
@@ -508,6 +518,7 @@
         # ...if we can't find them, we set them up as defaults.
         new_fields = self._setup_particle_types([union.name])
         self.field_info.find_dependencies(new_fields)
+        return len(new_fields)
 
     def add_particle_filter(self, filter):
         # This requires an index
@@ -988,7 +999,8 @@
         deps, _ = self.field_info.check_derived_fields([name])
         self.field_dependencies.update(deps)
 
-    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic'):
+    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic',
+                                     weight_field='particle_mass'):
         """Add a new deposited particle field
 
         Creates a new deposited field based on the particle *deposit_field*.
@@ -1003,13 +1015,15 @@
         method : string
            This is the "method name" which will be looked up in the
            `particle_deposit` namespace as `methodname_deposit`.  Current
-           methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
-           `weighted_mean`, `mesh_id`, and `nearest`.
+           methods include `simple_smooth`, `sum`, `std`, `cic`, `weighted_mean`,
+           `mesh_id`, and `nearest`.
         kernel_name : string, default 'cubic'
            This is the name of the smoothing kernel to use. It is only used for
            the `simple_smooth` method and is otherwise ignored. Current
            supported kernel names include `cubic`, `quartic`, `quintic`,
            `wendland2`, `wendland4`, and `wendland6`.
+        weight_field : string, default 'particle_mass'
+           Weighting field name for deposition method `weighted_mean`.
 
         Returns
         -------
@@ -1021,38 +1035,45 @@
             ptype, deposit_field = deposit_field[0], deposit_field[1]
         else:
             raise RuntimeError
+
         units = self.field_info[ptype, deposit_field].units
+        take_log = self.field_info[ptype, deposit_field].take_log
+        name_map = {"sum": "sum", "std":"std", "cic": "cic", "weighted_mean": "avg",
+                    "nearest": "nn", "simple_smooth": "ss", "count": "count"}
+        field_name = "%s_" + name_map[method] + "_%s"
+        field_name = field_name % (ptype, deposit_field.replace('particle_', ''))
+
+        if method == "count":
+            field_name = "%s_count" % ptype
+            if ("deposit", field_name) in self.field_info:
+                mylog.warning("The deposited field %s already exists" % field_name)
+                return ("deposit", field_name)
+            else:
+                units = "dimensionless"
+                take_log = False
 
         def _deposit_field(field, data):
             """
-            Create a grid field for particle wuantities weighted by particle
-            mass, using cloud-in-cell deposition.
+            Create a grid field for particle quantities using given method.
             """
             pos = data[ptype, "particle_position"]
-            # get back into density
-            if method != 'count':
-                pden = data[ptype, "particle_mass"]
-                top = data.deposit(pos, [data[(ptype, deposit_field)]*pden],
-                                   method=method, kernel_name=kernel_name)
-                bottom = data.deposit(pos, [pden], method=method,
-                                      kernel_name=kernel_name)
-                top[bottom == 0] = 0.0
-                bnz = bottom.nonzero()
-                top[bnz] /= bottom[bnz]
-                d = data.ds.arr(top, input_units=units)
+            if method == 'weighted_mean':
+                d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field],
+                                                   data[ptype, weight_field]],
+                                             method=method, kernel_name=kernel_name),
+                                             input_units=units)
+                d[np.isnan(d)] = 0.0
             else:
                 d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field]],
-                                             method=method,
-                                             kernel_name=kernel_name))
+                                             method=method, kernel_name=kernel_name),
+                                             input_units=units)
             return d
-        name_map = {"cic": "cic", "sum": "nn", "count": "count"}
-        field_name = "%s_" + name_map[method] + "_%s"
-        field_name = field_name % (ptype, deposit_field.replace('particle_', ''))
+
         self.add_field(
             ("deposit", field_name),
             function=_deposit_field,
             units=units,
-            take_log=False,
+            take_log=take_log,
             validators=[ValidateSpatial()])
         return ("deposit", field_name)
 

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/data_objects/tests/test_points.py
--- a/yt/data_objects/tests/test_points.py
+++ b/yt/data_objects/tests/test_points.py
@@ -10,6 +10,17 @@
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def test_point_creation():
+    ds = fake_random_ds(16)
+    p1 = ds.point(ds.domain_center)
+    p2 = ds.point([0.5, 0.5, 0.5])
+    p3 = ds.point([0.5, 0.5, 0.5]*yt.units.cm)
+
+    # ensure all three points are really at the same position
+    for fname in 'xyz':
+        assert_equal(p1[fname], p2[fname])
+        assert_equal(p1[fname], p3[fname])
+
 def test_domain_point():
     nparticles = 3
     ds = fake_random_ds(16, particles=nparticles)

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -80,7 +80,8 @@
     def __init__(self, name, function, units=None,
                  take_log=True, validators=None,
                  particle_type=False, vector_field=False, display_field=True,
-                 not_in_all=False, display_name=None, output_units = None):
+                 not_in_all=False, display_name=None, output_units=None,
+                 ds=None):
         self.name = name
         self.take_log = take_log
         self.display_name = display_name
@@ -88,6 +89,7 @@
         self.display_field = display_field
         self.particle_type = particle_type
         self.vector_field = vector_field
+        self.ds = ds
 
         self._function = function
 
@@ -128,11 +130,11 @@
         return dd
 
     def get_units(self):
-        u = Unit(self.units)
+        u = Unit(self.units, registry=self.ds.unit_registry)
         return u.latex_representation()
 
     def get_projected_units(self):
-        u = Unit(self.units)*Unit('cm')
+        u = Unit(self.units, registry=self.ds.unit_registry)*Unit('cm')
         return u.latex_representation()
 
     def check_available(self, data):
@@ -206,7 +208,7 @@
         if projected:
             raise NotImplementedError
         else:
-            units = Unit(self.units)
+            units = Unit(self.units, registry=self.ds.unit_registry)
         # Add unit label
         if not units.is_dimensionless:
             data_label += r"\ \ (%s)" % (units.latex_representation())

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -240,6 +240,7 @@
         # the derived field and exit. If used as a decorator, function will
         # be None. In that case, we return a function that will be applied
         # to the function that the decorator is applied to.
+        kwargs.setdefault('ds', self.ds)
         if function is None:
             def create_function(f):
                 self[name] = DerivedField(name, f, **kwargs)
@@ -274,6 +275,7 @@
         return loaded, unavailable
 
     def add_output_field(self, name, **kwargs):
+        kwargs.setdefault('ds', self.ds)
         self[name] = DerivedField(name, NullFunc, **kwargs)
 
     def alias(self, alias_name, original_name, units = None):
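
The ds plumbing exists so that Unit can resolve dataset-specific (code) units;
schematically, assuming yt's Unit class as used in derived_field.py above:

    from yt.testing import fake_random_ds
    from yt.units.unit_object import Unit

    ds = fake_random_ds(16)
    # "code_length" is only defined in a dataset's unit registry, so the
    # registry has to be passed explicitly, which is what ds enables here
    u = Unit("code_length", registry=ds.unit_registry)
    print(u.latex_representation())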

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -764,7 +764,7 @@
 
 def add_volume_weighted_smoothed_field(ptype, coord_name, mass_name,
         smoothing_length_name, density_name, smoothed_field, registry,
-        nneighbors = None, kernel_name = 'cubic'):
+        nneighbors = 64, kernel_name = 'cubic'):
     if kernel_name == 'cubic':
         field_name = ("deposit", "%s_smoothed_%s" % (ptype, smoothed_field))
     else:
@@ -785,17 +785,15 @@
         else:
             hsml = data[ptype, smoothing_length_name]
             hsml.convert_to_units("code_length")
-        kwargs = {}
-        if nneighbors:
-            kwargs['nneighbors'] = nneighbors
         # This is for applying cutoffs, similar to in the SPLASH paper.
         smooth_cutoff = data["index","cell_volume"]**(1./3)
         smooth_cutoff.convert_to_units("code_length")
         # volume_weighted smooth operations return lists of length 1.
         rv = data.smooth(pos, [mass, hsml, dens, quan],
+                         index_fields=[smooth_cutoff],
                          method="volume_weighted",
                          create_octree=True,
-                         index_fields=[smooth_cutoff],
+                         nneighbors=nneighbors,
                          kernel_name=kernel_name)[0]
         rv[np.isnan(rv)] = 0.0
         # Now some quick unit conversions.

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -33,7 +33,7 @@
         fields.append(("gas", fname))
         units.append(code_units)
 
-    base_ds = fake_random_ds(4, fields=fields, units=units, particles=10)
+    base_ds = fake_random_ds(4, fields=fields, units=units, particles=20)
 
     base_ds.index
     base_ds.cosmological_simulation = 1
@@ -195,12 +195,28 @@
             yield TestFieldAccess(field, nproc)
 
 def test_add_deposited_particle_field():
+    # NOT tested: "std", "mesh_id", "nearest" and "simple_smooth"
     global base_ds
-    fn = base_ds.add_deposited_particle_field(('io', 'particle_ones'), 'count')
-    assert_equal(fn, ('deposit', 'io_count_ones'))
     ad = base_ds.all_data()
+
+    # Test "count", "sum" and "cic" method
+    for method in ["count", "sum", "cic"]:
+        fn = base_ds.add_deposited_particle_field(('io', 'particle_mass'), method)
+        expected_fn = 'io_%s' if method == "count" else 'io_%s_mass'
+        assert_equal(fn, ('deposit', expected_fn % method))
+        ret = ad[fn]
+        if method == "count":
+            assert_equal(ret.sum(), ad['particle_ones'].sum())
+        else:
+            assert_almost_equal(ret.sum(), ad['particle_mass'].sum())
+
+    # Test "weighted_mean" method
+    fn = base_ds.add_deposited_particle_field(('io', 'particle_ones'), 'weighted_mean',
+                                              weight_field='particle_ones')
+    assert_equal(fn, ('deposit', 'io_avg_ones'))
     ret = ad[fn]
-    assert_equal(ret.sum(), ad['particle_ones'].sum())
+    # The sum should equal the number of cells that have particles
+    assert_equal(ret.sum(), np.count_nonzero(ad[("deposit", "io_count")]))
 
 @requires_file('GadgetDiskGalaxy/snapshot_200.hdf5')
 def test_add_smoothed_particle_field():

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -15,6 +15,7 @@
 from libc.stdlib cimport malloc, free
 from libc.string cimport memcpy
 import data_structures
+from yt.utilities.lib.misc_utilities import OnceIndirect
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
@@ -1554,6 +1555,9 @@
         if fields is None:
             fields = []
         nf = len(fields)
+        cdef np.float64_t[::cython.view.indirect, ::1] field_pointers 
+        if nf > 0: field_pointers = OnceIndirect(fields)
+        cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector, -1)
         cdef np.ndarray[np.int64_t, ndim=1] domain_ind
@@ -1568,17 +1572,9 @@
                 continue
             domain_ind[sfc - self.sfc_start] = j
             j += 1
-        cdef np.float64_t **field_pointers
-        cdef np.float64_t *field_vals
         cdef np.float64_t pos[3]
         cdef np.float64_t left_edge[3]
         cdef int coords[3]
-        cdef np.ndarray[np.float64_t, ndim=1] tarr
-        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
-        for i in range(nf):
-            tarr = fields[i]
-            field_pointers[i] = <np.float64_t *> tarr.data
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = 1
         cdef np.int64_t offset, moff

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -178,8 +178,15 @@
                 raise RuntimeError("yt needs cylindrical to be 2D")
             self.level_dds[:,2] = 2*np.pi
             default_zbounds = (0.0, 2*np.pi)
+        elif self.ds.geometry == "spherical":
+            # BoxLib only supports 1D spherical, so ensure
+            # the other dimensions have the right extent.
+            self.level_dds[:,1] = np.pi
+            self.level_dds[:,2] = 2*np.pi
+            default_ybounds = (0.0, np.pi)
+            default_zbounds = (0.0, 2*np.pi)
         else:
-            raise RuntimeError("yt only supports cartesian and cylindrical coordinates.")
+            raise RuntimeError("Unknown BoxLib coordinate system.")
         if int(next(header_file)) != 0:
             raise RuntimeError("INTERNAL ERROR! This should be a zero.")
 
@@ -586,8 +593,10 @@
             self.geometry = "cartesian"
         elif coordinate_type == 1:
             self.geometry = "cylindrical"
+        elif coordinate_type == 2:
+            self.geometry = "spherical"
         else:
-            raise RuntimeError("yt does not yet support spherical geometry")
+            raise RuntimeError("Unknown BoxLib coord_type")
 
         # overrides for 1/2-dimensional data
         if self.dimensionality == 1:

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -278,12 +278,13 @@
             nap = dict((ap_type, []) for ap_type in 
                 params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
         else:
-            nap = {}
             if "AppendActiveParticleType" in self.parameters:
+                nap = {}
                 active_particles = True
                 for type in self.parameters.get("AppendActiveParticleType", []):
                     nap[type] = []
             else:
+                nap = None
                 active_particles = False
         for grid_id in range(self.num_grids):
             pbar.update(grid_id)

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-Enzo frontend tests using moving7
+Enzo frontend tests
 
 
 
@@ -33,6 +33,13 @@
 _fields = ("temperature", "density", "velocity_magnitude",
            "velocity_divergence")
 
+two_sphere_test = 'ActiveParticleTwoSphere/DD0011/DD0011'
+active_particle_cosmology = 'ActiveParticleCosmology/DD0046/DD0046'
+ecp = "enzo_cosmology_plus/DD0046/DD0046"
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
+
+
 def check_color_conservation(ds):
     species_names = ds.field_info.species_names
     dd = ds.all_data()
@@ -68,7 +75,6 @@
         test_moving7.__name__ = test.description
         yield test
 
-g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
 @requires_ds(g30, big_data=True)
 def test_galaxy0030():
     ds = data_dir_load(g30)
@@ -78,7 +84,6 @@
         test_galaxy0030.__name__ = test.description
         yield test
 
-enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
 @requires_ds(enzotiny)
 def test_simulated_halo_mass_function():
     ds = data_dir_load(enzotiny)
@@ -91,7 +96,6 @@
     for fit in range(1, 6):
         yield AnalyticHaloMassFunctionTest(ds, fit)
 
-ecp = "enzo_cosmology_plus/DD0046/DD0046"
 @requires_ds(ecp, big_data=True)
 def test_ecp():
     ds = data_dir_load(ecp)
@@ -116,3 +120,33 @@
 @requires_file(enzotiny)
 def test_EnzoDataset():
     assert isinstance(data_dir_load(enzotiny), EnzoDataset)
+
+@requires_file(two_sphere_test)
+@requires_file(active_particle_cosmology)
+def test_active_particle_datasets():
+    two_sph = data_dir_load(two_sphere_test)
+    assert 'AccretingParticle' in two_sph.particle_types_raw
+    assert_equal(len(two_sph.particle_unions), 0)
+    pfields = ['GridID', 'creation_time', 'dynamical_time',
+               'identifier', 'level', 'metallicity', 'particle_mass']
+    pfields += ['particle_position_%s' % d for d in 'xyz']
+    pfields += ['particle_velocity_%s' % d for d in 'xyz']
+
+    acc_part_fields = \
+        [('AccretingParticle', pf) for pf in ['AccretionRate'] + pfields]
+
+    real_acc_part_fields = sorted(
+        [f for f in two_sph.field_list if f[0] == 'AccretingParticle'])
+    assert_equal(acc_part_fields, real_acc_part_fields)
+
+
+    apcos = data_dir_load(active_particle_cosmology)
+    assert_equal(['CenOstriker', 'DarkMatter'], apcos.particle_types_raw)
+    assert 'all' in apcos.particle_unions
+
+    apcos_fields = [('CenOstriker', pf) for pf in pfields]
+
+    real_apcos_fields = sorted(
+        [f for f in apcos.field_list if f[0] == 'CenOstriker'])
+
+    assert_equal(apcos_fields, real_apcos_fields)

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -138,5 +138,5 @@
     cdef public int update_values
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
-                      np.float64_t ppos[3], np.float64_t *fields,
+                      np.float64_t ppos[3], np.float64_t[:] fields,
                       np.int64_t domain_ind)

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -19,9 +19,22 @@
 from libc.stdlib cimport malloc, free
 cimport cython
 from libc.math cimport sqrt
+from cpython cimport PyObject
+from fp_utils cimport *
 
 from oct_container cimport Oct, OctAllocationContainer, \
     OctreeContainer, OctInfo
+from cpython.array cimport array, clone
+from cython.view cimport memoryview as cymemview
+from yt.utilities.lib.misc_utilities import OnceIndirect
+
+cdef append_axes(np.ndarray arr, int naxes):
+    if arr.ndim == naxes:
+        return arr
+    # Avoid copies
+    arr2 = arr.view()
+    arr2.shape = arr2.shape + (1,) * (naxes - arr2.ndim)
+    return arr2
 
 cdef class ParticleDepositOperation:
     def __init__(self, nvals, kernel_name):
@@ -46,15 +59,10 @@
         if fields is None:
             fields = []
         nf = len(fields)
-        cdef np.float64_t **field_pointers
-        cdef np.float64_t *field_vals
+        cdef np.float64_t[::cython.view.indirect, ::1] field_pointers 
+        if nf > 0: field_pointers = OnceIndirect(fields)
         cdef np.float64_t pos[3]
-        cdef np.ndarray[np.float64_t, ndim=1] tarr
-        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
-        for i in range(nf):
-            tarr = fields[i]
-            field_pointers[i] = <np.float64_t *> tarr.data
+        cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
         cdef int dims[3]
         dims[0] = dims[1] = dims[2] = (1 << octree.oref)
         cdef int nz = dims[0] * dims[1] * dims[2]
@@ -66,7 +74,7 @@
         for i in range(positions.shape[0]):
             # We should check if particle remains inside the Oct here
             for j in range(nf):
-                field_vals[j] = field_pointers[j][i]
+                field_vals[j] = field_pointers[j,i]
             for j in range(3):
                 pos[j] = positions[i, j]
             # This line should be modified to have it return the index into an
@@ -89,7 +97,7 @@
             if oct == NULL or (domain_id > 0 and oct.domain != domain_id):
                 continue
             # Note that this has to be our local index, not our in-file index.
-            offset = dom_ind[oct.domain_ind - moff] * nz
+            offset = dom_ind[oct.domain_ind - moff]
             if offset < 0: continue
             # Check that we found the oct ...
             self.process(dims, oi.left_edge, oi.dds,
@@ -107,16 +115,11 @@
         if fields is None:
             fields = []
         nf = len(fields)
-        cdef np.float64_t **field_pointers
-        cdef np.float64_t *field_vals
+        cdef np.float64_t[:] field_vals = np.empty(nf, dtype="float64")
+        cdef np.float64_t[::cython.view.indirect, ::1] field_pointers 
+        if nf > 0: field_pointers = OnceIndirect(fields)
         cdef np.float64_t pos[3]
-        cdef np.ndarray[np.float64_t, ndim=1] tarr
-        field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
-        field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
         cdef np.int64_t gid = getattr(gobj, "id", -1)
-        for i in range(nf):
-            tarr = fields[i]
-            field_pointers[i] = <np.float64_t *> tarr.data
         cdef np.float64_t dds[3]
         cdef np.float64_t left_edge[3]
         cdef int dims[3]
@@ -127,7 +130,7 @@
         for i in range(positions.shape[0]):
             # Now we process
             for j in range(nf):
-                field_vals[j] = field_pointers[j][i]
+                field_vals[j] = field_pointers[j,i]
             for j in range(3):
                 pos[j] = positions[i, j]
             self.process(dims, left_edge, dds, 0, pos, field_vals, gid)
@@ -137,19 +140,16 @@
 
     cdef void process(self, int dim[3], np.float64_t left_edge[3],
                       np.float64_t dds[3], np.int64_t offset,
-                      np.float64_t ppos[3], np.float64_t *fields,
+                      np.float64_t ppos[3], np.float64_t[:] fields,
                       np.int64_t domain_ind):
         raise NotImplementedError
 
 cdef class CountParticles(ParticleDepositOperation):
-    cdef np.int64_t *count # float, for ease
-    cdef public object ocount
+    cdef np.int64_t[:,:,:,:] count
     def initialize(self):
         # Create a numpy array accessible to python
-        self.ocount = np.zeros(self.nvals, dtype="int64", order='F')
-        cdef np.ndarray arr = self.ocount
-        # alias the C-view for use in cython
-        self.count = <np.int64_t*> arr.data
+        self.count = append_axes(
+            np.zeros(self.nvals, dtype="int64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -157,7 +157,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset, # offset into IO field
                       np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         # here we do our thing; this is the kernel
@@ -165,10 +165,12 @@
         cdef int i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        self.count[gind(ii[0], ii[1], ii[2], dim) + offset] += 1
+        self.count[ii[2], ii[1], ii[0], offset] += 1
 
     def finalize(self):
-        return self.ocount.astype('f8')
+        arr = np.asarray(self.count)
+        arr.shape = self.nvals
+        return arr.astype("float64")
 
 deposit_count = CountParticles
 
@@ -176,18 +178,14 @@
     # Note that this does nothing at the edges.  So it will give a poor
     # estimate there, and since Octrees are mostly edges, this will be a very
     # poor SPH kernel.
-    cdef np.float64_t *data
-    cdef public object odata
-    cdef np.float64_t *temp
-    cdef public object otemp
+    cdef np.float64_t[:,:,:,:] data
+    cdef np.float64_t[:,:,:,:] temp
 
     def initialize(self):
-        self.odata = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.odata
-        self.data = <np.float64_t*> arr.data
-        self.otemp = np.zeros(self.nvals, dtype="float64", order='F')
-        arr = self.otemp
-        self.temp = <np.float64_t*> arr.data
+        self.data = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.temp = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -195,7 +193,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
@@ -227,14 +225,14 @@
                     dist = idist[0] + idist[1] + idist[2]
                     # Calculate distance in multiples of the smoothing length
                     dist = sqrt(dist) / fields[0]
-                    self.temp[gind(i,j,k,dim) + offset] = self.sph_kernel(dist)
-                    kernel_sum += self.temp[gind(i,j,k,dim) + offset]
+                    self.temp[k,j,i,offset] = self.sph_kernel(dist)
+                    kernel_sum += self.temp[k,j,i,offset]
         # Having found the kernel, deposit accordingly into gdata
         for i from ib0[0] <= i <= ib1[0]:
             for j from ib0[1] <= j <= ib1[1]:
                 for k from ib0[2] <= k <= ib1[2]:
-                    dist = self.temp[gind(i,j,k,dim) + offset] / kernel_sum
-                    self.data[gind(i,j,k,dim) + offset] += fields[1] * dist
+                    dist = self.temp[k,j,i,offset] / kernel_sum
+                    self.data[k,j,i,offset] += fields[1] * dist
 
     def finalize(self):
         return self.odata
@@ -242,12 +240,10 @@
 deposit_simple_smooth = SimpleSmooth
 
 cdef class SumParticleField(ParticleDepositOperation):
-    cdef np.float64_t *sum
-    cdef public object osum
+    cdef np.float64_t[:,:,:,:] sum
     def initialize(self):
-        self.osum = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.osum
-        self.sum = <np.float64_t*> arr.data
+        self.sum = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -255,18 +251,20 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
         cdef int i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
-        self.sum[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0]
+        self.sum[ii[2], ii[1], ii[0], offset] += fields[0]
         return
 
     def finalize(self):
-        return self.osum
+        sum = np.asarray(self.sum)
+        sum.shape = self.nvals
+        return sum
 
 deposit_sum = SumParticleField
 
@@ -274,28 +272,20 @@
     # Thanks to Britton and MJ Turk for the link
     # to a single-pass STD
     # http://www.cs.berkeley.edu/~mhoemmen/cs194/Tutorials/variance.pdf
-    cdef np.float64_t *mk
-    cdef np.float64_t *qk
-    cdef np.float64_t *i
-    cdef public object omk
-    cdef public object oqk
-    cdef public object oi
+    cdef np.float64_t[:,:,:,:] mk
+    cdef np.float64_t[:,:,:,:] qk
+    cdef np.float64_t[:,:,:,:] i
     def initialize(self):
         # we do this in a single pass, but need two scalar
         # per cell, M_k, and Q_k and also the number of particles
         # deposited into each one
         # the M_k term
-        self.omk= np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray omkarr= self.omk
-        self.mk= <np.float64_t*> omkarr.data
-        # the Q_k term
-        self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray oqkarr= self.oqk
-        self.qk= <np.float64_t*> oqkarr.data
-        # particle count
-        self.oi = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray oiarr = self.oi
-        self.i = <np.float64_t*> oiarr.data
+        self.mk = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.qk = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.i = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -303,7 +293,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
@@ -311,35 +301,36 @@
         cdef float k, mk, qk
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i])/dds[i])
-        cell_index = gind(ii[0], ii[1], ii[2], dim) + offset
-        k = self.i[cell_index]
-        mk = self.mk[cell_index]
-        qk = self.qk[cell_index]
+        k = self.i[ii[2], ii[1], ii[0], offset]
+        mk = self.mk[ii[2], ii[1], ii[0], offset]
+        qk = self.qk[ii[2], ii[1], ii[0], offset]
         #print k, mk, qk, cell_index
         if k == 0.0:
             # Initialize cell values
-            self.mk[cell_index] = fields[0]
+            self.mk[ii[2], ii[1], ii[0], offset] = fields[0]
         else:
-            self.mk[cell_index] = mk + (fields[0] - mk) / k
-            self.qk[cell_index] = qk + (k - 1.0) * (fields[0] - mk)**2.0 / k
-        self.i[cell_index] += 1
+            self.mk[ii[2], ii[1], ii[0], offset] = mk + (fields[0] - mk) / k
+            self.qk[ii[2], ii[1], ii[0], offset] = \
+                qk + (k - 1.0) * (fields[0] - mk)**2.0 / k
+        self.i[ii[2], ii[1], ii[0], offset] += 1
 
     def finalize(self):
         # This is the standard variance
         # if we want sample variance divide by (self.oi - 1.0)
-        std2 = self.oqk / self.oi
-        std2[self.oi == 0.0] = 0.0
+        i = np.asarray(self.i)
+        std2 = np.asarray(self.qk) / i
+        std2[i == 0.0] = 0.0
+        std2.shape = self.nvals
         return np.sqrt(std2)
 
 deposit_std = StdParticleField
 
 cdef class CICDeposit(ParticleDepositOperation):
-    cdef np.float64_t *field
+    cdef np.float64_t[:,:,:,:] field
     cdef public object ofield
     def initialize(self):
-        self.ofield = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.ofield
-        self.field = <np.float64_t *> arr.data
+        self.field = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -347,7 +338,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset, # offset into IO field
                       np.float64_t ppos[3], # this particle's position
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
 
@@ -372,29 +363,26 @@
         for i in range(2):
             for j in range(2):
                 for k in range(2):
-                    ii = gind(ind[0] - i, ind[1] - j, ind[2] - k, dim) + offset
-                    self.field[ii] += fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
+                    self.field[ind[2] - k, ind[1] - j, ind[0] - i, offset] += \
+                        fields[0]*rdds[0][i]*rdds[1][j]*rdds[2][k]
 
     def finalize(self):
-        return self.ofield
+        rv = np.asarray(self.field)
+        rv.shape = self.nvals
+        return rv
 
 deposit_cic = CICDeposit
 
 cdef class WeightedMeanParticleField(ParticleDepositOperation):
     # Deposit both mass * field and mass into two scalars
     # then in finalize divide mass * field / mass
-    cdef np.float64_t *wf
-    cdef public object owf
-    cdef np.float64_t *w
-    cdef public object ow
+    cdef np.float64_t[:,:,:,:] wf
+    cdef np.float64_t[:,:,:,:] w
     def initialize(self):
-        self.owf = np.zeros(self.nvals, dtype='float64', order='F')
-        cdef np.ndarray wfarr = self.owf
-        self.wf = <np.float64_t*> wfarr.data
-
-        self.ow = np.zeros(self.nvals, dtype='float64', order='F')
-        cdef np.ndarray warr = self.ow
-        self.w = <np.float64_t*> warr.data
+        self.wf = append_axes(
+            np.zeros(self.nvals, dtype='float64', order='F'), 4)
+        self.w = append_axes(
+            np.zeros(self.nvals, dtype='float64', order='F'), 4)
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -402,18 +390,22 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         cdef int ii[3]
         cdef int i
         for i in range(3):
             ii[i] = <int>((ppos[i] - left_edge[i]) / dds[i])
-        self.w[ gind(ii[0], ii[1], ii[2], dim) + offset] += fields[1]
-        self.wf[gind(ii[0], ii[1], ii[2], dim) + offset] += fields[0] * fields[1]
+        self.w[ii[2], ii[1], ii[0], offset] += fields[1]
+        self.wf[ii[2], ii[1], ii[0], offset] += fields[0] * fields[1]
 
     def finalize(self):
-        return self.owf / self.ow
+        wf = np.asarray(self.wf)
+        w = np.asarray(self.w)
+        rv = wf / w
+        rv.shape = self.nvals
+        return rv
 
 deposit_weighted_mean = WeightedMeanParticleField
 
@@ -430,7 +422,7 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         fields[0] = domain_ind
@@ -441,19 +433,14 @@
 deposit_mesh_id = MeshIdentifier
 
 cdef class NNParticleField(ParticleDepositOperation):
-    cdef np.float64_t *nnfield
-    cdef np.float64_t *distfield
-    cdef public object onnfield
-    cdef public object odistfield
+    cdef np.float64_t[:,:,:,:] nnfield
+    cdef np.float64_t[:,:,:,:] distfield
     def initialize(self):
-        self.onnfield = np.zeros(self.nvals, dtype="float64", order='F')
-        cdef np.ndarray arr = self.onnfield
-        self.nnfield = <np.float64_t*> arr.data
-
-        self.odistfield = np.zeros(self.nvals, dtype="float64", order='F')
-        self.odistfield[:] = np.inf
-        arr = self.odistfield
-        self.distfield = <np.float64_t*> arr.data
+        self.nnfield = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.distfield = append_axes(
+            np.zeros(self.nvals, dtype="float64", order='F'), 4)
+        self.distfield[:] = np.inf
 
     @cython.cdivision(True)
     cdef void process(self, int dim[3],
@@ -461,14 +448,13 @@
                       np.float64_t dds[3],
                       np.int64_t offset,
                       np.float64_t ppos[3],
-                      np.float64_t *fields,
+                      np.float64_t[:] fields,
                       np.int64_t domain_ind
                       ):
         # This one is a bit slow.  Every grid cell is going to be iterated
         # over, and we're going to deposit particles in it.
         cdef int i, j, k
         cdef int ii[3]
-        cdef np.int64_t ggind
         cdef np.float64_t r2
         cdef np.float64_t gpos[3]
         gpos[0] = left_edge[0] + 0.5 * dds[0]
@@ -477,19 +463,20 @@
             for j in range(dim[1]):
                 gpos[2] = left_edge[2] + 0.5 * dds[2]
                 for k in range(dim[2]):
-                    ggind = gind(i, j, k, dim) + offset
                     r2 = ((ppos[0] - gpos[0])*(ppos[0] - gpos[0]) +
                           (ppos[1] - gpos[1])*(ppos[1] - gpos[1]) +
                           (ppos[2] - gpos[2])*(ppos[2] - gpos[2]))
-                    if r2 < self.distfield[ggind]:
-                        self.distfield[ggind] = r2
-                        self.nnfield[ggind] = fields[0]
+                    if r2 < self.distfield[k,j,i,offset]:
+                        self.distfield[k,j,i,offset] = r2
+                        self.nnfield[k,j,i,offset] = fields[0]
                     gpos[2] += dds[2]
                 gpos[1] += dds[1]
             gpos[0] += dds[0]
         return
 
     def finalize(self):
-        return self.onnfield
+        nn = np.asarray(self.nnfield)
+        nn.shape = self.nvals
+        return nn
 
 deposit_nearest = NNParticleField
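
The append_axes helper introduced above pads trailing length-1 axes without
copying, so grid deposits (3D buffer, offset 0) and octree deposits can share
one 4D memoryview interface; in plain NumPy terms:

    import numpy as np

    def append_axes(arr, naxes):
        # pad with trailing length-1 axes, returning a view (no copy)
        if arr.ndim == naxes:
            return arr
        arr2 = arr.view()
        arr2.shape = arr2.shape + (1,) * (naxes - arr2.ndim)
        return arr2

    counts = append_axes(np.zeros((8, 8, 8), order="F"), 4)
    assert counts.shape == (8, 8, 8, 1)   # grid case: process() uses offset 0
    assert counts.base is not None        # still a view on the allocation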

diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -14,6 +14,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import operator
 import numpy as np
 
 from yt.funcs import mylog
@@ -42,19 +43,28 @@
 steps = np.array([[-1, -1, -1], [-1, -1,  0], [-1, -1,  1],
                   [-1,  0, -1], [-1,  0,  0], [-1,  0,  1],
                   [-1,  1, -1], [-1,  1,  0], [-1,  1,  1],
-                  
+
                   [ 0, -1, -1], [ 0, -1,  0], [ 0, -1,  1],
                   [ 0,  0, -1],
                   # [ 0,  0,  0],
                   [ 0,  0,  1],
                   [ 0,  1, -1], [ 0,  1,  0], [ 0,  1,  1],
-                  
+
                   [ 1, -1, -1], [ 1, -1,  0], [ 1, -1,  1],
                   [ 1,  0, -1], [ 1,  0,  0], [ 1,  0,  1],
                   [ 1,  1, -1], [ 1,  1,  0], [ 1,  1,  1] ])
 
+def _apply_log(data, log_changed, log_new):
+    '''Helper used to set log10/10^ to data in AMRKDTree'''
+    if not log_changed:
+        return
+    if log_new:
+        np.log10(data, data)
+    else:
+        np.power(10.0, data, data)
+
 class Tree(object):
-    def __init__(self, ds, comm_rank=0, comm_size=1, left=None, right=None, 
+    def __init__(self, ds, comm_rank=0, comm_size=1, left=None, right=None,
         min_level=None, max_level=None, data_source=None):
 
         self.ds = ds
@@ -155,6 +165,7 @@
         self.brick_dimensions = []
         self.sdx = ds.index.get_smallest_dx()
 
+        self.regenerate_data = True
         self._initialized = False
         try:
             self._id_offset = ds.index.grids[0]._id_offset
@@ -171,13 +182,25 @@
                          data_source=data_source)
 
     def set_fields(self, fields, log_fields, no_ghost):
-        self.fields = self.data_source._determine_fields(fields)
+        new_fields = self.data_source._determine_fields(fields)
+        self.regenerate_data = \
+            self.fields is None or \
+            len(self.fields) != len(new_fields) or \
+            self.fields != new_fields
+        self.fields = new_fields
+
+        if self.log_fields is not None:
+            flip_log = map(operator.ne, self.log_fields, log_fields)
+        else:
+            flip_log = [False] * len(log_fields)
         self.log_fields = log_fields
+
         self.no_ghost = no_ghost
         del self.bricks, self.brick_dimensions
         self.brick_dimensions = []
         bricks = []
         for b in self.traverse():
+            map(_apply_log, b.my_data, flip_log, log_fields)
             bricks.append(b)
         self.bricks = np.array(bricks)
         self.brick_dimensions = np.array(self.brick_dimensions)
@@ -261,7 +284,8 @@
         return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
-        if node.data is not None: return node.data
+        if node.data is not None and not self.regenerate_data:
+            return node.data
         grid = self.ds.index.grids[node.grid - self._id_offset]
         dds = grid.dds.ndarray_view()
         gle = grid.LeftEdge.ndarray_view()
@@ -273,7 +297,7 @@
         assert(np.all(grid.LeftEdge <= nle))
         assert(np.all(grid.RightEdge >= nre))
 
-        if grid in self.current_saved_grids:
+        if grid in self.current_saved_grids and not self.regenerate_data:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
@@ -301,6 +325,7 @@
         node.data = brick
         if not self._initialized:
             self.brick_dimensions.append(dims)
+        self.regenerate_data = False
         return brick
 
     def locate_brick(self, position):
@@ -327,16 +352,16 @@
         cis: List of neighbor cell index tuples
 
         Both of these are neighbors that, relative to the current cell
-        index (i,j,k), are ordered as: 
-        
-        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...  
-        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...  
+        index (i,j,k), are ordered as:
+
+        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...
+        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...
         (i+1, j+1, k-1), (i+1, j+1, k ), (i+1, j+1, k+1)
 
         That is, they start from the lower left and proceed to the upper
         right, varying the third index most frequently. Note that the
         center cell (i,j,k) is omitted.
-        
+
         """
         ci = np.array(ci)
         center_dds = grid.dds
@@ -351,7 +376,7 @@
         new_positions = position + steps*offs
         new_positions = [periodic_position(p, self.ds) for p in new_positions]
         grids[in_grid] = grid
-                
+
         get_them = np.argwhere(in_grid).ravel()
         cis[in_grid] = new_cis[in_grid]
 
@@ -367,11 +392,11 @@
         return grids, cis
 
     def locate_neighbors_from_position(self, position):
-        r"""Given a position, finds the 26 neighbor grids 
+        r"""Given a position, finds the 26 neighbor grids
         and cell indices.
 
         This is mostly a wrapper around locate_neighbors.
-        
+
         Parameters
         ----------
         position: array-like
@@ -383,16 +408,16 @@
         cis: List of neighbor cell index tuples
 
         Both of these are neighbors that, relative to the current cell
-        index (i,j,k), are ordered as: 
-        
-        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...  
-        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...  
+        index (i,j,k), are ordered as:
+
+        (i-1, j-1, k-1), (i-1, j-1, k ), (i-1, j-1, k+1), ...
+        (i-1, j  , k-1), (i-1, j  , k ), (i-1, j  , k+1), ...
         (i+1, j+1, k-1), (i+1, j+1, k ), (i+1, j+1, k+1)
 
         That is, they start from the lower left and proceed to the upper
         right, varying the third index most frequently. Note that the
         center cell (i,j,k) is omitted.
-        
+
         """
         position = np.array(position)
         grid = self.ds.index.grids[self.locate_brick(position).grid -
@@ -421,7 +446,7 @@
         del f
         if self.comm.rank != (self.comm.size-1):
             self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
-        
+
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.ds
@@ -435,10 +460,10 @@
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
-                                                 node.l_corner.copy(), 
-                                                 node.r_corner.copy(), 
+                                                 node.l_corner.copy(),
+                                                 node.r_corner.copy(),
                                                  node.dims.astype('int64'))
-                    
+
                     self.bricks.append(node.data)
                     self.brick_dimensions.append(node.dims)
 
@@ -457,15 +482,15 @@
         if self.comm.size == 0: return
         nid, pid, lid, rid, les, res, gid, splitdims, splitposs = \
                 self.get_node_arrays()
-        nid = self.comm.par_combine_object(nid, 'cat', 'list') 
-        pid = self.comm.par_combine_object(pid, 'cat', 'list') 
-        lid = self.comm.par_combine_object(lid, 'cat', 'list') 
-        rid = self.comm.par_combine_object(rid, 'cat', 'list') 
-        gid = self.comm.par_combine_object(gid, 'cat', 'list') 
-        les = self.comm.par_combine_object(les, 'cat', 'list') 
-        res = self.comm.par_combine_object(res, 'cat', 'list') 
-        splitdims = self.comm.par_combine_object(splitdims, 'cat', 'list') 
-        splitposs = self.comm.par_combine_object(splitposs, 'cat', 'list') 
+        nid = self.comm.par_combine_object(nid, 'cat', 'list')
+        pid = self.comm.par_combine_object(pid, 'cat', 'list')
+        lid = self.comm.par_combine_object(lid, 'cat', 'list')
+        rid = self.comm.par_combine_object(rid, 'cat', 'list')
+        gid = self.comm.par_combine_object(gid, 'cat', 'list')
+        les = self.comm.par_combine_object(les, 'cat', 'list')
+        res = self.comm.par_combine_object(res, 'cat', 'list')
+        splitdims = self.comm.par_combine_object(splitdims, 'cat', 'list')
+        splitposs = self.comm.par_combine_object(splitposs, 'cat', 'list')
         nid = np.array(nid)
         self.rebuild_tree_from_array(nid, pid, lid,
             rid, les, res, gid, splitdims, splitposs)
@@ -481,25 +506,25 @@
         splitdims = []
         splitposs = []
         for node in depth_first_touch(self.tree.trunk):
-            nids.append(node.node_id) 
-            les.append(node.get_left_edge()) 
-            res.append(node.get_right_edge()) 
+            nids.append(node.node_id)
+            les.append(node.get_left_edge())
+            res.append(node.get_right_edge())
             if node.left is None:
-                leftids.append(-1) 
+                leftids.append(-1)
             else:
-                leftids.append(node.left.node_id) 
+                leftids.append(node.left.node_id)
             if node.right is None:
-                rightids.append(-1) 
+                rightids.append(-1)
             else:
-                rightids.append(node.right.node_id) 
+                rightids.append(node.right.node_id)
             if node.parent is None:
-                parentids.append(-1) 
+                parentids.append(-1)
             else:
-                parentids.append(node.parent.node_id) 
+                parentids.append(node.parent.node_id)
             if node.grid is None:
-                gridids.append(-1) 
+                gridids.append(-1)
             else:
-                gridids.append(node.grid) 
+                gridids.append(node.grid)
             splitdims.append(node.get_split_dim())
             splitposs.append(node.get_split_pos())
 
@@ -510,10 +535,10 @@
                                rids, les, res, gids, splitdims, splitposs):
         del self.tree.trunk
 
-        self.tree.trunk = Node(None, 
+        self.tree.trunk = Node(None,
                     None,
                     None,
-                    les[0], res[0], gids[0], nids[0]) 
+                    les[0], res[0], gids[0], nids[0])
 
         N = nids.shape[0]
         for i in range(N):
@@ -521,14 +546,14 @@
             n.set_left_edge(les[i])
             n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, 
-                              np.zeros(3, dtype='float64'),  
-                              np.zeros(3, dtype='float64'),  
+                n.left = Node(n, None, None,
+                              np.zeros(3, dtype='float64'),
+                              np.zeros(3, dtype='float64'),
                               -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, 
-                               np.zeros(3, dtype='float64'),  
-                               np.zeros(3, dtype='float64'),  
+                n.right = Node(n, None, None,
+                               np.zeros(3, dtype='float64'),
+                               np.zeros(3, dtype='float64'),
                                -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
@@ -541,9 +566,9 @@
 
     def count_volume(self):
         return kd_sum_volume(self.tree.trunk)
-    
+
     def count_cells(self):
-        return self.tree.sum_cells() 
+        return self.tree.sum_cells()
 
 if __name__ == "__main__":
     import yt

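A self-contained sketch of the log-flip bookkeeping that set_fields() above
introduces; the field lists below are hypothetical. One caveat worth noting:
under Python 3, map() is lazy, so the side-effecting map(_apply_log, ...)
call in the diff only runs if it is forced (e.g. wrapped in list(), as here).

    import operator
    import numpy as np

    def _apply_log(data, log_changed, log_new):
        '''Toggle data between linear and log10 space, in place.'''
        if not log_changed:
            return
        if log_new:
            np.log10(data, data)         # linear -> log10, written in place
        else:
            np.power(10.0, data, data)   # log10 -> linear, written in place

    old_log = [True, False]              # per-field log flags before the call
    new_log = [False, False]             # per-field log flags requested now
    flip_log = list(map(operator.ne, old_log, new_log))  # -> [True, False]

    brick_data = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]
    list(map(_apply_log, brick_data, flip_log, new_log)) # force the lazy map
    assert np.allclose(brick_data[0], [10.0, 100.0])     # 10**x was applied
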
diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- /dev/null
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -0,0 +1,50 @@
+cimport cython 
+import numpy as np
+cimport numpy as np
+
+# ray data structure
+cdef struct Ray:
+    np.float64_t origin[3]
+    np.float64_t direction[3]
+    np.float64_t inv_dir[3]
+    np.float64_t data_val
+    np.float64_t t_near
+    np.float64_t t_far
+    np.int64_t elem_id
+
+# axis-aligned bounding box
+cdef struct BBox:
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+
+# node for the bounding volume hierarchy
+cdef struct BVHNode:
+    np.int64_t begin
+    np.int64_t end
+    BVHNode* left
+    BVHNode* right
+    BBox bbox
+    
+# triangle data structure
+cdef struct Triangle:
+    np.float64_t p0[3]
+    np.float64_t p1[3]
+    np.float64_t p2[3]
+    np.float64_t d0, d1, d2
+    np.int64_t elem_id
+    np.float64_t centroid[3]
+    BBox bbox
+
+cdef class BVH:
+    cdef BVHNode* root
+    cdef Triangle* triangles
+    cdef np.int64_t leaf_size
+    cdef np.float64_t[:, ::1] vertices
+    cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
+                               np.int64_t ax, np.float64_t split) nogil
+    cdef void intersect(self, Ray* ray) nogil
+    cdef void _get_node_bbox(self, BVHNode* node, 
+                             np.int64_t begin, np.int64_t end) nogil
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil
+    cdef BVHNode* _recursive_build(self, np.int64_t begin, np.int64_t end) nogil
+    cdef void _recursive_free(self, BVHNode* node) nogil

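The BBox struct and intersect() declaration above support the slab-method
ray/box test implemented in the .pyx below (ray_bbox_intersect). As a reading
aid, here is a pure-Python transcription of that test; the sample ray and box
are made up:

    def ray_bbox_intersect(origin, inv_dir, left_edge, right_edge):
        tmin, tmax = -1e300, 1e300
        for i in range(3):
            t1 = (left_edge[i] - origin[i]) * inv_dir[i]
            t2 = (right_edge[i] - origin[i]) * inv_dir[i]
            tmin = max(tmin, min(t1, t2))   # latest entry over all slabs
            tmax = min(tmax, max(t1, t2))   # earliest exit over all slabs
        return tmax >= max(tmin, 0.0)       # overlap in front of the origin

    # a ray along +x starting left of the unit box hits it (1e30 stands in
    # for the infinite inverse of the zero direction components)
    assert ray_bbox_intersect((-2.0, 0.5, 0.5), (1.0, 1e30, 1e30),
                              (0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
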
diff -r 9478d43315adc4160fbf7bc00793f8db67778b32 -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- /dev/null
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -0,0 +1,320 @@
+cimport cython 
+import numpy as np
+cimport numpy as np
+from libc.math cimport fabs, fmax, fmin
+from libc.stdlib cimport malloc, free
+from cython.parallel import parallel, prange
+
+cdef extern from "mesh_construction.h":
+    enum:
+        MAX_NUM_TRI
+    int triangulate_hex[MAX_NUM_TRI][3]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.float64_t dot(const np.float64_t a[3], 
+                             const np.float64_t b[3]) nogil:
+    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] 
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void cross(const np.float64_t a[3], 
+                       const np.float64_t b[3],
+                       np.float64_t c[3]) nogil:
+    c[0] = a[1]*b[2] - a[2]*b[1]
+    c[1] = a[2]*b[0] - a[0]*b[2]
+    c[2] = a[0]*b[1] - a[1]*b[0]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void subtract(const np.float64_t a[3], 
+                          const np.float64_t b[3],
+                          np.float64_t c[3]) nogil:
+    c[0] = a[0] - b[0]
+    c[1] = a[1] - b[1]
+    c[2] = a[2] - b[2]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri) nogil:
+# https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
+
+    # edge vectors
+    cdef np.float64_t e1[3]
+    cdef np.float64_t e2[3]
+    subtract(tri.p1, tri.p0, e1)
+    subtract(tri.p2, tri.p0, e2)
+
+    cdef np.float64_t P[3]
+    cross(ray.direction, e2, P)
+
+    cdef np.float64_t det, inv_det
+    det = dot(e1, P)
+    if(det > -1.0e-10 and det < 1.0e-10): 
+        return False
+    inv_det = 1.0 / det
+
+    cdef np.float64_t T[3]
+    subtract(ray.origin, tri.p0, T)
+
+    cdef np.float64_t u = dot(T, P) * inv_det
+    if(u < 0.0 or u > 1.0):
+        return False
+
+    cdef np.float64_t Q[3]
+    cross(T, e1, Q)
+
+    cdef np.float64_t v = dot(ray.direction, Q) * inv_det
+    if(v < 0.0 or u + v  > 1.0):
+        return False
+
+    cdef np.float64_t t = dot(e2, Q) * inv_det
+
+    if(t > 1.0e-10 and t < ray.t_far):
+        ray.t_far = t
+        ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
+        ray.elem_id = tri.elem_id
+        return True
+
+    return False
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
+# https://tavianator.com/fast-branchless-raybounding-box-intersections/
+
+    cdef np.float64_t tmin = -1.0e300
+    cdef np.float64_t tmax =  1.0e300
+ 
+    cdef np.int64_t i
+    cdef np.float64_t t1, t2
+    for i in range(3):
+        t1 = (bbox.left_edge[i]  - ray.origin[i])*ray.inv_dir[i]
+        t2 = (bbox.right_edge[i] - ray.origin[i])*ray.inv_dir[i] 
+        tmin = fmax(tmin, fmin(t1, t2))
+        tmax = fmin(tmax, fmax(t1, t2))
+ 
+    return tmax >= fmax(tmin, 0.0)
+
+
+cdef class BVH:
+    '''
+
+    This class implements a bounding volume hierarchy (BVH), a spatial acceleration
+    structure for fast ray-tracing. A BVH is like a kd-tree, except that instead of 
+    partitioning the *volume* of the parent to create the children, we partition the 
+    triangles themselves into 'left' or 'right' sub-trees. The bounding volume for a
+    node is then determined by computing the bounding volume of the triangles that
+    belong to it. This allows us to quickly discard triangles that are not close 
+    to intersecting a given ray.
+
+    '''
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self,
+                  np.float64_t[:, ::1] vertices,
+                  np.int64_t[:, ::1] indices,
+                  np.float64_t[:, ::1] field_data):
+        
+        self.leaf_size = 16
+        self.vertices = vertices
+        cdef np.int64_t num_elem = indices.shape[0]
+        cdef np.int64_t num_tri = 12*num_elem
+
+        # fill our array of triangles
+        cdef np.int64_t i, j, k
+        cdef np.int64_t offset, tri_index
+        cdef np.int64_t v0, v1, v2
+        cdef Triangle* tri
+        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
+        for i in range(num_elem):
+            offset = 12*i
+            for j in range(12):
+                tri_index = offset + j
+                tri = &(self.triangles[tri_index])
+                tri.elem_id = i
+                v0 = indices[i][triangulate_hex[j][0]]
+                v1 = indices[i][triangulate_hex[j][1]]
+                v2 = indices[i][triangulate_hex[j][2]]
+                tri.d0 = field_data[i][triangulate_hex[j][0]]
+                tri.d1 = field_data[i][triangulate_hex[j][1]]
+                tri.d2 = field_data[i][triangulate_hex[j][2]]
+                for k in range(3):
+                    tri.p0[k] = vertices[v0][k]
+                    tri.p1[k] = vertices[v1][k]
+                    tri.p2[k] = vertices[v2][k]
+                    tri.centroid[k] = (tri.p0[k] + tri.p1[k] + tri.p2[k]) / 3.0
+                    tri.bbox.left_edge[k]  = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
+                    tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
+
+        self.root = self._recursive_build(0, num_tri)
+
+    cdef void _recursive_free(self, BVHNode* node) nogil:
+        if node.end - node.begin > self.leaf_size:
+            self._recursive_free(node.left)
+            self._recursive_free(node.right)
+        free(node)
+
+    def __dealloc__(self):
+        self._recursive_free(self.root)
+        free(self.triangles)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
+                               np.int64_t ax, np.float64_t split) nogil:
+        # this re-orders the triangle array in place so that all of the
+        # triangles to the left of mid have centroids *greater* than "split"
+        # along the direction "ax", while the triangles from mid onward have
+        # centroids less than or equal to "split" along "ax".
+        cdef np.int64_t mid = begin
+        while (begin != end):
+            if self.triangles[mid].centroid[ax] > split:
+                mid += 1
+            elif self.triangles[begin].centroid[ax] > split:
+                self.triangles[mid], self.triangles[begin] = \
+                self.triangles[begin], self.triangles[mid]
+                mid += 1
+            begin += 1
+        return mid
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void _get_node_bbox(self, BVHNode* node, 
+                             np.int64_t begin, np.int64_t end) nogil:
+        cdef np.int64_t i, j
+        cdef BBox box = self.triangles[begin].bbox
+        for i in range(begin+1, end):
+            for j in range(3):
+                box.left_edge[j] = fmin(box.left_edge[j],
+                                        self.triangles[i].bbox.left_edge[j])
+                box.right_edge[j] = fmax(box.right_edge[j], 
+                                         self.triangles[i].bbox.right_edge[j])
+        node.bbox = box
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void intersect(self, Ray* ray) nogil:
+        self._recursive_intersect(ray, self.root)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil:
+
+        # check for bbox intersection:
+        if not ray_bbox_intersect(ray, node.bbox):
+            return
+
+        # check for leaf
+        cdef np.int64_t i, hit
+        cdef Triangle* tri
+        if (node.end - node.begin) <= self.leaf_size:
+            for i in range(node.begin, node.end):
+                tri = &(self.triangles[i])
+                hit = ray_triangle_intersect(ray, tri)
+            return
+
+        # if not leaf, intersect with left and right children
+        self._recursive_intersect(ray, node.left)
+        self._recursive_intersect(ray, node.right)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef BVHNode* _recursive_build(self, np.int64_t begin, np.int64_t end) nogil:
+        cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
+        node.begin = begin
+        node.end = end
+
+        self._get_node_bbox(node, begin, end)
+        
+        # check for leaf
+        if (end - begin) <= self.leaf_size:
+            return node
+        
+        # we use the "split in the middle of the longest axis approach"
+        # see: http://www.vadimkravcenko.com/bvh-tree-building/
+
+        # compute longest dimension (d tracks the current maximum width)
+        cdef np.int64_t ax = 0
+        cdef np.float64_t d = fabs(node.bbox.right_edge[0] -
+                                   node.bbox.left_edge[0])
+        if fabs(node.bbox.right_edge[1] - node.bbox.left_edge[1]) > d:
+            d = fabs(node.bbox.right_edge[1] - node.bbox.left_edge[1])
+            ax = 1
+        if fabs(node.bbox.right_edge[2] - node.bbox.left_edge[2]) > d:
+            ax = 2
+
+        # split in half along that dimension
+        cdef np.float64_t split = 0.5*(node.bbox.right_edge[ax] +
+                                       node.bbox.left_edge[ax])
+
+        # partition the triangle list around the split position
+        cdef np.int64_t mid = self._partition(begin, end, ax, split)
+
+        if(mid == begin or mid == end):
+            mid = begin + (end-begin)/2
+        
+        # recursively build sub-trees
+        node.left = self._recursive_build(begin, mid)
+        node.right = self._recursive_build(mid, end)
+
+        return node
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void cast_rays(np.float64_t* image,
+                    const np.float64_t* origins,
+                    const np.float64_t* direction,
+                    const int N, 
+                    BVH bvh) nogil:
+
+    cdef Ray* ray 
+    cdef int i, j, k
+    
+    with nogil, parallel():
+       
+        ray = <Ray *> malloc(sizeof(Ray))
+    
+        for k in range(3):
+            ray.direction[k] = direction[k]
+            ray.inv_dir[k] = 1.0 / direction[k]
+
+        for i in prange(N):
+            for j in range(3):
+                ray.origin[j] = origins[N*j + i]
+            ray.t_far = 1e30
+            ray.t_near = 0.0
+            ray.data_val = 0
+            bvh.intersect(ray)
+            image[i] = ray.data_val
+
+        free(ray)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def test_ray_trace(np.ndarray[np.float64_t, ndim=1] image, 
+                   np.ndarray[np.float64_t, ndim=2] origins,
+                   np.ndarray[np.float64_t, ndim=1] direction,
+                   BVH bvh):
+    
+    cdef int N = origins.shape[0]
+    cast_rays(&image[0], &origins[0, 0], &direction[0], N, bvh)

This diff is so big that we needed to truncate the remainder.
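
For readers of ray_triangle_intersect above, here is a pure-NumPy
transcription of the Möller-Trumbore test it implements; this is an
illustrative sketch, not part of the commit:

    import numpy as np

    def moller_trumbore(origin, direction, p0, p1, p2, eps=1.0e-10):
        '''Return the ray parameter t of the hit, or None on a miss.'''
        e1, e2 = p1 - p0, p2 - p0
        P = np.cross(direction, e2)
        det = np.dot(e1, P)
        if abs(det) < eps:              # ray parallel to the triangle plane
            return None
        inv_det = 1.0 / det
        T = origin - p0
        u = np.dot(T, P) * inv_det      # first barycentric coordinate
        if u < 0.0 or u > 1.0:
            return None
        Q = np.cross(T, e1)
        v = np.dot(direction, Q) * inv_det   # second barycentric coordinate
        if v < 0.0 or u + v > 1.0:
            return None
        t = np.dot(e2, Q) * inv_det
        return t if t > eps else None

    # a ray along +z from the origin hits this triangle in the z=1 plane
    t = moller_trumbore(np.zeros(3), np.array([0.0, 0.0, 1.0]),
                        np.array([-1.0, -1.0, 1.0]),
                        np.array([ 2.0, -1.0, 1.0]),
                        np.array([-1.0,  2.0, 1.0]))
    assert t is not None and abs(t - 1.0) < 1.0e-12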

https://bitbucket.org/yt_analysis/yt/commits/3ec960da8169/
Changeset:   3ec960da8169
Branch:      yt
User:        brittonsmith
Date:        2016-02-25 14:43:58+00:00
Summary:     Fixing flake8 errors.
Affected #:  2 files

diff -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 -r 3ec960da816994fae7b0afde71499e01fb712860 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -300,7 +300,6 @@
         fields = []
         scalar_fields = []
         id_fields = {}
-        pcount = data_file.total_particles
         with h5py.File(data_file.filename, "r") as f:
             for ptype in self.ds.particle_types_raw:
                 fields.append((ptype, "particle_identifier"))

diff -r 9bf7d05d35f498607df5dd2e69a6efe40e794ac5 -r 3ec960da816994fae7b0afde71499e01fb712860 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -96,7 +96,7 @@
 def test_unbalanced_dataset():
     ds = data_dir_load(g56)
     halo = ds.halo("Group", 0)
-    my_ids = halo["member_ids"]
+    halo["member_ids"]
     assert True
 
 # fof/subhalo catalog with no member ids in first file
@@ -110,5 +110,5 @@
     # this halo's particles are distributed over 3 files with the
     # middle file being empty
     halo = ds.halo("Group", 6)
-    my_ids = halo["member_ids"]
+    halo["member_ids"]
     assert True


https://bitbucket.org/yt_analysis/yt/commits/0ebb7e886c4f/
Changeset:   0ebb7e886c4f
Branch:      yt
User:        xarthisius
Date:        2016-03-02 17:31:44+00:00
Summary:     Merged in brittonsmith/yt (pull request #2005)

[BUGFIX] fixing a couple corner cases in gadget_fof frontend
Affected #:  3 files

diff -r 5cde4c82ad67e298a3df60962bd0607fd274145f -r 0ebb7e886c4fd7deed72db2980741dcb65b8e130 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -334,9 +334,10 @@
           dict([(ptype, False)
                 for ptype, pnum in self.particle_count.items()
                 if pnum > 0])
+        has_ids = False
 
         for data_file in self.data_files:
-            fl, sl, _units = self.io._identify_fields(data_file)
+            fl, sl, idl, _units = self.io._identify_fields(data_file)
             units.update(_units)
             field_list.extend([f for f in fl
                                if f not in field_list])
@@ -344,7 +345,8 @@
                                       if f not in scalar_field_list])
             for ptype in found_fields:
                 found_fields[ptype] |= data_file.total_particles[ptype]
-            if all(found_fields.values()): break
+            has_ids |= len(idl) > 0
+            if all(found_fields.values()) and has_ids: break
 
         self.field_list = field_list
         self.scalar_field_list = scalar_field_list

diff -r 5cde4c82ad67e298a3df60962bd0607fd274145f -r 0ebb7e886c4fd7deed72db2980741dcb65b8e130 yt/frontends/gadget_fof/io.py
--- a/yt/frontends/gadget_fof/io.py
+++ b/yt/frontends/gadget_fof/io.py
@@ -258,6 +258,7 @@
             start_index = dobj.field_data_start[i]
             end_index = dobj.field_data_end[i]
             pcount = end_index - start_index
+            if pcount == 0: continue
             field_end = field_start + end_index - start_index
             with h5py.File(data_file.filename, "r") as f:
                 for ptype, field_list in sorted(member_fields.items()):
@@ -298,11 +299,9 @@
     def _identify_fields(self, data_file):
         fields = []
         scalar_fields = []
-        pcount = data_file.total_particles
-        if sum(pcount.values()) == 0: return fields, scalar_fields, {}
+        id_fields = {}
         with h5py.File(data_file.filename, "r") as f:
             for ptype in self.ds.particle_types_raw:
-                if data_file.total_particles[ptype] == 0: continue
                 fields.append((ptype, "particle_identifier"))
                 scalar_fields.append((ptype, "particle_identifier"))
                 my_fields, my_offset_fields = \
@@ -311,8 +310,9 @@
                 scalar_fields.extend(my_fields)
 
                 if "IDs" not in f: continue
-                fields.extend([(ptype, field) for field in f["IDs"]])
-        return fields, scalar_fields, {}
+                id_fields = [(ptype, field) for field in f["IDs"]]
+                fields.extend(id_fields)
+        return fields, scalar_fields, id_fields, {}
 
 def subfind_field_list(fh, ptype, pcount):
     fields = []

diff -r 5cde4c82ad67e298a3df60962bd0607fd274145f -r 0ebb7e886c4fd7deed72db2980741dcb65b8e130 yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -86,3 +86,29 @@
         # as the array of all masses.  This will test getting
         # scalar fields for halos correctly.
         yield assert_array_equal, ad[ptype, "particle_mass"], mass
+
+# fof/subhalo catalog with no member ids in first file
+g56 = "gadget_halos/data/groups_056/fof_subhalo_tab_056.0.hdf5"
+
+# This dataset has halos in one file and ids in another,
+# which can confuse the field detection.
+@requires_file(g56)
+def test_unbalanced_dataset():
+    ds = data_dir_load(g56)
+    halo = ds.halo("Group", 0)
+    halo["member_ids"]
+    assert True
+
+# fof/subhalo catalog with no member ids in first file
+g76 = "gadget_halos/data/groups_076/fof_subhalo_tab_076.0.hdf5"
+
+# This dataset has one halo with particles distributed over 3 files
+# with the 2nd file being empty.
+@requires_file(g76)
+def test_3file_halo():
+    ds = data_dir_load(g76)
+    # this halo's particles are distributed over 3 files with the
+    # middle file being empty
+    halo = ds.halo("Group", 6)
+    halo["member_ids"]
+    assert True

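The heart of the merged change is the stopping rule shown above in
data_structures.py: keep scanning catalog files until every particle type's
fields have been found *and* at least one file has contributed member-ID
fields. (Note in passing that _identify_fields initializes id_fields as a
dict but rebinds it to a list; the caller only checks its length.) A minimal
sketch of the rule, with hypothetical file contents:

    found_fields = {"Group": False, "Subhalo": False}
    files = [
        {"ptypes": ["Group", "Subhalo"], "id_fields": []},  # halos, no IDs
        {"ptypes": [], "id_fields": []},                    # empty middle file
        {"ptypes": [], "id_fields": [("Group", "ID")]},     # IDs live here
    ]

    has_ids = False
    for i, data_file in enumerate(files):
        for ptype in data_file["ptypes"]:
            found_fields[ptype] = True
        has_ids |= len(data_file["id_fields"]) > 0
        if all(found_fields.values()) and has_ids:
            break

    # without the has_ids condition the loop would have stopped at file 0
    assert i == 2 and has_ids
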
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the service enabled for the recipient of this
email.

