[yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Nov 22 12:40:51 PST 2011


6 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/6c06c843f2d9/
changeset:   6c06c843f2d9
branch:      yt
user:        jsoishi
date:        2011-10-18 20:53:59
summary:     added docstrings to several objects; cleaned up a few others.
affected #:  1 file

diff -r cdc13f4f9b7197199abac0345244cc00b5d5bee8 -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2651,6 +2651,31 @@
     _type_name = "extracted_region"
     _con_args = ('_base_region', '_indices')
     def __init__(self, base_region, indices, force_refresh=True, **kwargs):
+        """An arbitrarily defined data container that allows for selection
+        of all data meeting certain criteria.
+
+        In order to create an arbitrarily selected set of data, the
+        ExtractedRegion takes a `base_region` and a set of `indices`
+        and creates a region within the `base_region` consisting of
+        all data indexed by the `indices`. Note that `indices` must be
+        precomputed. This does not work well for parallelized
+        operations.
+
+        Parameters
+        ----------
+        base_region : yt data source
+            A previously selected data source.
+        indices : array_like
+            An array of indices
+
+        Other Parameters
+        ----------------
+        force_refresh : bool
+           Force a refresh of the data. Defaults to True.
+        
+        Examples
+        --------
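+        A minimal sketch of intended usage (the dataset name and the
+        density threshold are illustrative):
+
+        >>> pf = load("RedshiftOutput0005")
+        >>> dd = pf.h.all_data()
+        >>> indices = na.where(dd["Density"] > 5e-31)
+        >>> er = pf.h.extracted_region(dd, indices)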
+        """
         cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,
                             fields=None, pf=base_region.pf, **kwargs)
@@ -2933,10 +2958,22 @@
     _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        """
-        We create an object with a set of three *left_edge* coordinates,
-        three *right_edge* coordinates, and a *center* that need not be the
-        center.
+        """A 3D region of data with an arbitrary center.
+
+        Takes an array of three *left_edge* coordinates, three
+        *right_edge* coordinates, and a *center* that can be anywhere
+        in the domain. If the selected region extends past the edges
+        of the domain, no data will be found there; the object's
+        `left_edge` and `right_edge` are not modified.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the region
+        left_edge : array_like
+            The left edge of the region
+        right_edge : array_like
+            The right edge of the region
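+
+        Examples
+        --------
+        A sketch; the coordinates are illustrative:
+
+        >>> c = [0.5, 0.5, 0.5]
+        >>> region = pf.h.region(c, [0.25]*3, [0.75]*3)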
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self.left_edge = left_edge
@@ -2990,10 +3027,25 @@
     _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        """
-        We create an object with a set of three *left_edge* coordinates,
-        three *right_edge* coordinates, and a *center* that need not be the
-        center.
+        """A 3D region of data that with periodic boundary
+        conditions if the selected region extends beyond the
+        simulation domain.
+
+        Takes an array of three *left_edge* coordinates, three
+        *right_edge* coordinates, and a *center* that can be anywhere
+        in the domain. The selected region can extend past the edges
+        of the domain, in which case periodic boundary conditions will
+        be applied to fill the region.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the region
+        left_edge : array_like
+            The left edge of the region
+        right_edge : array_like
+            The right edge of the region
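+
+        Examples
+        --------
+        A sketch; the edges deliberately extend past the domain to
+        exercise the periodic wrap:
+
+        >>> c = [0.5, 0.5, 0.5]
+        >>> region = pf.h.periodic_region(c, [-0.1]*3, [1.1]*3)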
+
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self.left_edge = na.array(left_edge)
@@ -3056,7 +3108,15 @@
     """
     _type_name = "periodic_region_strict"
     _dx_pad = 0.0
-
+    def __init__(self, center, left_edge, right_edge, fields = None,
+                 pf = None, **kwargs):
+        """same as periodic region, but does not include cells unless
+        the selected region encompasses their centers.
+
+        """
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+                                       fields = fields, pf = pf, **kwargs)
+    
 class AMRGridCollectionBase(AMR3DData):
     """
     An arbitrary selection of grids, within which we accept all points.
@@ -3097,9 +3157,20 @@
     _type_name = "sphere"
     _con_args = ('center', 'radius')
     def __init__(self, center, radius, fields = None, pf = None, **kwargs):
-        """
-        The most famous of all the data objects, we define it via a
-        *center* and a *radius*.
+        """A sphere f points defined by a *center* and a *radius*.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the sphere.
+        radius : float
+            The radius of the sphere.
+
+        Examples
+        --------
+        >>> pf = load("DD0010/moving7_0010")
+        >>> c = [0.5,0.5,0.5]
+        >>> sphere = pf.h.sphere(c,1.*pf['kpc'])
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         # Unpack the radius, if necessary
@@ -3153,6 +3224,19 @@
     _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
+        """A 3D region with all data extracted to a single, specified
+        resolution.
+        
+        Parameters
+        ----------
+        level : int
+            The resolution level to which the data is uniformly gridded
+        left_edge : array_like
+            The left edge of the region to be extracted
+        dims : array_like
+            Number of cells along each axis of the resulting grid
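+
+        Examples
+        --------
+        A sketch; level and dimensions are illustrative:
+
+        >>> cube = pf.h.covering_grid(2, left_edge=[0.0, 0.0, 0.0],
+        ...                           dims=[128, 128, 128])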
+
+        """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = na.array(left_edge)
@@ -3286,6 +3370,24 @@
     _type_name = "smoothed_covering_grid"
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
+        """A 3D region with all data extracted and interpolated to a
+        single, specified resolution.
+
+        Smoothed covering grids start at level 0, interpolating to
+        fill the region to level 1, replacing any cells actually
+        covered by level 1 data, and then recursively repeating this
+        process until it reaches the specified `level`.
+        
+        Parameters
+        ----------
+        level : int
+            The resolution level to which the data is interpolated
+        left_edge : array_like
+            The left edge of the region to be extracted
+        dims : array_like
+            Number of cells along each axis of the resulting grid
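+
+        Examples
+        --------
+        A sketch; level and dimensions are illustrative:
+
+        >>> scube = pf.h.smoothed_covering_grid(2, [0.0, 0.0, 0.0],
+        ...                                     dims=[128, 128, 128])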
+
+        """
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))



https://bitbucket.org/yt_analysis/yt/changeset/13f1bb040e8c/
changeset:   13f1bb040e8c
branch:      yt
user:        jsoishi
date:        2011-10-18 20:57:33
summary:     merged.
affected #:  30 files

diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea tests/halos.py
--- /dev/null
+++ b/tests/halos.py
@@ -0,0 +1,10 @@
+from yt.utilities.answer_testing.output_tests import \
+    SingleOutputTest, create_test
+from yt.utilities.answer_testing.halo_tests import \
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
+
+create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
+
+create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
+
+create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea tests/object_field_values.py
--- a/tests/object_field_values.py
+++ b/tests/object_field_values.py
@@ -3,7 +3,7 @@
 
 from yt.utilities.answer_testing.output_tests import \
     YTStaticOutputTest, RegressionTestException, create_test
-from yt.funcs import ensure_list
+from yt.funcs import ensure_list, iterable
 from fields_to_test import field_list, particle_field_list
 
 class FieldHashesDontMatch(RegressionTestException):
@@ -16,26 +16,50 @@
     return func
 
 @register_object
-def centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
 
 @register_object
-def off_centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
 
 @register_object
-def corner_sphere(self):
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
+def disk(self):
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    normal = na.array([1.]*3)
+    self.data_object = self.pf.h.disk(center, normal, radius, height)
+    
+@register_object
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 
+_new_known_objects = {}
+for field in ["Density"]:#field_list:
+    for object_name in known_objects:
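+        # Factory function: bind this iteration's (object, field) pair
+        # as arguments now, so each generated test closes over its own
+        # names rather than the shared loop variables.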
+        def _rfunc(oname, fname):
+            def func(tobj):
+                known_objects[oname](tobj)
+                tobj.orig_data_object = tobj.data_object
+                avg_value = tobj.orig_data_object.quantities[
+                        "WeightedAverageQuantity"](fname, "Density")
+                tobj.data_object = tobj.orig_data_object.cut_region(
+                        ["grid['%s'] > %s" % (fname, avg_value)])
+            return func
+        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+                _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
+
 class YTFieldValuesTest(YTStaticOutputTest):
     def run(self):
         vals = self.data_object[self.field].copy()
@@ -51,16 +75,26 @@
 
 for object_name in known_objects:
     for field in field_list + particle_field_list:
+        if "cut_region" in object_name and field in particle_field_list:
+            continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
-
+    
 class YTDerivedQuantityTest(YTStaticOutputTest):
     def setup(self):
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
 
-    def compare(self, old_results):
-        if self.result != old_result: raise FieldHashesDontMatch
+    def compare(self, old_result):
+        if hasattr(self.result, 'tostring'):
+            self.compare_array_delta(self.result, old_result, 1e-7)
+            return
+        elif iterable(self.result):
+            a1 = na.array(self.result)
+            a2 = na.array(old_result)
+            self.compare_array_delta(a1, a2, 1e-7)
+        else:
+            if self.result != old_result: raise FieldHashesDontMatch
 
     def run(self):
         # This only works if it takes no arguments
@@ -74,6 +108,11 @@
 
 for object_name in known_objects:
     for dq in dq_names:
+        # Some special exceptions
+        if "cut_region" in object_name and (
+            "SpinParameter" in dq or
+            "TotalMass" in dq):
+            continue
         create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
                     dq_name = dq, object_name = object_name)
 


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea tests/projections.py
--- a/tests/projections.py
+++ b/tests/projections.py
@@ -2,33 +2,34 @@
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
     TestProjection, TestOffAxisProjection, TestSlice, \
-    TestGasDistribution, Test2DGasDistribution
+    TestRay, TestGasDistribution, Test2DGasDistribution
 
 from fields_to_test import field_list
 
+for field in field_list:
+    create_test(TestRay, "%s" % field, field = field)
+
 for axis in range(3):
     for field in field_list:
-        create_test(TestSlice, "projection_slice_%s_%s" % (axis, field),
+        create_test(TestSlice, "%s_%s" % (axis, field),
                     field = field, axis = axis)
 
 for axis in range(3):
     for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+        create_test(TestProjection, "%s_%s" % (axis, field),
                     field = field, axis = axis)
-        create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
-                    field = field, axis = axis, weight_field = "Density")
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
-                    (axis, field),
-                    field = field, axis = axis)
-        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
-                    (axis, field),
+        create_test(TestProjection, "%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
+                field = field, axis = axis)
+    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
+                field = field, axis = axis, weight_field = "Density")
+
+for field in field_list:
+    create_test(TestGasDistribution, "density_%s" % field,
                 field_x = "Density", field_y = field)
-    create_test(Test2DGasDistribution, "2d_profile_density_x-vel_test_%s" % field,
-                field_x = "Density", field_y = "x-velocity", field_z = field, weight = "CellMassMsun")
+    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, 
+                weight = "CellMassMsun")


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -1,3 +1,4 @@
+import matplotlib; matplotlib.use('Agg')
 from yt.config import ytcfg
 ytcfg["yt","loglevel"] = "50"
 ytcfg["yt","serialize"] = "False"
@@ -6,7 +7,6 @@
     RegressionTestRunner, clear_registry, create_test, \
     TestFieldStatistics, TestAllProjections, registry_entries, \
     Xunit
-
 from yt.utilities.command_line import get_yt_version
 
 from yt.mods import *
@@ -49,6 +49,7 @@
     return mapping
 
 if __name__ == "__main__":
+    clear_registry()
     mapping = find_and_initialize_tests()
     test_storage_directory = ytcfg.get("yt","test_storage_dir")
     try:
@@ -73,12 +74,17 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
-    parser.add_option("", "--parallel", dest="parallel",
-                      default=False,
-                      help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
-        print "\n    ".join(sorted(itertools.chain(*mapping.values())))
+        tests_to_run = []
+        for m, vals in mapping.items():
+            new_tests = fnmatch.filter(vals, opts.test_pattern)
+            if len(new_tests) == 0: continue
+            load_tests(m, cwd)
+            keys = set(registry_entries())
+            tests_to_run += [t for t in new_tests if t in keys]
+        tests = list(set(tests_to_run))
+        print "\n    ".join(tests)
         sys.exit(0)
     pf = load(opts.parameter_file)
     if pf is None:
@@ -100,8 +106,9 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
-        tests_to_run += new_tests
         load_tests(m, cwd)
+        keys = set(registry_entries())
+        tests_to_run += [t for t in new_tests if t in keys]
     for test_name in sorted(tests_to_run):
         rtr.run_test(test_name)
     if watcher is not None:
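
Both the --list branch and the main run loop above now follow the same
pattern: load each matching test module, then keep only the names that
actually made it into the registry.  A standalone sketch of that filter
(the mapping, pattern, and registry contents are illustrative):

    import fnmatch

    mapping = {"projections": ["0_Density", "0_Temperature"],
               "halos": ["halo_count_HOP"]}
    registered = set(["0_Density", "halo_count_HOP"])  # as from registry_entries()

    tests_to_run = []
    for module, names in mapping.items():
        matched = fnmatch.filter(names, "*Density*")
        # Keep only the tests that registered when the module was loaded.
        tests_to_run += [t for t in matched if t in registered]
    # tests_to_run == ["0_Density"]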


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ b/tests/volume_rendering.py
@@ -33,8 +33,4 @@
     def compare(self, old_result):
         if not na.all(self.result==old_result):
             raise VolumeRenderingInconsistent()
-        else:
-            pass
 
-        # self.compare_array_delta(old_result, self.result, 0.0)
-


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allmax(self._max_dens[self.id][0])
+        max = self._mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allsum(value)
+        value = self._mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allsum(my_mass)
-        global_com = self._mpi_allsum(my_com)
+        global_mass = self._mpi_allreduce(my_mass, op='sum')
+        global_com = self._mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allsum(float(my_mass))
+        global_mass = self._mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allsum(bv)
+        global_bv = self._mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allsum(ss)
+        global_ss = self._mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allmax(my_max)
+        return self._mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allsum(my_size)
+        global_size = self._mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allmin(dist_min)
-        dist_max = self._mpi_allmax(dist_max)
+        dist_min = self._mpi_allreduce(dist_min, op='min')
+        dist_max = self._mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_Allsum_double(self.mass_bins)
+        self.mass_bins = self._mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1479,7 +1479,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_Allsum_double(self.bulk_vel)
+        self.bulk_vel = self._mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1501,7 +1501,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_Allsum_double(rms_vel_temp)
+        rms_vel_temp = self._mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1855,20 +1855,20 @@
         # analyzing a subvolume.
         ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
         if ytcfg.getboolean("yt","inline") == False and \
-            resize and self._mpi_get_size() != 1 and subvolume is None:
-            random.seed(self._mpi_get_rank())
+            resize and self._par_size != 1 and subvolume is None:
+            random.seed(self._par_rank)
             cut_list = self._partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
-            if self._mpi_get_rank() == 0:
+            if self._par_rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
             self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
-            my_bounds = self.bucket_bounds[self._mpi_get_rank()]
+            my_bounds = self.bucket_bounds[self._par_rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
-        if self._mpi_get_size() == 1:
+        if self._par_size == 1:
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
@@ -1934,10 +1934,8 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        if ytcfg.getboolean("yt","inline") == False:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
-        else:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
+        total_mass = self._mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+                                         op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
         # If we're using a subvolume, we now re-divide.
@@ -1959,13 +1957,13 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allsum(xp.size)
+        n_parts = self._mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self._mpi_get_size())
-        n_random = int(adjust * float(random_points) / self._mpi_get_size())
+        adjust = float(local_parts) / ( float(n_parts) / self._par_size)
+        n_random = int(adjust * float(random_points) / self._par_size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')
@@ -1988,7 +1986,7 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._mpi_concatenate_array_on_root_double(my_points[0])
+        root_points = self._mpi_catarray(my_points[0])
         del my_points
         if mine == 0:
             root_points.shape = (tot_random, 3)
@@ -2112,9 +2110,9 @@
         if dm_only:
             select = self._get_dm_indices()
             total_mass = \
-                self._mpi_allsum((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'))
+                self._mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
         else:
-            total_mass = self._mpi_allsum(self._data_source["ParticleMassMsun"].sum(dtype='float64'))
+            total_mass = self._mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2194,7 +2192,7 @@
             self._partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allsum(self._data_source["particle_position_x"].size)
+            n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]
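
Every reduction in this file now funnels through a single entry point:
_mpi_allreduce(value, op='sum'|'min'|'max') replaces the per-type
wrappers (_mpi_allsum, _mpi_allmin, _mpi_allmax, _mpi_Allsum_double,
_mpi_Allsum_long).  A sketch of an equivalent helper at the mpi4py
level (hypothetical standalone code, not yt's actual wrapper):

    from mpi4py import MPI
    import numpy as np

    _ops = {"sum": MPI.SUM, "min": MPI.MIN, "max": MPI.MAX}

    def allreduce(value, op="sum"):
        comm = MPI.COMM_WORLD
        if isinstance(value, np.ndarray):
            # Arrays reduce element-wise into a fresh buffer.
            result = np.empty_like(value)
            comm.Allreduce(value, result, op=_ops[op])
            return result
        # Scalars take the generic, pickle-based code path.
        return comm.allreduce(value, op=_ops[op])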


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -284,17 +284,17 @@
         yt_counters("MPI stuff.")
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_double(recv_points[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_double(recv_mass[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
         # Let's wait here to be absolutely sure that all the receive buffers
         # have been created before any sending happens!
         self._barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_mass[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
         self._mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
@@ -683,8 +683,8 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._mpi_concatenate_array_double(self.densest_in_chain)
-        self.densest_in_chain_real_index = self._mpi_concatenate_array_long(self.densest_in_chain_real_index)
+        self.densest_in_chain = self._mpi_catarray(self.densest_in_chain)
+        self.densest_in_chain_real_index = self._mpi_catarray(self.densest_in_chain_real_index)
         yt_counters("global chain MPI stuff.")
         # Sort the chains by density here. This is an attempt to make it such
         # that the merging stuff in a few steps happens in the same order
@@ -774,14 +774,14 @@
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(temp_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_long(temp_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure all the receive buffers are set before continuing.
         self._barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(self.uphill_chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()
@@ -878,7 +878,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_minimum_array_long(chainID_translate_map_local)
+            self._mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -943,14 +943,14 @@
         # Set up the receving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_long(recv_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure the recv buffers are set before continuing.
         self._barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()
@@ -1155,7 +1155,7 @@
         Set_list = []
         # We only want the holes that are modulo mine.
         keys = na.arange(groupID, dtype='int64')
-        size = self._mpi_get_size()
+        size = self._par_size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
         mine_groupIDs = set([]) # Records only ones modulo mine.
@@ -1202,7 +1202,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_minimum_array_long(lookup)
+        lookup = self._mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1330,7 +1330,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_Allsum_double(max_dens_point)
+        self.max_dens_point = self._mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1385,9 +1385,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_Allsum_long(size)
-        CoM_M = self._mpi_Allsum_double(CoM_M)
-        self.Tot_M = self._mpi_Allsum_double(Tot_M)
+        self.group_sizes = self._mpi_allreduce(size, op='sum')
+        CoM_M = self._mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self._mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1405,7 +1405,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_double_array_max(max_radius)
+        self.max_radius = self._mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -168,10 +168,10 @@
         if self.sleep <= 0.:
             self.sleep = 5
         # MPI stuff
-        self.mine = self._mpi_get_rank()
+        self.mine = self._par_rank
         if self.mine is None:
             self.mine = 0
-        self.size = self._mpi_get_size()
+        self.size = self._par_size
         if self.size is None:
             self.size = 1
         # Get to work.
@@ -548,11 +548,11 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._mpi_concatenate_array_on_root_long(parent_IDs_tosend)
-        parent_masses_tosend = self._mpi_concatenate_array_on_root_double(parent_masses_tosend)
-        parent_halos_tosend = self._mpi_concatenate_array_on_root_int(parent_halos_tosend)
-        child_IDs_tosend = self._mpi_concatenate_array_on_root_long(child_IDs_tosend)
-        child_halos_tosend = self._mpi_concatenate_array_on_root_int(child_halos_tosend)
+        parent_IDs_tosend = self._mpi_catarray(parent_IDs_tosend)
+        parent_masses_tosend = self._mpi_catarray(parent_masses_tosend)
+        parent_halos_tosend = self._mpi_catarray(parent_halos_tosend)
+        child_IDs_tosend = self._mpi_catarray(child_IDs_tosend)
+        child_halos_tosend = self._mpi_catarray(child_halos_tosend)
 
         # Resort the received particles.
         Psort = parent_IDs_tosend.argsort()
@@ -599,7 +599,7 @@
             (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
 
         # Now we sum up the contributions globally.
-        self.child_mass_arr = self._mpi_Allsum_double(self.child_mass_arr)
+        self.child_mass_arr = self._mpi_allsum(self.child_mass_arr)
         
         # Turn these Msol masses into percentages of the parent.
         line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,4 +34,5 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    shift_projections
+    shift_projections, \
+    standard_fields


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -582,8 +582,14 @@
             except EmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
                 return None
+            # Figure out which fields to add simultaneously
+            field_groupings = defaultdict(lambda: defaultdict(list))
             for hp in self.profile_fields:
-                profile.add_fields(hp['field'], weight=hp['weight_field'], accumulation=hp['accumulation'])
+                field_groupings[hp['weight_field']][hp['accumulation']].append(hp['field'])
+            for weight_field in field_groupings:
+                for accum, fields in field_groupings[weight_field].items():
+                    profile.add_fields(fields, weight=weight_field,
+                                       accumulation=accum)
 
         if virial_filter:
             self._add_actual_overdensity(profile)
@@ -1080,3 +1086,34 @@
 
     def keys(self):
         return self._data.keys()
+
+standard_fields = [
+    ("Density", "CellMassMsun", False),
+    ("Temperature", "CellMassMsun", False),
+    ("VelocityMagnitude", "CellMassMsun", False),
+    ("Ones", None, False),
+    ("Entropy", "CellMassMsun", False),
+    ("RadialVelocity", "CellMassMsun", False),
+    ("SpecificAngularMomentumX", "CellMassMsun", False),
+    ("SpecificAngularMomentumY", "CellMassMsun", False),
+    ("SpecificAngularMomentumZ", "CellMassMsun", False),
+    ("CoolingTime", "CellMassMsun", False),
+    ("DynamicalTime", "CellMassMsun", False),
+    ("CellMassMsun", None, True),
+    ("TotalMassMsun", None, True),
+    ("Dark_Matter_Density", "CellMassMsun", False),
+    #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
+    #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
+    #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
+    ("OverDensity", "CellMassMsun", False),
+    #("ParticleMassMsun", None),
+    ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+    #("StarParticleMassMsun", None), 
+    ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+]
+
+standard_fields += [("%s_Fraction" % (s), "CellMassMsun", False)
+    for s in ["HI","HII","HeI","HeII","HeIII","H2I","H2II",
+    "HM","Electron", "DI","DII","HDI","Metal"]
+]
+
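
Earlier in this diff, profile.add_fields calls are batched by grouping
the profile_fields entries on their (weight_field, accumulation) pair
through a nested defaultdict, so each distinct pair costs one call.  A
standalone sketch of that grouping idiom (field names illustrative):

    from collections import defaultdict

    profile_fields = [
        {"field": "Density", "weight_field": "CellMassMsun", "accumulation": False},
        {"field": "Temperature", "weight_field": "CellMassMsun", "accumulation": False},
        {"field": "CellMassMsun", "weight_field": None, "accumulation": True},
    ]

    groupings = defaultdict(lambda: defaultdict(list))
    for hp in profile_fields:
        groupings[hp["weight_field"]][hp["accumulation"]].append(hp["field"])

    # One add_fields call per group:
    #   ("CellMassMsun", False) -> ["Density", "Temperature"]
    #   (None, True)            -> ["CellMassMsun"]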


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -28,37 +28,6 @@
 from yt.data_objects.profiles import BinnedProfile1D
 from yt.funcs import *
 
-analysis_field_list = [
-    "Density",
-    "Temperature",
-    "VelocityMagnitude",
-    ("Ones", None),
-    "Entropy",
-    "RadialVelocity",
-    "SpecificAngularMomnetumX",
-    "SpecificAngularMomnetumY",
-    "SpecificAngularMomnetumZ",
-    "CoolingTime",
-    "DynamicalTime",
-    ("CellMassMsun", None),
-    "Dark_Matter_Density",
-    #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
-    #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
-    #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
-    ("TotalMass", None),
-    "OverDensity",
-    #("ParticleMassMsun", None),
-    ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-    #("StarParticleMassMsun", None), 
-    ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-]
-
-analysis_field_list += ["%s_Fraction" % (s) for s in
-    ["HI","HII","HeI","HeII","HeIII","H2I","H2II","HM","Electron",
-    "DI","DII","HDI","Metal"]
-]
-    
-
 class StandardRadialAnalysis(object):
     def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
         self.pf = pf


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -107,8 +107,8 @@
         self.constant_theta = theta
         self.constant_phi = phi
         # MPI stuff.
-        self.size = self._mpi_get_size()
-        self.mine = self._mpi_get_rank()
+        self.size = self._par_size
+        self.mine = self._par_rank
         self.vol_ratio = vol_ratio
         if self.vol_ratio == -1:
             self.vol_ratio = self.size
@@ -363,7 +363,7 @@
         for task in xrange(self.size):
             if task == self.mine: continue
             self.recv_done[task] = na.zeros(1, dtype='int64')
-            self.done_hooks.append(self._mpi_Irecv_long(self.recv_done[task], \
+            self.done_hooks.append(self._mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
     def _send_done_to_root(self):
@@ -376,7 +376,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_Isend_long(self.send_done, \
+            self.done_hooks.append(self._mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -418,22 +418,22 @@
         self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         self.recv_gen_array = na.zeros(self.size, dtype='int64')
-        self.recv_hooks.append(self._mpi_Irecv_double(self.recv_points, \
+        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
-        self.recv_hooks.append(self._mpi_Irecv_double(self.recv_fields_vals, \
+        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_fields_vals, \
             (self.mine-1)%self.size, tag=20))
-        self.recv_hooks.append(self._mpi_Irecv_long(self.recv_gen_array, \
+        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_gen_array, \
             (self.mine-1)%self.size, tag=40))
 
     def _send_arrays(self):
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_Isend_double(self.points,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_Isend_double(self.fields_vals,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_Isend_long(self.gen_array, \
+        self.send_hooks.append(self._mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):
@@ -441,8 +441,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allsum(fset.too_low)
-            fset.too_high = self._mpi_allsum(fset.too_high)
+            fset.too_low = self._mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self._mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_Allsum_long(fset.length_bin_hits[length])
+                    self._mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1432,8 +1432,8 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allsum(\
-                self[field]).reshape([self.dims]*2).transpose()
+            self[field] = self._mpi_allreduce(\
+                self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
         pass
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_Allsum_double(self[field])
+            self[field] = self._mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -466,5 +466,5 @@
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
-        if data._type_name == 'grid': return True
+        if getattr(data, "_type_name", None) == 'grid': return True
         raise NeedsOriginalGrid()


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -119,10 +119,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allsum(self.__data[key])
+            self.__data[key] = self._mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allsum(self.__weight_data[key])
-        self.__used = self._mpi_allsum(self.__used)
+            self.__weight_data[key] = self._mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self._mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self._mpi_get_rank() == 0 or self._mpi_get_rank() == None:
+        if self._par_rank == 0 or self._par_rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")
@@ -589,7 +589,7 @@
             self.derived_field_list = self.__class__._cached_derived_field_list
 
     def _generate_random_grids(self):
-        my_rank = self._mpi_get_rank()
+        my_rank = self._par_rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
             starter = na.random.randint(0, 20)


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -45,3 +45,8 @@
 
 from .xunit import \
     Xunit
+
+from .halo_tests import \
+    TestHaloCompositionHashHOP, \
+    TestHaloCompositionHashFOF, \
+    TestHaloCompositionHashPHOP


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/answer_testing/halo_tests.py
--- a/yt/utilities/answer_testing/halo_tests.py
+++ b/yt/utilities/answer_testing/halo_tests.py
@@ -1,16 +1,20 @@
 from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
+from yt.analysis_modules.halo_finding.api import *
+import hashlib
+import numpy as np
 
-class TestHaloCount(YTStaticOutputTest):
+# Tests the number of halos returned by the HOP halo finder on a dataset
+class TestHaloCountHOP(YTStaticOutputTest):
     threshold = 80.0
 
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
-        # We only care about the number of haloes.
-        self.result = len(haloes)
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of halos.
+        self.result = len(halos)
                     
     def compare(self, old_result):
         # The new value should be identical to the old one.
@@ -19,18 +23,53 @@
     def plot(self):
         return []
 
-create_test(TestHaloCount, "halo_count_test", threshold=80.0)
+# Tests the number of halos returned by the FOF halo finder on a dataset
+class TestHaloCountFOF(YTStaticOutputTest):
+    link = 0.2
+    padding = 0.02
+
+    def run(self):
+        # Find the halos using FOF.
+        halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False, 
+                               padding=self.padding)
+        # We only care about the number of halos.
+        self.result = len(halos)
+                    
+    def compare(self, old_result):
+        # The new value should be identical to the old one.
+        self.compare_value_delta(self.result, old_result, 0)
+
+    def plot(self):
+        return []
+
+# Tests the number of halos returned by the Parallel HOP halo finder on a 
+# dataset
+class TestHaloCountPHOP(YTStaticOutputTest):
+    threshold = 80.0
+
+    def run(self):
+        # Find the halos using parallel HOP.
+        halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of halos.
+        self.result = len(halos)
+                    
+    def compare(self, old_result):
+        # The new value should be identical to the old one.
+        self.compare_value_delta(self.result, old_result, 0)
+
+    def plot(self):
+        return []
 
 class TestHaloComposition(YTStaticOutputTest):
     threshold=80.0
     
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
         # The result is a list of the particle IDs, stored
         # as sets for easy comparison.
         IDs = []
-        for halo in haloes:
+        for halo in halos:
             IDs.append(set(halo["particle_index"]))
         self.result = IDs
     
@@ -42,7 +81,85 @@
                 return False
         return True
     
-    def plot(self):
-        return []
+# Tests the content of the halos returned by the HOP halo finder on a dataset 
+# by comparing the hash of the arrays of all the particles contained in each
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
+class TestHaloCompositionHashHOP(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a flattened array of the arrays of the particle IDs for
+        # each halo
+        IDs = []
+        for halo in halos:
+            IDs.append(halo["particle_index"])
+        IDs = np.concatenate(IDs)
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
 
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
+# Tests the content of the halos returned by the FOF halo finder on a dataset 
+# by comparing the hash of the arrays of all the particles contained in each
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
+class TestHaloCompositionHashFOF(YTStaticOutputTest):
+    link = 0.2
+    padding = 0.02
+    
+    def run(self):
+        # Find the halos using vanilla FOF.
+        halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False, 
+                               padding=self.padding)
+        # The result is a flattened array of the arrays of the particle IDs for
+        # each halo
+        IDs = []
+        for halo in halos:
+            IDs.append(halo["particle_index"])
+        IDs = np.concatenate(IDs)
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
+
+# Tests the content of the halos returned by the Parallel HOP halo finder on a 
+# dataset by comparing the hash of the arrays of all the particles contained 
+# in each halo.  Evidently breaks on parallel runtime.  DO NOT USE.
+class TestHaloCompositionHashPHOP(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the halos using parallel HOP.
+        halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a flattened array of the arrays of the particle IDs for
+        # each halo
+        IDs = []
+        for halo in halos:
+            IDs.append(halo["particle_index"])
+        IDs = np.concatenate(IDs)
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # The concatenated ID arrays should be identical.  To check this
+        # faster, we compare their 256-bit hashes instead.
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        return result_hash == old_result_hash


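The three hash tests above share one pattern: concatenate the per-halo particle indices into a single array and compare 256-bit digests instead of comparing element by element. A minimal sketch of that pattern, with toy arrays standing in for real halo data, might look like:

    import hashlib
    import numpy as np

    def array_digest(arr):
        # Hash the raw bytes; identical contents give identical digests.
        return hashlib.sha256(arr.tostring()).hexdigest()

    old_ids = np.array([3, 1, 4, 1, 5, 9], dtype='int64')
    new_ids = np.array([3, 1, 4, 1, 5, 9], dtype='int64')
    assert array_digest(old_ids) == array_digest(new_ids)

Hashing trades detail for speed: a mismatch says that the halo membership changed, not which halo changed.
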
diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/answer_testing/hydro_tests.py
--- a/yt/utilities/answer_testing/hydro_tests.py
+++ b/yt/utilities/answer_testing/hydro_tests.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import pylab
 from yt.mods import *
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
@@ -94,6 +94,34 @@
         write_image(self.result, fn)
         return [fn]
 
+class TestRay(YTStaticOutputTest):
+
+    field = None
+
+    def run(self):
+        na.random.seed(4333)
+        start_point = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
+        end_point   = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
+
+        # The ray object hands back the data array directly.
+        ray = self.pf.h.ray(start_point, end_point, field=self.field)
+
+        # Store the field values sampled along the ray.
+        self.result = ray[self.field]
+
+    def compare(self, old_result):
+        ray  = self.result
+        oray = old_result
+
+        self.compare_array_delta(ray, oray, 1e-7)
+
+    def plot(self):
+        return []
+
 class TestSlice(YTStaticOutputTest):
 
     field = None


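TestRay seeds the random number generator so that the ray endpoints are identical between the stored answer and the current run. The endpoint construction is independent of yt; a standalone sketch, with hypothetical domain edges in place of pf.domain_left_edge and pf.domain_right_edge:

    import numpy as na

    na.random.seed(4333)
    left_edge = na.array([0.0, 0.0, 0.0])    # hypothetical domain bounds
    right_edge = na.array([1.0, 1.0, 1.0])

    def random_point(left, right):
        # Uniformly sample one point inside the box [left, right].
        return na.random.random(left.size) * (right - left) + left

    start_point = random_point(left_edge, right_edge)
    end_point = random_point(left_edge, right_edge)
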
diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/answer_testing/output_tests.py
--- a/yt/utilities/answer_testing/output_tests.py
+++ b/yt/utilities/answer_testing/output_tests.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import matplotlib
 from yt.mods import *
 
 # We first create our dictionary of tests to run.  This starts out empty, and


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/answer_testing/particle_tests.py
--- a/yt/utilities/answer_testing/particle_tests.py
+++ b/yt/utilities/answer_testing/particle_tests.py
@@ -1,5 +1,5 @@
+import matplotlib
 from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
 


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import os, shelve, cPickle, sys, imp, tempfile
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
@@ -82,7 +82,7 @@
 class RegressionTestRunner(object):
     def __init__(self, results_id, compare_id = None,
                  results_path = ".", compare_results_path = ".",
-                 io_log = "OutputLog"):
+                 io_log = "OutputLog", plot_tests = False):
         # This test runner assumes it has been launched with the current
         # working directory set to that of the test case itself.
         self.io_log = io_log
@@ -95,6 +95,7 @@
         self.results = RegressionTestStorage(results_id, path=results_path)
         self.plot_list = {}
         self.passed_tests = {}
+        self.plot_tests = plot_tests
 
     def run_all_tests(self):
         plot_list = []
@@ -129,7 +130,8 @@
         print self.id, "Running", test.name,
         test.setup()
         test.run()
-        self.plot_list[test.name] = test.plot()
+        if self.plot_tests:
+            self.plot_list[test.name] = test.plot()
         self.results[test.name] = test.result
         success, msg = self._compare(test)
         if self.old_results is None:


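With the new plot_tests keyword, plot generation is opt-in rather than unconditional. A hedged usage sketch, assuming the runner is launched from the test case's directory with an OutputLog present (the results_id values here are hypothetical):

    from yt.utilities.answer_testing.runner import RegressionTestRunner

    # Compare a fresh run against stored "gold" answers, and also
    # collect the per-test plots this time.
    runner = RegressionTestRunner("current", compare_id="gold",
                                  plot_tests=True)
    runner.run_all_tests()
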
diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -643,16 +643,18 @@
                            virial_quantities=['TotalMassMsun','RadiusMpc'])
 
         # Add profile fields.
-        hp.add_profile('CellVolume',weight_field=None,accumulation=True)
-        hp.add_profile('TotalMassMsun',weight_field=None,accumulation=True)
-        hp.add_profile('Density',weight_field=None,accumulation=False)
-        hp.add_profile('Temperature',weight_field='CellMassMsun',accumulation=False)
+        pf = hp.pf
+        all_fields = pf.h.field_list + pf.h.derived_field_list
+        for field, wv, acc in HP.standard_fields:
+            if field not in all_fields: continue
+            hp.add_profile(field, wv, acc)
         hp.make_profiles(filename="FilteredQuantities.out")
 
         # Add projection fields.
         hp.add_projection('Density',weight_field=None)
         hp.add_projection('Temperature',weight_field='Density')
-        hp.add_projection('Metallicity',weight_field='Density')
+        if "Metallicity" in all_fields:
+            hp.add_projection('Metallicity',weight_field='Density')
 
         # Make projections for all three axes using the filtered halo list and
         # save data to hdf5 files.
@@ -1568,7 +1570,7 @@
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
             save_name += '.png'
-        if cam._mpi_get_rank() != -1:
+        if cam._par_rank != -1:
             write_bitmap(image,save_name)
         
 

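The halo-profiler command now intersects a standard field list with what the dataset actually defines instead of hard-coding the profiles. The guard is generic; given an already-constructed profiler hp and parameter file pf, and a hypothetical stand-in for HP.standard_fields, it reduces to:

    # (field, weight_field, accumulation) triples; a stand-in list.
    standard_fields = [("Density", None, False),
                       ("Temperature", "CellMassMsun", False)]

    all_fields = pf.h.field_list + pf.h.derived_field_list
    for field, weight, accumulation in standard_fields:
        # Only request fields the dataset actually provides.
        if field not in all_fields:
            continue
        hp.add_profile(field, weight, accumulation)
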

diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/parallel_tools/distributed_object_collection.py
--- a/yt/utilities/parallel_tools/distributed_object_collection.py
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py
@@ -53,8 +53,8 @@
         owners = self._object_owners[desired_indices]
         mylog.debug("Owner list: %s", na.unique1d(owners))
         # Even if we have a million bricks, this should not take long.
-        s = self._mpi_get_size()
-        m = self._mpi_get_rank()
+        s = self._par_size
+        m = self._par_rank
         requests = dict( ( (i, []) for i in xrange(s) ) )
         for i, p in izip(desired_indices, owners):
             requests[p].append(i)
@@ -91,18 +91,20 @@
                         size, p)
             proc_hooks[len(drecv_buffers)] = p
             drecv_buffers.append(self._create_buffer(requests[p]))
-            drecv_hooks.append(self._mpi_Irecv_double(drecv_buffers[-1], p, 1))
+            # does this work without specifying the type? (was double)
+            drecv_hooks.append(self._mpi_nonblocking_recv(drecv_buffers[-1], p, 1))
             recv_buffers.append(na.zeros(size, dtype='int64'))
             # Our index list goes on 0, our buffer goes on 1.  We know how big
             # the index list will be now.
-            recv_hooks.append(self._mpi_Irecv_long(recv_buffers[-1], p, 0))
+            # does this work without specifying the type? (was long)
+            recv_hooks.append(self._mpi_nonblocking_recv(recv_buffers[-1], p, 0))
         # Send our index lists into the waiting buffers
         mylog.debug("Sending index lists")
         for p, ind_list in requests.items():
             if p == m: continue
             if len(ind_list) == 0: continue
             # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
+            send_hooks.append(self._mpi_nonblocking_send(ind_list, p, 0))
         # Now we post receives for all of the data buffers.
         mylog.debug("Sending data")
         for i in self._mpi_Request_Waititer(recv_hooks):
@@ -113,8 +115,7 @@
             ind_list = recv_buffers[i]
             dsend_buffers.append(self._create_buffer(ind_list))
             self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_Isend_double(
-                dsend_buffers[-1], p, 1))
+            dsend_hooks.append(self._mpi_nonblocking_send(dsend_buffers[-1], p, 1))
         mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
         for i in self._mpi_Request_Waititer(drecv_hooks):
             mylog.debug("Unpacking from %s", proc_hooks[i])

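The typed Isend/Irecv wrappers can disappear because the MPI datatype is derivable from the numpy dtype of the buffer. A standalone mpi4py sketch of that derivation, mirroring the get_mpi_type scan introduced in the next file:

    from mpi4py import MPI
    import numpy as na

    dtype_names = dict(int64=MPI.LONG, float64=MPI.DOUBLE)

    def get_mpi_type(dtype):
        # dtypes compare equal to their string names but hash differently,
        # hence a linear scan rather than a direct dictionary lookup.
        for name, val in dtype_names.items():
            if name == dtype: return val

    def nonblocking_send(data, dest, tag=0):
        return MPI.COMM_WORLD.Isend([data, get_mpi_type(data.dtype)], dest, tag)

    def nonblocking_recv(data, source, tag=0):
        return MPI.COMM_WORLD.Irecv([data, get_mpi_type(data.dtype)], source, tag)
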

diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -82,6 +82,39 @@
 else:
     parallel_capable = False
 
+# Set up translation tables
+if parallel_capable:
+    dtype_names = dict(
+            float32 = MPI.FLOAT,
+            float64 = MPI.DOUBLE,
+            int32   = MPI.INT,
+            int64   = MPI.LONG
+    )
+    op_names = dict(
+        sum = MPI.SUM,
+        min = MPI.MIN,
+        max = MPI.MAX
+    )
+
+else:
+    dtype_names = dict(
+            float32 = "MPI.FLOAT",
+            float64 = "MPI.DOUBLE",
+            int32   = "MPI.INT",
+            int64   = "MPI.LONG"
+    )
+    op_names = dict(
+            sum = "MPI.SUM",
+            min = "MPI.MIN",
+            max = "MPI.MAX"
+    )
+
+# Because the dtypes compare equal with == but do not hash the same, we need
+# this function for dictionary lookups.
+def get_mpi_type(dtype):
+    for dt, val in dtype_names.items():
+        if dt == dtype: return val
+
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then
@@ -182,9 +215,9 @@
     output; otherwise, the function gets called.  Used as a decorator.
     """
     @wraps(func)
-    def passage(self, data):
+    def passage(self, data, **kwargs):
         if not self._distributed: return data
-        return func(self, data)
+        return func(self, data, **kwargs)
     return passage
 
 def parallel_blocking_call(func):
@@ -297,35 +330,6 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_2d_inclined(self, unit_vectors, origin, widths,
-                                         box_vectors, resolution = (1.0, 1.0)):
-        if not self._distributed:
-            ib = self.hierarchy.inclined_box(origin, box_vectors)
-            return False, ib, resolution
-        # We presuppose that unit_vectors is already unitary.  If it's not,
-        # caveat emptor.
-        uv = na.array(unit_vectors)
-        inv_mat = na.linalg.pinv(uv)
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-        mi = MPI.COMM_WORLD.rank
-        cx, cy = na.unravel_index(mi, cc)
-        resolution = (1.0/cc[0], 1.0/cc[1])
-        # We are rotating with respect to the *origin*, not the back center,
-        # so we go from 0 .. width.
-        px = na.mgrid[0.0:1.0:(cc[0]+1)*1j][cx] * widths[0]
-        py = na.mgrid[0.0:1.0:(cc[1]+1)*1j][cy] * widths[1]
-        nxo = inv_mat[0,0]*px + inv_mat[0,1]*py + origin[0]
-        nyo = inv_mat[1,0]*px + inv_mat[1,1]*py + origin[1]
-        nzo = inv_mat[2,0]*px + inv_mat[2,1]*py + origin[2]
-        nbox_vectors = na.array(
-                       [unit_vectors[0] * widths[0]/cc[0],
-                        unit_vectors[1] * widths[1]/cc[1],
-                        unit_vectors[2] * widths[2]],
-                        dtype='float64')
-        norigin = na.array([nxo, nyo, nzo])
-        box = self.hierarchy.inclined_box(norigin, nbox_vectors)
-        return True, box, resolution
-        
     def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
         LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
@@ -441,139 +445,7 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-        
 
-    def _partition_hierarchy_3d_bisection(self, axis, bins, counts, top_bounds = None,\
-        old_group = None, old_comm = None, cut=None, old_cc=None):
-        """
-        Partition the volume into evenly weighted subvolumes using the distribution
-        in counts. The bisection happens in the MPI communicator group old_group.
-        You may need to set "MPI_COMM_MAX" and "MPI_GROUP_MAX" environment 
-        variables.
-        """
-        counts = counts.astype('int64')
-        if not self._distributed:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-            return False, LE, RE, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
-        
-        # First time through the world is the current group.
-        if old_group == None or old_comm == None:
-            old_group = MPI.COMM_WORLD.Get_group()
-            old_comm = MPI.COMM_WORLD
-        
-        # Figure out the gridding based on the deepness of cuts.
-        if old_cc is None:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        else:
-            cc = old_cc
-        cc[cut[0]] /= cut[1]
-        # Set the boundaries of the full bounding box for this group.
-        if top_bounds == None:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        else:
-            LE, RE = top_bounds
-
-        ra = old_group.Get_rank() # In this group, not WORLD, unless it's the first time.
-        
-        # First find the total number of particles in my group.
-        parts = old_comm.allreduce(int(counts.sum()), op=MPI.SUM)
-        # Now the full sum in the bins along this axis in this group.
-        full_counts = na.empty(counts.size, dtype='int64')
-        old_comm.Allreduce([counts, MPI.LONG], [full_counts, MPI.LONG], op=MPI.SUM)
-        # Find the bin that passes the cut points.
-        midpoints = [LE[axis]]
-        sum = 0
-        bin = 0
-        for step in xrange(1,cut[1]):
-            while sum < ((parts*step)/cut[1]):
-                lastsum = sum
-                sum += full_counts[bin]
-                bin += 1
-            # Bin edges
-            left_edge = bins[bin-1]
-            right_edge = bins[bin]
-            # Find a better approx of the midpoint cut line using a linear approx.
-            a = float(sum - lastsum) / (right_edge - left_edge)
-            midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
-            #midpoint = (left_edge + right_edge) / 2.
-        midpoints.append(RE[axis])
-        # Now we need to split the members of this group into chunks. 
-        # The values that go into the _ranks are the ranks of the tasks
-        # in *this* communicator group, which go zero to size - 1. They are not
-        # the same as the global ranks!
-        groups = {}
-        ranks = {}
-        old_group_size = old_group.Get_size()
-        for step in xrange(cut[1]):
-            groups[step] = na.arange(step*old_group_size/cut[1], (step+1)*old_group_size/cut[1])
-            # [ (start, stop, step), ]
-            ranks[step] = [ (groups[step][0], groups[step][-1], 1), ] 
-        
-        # Based on where we are, adjust our LE or RE, depending on axis. At the
-        # same time assign the new MPI group membership.
-        for step in xrange(cut[1]):
-            if ra in groups[step]:
-                LE[axis] = midpoints[step]
-                RE[axis] = midpoints[step+1]
-                new_group = old_group.Range_incl(ranks[step])
-                new_comm = old_comm.Create(new_group)
-        
-        if old_cc is not None:
-            old_group.Free()
-            old_comm.Free()
-        
-        new_top_bounds = (LE,RE)
-        
-        # Using the new boundaries, regrid.
-        mi = new_comm.rank
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
-        my_LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        my_RE = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # Return a new subvolume and associated stuff.
-        return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
-            self.hierarchy.region_strict(self.center, my_LE, my_RE)
-
-    def _mpi_find_neighbor_3d(self, shift):
-        """ Given a shift array, 1x3 long, find the task ID
-        of that neighbor. For example, shift=[1,0,0] finds the neighbor
-        immediately to the right in the positive x direction. Each task
-        has 26 neighbors, of which some may be itself depending on the number
-        and arrangement of tasks.
-        """
-        if not self._distributed: return 0
-        shift = na.array(shift)
-        cc = na.array(MPI.Compute_dims(MPI.COMM_WORLD.size, 3))
-        mi = MPI.COMM_WORLD.rank
-        si = MPI.COMM_WORLD.size
-        # store some facts about myself
-        mi_cx,mi_cy,mi_cz = na.unravel_index(mi,cc)
-        mi_ar = na.array([mi_cx,mi_cy,mi_cz])
-        # these are identical on all tasks
-        # should these be calculated once and stored?
-        #dLE = na.empty((si,3), dtype='float64') # positions not needed yet...
-        #dRE = na.empty((si,3), dtype='float64')
-        tasks = na.empty((cc[0],cc[1],cc[2]), dtype='int64')
-        
-        for i in range(si):
-            cx,cy,cz = na.unravel_index(i,cc)
-            tasks[cx,cy,cz] = i
-            #x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-            #y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-            #z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-            #dLE[i, :] = na.array([x[0], y[0], z[0]], dtype='float64')
-            #dRE[i, :] = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # find the neighbor
-        ne = (mi_ar + shift) % cc
-        ne = tasks[ne[0],ne[1],ne[2]]
-        return ne
-        
-        
     def _barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
@@ -587,311 +459,14 @@
         return None
 
     @parallel_passthrough
-    def _mpi_catrgb(self, data):
-        self._barrier()
-        data, final = data
-        if MPI.COMM_WORLD.rank == 0:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-            nsize = final[0]/cc[0], final[1]/cc[1]
-            new_image = na.zeros((final[0], final[1], 6), dtype='float64')
-            new_image[0:nsize[0],0:nsize[1],:] = data[:]
-            for i in range(1,MPI.COMM_WORLD.size):
-                cy, cx = na.unravel_index(i, cc)
-                mylog.debug("Receiving image from % into bits %s:%s, %s:%s",
-                    i, nsize[0]*cx,nsize[0]*(cx+1),
-                       nsize[1]*cy,nsize[1]*(cy+1))
-                buf = _recv_array(source=i, tag=0).reshape(
-                    (nsize[0],nsize[1],6))
-                new_image[nsize[0]*cy:nsize[0]*(cy+1),
-                          nsize[1]*cx:nsize[1]*(cx+1),:] = buf[:]
-            data = new_image
-        else:
-            _send_array(data.ravel(), dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data)
-        return (data, final)
-
-    @parallel_passthrough
     def _mpi_catdict(self, data):
-        field_keys = data.keys()
-        field_keys.sort()
-        size = data[field_keys[0]].shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        for key in field_keys:
-            dd = data[key]
-            rv = _alltoallv_array(dd, arr_size, offsets, sizes)
-            data[key] = rv
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_joindict(self, data):
-        #self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        #self._barrier()
-        return data
+        self._par_combine_object(data, op = "join")
 
     @parallel_passthrough
-    def _mpi_joindict_unpickled_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.DOUBLE], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='float64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.DOUBLE], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.DOUBLE], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys, root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict_unpickled_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.LONG], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='int64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.LONG], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys,root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.concatenate((data, new_data))
-            size = data.size
-            del new_data
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Now we distribute the full array.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del data
-            data = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
-                data = na.concatenate((data, new_data))
-            size = data.size
-            del new_data
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
-        # Now we distribute the full array.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del data
-            data = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_int(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int32')
-                MPI.COMM_WORLD.Recv([new_data, MPI.INT], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.INT], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_minimum_array_long(self, data):
-        """
-        Specifically for parallelHOP. For the identical array on each task,
-        it merges the arrays together, taking the lower value at each index.
-        """
-        self._barrier()
-        size = data.size # They're all the same size, of course
-        if MPI.COMM_WORLD.rank == 0:
-            new_data = na.empty(size, dtype='int64')
-            for i in range(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.minimum(data, new_data)
-            del new_data
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Redistribute from root
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_bcast_long_dict_unpickled(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            size = len(data)
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        root_keys = na.empty(size, dtype='int64')
-        root_values = na.empty(size, dtype='int64')
-        if MPI.COMM_WORLD.rank == 0:
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            data = {}
-            for i,key in enumerate(root_keys):
-                data[key] = root_values[i]
-        return data
-
-    @parallel_passthrough
-    def _mpi_maxdict(self, data):
-        """
-        For each key in data, find the maximum value across all tasks, and
-        then broadcast it back.
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                temp_data = MPI.COMM_WORLD.recv(source=i, tag=0)
-                for key in temp_data:
-                    try:
-                        old_value = data[key]
-                    except KeyError:
-                        # This guarantees the new value gets added.
-                        old_value = None
-                    if old_value < temp_data[key]:
-                        data[key] = temp_data[key]
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
     def _mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
@@ -938,61 +513,15 @@
                 top_keys = na.concatenate([top_keys, recv_top_keys])
                 bot_keys = na.concatenate([bot_keys, recv_bot_keys])
                 vals = na.concatenate([vals, recv_vals])
-#                 for j, top_key in enumerate(top_keys):
-#                     if j%1000 == 0: mylog.info(j)
-#                     # Make sure there's an entry for top_key in data
-#                     try:
-#                         test = data[top_key]
-#                     except KeyError:
-#                         data[top_key] = {}
-#                     try:
-#                         old_value = data[top_key][bot_keys[j]]
-#                     except KeyError:
-#                         # This guarantees the new value gets added.
-#                         old_value = None
-#                     if old_value < vals[j]:
-#                         data[top_key][bot_keys[j]] = vals[j]
         else:
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
             size = top_keys.size
             MPI.COMM_WORLD.send(size, dest=0, tag=0)
             MPI.COMM_WORLD.Send([top_keys, MPI.LONG], dest=0, tag=0)
             MPI.COMM_WORLD.Send([bot_keys, MPI.LONG], dest=0, tag=0)
             MPI.COMM_WORLD.Send([vals, MPI.DOUBLE], dest=0, tag=0)
-        # Getting ghetto here, we're going to decompose the dict into arrays,
-        # send that, and then reconstruct it. When data is too big the pickling
-        # of the dict fails.
+        # We're going to decompose the dict into arrays, send that, and then
+        # reconstruct it. When data is too big the pickling of the dict fails.
         if MPI.COMM_WORLD.rank == 0:
-#             data = defaultdict(dict)
-#             for i,top_key in enumerate(top_keys):
-#                 try:
-#                     old = data[top_key][bot_keys[i]]
-#                 except KeyError:
-#                     old = None
-#                 if old < vals[i]:
-#                     data[top_key][bot_keys[i]] = vals[i]
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             del data
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
             size = top_keys.size
         # Broadcast them using array methods
         size = MPI.COMM_WORLD.bcast(size, root=0)
@@ -1006,82 +535,88 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
-    def __mpi_recvlist(self, data):
-        # First we receive, then we make a new list.
-        data = ensure_list(data)
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = ensure_list(MPI.COMM_WORLD.recv(source=i, tag=0))
-            data += buf
-        return data
+    def _par_combine_object(self, data, op):
+        # op can be chosen from:
+        #   cat
+        #   join
+        # data may be one of the following types:
+        #   na.ndarray
+        #   dict
+        #   data field dict
+        #   list
+        if isinstance(data, types.DictType) and op == "join":
+            if MPI.COMM_WORLD.rank == 0:
+                for i in range(1,MPI.COMM_WORLD.size):
+                    data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        elif isinstance(data, types.DictType) and op == "cat":
+            field_keys = data.keys()
+            field_keys.sort()
+            size = data[field_keys[0]].shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            for key in field_keys:
+                dd = data[key]
+                rv = _alltoallv_array(dd, arr_size, offsets, sizes)
+                data[key] = rv
+            return data
+        elif isinstance(data, na.ndarray) and op == "cat":
+            if data is None:
+                ncols = -1
+                size = 0
+            else:
+                if len(data) == 0:
+                    ncols = -1
+                    size = 0
+                elif len(data.shape) == 1:
+                    ncols = 1
+                    size = data.shape[0]
+                else:
+                    ncols, size = data.shape
+            ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
+            if size == 0:
+                data = na.zeros((ncols,0), dtype='float64') # This only works for
+            size = data.shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            data = _alltoallv_array(data, arr_size, offsets, sizes)
+            return data
+        elif isinstance(data, types.ListType) and op == "cat":
+            if MPI.COMM_WORLD.rank == 0:
+                # Gather lists on root (the old __mpi_recvlist body,
+                # since that helper is removed below).
+                data = ensure_list(data)
+                for i in range(1, MPI.COMM_WORLD.size):
+                    data += ensure_list(MPI.COMM_WORLD.recv(source=i, tag=0))
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        raise NotImplementedError
 
     @parallel_passthrough
     def _mpi_catlist(self, data):
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvlist(data)
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
-    @parallel_passthrough
-    def __mpi_recvarrays(self, data):
-        # First we receive, then we make a new list.
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = _recv_array(source=i, tag=0)
-            if buf is not None:
-                if data is None: data = buf
-                else: data = na.concatenate([data, buf])
-        return data
-
-    @parallel_passthrough
-    def _mpi_cat_na_array(self,data):
-        self._barrier()
-        comm = MPI.COMM_WORLD
-        if comm.rank == 0:
-            for i in range(1,comm.size):
-                buf = comm.recv(source=i, tag=0)
-                data = na.concatenate([data,buf])
-        else:
-            comm.send(data, 0, tag = 0)
-        data = comm.bcast(data, root=0)
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_catarray(self, data):
-        if data is None:
-            ncols = -1
-            size = 0
-        else:
-            if len(data) == 0:
-                ncols = -1
-                size = 0
-            elif len(data.shape) == 1:
-                ncols = 1
-                size = data.shape[0]
-            else:
-                ncols, size = data.shape
-        ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
-        if size == 0:
-            data = na.zeros((ncols,0), dtype='float64') # This only works for
-        size = data.shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        data = _alltoallv_array(data, arr_size, offsets, sizes)
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
-        #self._barrier()
         data = MPI.COMM_WORLD.bcast(data, root=0)
         return data
 
@@ -1097,82 +632,37 @@
         io_handler.preload(grids, fields)
 
     @parallel_passthrough
-    def _mpi_double_array_max(self,data):
-        """
-        Finds the na.maximum of a distributed array and returns the result
-        back to all. The array should be the same length on all tasks!
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            recv_data = na.empty(data.size, dtype='float64')
-            for i in xrange(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
-                data = na.maximum(data, recv_data)
+    def _mpi_allreduce(self, data, dtype=None, op='sum'):
+        op = op_names[op]
+        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+            if dtype is None:
+                dtype = data.dtype
+            if dtype != data.dtype:
+                data = data.astype(dtype)
+            temp = data.copy()
+            MPI.COMM_WORLD.Allreduce([temp,get_mpi_type(dtype)], 
+                                     [data,get_mpi_type(dtype)], op)
+            return data
         else:
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_allsum(self, data):
-        #self._barrier()
-        # We use old-school pickling here on the assumption the arrays are
-        # relatively small ( < 1e7 elements )
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
-            tr = na.zeros_like(data)
-            if not data.flags.c_contiguous: data = data.copy()
-            MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)
-            return tr
-        else:
-            return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
-
-    @parallel_passthrough
-    def _mpi_Allsum_double(self, data):
-        self._barrier()
-        # Non-pickling float allsum of a float array, data.
-        temp = data.copy()
-        MPI.COMM_WORLD.Allreduce([temp, MPI.DOUBLE], [data, MPI.DOUBLE], op=MPI.SUM)
-        del temp
-        return data
-
-    @parallel_passthrough
-    def _mpi_Allsum_long(self, data):
-        self._barrier()
-        # Non-pickling float allsum of an int array, data.
-        temp = data.copy()
-        MPI.COMM_WORLD.Allreduce([temp, MPI.LONG], [data, MPI.LONG], op=MPI.SUM)
-        del temp
-        return data
-
-    @parallel_passthrough
-    def _mpi_allmax(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MAX)
-
-    @parallel_passthrough
-    def _mpi_allmin(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MIN)
+            # We use old-school pickling here on the assumption the arrays are
+            # relatively small ( < 1e7 elements )
+            return MPI.COMM_WORLD.allreduce(data, op)
 
     ###
     # Non-blocking stuff.
     ###
 
-    def _mpi_Irecv_long(self, data, source, tag=0):
+    def _mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.LONG], source, tag)
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)
 
-    def _mpi_Irecv_double(self, data, source, tag=0):
+    def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.DOUBLE], source, tag)
-
-    def _mpi_Isend_long(self, data, dest, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
-
-    def _mpi_Isend_double(self, data, dest, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.DOUBLE], dest, tag)
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)
 
     def _mpi_Request_Waitall(self, hooks):
         if not self._distributed: return
@@ -1195,11 +685,17 @@
     # End non-blocking stuff.
     ###
 
-    def _mpi_get_size(self):
+    ###
+    # Parallel rank and size properties.
+    ###
+
+    @property
+    def _par_size(self):
         if not self._distributed: return 1
         return MPI.COMM_WORLD.size
 
-    def _mpi_get_rank(self):
+    @property
+    def _par_rank(self):
         if not self._distributed: return 0
         return MPI.COMM_WORLD.rank
 

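The refactor above collapses _mpi_allsum, _mpi_Allsum_double, _mpi_Allsum_long, _mpi_allmax, and _mpi_allmin into one _mpi_allreduce driven by the op_names table. A standalone mpi4py sketch of that dispatch:

    from mpi4py import MPI
    import numpy as na

    op_names = dict(sum=MPI.SUM, min=MPI.MIN, max=MPI.MAX)

    def allreduce(data, op='sum'):
        if isinstance(data, na.ndarray):
            # Fast path: reduce the raw buffer without pickling.
            result = na.empty_like(data)
            MPI.COMM_WORLD.Allreduce(data, result, op=op_names[op])
            return result
        # Slow path: pickle-based reduction for scalars and objects.
        return MPI.COMM_WORLD.allreduce(data, op=op_names[op])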

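_par_combine_object likewise folds the joindict/catlist/catarray family into one dispatcher keyed on (type, op). The dict "join" branch, in standalone mpi4py form:

    from mpi4py import MPI

    def join_dicts(data):
        # Root merges every task's dict, then broadcasts the union.
        comm = MPI.COMM_WORLD
        if comm.rank == 0:
            for i in range(1, comm.size):
                data.update(comm.recv(source=i, tag=0))
        else:
            comm.send(data, dest=0, tag=0)
        return comm.bcast(data, root=0)
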
diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -124,8 +124,8 @@
             self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
-        nprocs = self._mpi_get_size()
-        my_rank = self._mpi_get_rank()
+        nprocs = self._par_size
+        my_rank = self._par_rank
         self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
 
         pbar = get_pbar("Streamlining", self.N)
@@ -144,8 +144,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allsum(self.streamlines)
-        self.magnitudes = self._mpi_allsum(self.magnitudes)
+        self.streamlines = self._mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self._mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):

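The streamliner uses a stripe-then-sum merge: each rank fills every nprocs-th streamline, and a sum allreduce recombines the disjoint stripes, since entries no rank touched stay zero. In pure-numpy miniature, simulating two ranks:

    import numpy as na

    N, nprocs = 6, 2
    partials = []
    for my_rank in range(nprocs):
        work = na.zeros((N, 3))
        # Stand-in for this rank's integration output on its stripe.
        work[my_rank::nprocs, :] = my_rank + 1
        partials.append(work)

    merged = sum(partials)  # what _mpi_allreduce(..., op='sum') computes
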

diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -40,6 +40,5 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
-from software_sampler import VolumeRendering
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -356,7 +356,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank == 0 and fn is not None:
             if clip_ratio is not None:
                 write_bitmap(image, fn, clip_ratio*image.std())
             else:
@@ -623,7 +623,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank == 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg


diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -275,7 +275,7 @@
         self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
         self.brick_parents = na.zeros( NB, dtype='int64')
         self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
+        self.brick_owners = na.ones(NB, dtype='int32') * self._par_rank
         self._object_owners = self.brick_owners
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
@@ -307,7 +307,7 @@
         bricks = self.bricks
         self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
         # Copy our bricks back in
-        self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
+        self.bricks[self.brick_owners == self._par_rank] = bricks[:]
 
     def _create_buffer(self, ind_list):
         # Note that we have vertex-centered data, so we add one before taking


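The brick bookkeeping above is a boolean-mask scatter: a fresh object array is allocated with one slot per brick, and each rank drops its own bricks back into the slots it owns. In miniature:

    import numpy as na

    brick_owners = na.array([0, 1, 0, 1], dtype='int32')
    my_rank = 1
    my_bricks = na.array(["b1", "b3"], dtype='object')  # stand-ins for bricks

    bricks = na.array([None] * brick_owners.size, dtype='object')
    bricks[brick_owners == my_rank] = my_bricks[:]
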
diff -r 6c06c843f2d95d3622f3bd568290d7d61658cf6d -r 13f1bb040e8cb934cdd208cb64d82bc78710a1ea yt/visualization/volume_rendering/software_sampler.py
--- a/yt/visualization/volume_rendering/software_sampler.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-Import the components of the volume rendering extension
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import h5py
-import numpy as na
-
-from yt.funcs import *
-
-from yt.data_objects.data_containers import data_object_registry
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-from yt.visualization.volume_rendering.grid_partitioner import \
-    HomogenizedBrickCollection
-
-# We're going to register this class, but it does not directly inherit from
-# AMRData.
-class VolumeRendering(ParallelAnalysisInterface):
-    bricks = None
-    def __init__(self, normal_vector, width, center,
-                 resolution, transfer_function,
-                 fields = None, whole_box = False,
-                 sub_samples = 5, north_vector = None,
-                 pf = None):
-        # Now we replicate some of the 'cutting plane' logic
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        self.resolution = resolution
-        self.sub_samples = sub_samples
-        if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
-        self.width = width
-        self.center = center
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.transfer_function = transfer_function
-
-        # Now we set up our  various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.unit_vectors = [north_vector, east_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = center - 0.5*width[0]*self.unit_vectors[0] \
-                             - 0.5*width[1]*self.unit_vectors[1] \
-                             - 0.5*width[2]*self.unit_vectors[2]
-        self.back_center = center - 0.5*width[0]*self.unit_vectors[2]
-        self.front_center = center + 0.5*width[0]*self.unit_vectors[2]
-
-        self._initialize_source()
-        self._construct_vector_array()
-
-    def _initialize_source(self):
-        check, source, rf = self._partition_hierarchy_2d_inclined(
-                self.unit_vectors, self.origin, self.width, self.box_vectors)
-        if check:
-            self._base_source = self.pf.h.inclined_box(
-                self.origin, self.box_vectors)
-        else:
-            # To avoid doubling-up
-            self._base_source = source
-        self.source = source
-        self.res_fac = rf
-        # Note that if we want to do this in parallel, with 3D domain decomp
-        # for the grid/bricks, we can supply self._base_source here.  But,
-        # _distributed can't be overridden in that case.
-        self._brick_collection = HomogenizedBrickCollection(self.source)
-
-    def ray_cast(self, finalize=True):
-        if self.bricks is None: self.partition_grids()
-        # Now we order our bricks
-        total_cells, LE, RE = 0, [], []
-        for b in self.bricks:
-            LE.append(b.LeftEdge)
-            RE.append(b.RightEdge)
-            total_cells += na.prod(b.my_data[0].shape)
-        LE = na.array(LE) - self.back_center
-        RE = na.array(RE) - self.back_center
-        LE = na.sum(LE * self.unit_vectors[2], axis=1)
-        RE = na.sum(RE * self.unit_vectors[2], axis=1)
-        dist = na.minimum(LE, RE)
-        ind = na.argsort(dist)
-        pbar = get_pbar("Ray casting ", total_cells)
-        total_cells = 0
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        for i, b in enumerate(self.bricks[ind]):
-            pos = b.cast_plane(tfp, self.vector_plane)
-            total_cells += na.prod(b.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        if finalize: self._finalize()
-
-    def _finalize(self):
-        #im = self._mpi_catdict(dict(image=self.image)).pop('image')
-        im, f = self._mpi_catrgb((self.image, self.resolution))
-        self.image = im
-
-    def dump_image(self, prefix):
-        fn = "%s.h5" % (self._get_filename(prefix))
-        mylog.info("Saving to %s", fn)
-        f = h5py.File(fn, "w")
-        f.create_dataset("/image", data=self.image)
-
-    def load_bricks(self, fn):
-        self.bricks = import_partitioned_grids(fn)
-
-    def save_bricks(self, fn):
-        # This will need to be modified for parallel
-        export_partitioned_grids(self.bricks, fn)
-
-    def save_image(self, prefix = None, norm = 1.0):
-        if norm is not None:
-            mi, ma = self.image.min(), norm*self.image.max()
-            print "Normalizing with ", mi, ma
-            image = (na.clip(self.image, mi, ma) - mi)/(ma - mi)
-        else:
-            image = self.image
-        if prefix is None: prefix = "%s_volume_rendering" % (self.pf)
-        plot_rgb(image, prefix)
-
-    def partition_grids(self):
-        log_field = []
-        for field in self.fields:
-            log_field.append(field in self.pf.field_info and 
-                             self.pf.field_info[field].take_log)
-        self._brick_collection._partition_local_grids(self.fields, log_field)
-        # UNCOMMENT FOR PARALLELISM
-        #self._brick_collection._collect_bricks(self.source)
-        self.bricks = self._brick_collection.bricks
-
-    def _construct_vector_array(self):
-        rx = self.resolution[0] * self.res_fac[0]
-        ry = self.resolution[1] * self.res_fac[1]
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        self.image = na.zeros((rx,ry,3), dtype='float64', order='C')
-        # We might have a different width and back_center
-        bl = self.source.box_lengths
-        px = na.linspace(-bl[0]/2.0, bl[0]/2.0, rx)[:,None]
-        py = na.linspace(-bl[1]/2.0, bl[1]/2.0, ry)[None,:]
-        inv_mat = self.source._inv_mat
-        bc = self.source.origin + 0.5*self.source.box_vectors[0] \
-                                + 0.5*self.source.box_vectors[1]
-        vectors = na.zeros((rx, ry, 3),
-                            dtype='float64', order='C')
-        vectors[:,:,0] = inv_mat[0,0]*px + inv_mat[0,1]*py + bc[0]
-        vectors[:,:,1] = inv_mat[1,0]*px + inv_mat[1,1]*py + bc[1]
-        vectors[:,:,2] = inv_mat[2,0]*px + inv_mat[2,1]*py + bc[2]
-        bounds = (px.min(), px.max(), py.min(), py.max())
-        self.vector_plane = VectorPlane(vectors, self.box_vectors[2],
-                                    bc, bounds, self.image,
-                                    self.source._x_vec, self.source._y_vec)
-        self.vp_bounds = bounds
-        self.vectors = vectors
-
-data_object_registry["volume_rendering"] = VolumeRendering



https://bitbucket.org/yt_analysis/yt/changeset/29c6fdc4d8d3/
changeset:   29c6fdc4d8d3
branch:      yt
user:        chummels
date:        2011-10-18 21:01:06
summary:     Converted references to "raven" (as in "raven_colormaps") to "yt" (as in "yt_colormaps"), to avoid references to the old character-based module names in code and docs.
affected #:  4 files

diff -r e883666c4d8e5e71558ca92a099ba8f9ab575834 -r 29c6fdc4d8d30acc3d4c04046c8f222c99de3384 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -34,15 +34,15 @@
     except ValueError:
         return False
 
-raven_colormaps = {}
+yt_colormaps = {}
 
 def add_cmap(name, cdict):
-    raven_colormaps[name] = \
+    yt_colormaps[name] = \
         cc.LinearSegmentedColormap(name,cdict,256)
     mcm.datad[name] = cdict
     mcm.__dict__[name] = cdict
     try: # API compatibility
-        mcm.register_cmap(name, raven_colormaps[name])
+        mcm.register_cmap(name, yt_colormaps[name])
     except AttributeError:
         pass
     


diff -r e883666c4d8e5e71558ca92a099ba8f9ab575834 -r 29c6fdc4d8d30acc3d4c04046c8f222c99de3384 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -1064,7 +1064,7 @@
             the y-axis.  All subsequent fields will be binned and their
             profiles added to the underlying `BinnedProfile2D`.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted
@@ -1188,7 +1188,7 @@
             The center to be used for things like radius and radial velocity.
             Defaults to the center of the plot collection.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted


diff -r e883666c4d8e5e71558ca92a099ba8f9ab575834 -r 29c6fdc4d8d30acc3d4c04046c8f222c99de3384 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -35,7 +35,7 @@
     x_dict, \
     y_dict, \
     axis_names
-from .color_maps import raven_colormaps
+from .color_maps import yt_colormaps
 
 class CallbackRegistryHandler(object):
     def __init__(self, plot):
@@ -226,8 +226,8 @@
         Change the colormap of this plot to *cmap*.
         """
         if isinstance(cmap, types.StringTypes):
-            if str(cmap) in raven_colormaps:
-                cmap = raven_colormaps[str(cmap)]
+            if str(cmap) in yt_colormaps:
+                cmap = yt_colormaps[str(cmap)]
             elif hasattr(matplotlib.cm, cmap):
                 cmap = getattr(matplotlib.cm, cmap)
         self.cmap = cmap


diff -r e883666c4d8e5e71558ca92a099ba8f9ab575834 -r 29c6fdc4d8d30acc3d4c04046c8f222c99de3384 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -198,7 +198,7 @@
             the y-axis.  All subsequent fields will be binned and their
             profiles added to the underlying `BinnedProfile2D`.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted
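
A minimal usage sketch of the renamed API; the colormap name and cdict
values below are illustrative, not from yt:

    from yt.visualization.color_maps import add_cmap, yt_colormaps

    # matplotlib cdict format: (x, y_below, y_above) anchors per channel.
    # This hypothetical map runs from black at 0.0 to red at 1.0.
    cdict = {'red':   ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
             'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
             'blue':  ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}
    add_cmap("black_to_red", cdict)
    assert "black_to_red" in yt_colormaps   # registered under the new dict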



https://bitbucket.org/yt_analysis/yt/changeset/2317668ec945/
changeset:   2317668ec945
branch:      yt
user:        chummels
date:        2011-10-19 01:50:48
summary:     Removing references to lagos in source comments.
affected #:  2 files

diff -r 29c6fdc4d8d30acc3d4c04046c8f222c99de3384 -r 2317668ec945117352812ea1dba7f1a44e5b9988 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -723,7 +723,7 @@
                  font_size=8, print_halo_size=False,
                  print_halo_mass=False, width=None):
         """
-        Accepts a :class:`yt.lagos.HopList` *hop_output* and plots up to
+        Accepts a :class:`yt.HopList` *hop_output* and plots up to
         *max_number* (None for unlimited) halos as circles.
         """
         self.hop_output = hop_output


diff -r 29c6fdc4d8d30acc3d4c04046c8f222c99de3384 -r 2317668ec945117352812ea1dba7f1a44e5b9988 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -38,7 +38,7 @@
 
     Parameters
     ----------
-    pf : `~yt.lagos.StaticOutput`
+    pf : `~yt.data_objects.StaticOutput`
         This is the parameter file to streamline
     pos : array_like
         An array of initial starting positions of the streamlines.



https://bitbucket.org/yt_analysis/yt/changeset/1c6cb1262f44/
changeset:   1c6cb1262f44
branch:      yt
user:        chummels
date:        2011-11-18 17:59:20
summary:     Added no_ghost option to off_axis_projection function.
affected #:  1 file

diff -r 68227951f9b3fd2ed391ef63c44fbb49c06760a0 -r 1c6cb1262f440d2dfa867220088e8480072e7621 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -794,7 +794,7 @@
         return (left_camera, right_camera)
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, volume = None):
+                        field, weight = None, volume = None, no_ghost = False):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -826,6 +826,14 @@
     volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
         The volume to ray cast through.  Can be specified for finer-grained
         control, but otherwise will be automatically generated.
+    no_ghost : bool, optional
+        Optimization option.  If True, homogenized bricks will
+        extrapolate out from the grid instead of interpolating from
+        ghost zones, which would otherwise have to be computed first.
+        This can lead to large speed improvements, but at a loss of
+        accuracy/smoothness in the resulting image.  The effects are
+        less notable when the transfer function is smooth and
+        broad.  Default: False
 
     Returns
     -------
@@ -853,7 +861,7 @@
     cam = pf.h.camera(center, normal_vector, width, resolution, tf,
                       fields = fields,
                       log_fields = [False] * len(fields),
-                      volume = volume)
+                      volume = volume, no_ghost = no_ghost)
     vals = cam.snapshot()
     image = vals[:,:,0]
     if weight is None:
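
A hedged usage sketch of the new keyword; the dataset name, field, and
view parameters here are hypothetical:

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import off_axis_projection

    pf = load("galaxy0030/galaxy0030")   # hypothetical parameter file
    c = [0.5, 0.5, 0.5]                  # projection center, code units
    L = [0.1, 0.3, 0.9]                  # normal vector of the image plane
    # no_ghost=True extrapolates from the grid instead of interpolating
    # from ghost zones: faster, slightly less smooth.
    image = off_axis_projection(pf, c, L, 1.0, 512, "Density",
                                weight=None, no_ghost=True)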



https://bitbucket.org/yt_analysis/yt/changeset/2d06f5424c8c/
changeset:   2d06f5424c8c
branch:      yt
user:        chummels
date:        2011-11-22 21:39:38
summary:     Merging.
affected #:  5 files

diff -r 1c6cb1262f440d2dfa867220088e8480072e7621 -r 2d06f5424c8c89b4c383af7a3cf2a1b220d41ca9 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -8,6 +8,7 @@
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
+yt/utilities/spatial/ckdtree.c
 syntax: glob
 *.pyc
 .*.swp


diff -r 1c6cb1262f440d2dfa867220088e8480072e7621 -r 2d06f5424c8c89b4c383af7a3cf2a1b220d41ca9 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -35,6 +35,7 @@
                  function=None, clump_info=None):
         self.parent = parent
         self.data = data
+        self.quantities = data.quantities
         self.field = field
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
@@ -167,6 +168,7 @@
     # unreliable in the unpickling
     for child in children: child.parent = obj
     obj.data = data[1] # Strip out the PF
+    obj.quantities = obj.data.quantities
     if obj.parent is None: return (data[0], obj)
     return obj
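
In practice this means derived quantities stay reachable on a clump even
after unpickling; a small sketch, assuming `clump` came from an earlier
find_clumps() pass:

    # The new attribute mirrors clump.data.quantities, so quantities can
    # be computed without touching the underlying data object directly.
    total = clump.quantities["TotalMass"]()
    print "Clump mass:", total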
 


diff -r 1c6cb1262f440d2dfa867220088e8480072e7621 -r 2d06f5424c8c89b4c383af7a3cf2a1b220d41ca9 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,24 +151,11 @@
     """
     baryon_mass = data["CellMassMsun"].sum()
     particle_mass = data["ParticleMassMsun"].sum()
-    return baryon_mass, particle_mass
-def _combTotalMass(data, baryon_mass, particle_mass):
-    return baryon_mass.sum() + particle_mass.sum()
+    return [baryon_mass + particle_mass]
+def _combTotalMass(data, total_mass):
+    return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
-             combine_function=_combTotalMass, n_ret = 2)
-
-def _MatterMass(data):
-    """
-    This function takes no arguments and returns the array sum of cell masses
-    and particle masses.
-    """
-    cellvol = data["CellVolume"]
-    matter_rho = data["Matter_Density"]
-    return cellvol, matter_rho 
-def _combMatterMass(data, cellvol, matter_rho):
-    return cellvol*matter_rho
-add_quantity("MatterMass", function=_MatterMass,
-	     combine_function=_combMatterMass, n_ret=2)
+             combine_function=_combTotalMass, n_ret=1)
 
 def _CenterOfMass(data, use_cells=True, use_particles=False):
     """
@@ -358,7 +345,7 @@
     Add the mass contribution of particles if include_particles = True
     """
     if (include_particles):
-	mass_to_use = data.quantities["MatterMass"]()[0] 
+	mass_to_use = data["TotalMass"]
     else:
 	mass_to_use = data["CellMass"]
     kinetic = 0.5 * (mass_to_use * (
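
With n_ret dropping to 1, each processor now hands back a single partial
sum instead of separate baryon and particle arrays.  A minimal sketch of
the call; the sphere center and radius are illustrative:

    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)   # hypothetical data source
    # One number back: the summed baryon plus particle mass, in Msun.
    total_mass = sp.quantities["TotalMass"]()
    print "Total mass:", total_mass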


diff -r 1c6cb1262f440d2dfa867220088e8480072e7621 -r 2d06f5424c8c89b4c383af7a3cf2a1b220d41ca9 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -30,8 +30,6 @@
 import inspect
 import copy
 
-from math import pi
-
 from yt.funcs import *
 
 from yt.utilities.amr_utils import CICDeposit_3, obtain_rvec
@@ -266,7 +264,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -295,6 +293,7 @@
 
 def _TotalMass(field,data):
     return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
           convert_function=_convertCellMassMsun)
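
The same _TotalMass function now backs two fields, differing only in
units; a short sketch, with `pf` being a hypothetical parameter file:

    dd = pf.h.all_data()
    grams = dd["TotalMass"]       # (gas + dark matter) mass per cell, in g
    msun = dd["TotalMassMsun"]    # the same quantity converted to Msun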


diff -r 1c6cb1262f440d2dfa867220088e8480072e7621 -r 2d06f5424c8c89b4c383af7a3cf2a1b220d41ca9 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -124,14 +124,14 @@
     """
     Returning resident size in megabytes
     """
+    pid = os.getpid()
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return 0
-    pid = os.getpid()
+        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return 0.0
+        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs
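
The net effect is a `ps`-based fallback (RSS in kilobytes, divided down
to megabytes) on platforms lacking the resource module or /proc.  A
sketch of the fallback in isolation; the helper name below is
hypothetical, not yt's:

    import os

    def _rss_megabytes_via_ps(pid):
        # `ps -o rss=` prints the resident set size in kilobytes.
        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024

    print "Resident size (MB):", _rss_megabytes_via_ps(os.getpid())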

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


