[Yt-svn] commit/yt: 158 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu Oct 20 06:27:15 PDT 2011


158 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/6b1a9674d900/
changeset:   6b1a9674d900
branch:      yt
user:        MatthewTurk
date:        2011-10-17 17:35:16
summary:     Tests now run in parallel.  This does not, however, mean we should just test
gigantic datasets: there's still a lot of "all_data()" and whatnot.
affected #:  2 files (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 10:06:05 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 11:35:16 2011 -0400
@@ -73,6 +73,9 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
+    parser.add_option("", "--parallel", dest="parallel",
+                      default=False,
+                      help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         print "\n    ".join(sorted(itertools.chain(*mapping.values())))


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 10:06:05 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 11:35:16 2011 -0400
@@ -28,6 +28,7 @@
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
+from yt.funcs import *
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
             self._path = os.path.join(path, "results")
         else:
             self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): os.mkdir(self._path)
+        if not os.path.isdir(self._path): 
+            only_on_root(os.mkdir, self._path)
         if os.path.isfile(self._path): raise RuntimeError
 
     def _fn(self, tn):
         return os.path.join(self._path, tn)
 
+    @rootonly
     def __setitem__(self, test_name, result):
         # We have to close our shelf manually,
         # as the destructor does not necessarily do this.

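The changes above route the results-directory creation through only_on_root and guard __setitem__ with @rootonly, so that when the answer tests run under MPI only the root process writes to disk. As an illustration of what such guards do (a minimal sketch assuming mpi4py for the rank check; the _sketch_* names below are hypothetical and are not yt's implementations in yt.funcs):

    import functools

    def _get_rank():
        # Fall back to serial behaviour (rank 0) when mpi4py is unavailable.
        try:
            from mpi4py import MPI
            return MPI.COMM_WORLD.Get_rank()
        except ImportError:
            return 0

    def _sketch_only_on_root(func, *args, **kwargs):
        # Call func only on the root process, e.g. _sketch_only_on_root(os.mkdir, path).
        if _get_rank() == 0:
            return func(*args, **kwargs)

    def _sketch_rootonly(func):
        # Decorator form of the same guard, as used on __setitem__ above.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if _get_rank() == 0:
                return func(*args, **kwargs)
        return wrapper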

http://bitbucket.org/yt_analysis/yt/changeset/ff2781dd4765/
changeset:   ff2781dd4765
branch:      yt
user:        MatthewTurk
date:        2011-10-17 18:17:04
summary:     Adding a bunch of derived quantity tests
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 11:35:16 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 12:17:04 2011 -0400
@@ -54,3 +54,25 @@
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
 
+class YTDerivedQuantityTest(YTStaticOutputTest):
+    def setup(self):
+        YTStaticOutputTest.setup(self)
+        known_objects[self.object_name](self)
+
+    def compare(self, old_results):
+        if self.result != old_result: raise FieldHashesDontMatch
+
+    def run(self):
+        # This only works if it takes no arguments
+        self.result = self.data_object.quantities[self.dq_name]()
+
+dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
+            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
+
+# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
+# MinLocation
+
+for object_name in known_objects:
+    for dq in dq_names:
+        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
+                    dq_name = dq, object_name = object_name)

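These tests lean on create_test from yt.utilities.answer_testing.output_tests to stamp out one registered test per (data object, derived quantity) pair; a later comment in hydro_tests.py describes it as "a relatively simple function that takes the base class, a name, and any parameters that the test requires". A minimal sketch of that pattern (illustrative only; sketch_create_test and ExampleTest are hypothetical stand-ins, not yt's code):

    # Parameterized test generation: build a subclass per parameter combination
    # and register it under a descriptive name.
    test_registry = {}

    def sketch_create_test(base, test_name, **kwargs):
        name = "%s_%s" % (base.__name__, test_name)
        cls = type(name, (base,), dict(kwargs, name=name))
        test_registry[name] = cls
        return cls

    class ExampleTest(object):
        dq_name = None
        object_name = None

    for object_name in ["centered_sphere", "all_data"]:
        for dq in ["TotalMass", "CenterOfMass"]:
            sketch_create_test(ExampleTest, "%s_%s" % (object_name, dq),
                               dq_name=dq, object_name=object_name)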

http://bitbucket.org/yt_analysis/yt/changeset/ae850236cb4f/
changeset:   ae850236cb4f
branch:      yt
user:        brittonsmith
date:        2011-09-26 21:22:26
summary:     Fixed proton charge constant.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/physical_constants.py	Sun Sep 25 13:26:29 2011 -0400
+++ b/yt/utilities/physical_constants.py	Mon Sep 26 15:22:26 2011 -0400
@@ -13,7 +13,7 @@
 cross_section_thompson_cgs = 6.65e-25 # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-28 # emu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1

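For reference, 4.803e-10 is the proton charge expressed in Gaussian (esu/statcoulomb) units: one Coulomb is roughly 2.998e9 esu, so the quoted 1.602e-19 C converts as shown below (a quick check using rounded constants):

    # Sanity check of the corrected constant, using rounded values.
    charge_proton_si = 1.602e-19       # Coulombs
    esu_per_coulomb = 2.998e9          # statcoulombs (esu) per Coulomb
    charge_proton_cgs = charge_proton_si * esu_per_coulomb
    print("%.3e" % charge_proton_cgs)  # -> 4.803e-10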

http://bitbucket.org/yt_analysis/yt/changeset/ad467ed88be8/
changeset:   ad467ed88be8
branch:      yt
user:        brittonsmith
date:        2011-10-17 18:06:45
summary:     Added off-axis projection test.
affected #:  2 files (-1 bytes)

--- a/tests/projections.py	Mon Oct 17 10:06:05 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 12:06:45 2011 -0400
@@ -1,7 +1,7 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestGasDistribution
 from fields_to_test import field_list
 
 for axis in range(3):
@@ -11,6 +11,15 @@
         create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
+for axis in range(3):
+    for field in field_list:
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
+                    (axis, field),
+                    field = field, axis = axis)
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
+                    (axis, field),
+                    field = field, axis = axis, weight_field = "Density")
+
 for field in field_list:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 10:06:05 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:06:45 2011 -0400
@@ -37,7 +37,8 @@
     def run(self):
         # First we get our flattened projection -- this is the
         # Density, px, py, pdx, and pdy
-        proj = self.pf.h.proj(self.axis, self.field)
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
         # Now let's stick it in a buffer
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
@@ -60,10 +61,39 @@
         pylab.clf()
         pylab.imshow(self.result[1][self.field],
             interpolation='nearest', origin='lower')
-        fn = "%s_%s_projection.png" % (self.pf, self.field)
+        fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+                                          self.weight_field)
         pylab.savefig(fn)
         return [fn]
 
+class TestOffAxisProjection(YTStaticOutputTest):
+
+    field = None
+    weight_field = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        proj = off_axis_projection(self.pf, 
+                                   (0.5 * (self.pf.domain_left_edge + 
+                                           self.pf.domain_right_edge)),
+                                   [1., 1., 1.], 1., 400,
+                                   self.field, weight=self.weight_field)
+
+        # values.
+        self.result = proj
+
+    def compare(self, old_result):
+        proj  = self.result
+        oproj = old_result
+
+        self.compare_array_delta(proj, oproj, 1e-7)
+
+    def plot(self):
+        fn = "%s_%s_%s_off-axis_projection.png" % \
+            (self.pf, self.field, self.weight_field)
+        write_image(self.result, fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.

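TestOffAxisProjection drives off_axis_projection with the domain center, a [1, 1, 1] view direction, a width of 1, and a 400-pixel buffer. A hedged usage sketch of the same call outside the test harness, assuming a yt 2.x install where off_axis_projection and write_image are importable via yt.mods (the dataset path is a stand-in):

    from yt.mods import *

    pf = load("DD0010/moving7_0010")   # hypothetical dataset path
    center = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    normal = [1., 1., 1.]              # off-axis view direction
    width, resolution = 1., 400

    # Unweighted line-of-sight integration of Density along `normal`.
    image = off_axis_projection(pf, center, normal, width, resolution,
                                "Density", weight=None)
    write_image(image, "off_axis_density.png")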

http://bitbucket.org/yt_analysis/yt/changeset/bdee2d76783d/
changeset:   bdee2d76783d
branch:      yt
user:        brittonsmith
date:        2011-10-17 18:12:19
summary:     Merged.
affected #:  2 files (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 12:06:45 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 12:12:19 2011 -0400
@@ -73,6 +73,9 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
+    parser.add_option("", "--parallel", dest="parallel",
+                      default=False,
+                      help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         print "\n    ".join(sorted(itertools.chain(*mapping.values())))


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 12:06:45 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 12:12:19 2011 -0400
@@ -28,6 +28,7 @@
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
+from yt.funcs import *
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
             self._path = os.path.join(path, "results")
         else:
             self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): os.mkdir(self._path)
+        if not os.path.isdir(self._path): 
+            only_on_root(os.mkdir, self._path)
         if os.path.isfile(self._path): raise RuntimeError
 
     def _fn(self, tn):
         return os.path.join(self._path, tn)
 
+    @rootonly
     def __setitem__(self, test_name, result):
         # We have to close our shelf manually,
         # as the destructor does not necessarily do this.


http://bitbucket.org/yt_analysis/yt/changeset/6eec1e506654/
changeset:   6eec1e506654
branch:      yt
user:        samskillman
date:        2011-10-17 18:16:20
summary:     Adding Volume Rendering test.
affected #:  1 file (-1 bytes)

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/volume_rendering.py	Mon Oct 17 12:16:20 2011 -0400
@@ -0,0 +1,40 @@
+from yt.mods import *
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class VolumeRenderingInconsistent(RegressionTestException):
+    pass
+
+class VolumeRenderingConsistency(YTStaticOutputTest):
+    name = "volume_rendering_consistency"
+    def run(self):
+        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
+        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        N = 512
+        n_contours=5
+        cmap = 'algae'
+        field = 'Density'
+        mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
+        mi, ma = na.log10(mi), na.log10(ma)
+        contour_width=(ma-mi)/100.
+        L = na.array([1.]*3)
+        tf = ColorTransferFunction((mi-2, ma+2))
+        tf.add_layers(n_contours,w=contour_width,
+                      col_bounds = (mi*1.001,ma*0.999), 
+                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
+        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        image = cam.snapshot()
+        # image = cam.snapshot('test_rendering_%s.png'%field)
+        self.result = image
+
+    def compare(self, old_result):
+        if not na.all(self.result==old_result):
+            raise VolumeRenderingInconsistent()
+        else:
+            pass
+
+        # self.compare_array_delta(old_result, self.result, 0.0)
+


http://bitbucket.org/yt_analysis/yt/changeset/b866811d15a3/
changeset:   b866811d15a3
branch:      yt
user:        samskillman
date:        2011-10-17 18:18:16
summary:     Merging.
affected #:  4 files (-1 bytes)

--- a/tests/projections.py	Mon Oct 17 12:16:20 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 12:18:16 2011 -0400
@@ -1,7 +1,7 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestGasDistribution
 from fields_to_test import field_list
 
 for axis in range(3):
@@ -11,6 +11,15 @@
         create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
+for axis in range(3):
+    for field in field_list:
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
+                    (axis, field),
+                    field = field, axis = axis)
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
+                    (axis, field),
+                    field = field, axis = axis, weight_field = "Density")
+
 for field in field_list:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)


--- a/tests/runall.py	Mon Oct 17 12:16:20 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 12:18:16 2011 -0400
@@ -73,6 +73,9 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
+    parser.add_option("", "--parallel", dest="parallel",
+                      default=False,
+                      help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         print "\n    ".join(sorted(itertools.chain(*mapping.values())))


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:16:20 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:18:16 2011 -0400
@@ -37,7 +37,8 @@
     def run(self):
         # First we get our flattened projection -- this is the
         # Density, px, py, pdx, and pdy
-        proj = self.pf.h.proj(self.axis, self.field)
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
         # Now let's stick it in a buffer
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
@@ -60,10 +61,39 @@
         pylab.clf()
         pylab.imshow(self.result[1][self.field],
             interpolation='nearest', origin='lower')
-        fn = "%s_%s_projection.png" % (self.pf, self.field)
+        fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+                                          self.weight_field)
         pylab.savefig(fn)
         return [fn]
 
+class TestOffAxisProjection(YTStaticOutputTest):
+
+    field = None
+    weight_field = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        proj = off_axis_projection(self.pf, 
+                                   (0.5 * (self.pf.domain_left_edge + 
+                                           self.pf.domain_right_edge)),
+                                   [1., 1., 1.], 1., 400,
+                                   self.field, weight=self.weight_field)
+
+        # values.
+        self.result = proj
+
+    def compare(self, old_result):
+        proj  = self.result
+        oproj = old_result
+
+        self.compare_array_delta(proj, oproj, 1e-7)
+
+    def plot(self):
+        fn = "%s_%s_%s_off-axis_projection.png" % \
+            (self.pf, self.field, self.weight_field)
+        write_image(self.result, fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 12:16:20 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 12:18:16 2011 -0400
@@ -28,6 +28,7 @@
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
+from yt.funcs import *
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
             self._path = os.path.join(path, "results")
         else:
             self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): os.mkdir(self._path)
+        if not os.path.isdir(self._path): 
+            only_on_root(os.mkdir, self._path)
         if os.path.isfile(self._path): raise RuntimeError
 
     def _fn(self, tn):
         return os.path.join(self._path, tn)
 
+    @rootonly
     def __setitem__(self, test_name, result):
         # We have to close our shelf manually,
         # as the destructor does not necessarily do this.


http://bitbucket.org/yt_analysis/yt/changeset/8730f06b1e58/
changeset:   8730f06b1e58
branch:      yt
user:        MatthewTurk
date:        2011-10-17 18:25:23
summary:     Adding more derived quantity tests
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 12:17:04 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 12:25:23 2011 -0400
@@ -76,3 +76,29 @@
     for dq in dq_names:
         create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
                     dq_name = dq, object_name = object_name)
+
+class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities[self.dq_name](
+            self.field_name)
+
+for object_name in known_objects:
+    for field in field_list:
+        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
+            create_test(YTDerivedQuantityTestField,
+                        "%s_%s" % (object_name, field),
+                        field_name = field, dq_name = dq,
+                        object_name = object_name)
+
+class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities["WeightedAverageQuantity"](
+            self.field_name, weight="CellMassMsun")
+
+for object_name in known_objects:
+    for field in field_list:
+        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
+                    "%s_%s" % (object_name, field),
+                    field_name = field, 
+                    object_name = object_name)
+


http://bitbucket.org/yt_analysis/yt/changeset/9479051a2c1b/
changeset:   9479051a2c1b
branch:      yt
user:        jsoishi
date:        2011-10-17 18:18:35
summary:     added 2D gas distribution test.
affected #:  2 files (-1 bytes)

--- a/tests/projections.py	Sat Oct 15 11:56:37 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 09:18:35 2011 -0700
@@ -1,7 +1,7 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
+    TestProjection, TestGasDistribution, Test2DGasDistribution
 from fields_to_test import field_list
 
 for axis in range(3):
@@ -14,3 +14,5 @@
 for field in field_list:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+    create_test(Test2DGasDistribution, "2d_profile_density_x-vel_test_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, weight = "CellMassMsun")


--- a/yt/utilities/answer_testing/hydro_tests.py	Sat Oct 15 11:56:37 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 09:18:35 2011 -0700
@@ -102,3 +102,21 @@
 for field in ["Temperature", "x-velocity"]:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+
+class Test2DGasDistribution(TestGasDistribution):
+    x_bins = 128
+    y_bins = 128
+    field_z = "CellMassMsun"
+    weight = None
+    def run(self):
+        # We're NOT going to use the low-level profiling API here,
+        # because we are avoiding the calculations of min/max,
+        # as those should be tested in another test.
+        pc = PlotCollection(self.pf, center=self.sim_center)
+        p = pc.add_phase_object(self.entire_simulation,
+            [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
+            weight=self.weight)
+        # The arrays are all stored in a dictionary hanging off the profile
+        # object
+        self.result = p.data._data
+

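Test2DGasDistribution hands field_x, field_y, and field_z to PlotCollection.add_phase_object and stores the binned dictionary it produces. Conceptually this is a weighted two-dimensional profile of field_z over (field_x, field_y) bins; a standalone sketch of that binning with plain NumPy (illustrative, not yt's profile machinery, with made-up cell data):

    import numpy as np

    # Fake cell data standing in for a data object's field arrays.
    n_cells = 10000
    density = 10 ** np.random.uniform(-28, -24, n_cells)    # field_x
    x_velocity = np.random.normal(0.0, 1.0e6, n_cells)      # field_y
    temperature = 10 ** np.random.uniform(3, 7, n_cells)    # field_z
    cell_mass = np.random.uniform(1.0e30, 2.0e33, n_cells)  # weight

    x_edges = np.logspace(-28, -24, 129)                    # 128 x_bins
    y_edges = np.linspace(x_velocity.min(), x_velocity.max(), 129)

    # Mass-weighted average of field_z in each (field_x, field_y) bin.
    num, _, _ = np.histogram2d(density, x_velocity, bins=[x_edges, y_edges],
                               weights=temperature * cell_mass)
    den, _, _ = np.histogram2d(density, x_velocity, bins=[x_edges, y_edges],
                               weights=cell_mass)
    profile = np.where(den > 0, num / np.maximum(den, 1e-99), 0.0)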

http://bitbucket.org/yt_analysis/yt/changeset/f2effcf33f75/
changeset:   f2effcf33f75
branch:      yt
user:        jsoishi
date:        2011-10-17 18:20:40
summary:     merged.
affected #:  5 files (-1 bytes)

--- a/tests/projections.py	Mon Oct 17 09:18:35 2011 -0700
+++ b/tests/projections.py	Mon Oct 17 09:20:40 2011 -0700
@@ -1,7 +1,8 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution, Test2DGasDistribution
+    TestProjection, TestOffAxisProjection, TestGasDistribution, Test2DGasDistribution
+
 from fields_to_test import field_list
 
 for axis in range(3):
@@ -11,6 +12,15 @@
         create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
+for axis in range(3):
+    for field in field_list:
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
+                    (axis, field),
+                    field = field, axis = axis)
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
+                    (axis, field),
+                    field = field, axis = axis, weight_field = "Density")
+
 for field in field_list:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)


--- a/tests/runall.py	Mon Oct 17 09:18:35 2011 -0700
+++ b/tests/runall.py	Mon Oct 17 09:20:40 2011 -0700
@@ -73,6 +73,9 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
+    parser.add_option("", "--parallel", dest="parallel",
+                      default=False,
+                      help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         print "\n    ".join(sorted(itertools.chain(*mapping.values())))
@@ -103,5 +106,11 @@
         rtr.run_test(test_name)
     if watcher is not None:
         rtr.watcher.report()
+    failures = 0
+    passes = 1
     for test_name, result in sorted(rtr.passed_tests.items()):
         print "TEST %s: %s" % (test_name, result)
+        if result: passes += 1
+        else: failures += 1
+    print "Number of passes  : %s" % passes
+    print "Number of failures: %s" % failures


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/volume_rendering.py	Mon Oct 17 09:20:40 2011 -0700
@@ -0,0 +1,40 @@
+from yt.mods import *
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class VolumeRenderingInconsistent(RegressionTestException):
+    pass
+
+class VolumeRenderingConsistency(YTStaticOutputTest):
+    name = "volume_rendering_consistency"
+    def run(self):
+        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
+        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        N = 512
+        n_contours=5
+        cmap = 'algae'
+        field = 'Density'
+        mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
+        mi, ma = na.log10(mi), na.log10(ma)
+        contour_width=(ma-mi)/100.
+        L = na.array([1.]*3)
+        tf = ColorTransferFunction((mi-2, ma+2))
+        tf.add_layers(n_contours,w=contour_width,
+                      col_bounds = (mi*1.001,ma*0.999), 
+                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
+        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        image = cam.snapshot()
+        # image = cam.snapshot('test_rendering_%s.png'%field)
+        self.result = image
+
+    def compare(self, old_result):
+        if not na.all(self.result==old_result):
+            raise VolumeRenderingInconsistent()
+        else:
+            pass
+
+        # self.compare_array_delta(old_result, self.result, 0.0)
+


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 09:18:35 2011 -0700
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 09:20:40 2011 -0700
@@ -37,7 +37,8 @@
     def run(self):
         # First we get our flattened projection -- this is the
         # Density, px, py, pdx, and pdy
-        proj = self.pf.h.proj(self.axis, self.field)
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
         # Now let's stick it in a buffer
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
@@ -60,10 +61,39 @@
         pylab.clf()
         pylab.imshow(self.result[1][self.field],
             interpolation='nearest', origin='lower')
-        fn = "%s_%s_projection.png" % (self.pf, self.field)
+        fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+                                          self.weight_field)
         pylab.savefig(fn)
         return [fn]
 
+class TestOffAxisProjection(YTStaticOutputTest):
+
+    field = None
+    weight_field = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        proj = off_axis_projection(self.pf, 
+                                   (0.5 * (self.pf.domain_left_edge + 
+                                           self.pf.domain_right_edge)),
+                                   [1., 1., 1.], 1., 400,
+                                   self.field, weight=self.weight_field)
+
+        # values.
+        self.result = proj
+
+    def compare(self, old_result):
+        proj  = self.result
+        oproj = old_result
+
+        self.compare_array_delta(proj, oproj, 1e-7)
+
+    def plot(self):
+        fn = "%s_%s_%s_off-axis_projection.png" % \
+            (self.pf, self.field, self.weight_field)
+        write_image(self.result, fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 09:18:35 2011 -0700
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 09:20:40 2011 -0700
@@ -28,6 +28,7 @@
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
+from yt.funcs import *
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
             self._path = os.path.join(path, "results")
         else:
             self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): os.mkdir(self._path)
+        if not os.path.isdir(self._path): 
+            only_on_root(os.mkdir, self._path)
         if os.path.isfile(self._path): raise RuntimeError
 
     def _fn(self, tn):
         return os.path.join(self._path, tn)
 
+    @rootonly
     def __setitem__(self, test_name, result):
         # We have to close our shelf manually,
         # as the destructor does not necessarily do this.


http://bitbucket.org/yt_analysis/yt/changeset/c11747b2c5d1/
changeset:   c11747b2c5d1
branch:      yt
user:        MatthewTurk
date:        2011-10-17 18:25:46
summary:     Merging down
affected #:  1 file (-1 bytes)

--- a/yt/utilities/physical_constants.py	Mon Oct 17 12:25:23 2011 -0400
+++ b/yt/utilities/physical_constants.py	Mon Oct 17 12:25:46 2011 -0400
@@ -13,7 +13,7 @@
 cross_section_thompson_cgs = 6.65e-25 # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-28 # emu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1


http://bitbucket.org/yt_analysis/yt/changeset/37aee627ce70/
changeset:   37aee627ce70
branch:      yt
user:        MatthewTurk
date:        2011-10-17 18:25:56
summary:     Merging down
affected #:  3 files (-1 bytes)

--- a/tests/projections.py	Mon Oct 17 12:25:46 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 12:25:56 2011 -0400
@@ -1,7 +1,8 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestGasDistribution, Test2DGasDistribution
+
 from fields_to_test import field_list
 
 for axis in range(3):
@@ -11,6 +12,17 @@
         create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
+for axis in range(3):
+    for field in field_list:
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
+                    (axis, field),
+                    field = field, axis = axis)
+        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
+                    (axis, field),
+                    field = field, axis = axis, weight_field = "Density")
+
 for field in field_list:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+    create_test(Test2DGasDistribution, "2d_profile_density_x-vel_test_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, weight = "CellMassMsun")


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/volume_rendering.py	Mon Oct 17 12:25:56 2011 -0400
@@ -0,0 +1,40 @@
+from yt.mods import *
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class VolumeRenderingInconsistent(RegressionTestException):
+    pass
+
+class VolumeRenderingConsistency(YTStaticOutputTest):
+    name = "volume_rendering_consistency"
+    def run(self):
+        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
+        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        N = 512
+        n_contours=5
+        cmap = 'algae'
+        field = 'Density'
+        mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
+        mi, ma = na.log10(mi), na.log10(ma)
+        contour_width=(ma-mi)/100.
+        L = na.array([1.]*3)
+        tf = ColorTransferFunction((mi-2, ma+2))
+        tf.add_layers(n_contours,w=contour_width,
+                      col_bounds = (mi*1.001,ma*0.999), 
+                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
+        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        image = cam.snapshot()
+        # image = cam.snapshot('test_rendering_%s.png'%field)
+        self.result = image
+
+    def compare(self, old_result):
+        if not na.all(self.result==old_result):
+            raise VolumeRenderingInconsistent()
+        else:
+            pass
+
+        # self.compare_array_delta(old_result, self.result, 0.0)
+


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:25:46 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:25:56 2011 -0400
@@ -37,7 +37,8 @@
     def run(self):
         # First we get our flattened projection -- this is the
         # Density, px, py, pdx, and pdy
-        proj = self.pf.h.proj(self.axis, self.field)
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
         # Now let's stick it in a buffer
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
@@ -60,10 +61,39 @@
         pylab.clf()
         pylab.imshow(self.result[1][self.field],
             interpolation='nearest', origin='lower')
-        fn = "%s_%s_projection.png" % (self.pf, self.field)
+        fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+                                          self.weight_field)
         pylab.savefig(fn)
         return [fn]
 
+class TestOffAxisProjection(YTStaticOutputTest):
+
+    field = None
+    weight_field = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        proj = off_axis_projection(self.pf, 
+                                   (0.5 * (self.pf.domain_left_edge + 
+                                           self.pf.domain_right_edge)),
+                                   [1., 1., 1.], 1., 400,
+                                   self.field, weight=self.weight_field)
+
+        # values.
+        self.result = proj
+
+    def compare(self, old_result):
+        proj  = self.result
+        oproj = old_result
+
+        self.compare_array_delta(proj, oproj, 1e-7)
+
+    def plot(self):
+        fn = "%s_%s_%s_off-axis_projection.png" % \
+            (self.pf, self.field, self.weight_field)
+        write_image(self.result, fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.
@@ -102,3 +132,21 @@
 for field in ["Temperature", "x-velocity"]:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+
+class Test2DGasDistribution(TestGasDistribution):
+    x_bins = 128
+    y_bins = 128
+    field_z = "CellMassMsun"
+    weight = None
+    def run(self):
+        # We're NOT going to use the low-level profiling API here,
+        # because we are avoiding the calculations of min/max,
+        # as those should be tested in another test.
+        pc = PlotCollection(self.pf, center=self.sim_center)
+        p = pc.add_phase_object(self.entire_simulation,
+            [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
+            weight=self.weight)
+        # The arrays are all stored in a dictionary hanging off the profile
+        # object
+        self.result = p.data._data
+


http://bitbucket.org/yt_analysis/yt/changeset/84c263fc009b/
changeset:   84c263fc009b
branch:      yt
user:        brittonsmith
date:        2011-10-17 18:34:15
summary:     Added slice tests.
affected #:  2 files (-1 bytes)

--- a/tests/projections.py	Mon Oct 17 12:18:16 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 12:34:15 2011 -0400
@@ -1,11 +1,16 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestSlice, TestGasDistribution
 from fields_to_test import field_list
 
 for axis in range(3):
     for field in field_list:
+        create_test(TestSlice, "projection_slice_%s_%s" % (axis, field),
+                    field = field, axis = axis)
+
+for axis in range(3):
+    for field in field_list:
         create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
                     field = field, axis = axis)
         create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:18:16 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:34:15 2011 -0400
@@ -94,6 +94,31 @@
         write_image(self.result, fn)
         return [fn]
 
+class TestSlice(YTStaticOutputTest):
+
+    field = None
+    axis = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        slice = self.ph.h.slice(self.axis, 
+                                (0.5 * (self.pf.domain_left_edge + 
+                                        self.pf.domain_right_edge))[self.axis],
+                                fields=self.field)
+        # values.
+        self.result = slice.data
+
+    def compare(self, old_result):
+        slice  = self.result
+        oslice = old_result
+
+        self.compare_data_arrays(slice, oslice)
+
+    def plot(self):
+        fn = "%s_%s_slice.png" % (self.pf, self.field)
+        write_image(self.result[self.field], fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.

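TestSlice pulls an axis-aligned slice through the domain midpoint and stores its field dictionary. A brief usage sketch of the same yt 2.x call outside the harness (the dataset path is a stand-in; the slice coordinate is the domain midpoint along the chosen axis):

    from yt.mods import *

    pf = load("DD0010/moving7_0010")   # hypothetical dataset path
    axis = 0
    coord = (0.5 * (pf.domain_left_edge + pf.domain_right_edge))[axis]
    sl = pf.h.slice(axis, coord, fields="Density")
    print(sl["Density"].shape)         # flat array of cell values in the slice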

http://bitbucket.org/yt_analysis/yt/changeset/45f96c017374/
changeset:   45f96c017374
branch:      yt
user:        brittonsmith
date:        2011-10-17 18:35:44
summary:     Merged.
affected #:  4 files (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 12:34:15 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 12:35:44 2011 -0400
@@ -54,3 +54,51 @@
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
 
+class YTDerivedQuantityTest(YTStaticOutputTest):
+    def setup(self):
+        YTStaticOutputTest.setup(self)
+        known_objects[self.object_name](self)
+
+    def compare(self, old_results):
+        if self.result != old_result: raise FieldHashesDontMatch
+
+    def run(self):
+        # This only works if it takes no arguments
+        self.result = self.data_object.quantities[self.dq_name]()
+
+dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
+            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
+
+# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
+# MinLocation
+
+for object_name in known_objects:
+    for dq in dq_names:
+        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
+                    dq_name = dq, object_name = object_name)
+
+class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities[self.dq_name](
+            self.field_name)
+
+for object_name in known_objects:
+    for field in field_list:
+        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
+            create_test(YTDerivedQuantityTestField,
+                        "%s_%s" % (object_name, field),
+                        field_name = field, dq_name = dq,
+                        object_name = object_name)
+
+class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities["WeightedAverageQuantity"](
+            self.field_name, weight="CellMassMsun")
+
+for object_name in known_objects:
+    for field in field_list:
+        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
+                    "%s_%s" % (object_name, field),
+                    field_name = field, 
+                    object_name = object_name)
+


--- a/tests/projections.py	Mon Oct 17 12:34:15 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 12:35:44 2011 -0400
@@ -1,7 +1,9 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestSlice, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestSlice, \
+    TestGasDistribution, Test2DGasDistribution
+
 from fields_to_test import field_list
 
 for axis in range(3):
@@ -28,3 +30,5 @@
 for field in field_list:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+    create_test(Test2DGasDistribution, "2d_profile_density_x-vel_test_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, weight = "CellMassMsun")


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:34:15 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:35:44 2011 -0400
@@ -157,3 +157,21 @@
 for field in ["Temperature", "x-velocity"]:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+
+class Test2DGasDistribution(TestGasDistribution):
+    x_bins = 128
+    y_bins = 128
+    field_z = "CellMassMsun"
+    weight = None
+    def run(self):
+        # We're NOT going to use the low-level profiling API here,
+        # because we are avoiding the calculations of min/max,
+        # as those should be tested in another test.
+        pc = PlotCollection(self.pf, center=self.sim_center)
+        p = pc.add_phase_object(self.entire_simulation,
+            [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
+            weight=self.weight)
+        # The arrays are all stored in a dictionary hanging off the profile
+        # object
+        self.result = p.data._data
+


--- a/yt/utilities/physical_constants.py	Mon Oct 17 12:34:15 2011 -0400
+++ b/yt/utilities/physical_constants.py	Mon Oct 17 12:35:44 2011 -0400
@@ -13,7 +13,7 @@
 cross_section_thompson_cgs = 6.65e-25 # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-28 # emu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1


http://bitbucket.org/yt_analysis/yt/changeset/4210bea66640/
changeset:   4210bea66640
branch:      yt
user:        MatthewTurk
date:        2011-10-17 18:58:54
summary:     Adding cut_regions and changing 'self' to 'tobj'
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 12:35:44 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 12:58:54 2011 -0400
@@ -16,25 +16,41 @@
     return func
 
 @register_object
-def centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
 
 @register_object
-def off_centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
 
 @register_object
-def corner_sphere(self):
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
-def all_data(self):
-    self.data_object = self.pf.h.all_data()
+def all_data(tobj):
+    tobj.data_object = tobj.pf.h.all_data()
+
+_new_known_objects = {}
+for field in field_list:
+    for object_name in known_objects:
+        def _rfunc(oname, fname):
+            def func(tobj):
+                known_objects[oname](tobj)
+                tobj.orig_data_object = tobj.data_object
+                avg_value = tobj.orig_data_object.quantities[
+                        "WeightedAverageQuantity"](fname, "Density")
+                tobj.data_object = tobj.orig_data_object.cut_region(
+                        ["grid['%s'] > %s" % (fname, avg_value)])
+            return func
+        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+                _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
 
 class YTFieldValuesTest(YTStaticOutputTest):
     def run(self):
@@ -53,7 +69,7 @@
     for field in field_list + particle_field_list:
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
-
+    
 class YTDerivedQuantityTest(YTStaticOutputTest):
     def setup(self):
         YTStaticOutputTest.setup(self)

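The new cut-region objects wrap each existing data object and keep only the cells whose Density exceeds that object's weighted average, using string criteria of the form "grid['field'] > value". A hedged usage sketch of the same pattern on a sphere (yt 2.x API as used in the diff; the dataset path is a stand-in):

    from yt.mods import *

    pf = load("DD0010/moving7_0010")   # hypothetical dataset path
    center = 0.5 * (pf.domain_left_edge + pf.domain_right_edge)
    width = (pf.domain_right_edge - pf.domain_left_edge).max()
    sphere = pf.h.sphere(center, width / 0.25)

    # Keep only cells denser than the sphere's density-weighted average.
    avg_density = sphere.quantities["WeightedAverageQuantity"]("Density", "Density")
    overdense = sphere.cut_region(["grid['Density'] > %s" % avg_density])
    print(overdense["Density"].min())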

http://bitbucket.org/yt_analysis/yt/changeset/cdc13f4f9b71/
changeset:   cdc13f4f9b71
branch:      yt
user:        brittonsmith
date:        2011-10-17 18:44:46
summary:     Fixed a typo in slice test.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:35:44 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:44:46 2011 -0400
@@ -101,7 +101,7 @@
 
     def run(self):
         # Here proj will just be the data array.
-        slice = self.ph.h.slice(self.axis, 
+        slice = self.pf.h.slice(self.axis, 
                                 (0.5 * (self.pf.domain_left_edge + 
                                         self.pf.domain_right_edge))[self.axis],
                                 fields=self.field)


http://bitbucket.org/yt_analysis/yt/changeset/9914cf03bae4/
changeset:   9914cf03bae4
branch:      yt
user:        MatthewTurk
date:        2011-10-17 18:59:02
summary:     Merge
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:58:54 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:59:02 2011 -0400
@@ -101,7 +101,7 @@
 
     def run(self):
         # Here proj will just be the data array.
-        slice = self.ph.h.slice(self.axis, 
+        slice = self.pf.h.slice(self.axis, 
                                 (0.5 * (self.pf.domain_left_edge + 
                                         self.pf.domain_right_edge))[self.axis],
                                 fields=self.field)


http://bitbucket.org/yt_analysis/yt/changeset/aa8122c841e3/
changeset:   aa8122c841e3
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:02:08
summary:     For now, drop down to only cutting regions on Density.
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 12:59:02 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:02:08 2011 -0400
@@ -37,7 +37,7 @@
     tobj.data_object = tobj.pf.h.all_data()
 
 _new_known_objects = {}
-for field in field_list:
+for field in ["Density"]:#field_list:
     for object_name in known_objects:
         def _rfunc(oname, fname):
             def func(tobj):


http://bitbucket.org/yt_analysis/yt/changeset/a1deee834465/
changeset:   a1deee834465
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:15:46
summary:     Fixing errors with cut region tests and particle fields
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 13:02:08 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:15:46 2011 -0400
@@ -67,6 +67,8 @@
 
 for object_name in known_objects:
     for field in field_list + particle_field_list:
+        if "cut_region" in object_name and field in particle_field_list:
+            continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
     
@@ -90,6 +92,11 @@
 
 for object_name in known_objects:
     for dq in dq_names:
+        # Some special exceptions
+        if "cut_region" in object_name and (
+            "SpinParameter" in dq or
+            "TotalMass" in dq):
+            continue
         create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
                     dq_name = dq, object_name = object_name)
 


http://bitbucket.org/yt_analysis/yt/changeset/3e175b22a54e/
changeset:   3e175b22a54e
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:17:02
summary:     Fixing an issue with ghost zones and cut regions
affected #:  1 file (-1 bytes)

--- a/yt/data_objects/field_info_container.py	Mon Oct 17 13:15:46 2011 -0400
+++ b/yt/data_objects/field_info_container.py	Mon Oct 17 13:17:02 2011 -0400
@@ -466,5 +466,5 @@
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
-        if data._type_name == 'grid': return True
+        if getattr(data, "_type_name", None) == 'grid': return True
         raise NeedsOriginalGrid()


http://bitbucket.org/yt_analysis/yt/changeset/4ccb2ed794cd/
changeset:   4ccb2ed794cd
branch:      yt
user:        samskillman
date:        2011-10-17 19:06:32
summary:     Adding disk to data object testing.
affected #:  2 files (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 12:25:56 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:06:32 2011 -0400
@@ -33,6 +33,14 @@
     self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
 
 @register_object
+def disk(self):
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    normal = na.array([1.]*3)
+    self.data_object = self.pf.h.disk(center, normal, radius, height)
+    
+@register_object
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 


--- a/tests/volume_rendering.py	Mon Oct 17 12:25:56 2011 -0400
+++ b/tests/volume_rendering.py	Mon Oct 17 13:06:32 2011 -0400
@@ -33,8 +33,4 @@
     def compare(self, old_result):
         if not na.all(self.result==old_result):
             raise VolumeRenderingInconsistent()
-        else:
-            pass
 
-        # self.compare_array_delta(old_result, self.result, 0.0)
-


http://bitbucket.org/yt_analysis/yt/changeset/23374115df9b/
changeset:   23374115df9b
branch:      yt
user:        samskillman
date:        2011-10-17 19:11:40
summary:     Merging.
affected #:  3 files (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 13:06:32 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:11:40 2011 -0400
@@ -16,21 +16,21 @@
     return func
 
 @register_object
-def centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
 
 @register_object
-def off_centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
 
 @register_object
-def corner_sphere(self):
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
 def disk(self):
@@ -44,6 +44,22 @@
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 
+_new_known_objects = {}
+for field in ["Density"]:#field_list:
+    for object_name in known_objects:
+        def _rfunc(oname, fname):
+            def func(tobj):
+                known_objects[oname](tobj)
+                tobj.orig_data_object = tobj.data_object
+                avg_value = tobj.orig_data_object.quantities[
+                        "WeightedAverageQuantity"](fname, "Density")
+                tobj.data_object = tobj.orig_data_object.cut_region(
+                        ["grid['%s'] > %s" % (fname, avg_value)])
+            return func
+        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+                _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
+
 class YTFieldValuesTest(YTStaticOutputTest):
     def run(self):
         vals = self.data_object[self.field].copy()
@@ -61,7 +77,7 @@
     for field in field_list + particle_field_list:
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
-
+    
 class YTDerivedQuantityTest(YTStaticOutputTest):
     def setup(self):
         YTStaticOutputTest.setup(self)


--- a/tests/projections.py	Mon Oct 17 13:06:32 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 13:11:40 2011 -0400
@@ -1,12 +1,18 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestGasDistribution, Test2DGasDistribution
+    TestProjection, TestOffAxisProjection, TestSlice, \
+    TestGasDistribution, Test2DGasDistribution
 
 from fields_to_test import field_list
 
 for axis in range(3):
     for field in field_list:
+        create_test(TestSlice, "projection_slice_%s_%s" % (axis, field),
+                    field = field, axis = axis)
+
+for axis in range(3):
+    for field in field_list:
         create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
                     field = field, axis = axis)
         create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:06:32 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:11:40 2011 -0400
@@ -94,6 +94,31 @@
         write_image(self.result, fn)
         return [fn]
 
+class TestSlice(YTStaticOutputTest):
+
+    field = None
+    axis = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        slice = self.pf.h.slice(self.axis, 
+                                (0.5 * (self.pf.domain_left_edge + 
+                                        self.pf.domain_right_edge))[self.axis],
+                                fields=self.field)
+        # values.
+        self.result = slice.data
+
+    def compare(self, old_result):
+        slice  = self.result
+        oslice = old_result
+
+        self.compare_data_arrays(slice, oslice)
+
+    def plot(self):
+        fn = "%s_%s_slice.png" % (self.pf, self.field)
+        write_image(self.result[self.field], fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.


http://bitbucket.org/yt_analysis/yt/changeset/306662d76a71/
changeset:   306662d76a71
branch:      yt
user:        brittonsmith
date:        2011-10-17 19:11:37
summary:     Added Ray tests and changed test file output names.
affected #:  2 files (-1 bytes)

--- a/tests/projections.py	Mon Oct 17 12:44:46 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 13:11:37 2011 -0400
@@ -2,33 +2,34 @@
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
     TestProjection, TestOffAxisProjection, TestSlice, \
-    TestGasDistribution, Test2DGasDistribution
+    TestRay, TestGasDistribution, Test2DGasDistribution
 
 from fields_to_test import field_list
 
+for field in field_list:
+    create_test(TestRay, "%s" % field, field = field)
+
 for axis in range(3):
     for field in field_list:
-        create_test(TestSlice, "projection_slice_%s_%s" % (axis, field),
+        create_test(TestSlice, "%s_%s" % (axis, field),
                     field = field, axis = axis)
 
 for axis in range(3):
     for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+        create_test(TestProjection, "%s_%s" % (axis, field),
                     field = field, axis = axis)
-        create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
-                    field = field, axis = axis, weight_field = "Density")
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
-                    (axis, field),
-                    field = field, axis = axis)
-        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
-                    (axis, field),
+        create_test(TestProjection, "%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
+                field = field, axis = axis)
+    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
+                field = field, axis = axis, weight_field = "Density")
+
+for field in field_list:
+    create_test(TestGasDistribution, "density_%s" % field,
                 field_x = "Density", field_y = field)
-    create_test(Test2DGasDistribution, "2d_profile_density_x-vel_test_%s" % field,
-                field_x = "Density", field_y = "x-velocity", field_z = field, weight = "CellMassMsun")
+    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, 
+                weight = "CellMassMsun")


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 12:44:46 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:11:37 2011 -0400
@@ -94,6 +94,34 @@
         write_image(self.result, fn)
         return [fn]
 
+class TestRay(YTStaticOutputTest):
+
+    field = None
+
+    def run(self):
+        na.random.random(4333)
+        start_point = na.random.random(pf.dimensionality) * \
+            (pf.domain_right_edge - pf.domain_left_edge) + \
+            pf.domain_left_edge
+        end_point   = na.random.random(pf.dimensionality) * \
+            (pf.domain_right_edge - pf.domain_left_edge) + \
+            pf.domain_left_edge
+
+        # Here proj will just be the data array.
+        ray = self.pf.h.ray(start_point, end_point, field=self.field)
+
+        # values.
+        self.result = ray[self.field]
+
+    def compare(self, old_result):
+        ray  = self.result
+        oray = old_result
+
+        self.compare_array_delta(ray, oray, 1e-7)
+
+    def plot(self):
+        return
+
 class TestSlice(YTStaticOutputTest):
 
     field = None
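
As committed here, TestRay.run() refers to a bare pf and calls na.random.random(4333), which draws 4333 samples rather than seeding the generator; both points are corrected later in this series (changesets 2579f9a2641f and 99fb33b9b270). A self-contained sketch of the intended, reproducible endpoint selection, with stand-in domain bounds:

    import numpy as np

    np.random.seed(4333)                 # seed so the ray is reproducible
    dimensionality = 3
    domain_left_edge = np.zeros(3)       # stand-ins for the pf attributes
    domain_right_edge = np.ones(3)

    start_point = np.random.random(dimensionality) * \
        (domain_right_edge - domain_left_edge) + domain_left_edge
    end_point = np.random.random(dimensionality) * \
        (domain_right_edge - domain_left_edge) + domain_left_edge

    # Both endpoints land inside the domain by construction.
    assert np.all((start_point >= domain_left_edge) & (start_point <= domain_right_edge))
    assert np.all((end_point >= domain_left_edge) & (end_point <= domain_right_edge))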


http://bitbucket.org/yt_analysis/yt/changeset/ecbef21fbfd2/
changeset:   ecbef21fbfd2
branch:      yt
user:        brittonsmith
date:        2011-10-17 19:11:56
summary:     Merged.
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 13:11:37 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:11:56 2011 -0400
@@ -16,25 +16,41 @@
     return func
 
 @register_object
-def centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
 
 @register_object
-def off_centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
 
 @register_object
-def corner_sphere(self):
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
-def all_data(self):
-    self.data_object = self.pf.h.all_data()
+def all_data(tobj):
+    tobj.data_object = tobj.pf.h.all_data()
+
+_new_known_objects = {}
+for field in ["Density"]:#field_list:
+    for object_name in known_objects:
+        def _rfunc(oname, fname):
+            def func(tobj):
+                known_objects[oname](tobj)
+                tobj.orig_data_object = tobj.data_object
+                avg_value = tobj.orig_data_object.quantities[
+                        "WeightedAverageQuantity"](fname, "Density")
+                tobj.data_object = tobj.orig_data_object.cut_region(
+                        ["grid['%s'] > %s" % (fname, avg_value)])
+            return func
+        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+                _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
 
 class YTFieldValuesTest(YTStaticOutputTest):
     def run(self):
@@ -53,7 +69,7 @@
     for field in field_list + particle_field_list:
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
-
+    
 class YTDerivedQuantityTest(YTStaticOutputTest):
     def setup(self):
         YTStaticOutputTest.setup(self)
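
A short standalone sketch of why the _rfunc factory above is needed: the extra call freezes (oname, fname) for each iteration, whereas closing over the loop variables directly would leave every generated function seeing only their final values. The object names and the cut_region construction are replaced by stand-ins:

    known_objects = {"sphere": None, "all_data": None}   # stand-in registry

    _new_known_objects = {}
    for fname in ["Density"]:
        for oname in known_objects:
            def _rfunc(oname, fname):
                def func(tobj):
                    # The real version builds a cut_region on the registered object.
                    return (oname, fname)
                return func
            _new_known_objects["%s_cut_region_%s" % (oname, fname)] = _rfunc(oname, fname)

    # Each registered function remembers its own object/field pair.
    assert _new_known_objects["sphere_cut_region_Density"](None) == ("sphere", "Density")
    assert _new_known_objects["all_data_cut_region_Density"](None) == ("all_data", "Density")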


http://bitbucket.org/yt_analysis/yt/changeset/887f468369e6/
changeset:   887f468369e6
branch:      yt
user:        brittonsmith
date:        2011-10-17 19:12:25
summary:     Merged.
affected #:  2 files (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 13:11:56 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:12:25 2011 -0400
@@ -33,8 +33,16 @@
     tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
-def all_data(tobj):
-    tobj.data_object = tobj.pf.h.all_data()
+def disk(self):
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    normal = na.array([1.]*3)
+    self.data_object = self.pf.h.disk(center, normal, radius, height)
+    
+@register_object
+def all_data(self):
+    self.data_object = self.pf.h.all_data()
 
 _new_known_objects = {}
 for field in ["Density"]:#field_list:


--- a/tests/volume_rendering.py	Mon Oct 17 13:11:56 2011 -0400
+++ b/tests/volume_rendering.py	Mon Oct 17 13:12:25 2011 -0400
@@ -33,8 +33,4 @@
     def compare(self, old_result):
         if not na.all(self.result==old_result):
             raise VolumeRenderingInconsistent()
-        else:
-            pass
 
-        # self.compare_array_delta(old_result, self.result, 0.0)
-


http://bitbucket.org/yt_analysis/yt/changeset/168a13069d28/
changeset:   168a13069d28
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:17:09
summary:     Merging
affected #:  4 files (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 13:17:02 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:17:09 2011 -0400
@@ -33,8 +33,16 @@
     tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
-def all_data(tobj):
-    tobj.data_object = tobj.pf.h.all_data()
+def disk(self):
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    normal = na.array([1.]*3)
+    self.data_object = self.pf.h.disk(center, normal, radius, height)
+    
+@register_object
+def all_data(self):
+    self.data_object = self.pf.h.all_data()
 
 _new_known_objects = {}
 for field in ["Density"]:#field_list:


--- a/tests/projections.py	Mon Oct 17 13:17:02 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 13:17:09 2011 -0400
@@ -2,33 +2,34 @@
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
     TestProjection, TestOffAxisProjection, TestSlice, \
-    TestGasDistribution, Test2DGasDistribution
+    TestRay, TestGasDistribution, Test2DGasDistribution
 
 from fields_to_test import field_list
 
+for field in field_list:
+    create_test(TestRay, "%s" % field, field = field)
+
 for axis in range(3):
     for field in field_list:
-        create_test(TestSlice, "projection_slice_%s_%s" % (axis, field),
+        create_test(TestSlice, "%s_%s" % (axis, field),
                     field = field, axis = axis)
 
 for axis in range(3):
     for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+        create_test(TestProjection, "%s_%s" % (axis, field),
                     field = field, axis = axis)
-        create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
-                    field = field, axis = axis, weight_field = "Density")
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s" % 
-                    (axis, field),
-                    field = field, axis = axis)
-        create_test(TestOffAxisProjection, "off-axis_projection_test_%s_%s_Density" % 
-                    (axis, field),
+        create_test(TestProjection, "%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
+                field = field, axis = axis)
+    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
+                field = field, axis = axis, weight_field = "Density")
+
+for field in field_list:
+    create_test(TestGasDistribution, "density_%s" % field,
                 field_x = "Density", field_y = field)
-    create_test(Test2DGasDistribution, "2d_profile_density_x-vel_test_%s" % field,
-                field_x = "Density", field_y = "x-velocity", field_z = field, weight = "CellMassMsun")
+    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, 
+                weight = "CellMassMsun")


--- a/tests/volume_rendering.py	Mon Oct 17 13:17:02 2011 -0400
+++ b/tests/volume_rendering.py	Mon Oct 17 13:17:09 2011 -0400
@@ -33,8 +33,4 @@
     def compare(self, old_result):
         if not na.all(self.result==old_result):
             raise VolumeRenderingInconsistent()
-        else:
-            pass
 
-        # self.compare_array_delta(old_result, self.result, 0.0)
-


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:17:02 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:17:09 2011 -0400
@@ -94,6 +94,34 @@
         write_image(self.result, fn)
         return [fn]
 
+class TestRay(YTStaticOutputTest):
+
+    field = None
+
+    def run(self):
+        na.random.random(4333)
+        start_point = na.random.random(pf.dimensionality) * \
+            (pf.domain_right_edge - pf.domain_left_edge) + \
+            pf.domain_left_edge
+        end_point   = na.random.random(pf.dimensionality) * \
+            (pf.domain_right_edge - pf.domain_left_edge) + \
+            pf.domain_left_edge
+
+        # Here proj will just be the data array.
+        ray = self.pf.h.ray(start_point, end_point, field=self.field)
+
+        # values.
+        self.result = ray[self.field]
+
+    def compare(self, old_result):
+        ray  = self.result
+        oray = old_result
+
+        self.compare_array_delta(ray, oray, 1e-7)
+
+    def plot(self):
+        return
+
 class TestSlice(YTStaticOutputTest):
 
     field = None


http://bitbucket.org/yt_analysis/yt/changeset/3dbbb4dcd441/
changeset:   3dbbb4dcd441
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:28:43
summary:     Commenting this out preserves the default tests in the registry.
affected #:  1 file (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 13:17:09 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 13:28:43 2011 -0400
@@ -37,7 +37,7 @@
 def find_and_initialize_tests():
     mapping = {}
     for f in glob.glob(os.path.join(cwd,"*.py")):
-        clear_registry()
+        #clear_registry()
         iname = os.path.basename(f[:-3])
         try:
             load_tests(iname, cwd)


http://bitbucket.org/yt_analysis/yt/changeset/107a12007daa/
changeset:   107a12007daa
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:33:48
summary:     A better solution for missing tests.
affected #:  1 file (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 13:28:43 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 13:33:48 2011 -0400
@@ -37,7 +37,7 @@
 def find_and_initialize_tests():
     mapping = {}
     for f in glob.glob(os.path.join(cwd,"*.py")):
-        #clear_registry()
+        clear_registry()
         iname = os.path.basename(f[:-3])
         try:
             load_tests(iname, cwd)
@@ -49,6 +49,7 @@
     return mapping
 
 if __name__ == "__main__":
+    clear_registry()
     mapping = find_and_initialize_tests()
     test_storage_directory = ytcfg.get("yt","test_storage_dir")
     try:
@@ -100,7 +101,8 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
-        tests_to_run += new_tests
+        keys = registry_entries()
+        tests_to_run += [t for t in new_tests if t in keys]
         load_tests(m, cwd)
     for test_name in sorted(tests_to_run):
         rtr.run_test(test_name)


http://bitbucket.org/yt_analysis/yt/changeset/8e1b2d3f0b11/
changeset:   8e1b2d3f0b11
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:35:05
summary:     Use set() to speed up membership check
affected #:  1 file (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 13:33:48 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 13:35:05 2011 -0400
@@ -101,7 +101,7 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
-        keys = registry_entries()
+        keys = set(registry_entries())
         tests_to_run += [t for t in new_tests if t in keys]
         load_tests(m, cwd)
     for test_name in sorted(tests_to_run):
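
The change above only affects lookup speed, not behaviour: a membership test against a list scans every element, while a set hashes the key. A rough standalone timing sketch:

    import timeit

    keys_list = ["test_%d" % i for i in range(20000)]
    keys_set = set(keys_list)

    # Worst case for the list: the name being looked up sits at the end.
    t_list = timeit.timeit(lambda: "test_19999" in keys_list, number=2000)
    t_set = timeit.timeit(lambda: "test_19999" in keys_set, number=2000)
    # t_set comes out far smaller than t_list; the filtered result is identical.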


http://bitbucket.org/yt_analysis/yt/changeset/06893657db38/
changeset:   06893657db38
branch:      yt
user:        chummels
date:        2011-10-17 18:13:24
summary:     Added halo count tests for FOF and Parallel HOP; renamed the default halo count test to include HOP explicitly in its class name.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 10:06:05 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 12:13:24 2011 -0400
@@ -3,7 +3,8 @@
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
 
-class TestHaloCount(YTStaticOutputTest):
+# Tests the number of halos returned by the HOP halo finder on a dataset
+class TestHaloCountHOP(YTStaticOutputTest):
     threshold = 80.0
 
     def run(self):
@@ -19,7 +20,46 @@
     def plot(self):
         return []
 
-create_test(TestHaloCount, "halo_count_test", threshold=80.0)
+create_test(TestHaloCountHOP, "halo_count_test_HOP", threshold=80.0)
+
+# Tests the number of halos returned by the FOF halo finder on a dataset
+class TestHaloCountFOF(YTStaticOutputTest):
+    threshold = 80.0
+
+    def run(self):
+        # Find the haloes using FOF.
+        haloes = FOFHaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of haloes.
+        self.result = len(haloes)
+                    
+    def compare(self, old_result):
+        # The new value should be identical to the old one.
+        self.compare_value_delta(self.result, old_result, 0)
+
+    def plot(self):
+        return []
+
+create_test(TestHaloCountFOF, "halo_count_test_FOF", threshold=80.0)
+
+# Tests the number of halos returned by the Parallel HOP halo finder on a 
+# dataset
+class TestHaloCountPHOP(YTStaticOutputTest):
+    threshold = 80.0
+
+    def run(self):
+        # Find the haloes using parallel HOP.
+        haloes = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of haloes.
+        self.result = len(haloes)
+                    
+    def compare(self, old_result):
+        # The new value should be identical to the old one.
+        self.compare_value_delta(self.result, old_result, 0)
+
+    def plot(self):
+        return []
+
+create_test(TestHaloCountPHOP, "halo_count_test_PHOP", threshold=80.0)
 
 class TestHaloComposition(YTStaticOutputTest):
     threshold=80.0


http://bitbucket.org/yt_analysis/yt/changeset/97aabea0b290/
changeset:   97aabea0b290
branch:      yt
user:        chummels
date:        2011-10-17 18:36:16
summary:     Added halo composition hash tests that strictly compare the outputs of all three halo finding methods for identical particle membership.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 12:13:24 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 12:36:16 2011 -0400
@@ -2,6 +2,7 @@
 import matplotlib; matplotlib.use("Agg")
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
+import hashlib
 
 # Tests the number of halos returned by the HOP halo finder on a dataset
 class TestHaloCountHOP(YTStaticOutputTest):
@@ -82,7 +83,88 @@
                 return False
         return True
     
-    def plot(self):
-        return []
+create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
 
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
+# Tests the content of the halos returned by the HOP halo finder on a dataset 
+# by comparing the hash of the arrays of all the particles contained in each
+# halo.
+class TestHaloCompositionHashHOP(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the haloes using vanilla HOP.
+        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a list of the arrays of the particle IDs, for
+        # each halo
+        IDs = []
+        for halo in haloes:
+            IDs.append(halo["particle_index"])
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
+
+create_test(TestHaloCompositionHashHOP, "halo_composition_test_hash_HOP", threshold=80.0)
+
+# Tests the content of the halos returned by the FOF halo finder on a dataset 
+# by comparing the hash of the arrays of all the particles contained in each
+# halo.
+class TestHaloCompositionHashFOF(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the haloes using vanilla FOF.
+        haloes = FOFHaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a list of the arrays of the particle IDs, for
+        # each halo
+        IDs = []
+        for halo in haloes:
+            IDs.append(halo["particle_index"])
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
+
+create_test(TestHaloCompositionHashFOF, "halo_composition_test_hash_FOF", threshold=80.0)
+
+# Tests the content of the halos returned by the Parallel HOP halo finder on a 
+# dataset by comparing the hash of the arrays of all the particles contained 
+# in each halo.
+class TestHaloCompositionHashPHOP(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the haloes using parallel HOP.
+        haloes = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a list of the arrays of the particle IDs, for
+        # each halo
+        IDs = []
+        for halo in haloes:
+            IDs.append(halo["particle_index"])
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
+
+create_test(TestHaloCompositionHashPHOP, "halo_composition_test_hash_PHOP", threshold=80.0)
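
One caveat with the hash tests as committed here: self.result is a Python list of per-halo arrays, and a list has no .tostring(), so the digest would fail at compare time; the follow-up changeset 84798189b708 concatenates the arrays first. A standalone sketch of the working comparison (.tobytes() is the newer spelling of .tostring()):

    import hashlib
    import numpy as np

    per_halo_ids = [np.arange(10), np.arange(20, 30)]   # stand-in particle indices
    ids_new = np.concatenate(per_halo_ids)
    ids_old = ids_new.copy()

    # Hash the raw bytes of each array and compare digests instead of elements.
    new_hash = hashlib.sha256(ids_new.tobytes()).hexdigest()
    old_hash = hashlib.sha256(ids_old.tobytes()).hexdigest()
    assert new_hash == old_hash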


http://bitbucket.org/yt_analysis/yt/changeset/84798189b708/
changeset:   84798189b708
branch:      yt
user:        chummels
date:        2011-10-17 19:37:05
summary:     Added calls in yt's test runner to include the different halo finder comparisons in the test suite.
affected #:  3 files (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 12:36:16 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 13:37:05 2011 -0400
@@ -6,7 +6,6 @@
     RegressionTestRunner, clear_registry, create_test, \
     TestFieldStatistics, TestAllProjections, registry_entries, \
     Xunit
-
 from yt.utilities.command_line import get_yt_version
 
 from yt.mods import *


--- a/yt/utilities/answer_testing/api.py	Mon Oct 17 12:36:16 2011 -0400
+++ b/yt/utilities/answer_testing/api.py	Mon Oct 17 13:37:05 2011 -0400
@@ -45,3 +45,8 @@
 
 from .xunit import \
     Xunit
+
+from .halo_tests import \
+    TestHaloCompositionHashHOP, \
+    TestHaloCompositionHashFOF, \
+    TestHaloCompositionHashPHOP


--- a/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 12:36:16 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 13:37:05 2011 -0400
@@ -2,17 +2,19 @@
 import matplotlib; matplotlib.use("Agg")
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
+from yt.analysis_modules.halo_finding.api import *
 import hashlib
+import numpy as np
 
 # Tests the number of halos returned by the HOP halo finder on a dataset
 class TestHaloCountHOP(YTStaticOutputTest):
     threshold = 80.0
 
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
-        # We only care about the number of haloes.
-        self.result = len(haloes)
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of halos.
+        self.result = len(halos)
                     
     def compare(self, old_result):
         # The new value should be identical to the old one.
@@ -25,13 +27,15 @@
 
 # Tests the number of halos returned by the FOF halo finder on a dataset
 class TestHaloCountFOF(YTStaticOutputTest):
-    threshold = 80.0
+    link = 0.2
+    padding = 0.02
 
     def run(self):
-        # Find the haloes using FOF.
-        haloes = FOFHaloFinder(self.pf, threshold=self.threshold, dm_only=False)
-        # We only care about the number of haloes.
-        self.result = len(haloes)
+        # Find the halos using FOF.
+        halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False, 
+                               padding=self.padding)
+        # We only care about the number of halos.
+        self.result = len(halos)
                     
     def compare(self, old_result):
         # The new value should be identical to the old one.
@@ -48,10 +52,10 @@
     threshold = 80.0
 
     def run(self):
-        # Find the haloes using parallel HOP.
-        haloes = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
-        # We only care about the number of haloes.
-        self.result = len(haloes)
+        # Find the halos using parallel HOP.
+        halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of halos.
+        self.result = len(halos)
                     
     def compare(self, old_result):
         # The new value should be identical to the old one.
@@ -66,12 +70,12 @@
     threshold=80.0
     
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
         # The result is a list of the particle IDs, stored
         # as sets for easy comparison.
         IDs = []
-        for halo in haloes:
+        for halo in halos:
             IDs.append(set(halo["particle_index"]))
         self.result = IDs
     
@@ -92,13 +96,14 @@
     threshold=80.0
     
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
-        # The result is a list of the arrays of the particle IDs, for
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a flattened array of the arrays of the particle IDs for
         # each halo
         IDs = []
-        for halo in haloes:
+        for halo in halos:
             IDs.append(halo["particle_index"])
+        IDs = np.array(np.concatenate((IDs)))
         self.result = IDs
     
     def compare(self, old_result):
@@ -111,22 +116,23 @@
         else:
             return False
 
-create_test(TestHaloCompositionHashHOP, "halo_composition_test_hash_HOP", threshold=80.0)
-
 # Tests the content of the halos returned by the FOF halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
 # halo.
 class TestHaloCompositionHashFOF(YTStaticOutputTest):
-    threshold=80.0
+    link = 0.2
+    padding = 0.02
     
     def run(self):
-        # Find the haloes using vanilla FOF.
-        haloes = FOFHaloFinder(self.pf, threshold=self.threshold, dm_only=False)
-        # The result is a list of the arrays of the particle IDs, for
+        # Find the halos using vanilla FOF.
+        halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False, 
+                               padding=self.padding)
+        # The result is a flattened array of the arrays of the particle IDs for
         # each halo
         IDs = []
-        for halo in haloes:
+        for halo in halos:
             IDs.append(halo["particle_index"])
+        IDs = np.array(np.concatenate((IDs)))
         self.result = IDs
     
     def compare(self, old_result):
@@ -139,8 +145,6 @@
         else:
             return False
 
-create_test(TestHaloCompositionHashFOF, "halo_composition_test_hash_FOF", threshold=80.0)
-
 # Tests the content of the halos returned by the Parallel HOP halo finder on a 
 # dataset by comparing the hash of the arrays of all the particles contained 
 # in each halo.
@@ -148,13 +152,14 @@
     threshold=80.0
     
     def run(self):
-        # Find the haloes using parallel HOP.
-        haloes = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
-        # The result is a list of the arrays of the particle IDs, for
+        # Find the halos using parallel HOP.
+        halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a flattened array of the arrays of the particle IDs for
         # each halo
         IDs = []
-        for halo in haloes:
+        for halo in halos:
             IDs.append(halo["particle_index"])
+        IDs = np.array(np.concatenate((IDs)))
         self.result = IDs
     
     def compare(self, old_result):
@@ -166,5 +171,3 @@
             return True
         else:
             return False
-
-create_test(TestHaloCompositionHashPHOP, "halo_composition_test_hash_PHOP", threshold=80.0)
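
The hash tests now flatten the ragged list of per-halo particle-ID arrays into a single array before hashing. A small sketch of that step with made-up values; note that np.concatenate already returns an ndarray, so the outer np.array() wrapper in the diff is redundant, and that the digest is sensitive to halo ordering as well as membership:

    import numpy as np

    per_halo_ids = [np.array([3, 1, 4]), np.array([1, 5]), np.array([9, 2, 6])]
    flat = np.concatenate(per_halo_ids)
    assert flat.tolist() == [3, 1, 4, 1, 5, 9, 2, 6]

    # Reordering the halos changes the flattened array, hence the hash.
    reordered = np.concatenate(per_halo_ids[::-1])
    assert not np.array_equal(flat, reordered)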


http://bitbucket.org/yt_analysis/yt/changeset/a71d820e1177/
changeset:   a71d820e1177
branch:      yt
user:        chummels
date:        2011-10-17 19:37:14
summary:     Merging.
affected #:  8 files (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 13:37:14 2011 -0400
@@ -16,26 +16,50 @@
     return func
 
 @register_object
-def centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
 
 @register_object
-def off_centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
 
 @register_object
-def corner_sphere(self):
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
+def disk(self):
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    normal = na.array([1.]*3)
+    self.data_object = self.pf.h.disk(center, normal, radius, height)
+    
+@register_object
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 
+_new_known_objects = {}
+for field in ["Density"]:#field_list:
+    for object_name in known_objects:
+        def _rfunc(oname, fname):
+            def func(tobj):
+                known_objects[oname](tobj)
+                tobj.orig_data_object = tobj.data_object
+                avg_value = tobj.orig_data_object.quantities[
+                        "WeightedAverageQuantity"](fname, "Density")
+                tobj.data_object = tobj.orig_data_object.cut_region(
+                        ["grid['%s'] > %s" % (fname, avg_value)])
+            return func
+        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+                _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
+
 class YTFieldValuesTest(YTStaticOutputTest):
     def run(self):
         vals = self.data_object[self.field].copy()
@@ -51,6 +75,61 @@
 
 for object_name in known_objects:
     for field in field_list + particle_field_list:
+        if "cut_region" in object_name and field in particle_field_list:
+            continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
+    
+class YTDerivedQuantityTest(YTStaticOutputTest):
+    def setup(self):
+        YTStaticOutputTest.setup(self)
+        known_objects[self.object_name](self)
 
+    def compare(self, old_results):
+        if self.result != old_result: raise FieldHashesDontMatch
+
+    def run(self):
+        # This only works if it takes no arguments
+        self.result = self.data_object.quantities[self.dq_name]()
+
+dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
+            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
+
+# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
+# MinLocation
+
+for object_name in known_objects:
+    for dq in dq_names:
+        # Some special exceptions
+        if "cut_region" in object_name and (
+            "SpinParameter" in dq or
+            "TotalMass" in dq):
+            continue
+        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
+                    dq_name = dq, object_name = object_name)
+
+class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities[self.dq_name](
+            self.field_name)
+
+for object_name in known_objects:
+    for field in field_list:
+        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
+            create_test(YTDerivedQuantityTestField,
+                        "%s_%s" % (object_name, field),
+                        field_name = field, dq_name = dq,
+                        object_name = object_name)
+
+class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities["WeightedAverageQuantity"](
+            self.field_name, weight="CellMassMsun")
+
+for object_name in known_objects:
+    for field in field_list:
+        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
+                    "%s_%s" % (object_name, field),
+                    field_name = field, 
+                    object_name = object_name)
+


--- a/tests/projections.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/tests/projections.py	Mon Oct 17 13:37:14 2011 -0400
@@ -1,16 +1,35 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestSlice, \
+    TestRay, TestGasDistribution, Test2DGasDistribution
+
 from fields_to_test import field_list
 
+for field in field_list:
+    create_test(TestRay, "%s" % field, field = field)
+
 for axis in range(3):
     for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+        create_test(TestSlice, "%s_%s" % (axis, field),
                     field = field, axis = axis)
-        create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
+
+for axis in range(3):
+    for field in field_list:
+        create_test(TestProjection, "%s_%s" % (axis, field),
+                    field = field, axis = axis)
+        create_test(TestProjection, "%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
+                field = field, axis = axis)
+    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
+                field = field, axis = axis, weight_field = "Density")
+
+for field in field_list:
+    create_test(TestGasDistribution, "density_%s" % field,
                 field_x = "Density", field_y = field)
+    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, 
+                weight = "CellMassMsun")


--- a/tests/runall.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 13:37:14 2011 -0400
@@ -48,6 +48,7 @@
     return mapping
 
 if __name__ == "__main__":
+    clear_registry()
     mapping = find_and_initialize_tests()
     test_storage_directory = ytcfg.get("yt","test_storage_dir")
     try:
@@ -72,6 +73,9 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
+    parser.add_option("", "--parallel", dest="parallel",
+                      default=False,
+                      help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         print "\n    ".join(sorted(itertools.chain(*mapping.values())))
@@ -96,7 +100,8 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
-        tests_to_run += new_tests
+        keys = set(registry_entries())
+        tests_to_run += [t for t in new_tests if t in keys]
         load_tests(m, cwd)
     for test_name in sorted(tests_to_run):
         rtr.run_test(test_name)


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/volume_rendering.py	Mon Oct 17 13:37:14 2011 -0400
@@ -0,0 +1,36 @@
+from yt.mods import *
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class VolumeRenderingInconsistent(RegressionTestException):
+    pass
+
+class VolumeRenderingConsistency(YTStaticOutputTest):
+    name = "volume_rendering_consistency"
+    def run(self):
+        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
+        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        N = 512
+        n_contours=5
+        cmap = 'algae'
+        field = 'Density'
+        mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
+        mi, ma = na.log10(mi), na.log10(ma)
+        contour_width=(ma-mi)/100.
+        L = na.array([1.]*3)
+        tf = ColorTransferFunction((mi-2, ma+2))
+        tf.add_layers(n_contours,w=contour_width,
+                      col_bounds = (mi*1.001,ma*0.999), 
+                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
+        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        image = cam.snapshot()
+        # image = cam.snapshot('test_rendering_%s.png'%field)
+        self.result = image
+
+    def compare(self, old_result):
+        if not na.all(self.result==old_result):
+            raise VolumeRenderingInconsistent()
+


--- a/yt/data_objects/field_info_container.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/yt/data_objects/field_info_container.py	Mon Oct 17 13:37:14 2011 -0400
@@ -466,5 +466,5 @@
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
-        if data._type_name == 'grid': return True
+        if getattr(data, "_type_name", None) == 'grid': return True
         raise NeedsOriginalGrid()


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:37:14 2011 -0400
@@ -37,7 +37,8 @@
     def run(self):
         # First we get our flattened projection -- this is the
         # Density, px, py, pdx, and pdy
-        proj = self.pf.h.proj(self.axis, self.field)
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
         # Now let's stick it in a buffer
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
@@ -60,10 +61,92 @@
         pylab.clf()
         pylab.imshow(self.result[1][self.field],
             interpolation='nearest', origin='lower')
-        fn = "%s_%s_projection.png" % (self.pf, self.field)
+        fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+                                          self.weight_field)
         pylab.savefig(fn)
         return [fn]
 
+class TestOffAxisProjection(YTStaticOutputTest):
+
+    field = None
+    weight_field = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        proj = off_axis_projection(self.pf, 
+                                   (0.5 * (self.pf.domain_left_edge + 
+                                           self.pf.domain_right_edge)),
+                                   [1., 1., 1.], 1., 400,
+                                   self.field, weight=self.weight_field)
+
+        # values.
+        self.result = proj
+
+    def compare(self, old_result):
+        proj  = self.result
+        oproj = old_result
+
+        self.compare_array_delta(proj, oproj, 1e-7)
+
+    def plot(self):
+        fn = "%s_%s_%s_off-axis_projection.png" % \
+            (self.pf, self.field, self.weight_field)
+        write_image(self.result, fn)
+        return [fn]
+
+class TestRay(YTStaticOutputTest):
+
+    field = None
+
+    def run(self):
+        na.random.random(4333)
+        start_point = na.random.random(pf.dimensionality) * \
+            (pf.domain_right_edge - pf.domain_left_edge) + \
+            pf.domain_left_edge
+        end_point   = na.random.random(pf.dimensionality) * \
+            (pf.domain_right_edge - pf.domain_left_edge) + \
+            pf.domain_left_edge
+
+        # Here proj will just be the data array.
+        ray = self.pf.h.ray(start_point, end_point, field=self.field)
+
+        # values.
+        self.result = ray[self.field]
+
+    def compare(self, old_result):
+        ray  = self.result
+        oray = old_result
+
+        self.compare_array_delta(ray, oray, 1e-7)
+
+    def plot(self):
+        return
+
+class TestSlice(YTStaticOutputTest):
+
+    field = None
+    axis = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        slice = self.pf.h.slice(self.axis, 
+                                (0.5 * (self.pf.domain_left_edge + 
+                                        self.pf.domain_right_edge))[self.axis],
+                                fields=self.field)
+        # values.
+        self.result = slice.data
+
+    def compare(self, old_result):
+        slice  = self.result
+        oslice = old_result
+
+        self.compare_data_arrays(slice, oslice)
+
+    def plot(self):
+        fn = "%s_%s_slice.png" % (self.pf, self.field)
+        write_image(self.result[self.field], fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.
@@ -102,3 +185,21 @@
 for field in ["Temperature", "x-velocity"]:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+
+class Test2DGasDistribution(TestGasDistribution):
+    x_bins = 128
+    y_bins = 128
+    field_z = "CellMassMsun"
+    weight = None
+    def run(self):
+        # We're NOT going to use the low-level profiling API here,
+        # because we are avoiding the calculations of min/max,
+        # as those should be tested in another test.
+        pc = PlotCollection(self.pf, center=self.sim_center)
+        p = pc.add_phase_object(self.entire_simulation,
+            [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
+            weight=self.weight)
+        # The arrays are all stored in a dictionary hanging off the profile
+        # object
+        self.result = p.data._data
+


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 13:37:14 2011 -0400
@@ -28,6 +28,7 @@
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
+from yt.funcs import *
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
             self._path = os.path.join(path, "results")
         else:
             self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): os.mkdir(self._path)
+        if not os.path.isdir(self._path): 
+            only_on_root(os.mkdir, self._path)
         if os.path.isfile(self._path): raise RuntimeError
 
     def _fn(self, tn):
         return os.path.join(self._path, tn)
 
+    @rootonly
     def __setitem__(self, test_name, result):
         # We have to close our shelf manually,
         # as the destructor does not necessarily do this.


--- a/yt/utilities/physical_constants.py	Mon Oct 17 13:37:05 2011 -0400
+++ b/yt/utilities/physical_constants.py	Mon Oct 17 13:37:14 2011 -0400
@@ -13,7 +13,7 @@
 cross_section_thompson_cgs = 6.65e-25 # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-28 # emu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1
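
A quick arithmetic check of the corrected proton charge above: converting the SI elementary charge at 1 C = 2.99792458e9 statC (esu) reproduces the 4.803e-10 figure.

    charge_si = 1.602176e-19           # Coulombs
    esu_per_coulomb = 2.99792458e9     # statcoulombs per Coulomb
    charge_esu = charge_si * esu_per_coulomb
    assert abs(charge_esu - 4.803e-10) / 4.803e-10 < 1e-3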


http://bitbucket.org/yt_analysis/yt/changeset/6c65a318aca6/
changeset:   6c65a318aca6
branch:      yt
user:        MatthewTurk
date:        2011-10-17 19:51:33
summary:     Fixing an error with loading tests and checking that those tests exist.
affected #:  1 file (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 13:37:14 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 13:51:33 2011 -0400
@@ -78,7 +78,8 @@
                       help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
-        print "\n    ".join(sorted(itertools.chain(*mapping.values())))
+        tests = list(set(sorted(itertools.chain(*mapping.values()))))
+        print "\n    ".join(tests)
         sys.exit(0)
     pf = load(opts.parameter_file)
     if pf is None:
@@ -100,9 +101,9 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
+        load_tests(m, cwd)
         keys = set(registry_entries())
         tests_to_run += [t for t in new_tests if t in keys]
-        load_tests(m, cwd)
     for test_name in sorted(tests_to_run):
         rtr.run_test(test_name)
     if watcher is not None:


http://bitbucket.org/yt_analysis/yt/changeset/2579f9a2641f/
changeset:   2579f9a2641f
branch:      yt
user:        brittonsmith
date:        2011-10-17 19:59:54
summary:     Fixed bug in TestRay.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:51:33 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:59:54 2011 -0400
@@ -100,12 +100,12 @@
 
     def run(self):
         na.random.random(4333)
-        start_point = na.random.random(pf.dimensionality) * \
-            (pf.domain_right_edge - pf.domain_left_edge) + \
-            pf.domain_left_edge
-        end_point   = na.random.random(pf.dimensionality) * \
-            (pf.domain_right_edge - pf.domain_left_edge) + \
-            pf.domain_left_edge
+        start_point = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
+        end_point   = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
 
         # Here proj will just be the data array.
         ray = self.pf.h.ray(start_point, end_point, field=self.field)


http://bitbucket.org/yt_analysis/yt/changeset/eb57a6b45bd1/
changeset:   eb57a6b45bd1
branch:      yt
user:        chummels
date:        2011-10-17 19:50:42
summary:     Forgot to add halos.py file for initializing yt tests.  Added now.
affected #:  1 file (-1 bytes)

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/halos.py	Mon Oct 17 13:50:42 2011 -0400
@@ -0,0 +1,11 @@
+from yt.utilities.answer_testing.output_tests import \
+    SingleOutputTest, create_test
+from yt.utilities.answer_testing.halo_tests import \
+    TestHaloCompositionHashHOP, TestHaloCompositionHashFOF, \
+    TestHaloCompositionHashPHOP 
+
+create_test(TestHaloCompositionHashHOP, "halo_composition_test_hash_HOP", threshold=80.0)
+
+create_test(TestHaloCompositionHashFOF, "halo_composition_test_hash_FOF", threshold=80.0)
+
+create_test(TestHaloCompositionHashPHOP, "halo_composition_test_hash_PHOP", threshold=80.0)


http://bitbucket.org/yt_analysis/yt/changeset/572ece4065a6/
changeset:   572ece4065a6
branch:      yt
user:        chummels
date:        2011-10-17 20:01:17
summary:     Removing excess matplotlib.use('Agg') calls to avoid annoying warning messages.
affected #:  6 files (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 13:50:42 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 14:01:17 2011 -0400
@@ -1,3 +1,4 @@
+import matplotlib; matplotlib.use('Agg')
 from yt.config import ytcfg
 ytcfg["yt","loglevel"] = "50"
 ytcfg["yt","serialize"] = "False"


--- a/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 13:50:42 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Mon Oct 17 14:01:17 2011 -0400
@@ -1,5 +1,5 @@
 from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
 from yt.analysis_modules.halo_finding.api import *


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 13:50:42 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 14:01:17 2011 -0400
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import pylab
 from yt.mods import *
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test


--- a/yt/utilities/answer_testing/output_tests.py	Mon Oct 17 13:50:42 2011 -0400
+++ b/yt/utilities/answer_testing/output_tests.py	Mon Oct 17 14:01:17 2011 -0400
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import matplotlib
 from yt.mods import *
 
 # We first create our dictionary of tests to run.  This starts out empty, and


--- a/yt/utilities/answer_testing/particle_tests.py	Mon Oct 17 13:50:42 2011 -0400
+++ b/yt/utilities/answer_testing/particle_tests.py	Mon Oct 17 14:01:17 2011 -0400
@@ -1,5 +1,5 @@
+import matplotlib
 from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
 


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 13:50:42 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 14:01:17 2011 -0400
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import os, shelve, cPickle, sys, imp, tempfile
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
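
The convention settled on above is to select the non-interactive backend exactly once, in the runall.py entry point, before pylab is imported anywhere; repeated matplotlib.use() calls after that point are what produced the warnings. A minimal standalone illustration:

    import matplotlib
    matplotlib.use("Agg")        # must come before the first pylab/pyplot import
    import pylab

    pylab.plot([0, 1], [0, 1])
    pylab.savefig("agg_backend_check.png")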


http://bitbucket.org/yt_analysis/yt/changeset/36fc0f0e3136/
changeset:   36fc0f0e3136
branch:      yt
user:        chummels
date:        2011-10-17 20:01:35
summary:     Merging.
affected #:  2 files (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 14:01:17 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 14:01:35 2011 -0400
@@ -79,7 +79,8 @@
                       help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
-        print "\n    ".join(sorted(itertools.chain(*mapping.values())))
+        tests = list(set(sorted(itertools.chain(*mapping.values()))))
+        print "\n    ".join(tests)
         sys.exit(0)
     pf = load(opts.parameter_file)
     if pf is None:
@@ -101,9 +102,9 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
+        load_tests(m, cwd)
         keys = set(registry_entries())
         tests_to_run += [t for t in new_tests if t in keys]
-        load_tests(m, cwd)
     for test_name in sorted(tests_to_run):
         rtr.run_test(test_name)
     if watcher is not None:


--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 14:01:17 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 14:01:35 2011 -0400
@@ -100,12 +100,12 @@
 
     def run(self):
         na.random.random(4333)
-        start_point = na.random.random(pf.dimensionality) * \
-            (pf.domain_right_edge - pf.domain_left_edge) + \
-            pf.domain_left_edge
-        end_point   = na.random.random(pf.dimensionality) * \
-            (pf.domain_right_edge - pf.domain_left_edge) + \
-            pf.domain_left_edge
+        start_point = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
+        end_point   = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
 
         # Here proj will just be the data array.
         ray = self.pf.h.ray(start_point, end_point, field=self.field)


http://bitbucket.org/yt_analysis/yt/changeset/45dac4c7f978/
changeset:   45dac4c7f978
branch:      yt
user:        MatthewTurk
date:        2011-10-17 20:13:17
summary:     Disabled plotting by default and fixed up the listing of tests.
affected #:  2 files (-1 bytes)

--- a/tests/runall.py	Mon Oct 17 14:01:35 2011 -0400
+++ b/tests/runall.py	Mon Oct 17 14:13:17 2011 -0400
@@ -79,7 +79,14 @@
                       help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
-        tests = list(set(sorted(itertools.chain(*mapping.values()))))
+        tests_to_run = []
+        for m, vals in mapping.items():
+            new_tests = fnmatch.filter(vals, opts.test_pattern)
+            if len(new_tests) == 0: continue
+            load_tests(m, cwd)
+            keys = set(registry_entries())
+            tests_to_run += [t for t in new_tests if t in keys]
+        tests = list(set(tests_to_run))
         print "\n    ".join(tests)
         sys.exit(0)
     pf = load(opts.parameter_file)


--- a/yt/utilities/answer_testing/runner.py	Mon Oct 17 14:01:35 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Oct 17 14:13:17 2011 -0400
@@ -82,7 +82,7 @@
 class RegressionTestRunner(object):
     def __init__(self, results_id, compare_id = None,
                  results_path = ".", compare_results_path = ".",
-                 io_log = "OutputLog"):
+                 io_log = "OutputLog", plot_tests = False):
         # This test runner assumes it has been launched with the current
         # working directory that of the test case itself.
         self.io_log = io_log
@@ -95,6 +95,7 @@
         self.results = RegressionTestStorage(results_id, path=results_path)
         self.plot_list = {}
         self.passed_tests = {}
+        self.plot_tests = plot_tests
 
     def run_all_tests(self):
         plot_list = []
@@ -129,7 +130,8 @@
         print self.id, "Running", test.name,
         test.setup()
         test.run()
-        self.plot_list[test.name] = test.plot()
+        if self.plot_tests:
+            self.plot_list[test.name] = test.plot()
         self.results[test.name] = test.result
         success, msg = self._compare(test)
         if self.old_results is None:
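
A standalone sketch of the test-listing path above: glob-style patterns are matched against each module's declared tests with fnmatch, and only names that actually made it into the registry are kept (plotting itself is now opt-in through the new plot_tests flag on RegressionTestRunner). All names below are made up:

    import fnmatch

    registry_entries = {"slice_0_Density", "projection_0_Density", "ray_Density"}
    mapping = {"projections": ["slice_0_Density", "projection_0_Density",
                               "projection_0_Temperature"]}
    test_pattern = "projection_*"

    tests_to_run = []
    for m, vals in mapping.items():
        new_tests = fnmatch.filter(vals, test_pattern)
        if len(new_tests) == 0:
            continue
        tests_to_run += [t for t in new_tests if t in registry_entries]

    assert sorted(set(tests_to_run)) == ["projection_0_Density"]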


http://bitbucket.org/yt_analysis/yt/changeset/6a80a7799eb8/
changeset:   6a80a7799eb8
branch:      yt
user:        brittonsmith
date:        2011-10-17 22:25:04
summary:     Fixed a typo.
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 14:13:17 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 16:25:04 2011 -0400
@@ -85,7 +85,7 @@
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
 
-    def compare(self, old_results):
+    def compare(self, old_result):
         if self.result != old_result: raise FieldHashesDontMatch
 
     def run(self):


http://bitbucket.org/yt_analysis/yt/changeset/22566f124c39/
changeset:   22566f124c39
branch:      yt
user:        MatthewTurk
date:        2011-10-17 22:32:41
summary:     Fixing problems with DQ comparisons
affected #:  1 file (-1 bytes)

--- a/tests/object_field_values.py	Mon Oct 17 16:25:04 2011 -0400
+++ b/tests/object_field_values.py	Mon Oct 17 16:32:41 2011 -0400
@@ -3,7 +3,7 @@
 
 from yt.utilities.answer_testing.output_tests import \
     YTStaticOutputTest, RegressionTestException, create_test
-from yt.funcs import ensure_list
+from yt.funcs import ensure_list, iterable
 from fields_to_test import field_list, particle_field_list
 
 class FieldHashesDontMatch(RegressionTestException):
@@ -86,7 +86,15 @@
         known_objects[self.object_name](self)
 
     def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
+        if hasattr(self.result, 'tostring'):
+            self.compare_array_delta(self.result, old_result, 1e-7)
+            return
+        elif iterable(self.result):
+            a1 = na.array(self.result)
+            a2 = na.array(old_result)
+            self.compare_array_delta(a1, a2, 1e-7)
+        else:
+            if self.result != old_result: raise FieldHashesDontMatch
 
     def run(self):
         # This only works if it takes no arguments
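
A self-contained sketch of the type dispatch introduced above: derived quantities come back as arrays, tuples of numbers, or plain scalars, so the comparison picks an array delta, an element-wise check, or direct equality accordingly. np.allclose and the local iterable() stand in for yt's compare_array_delta and yt.funcs.iterable:

    import numpy as np

    def iterable(obj):
        # Stand-in for yt.funcs.iterable.
        try:
            len(obj)
        except TypeError:
            return False
        return True

    def compare(result, old_result, tol=1e-7):
        # The committed version keys off hasattr(result, "tostring") for arrays.
        if isinstance(result, np.ndarray):
            assert np.allclose(result, old_result, rtol=tol)
        elif iterable(result):
            assert np.allclose(np.array(result), np.array(old_result), rtol=tol)
        else:
            assert result == old_result

    compare(np.arange(4.0), np.arange(4.0))
    compare((1.0, 2.0, 3.0), [1.0, 2.0, 3.0])
    compare(42.0, 42.0)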


http://bitbucket.org/yt_analysis/yt/changeset/99fb33b9b270/
changeset:   99fb33b9b270
branch:      yt
user:        brittonsmith
date:        2011-10-17 22:49:16
summary:     Fixed the test ray to seed the random number generator.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 16:32:41 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Mon Oct 17 16:49:16 2011 -0400
@@ -99,7 +99,7 @@
     field = None
 
     def run(self):
-        na.random.random(4333)
+        na.random.seed(4333)
         start_point = na.random.random(self.pf.dimensionality) * \
             (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
             self.pf.domain_left_edge
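
Why the one-word change matters (a small illustrative check, not part of the changeset): na.random.random(4333) merely draws 4333 numbers, whereas na.random.seed(4333) makes every subsequent draw reproducible, so the test ray starts from the same point on every run.

    import numpy as na

    na.random.seed(4333)
    first = na.random.random(3)
    na.random.seed(4333)
    second = na.random.random(3)
    assert (first == second).all()   # identical start points across runs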


http://bitbucket.org/yt_analysis/yt/changeset/36d4a4d6e5f1/
changeset:   36d4a4d6e5f1
branch:      yt
user:        MatthewTurk
date:        2011-10-17 23:46:55
summary:     Speeding up the halo profiler by grouping profile fields and adding multiple
fields at the same time.
affected #:  2 files (-1 bytes)

--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Oct 17 16:49:16 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Oct 17 17:46:55 2011 -0400
@@ -582,8 +582,14 @@
             except EmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
                 return None
+            # Figure out which fields to add simultaneously
+            field_groupings = defaultdict(lambda: defaultdict(list))
             for hp in self.profile_fields:
-                profile.add_fields(hp['field'], weight=hp['weight_field'], accumulation=hp['accumulation'])
+                field_groupings[hp['weight_field']][hp['accumulation']].append(hp['field'])
+            for weight_field in field_groupings:
+                for accum, fields in field_groupings[weight_field].items():
+                    profile.add_fields(fields, weight=weight_field,
+                                       accumulation=accum)
 
         if virial_filter:
             self._add_actual_overdensity(profile)
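
A minimal sketch of the grouping trick above (the field list is illustrative, not the halo profiler's real state): requested profile fields are bucketed by (weight_field, accumulation) so each bucket turns into a single add_fields() call instead of one call per field.

    from collections import defaultdict

    profile_fields = [
        {"field": "Density",      "weight_field": "CellMassMsun", "accumulation": False},
        {"field": "Temperature",  "weight_field": "CellMassMsun", "accumulation": False},
        {"field": "CellMassMsun", "weight_field": None,           "accumulation": True},
    ]

    field_groupings = defaultdict(lambda: defaultdict(list))
    for hp in profile_fields:
        field_groupings[hp["weight_field"]][hp["accumulation"]].append(hp["field"])

    for weight_field, by_accum in field_groupings.items():
        for accum, fields in by_accum.items():
            # in the real code: profile.add_fields(fields, weight=weight_field,
            #                                      accumulation=accum)
            print("%s %s %s" % (weight_field, accum, fields))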


--- a/yt/analysis_modules/halo_profiler/standard_analysis.py	Mon Oct 17 16:49:16 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py	Mon Oct 17 17:46:55 2011 -0400
@@ -57,7 +57,6 @@
     ["HI","HII","HeI","HeII","HeIII","H2I","H2II","HM","Electron",
     "DI","DII","HDI","Metal"]
 ]
-    
 
 class StandardRadialAnalysis(object):
     def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):


http://bitbucket.org/yt_analysis/yt/changeset/7a593c219fc8/
changeset:   7a593c219fc8
branch:      yt
user:        MatthewTurk
date:        2011-10-18 00:16:17
summary:     First pass at an expanded "yt analyze" command.
affected #:  4 files (-1 bytes)

--- a/yt/analysis_modules/halo_profiler/api.py	Mon Oct 17 17:46:55 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/api.py	Mon Oct 17 18:16:17 2011 -0400
@@ -34,4 +34,5 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    shift_projections
+    shift_projections, \
+    standard_fields


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Oct 17 17:46:55 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Mon Oct 17 18:16:17 2011 -0400
@@ -1086,3 +1086,34 @@
 
     def keys(self):
         return self._data.keys()
+
+standard_fields = [
+    ("Density", "CellMassMsun", False),
+    ("Temperature", "CellMassMsun", False),
+    ("VelocityMagnitude", "CellMassMsun", False),
+    ("Ones", None, False),
+    ("Entropy", "CellMassMsun", False),
+    ("RadialVelocity", "CellMassMsun", False),
+    ("SpecificAngularMomentumX", "CellMassMsun", False),
+    ("SpecificAngularMomentumY", "CellMassMsun", False),
+    ("SpecificAngularMomentumZ", "CellMassMsun", False),
+    ("CoolingTime", "CellMassMsun", False),
+    ("DynamicalTime", "CellMassMsun", False),
+    ("CellMassMsun", None, True),
+    ("TotalMassMsun", None, True),
+    ("Dark_Matter_Density", "CellMassMsun", False),
+    #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
+    #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
+    #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
+    ("OverDensity", "CellMassMsun", False),
+    #("ParticleMassMsun", None),
+    ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+    #("StarParticleMassMsun", None), 
+    ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+]
+
+standard_fields += [("%s_Fraction" % (s), "CellMassMsun", False)
+    for s in ["HI","HII","HeI","HeII","HeIII","H2I","H2II",
+    "HM","Electron", "DI","DII","HDI","Metal"]
+]
+


--- a/yt/analysis_modules/halo_profiler/standard_analysis.py	Mon Oct 17 17:46:55 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py	Mon Oct 17 18:16:17 2011 -0400
@@ -28,36 +28,6 @@
 from yt.data_objects.profiles import BinnedProfile1D
 from yt.funcs import *
 
-analysis_field_list = [
-    "Density",
-    "Temperature",
-    "VelocityMagnitude",
-    ("Ones", None),
-    "Entropy",
-    "RadialVelocity",
-    "SpecificAngularMomnetumX",
-    "SpecificAngularMomnetumY",
-    "SpecificAngularMomnetumZ",
-    "CoolingTime",
-    "DynamicalTime",
-    ("CellMassMsun", None),
-    "Dark_Matter_Density",
-    #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
-    #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
-    #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
-    ("TotalMass", None),
-    "OverDensity",
-    #("ParticleMassMsun", None),
-    ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-    #("StarParticleMassMsun", None), 
-    ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-]
-
-analysis_field_list += ["%s_Fraction" % (s) for s in
-    ["HI","HII","HeI","HeII","HeIII","H2I","H2II","HM","Electron",
-    "DI","DII","HDI","Metal"]
-]
-
 class StandardRadialAnalysis(object):
     def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
         self.pf = pf


--- a/yt/utilities/command_line.py	Mon Oct 17 17:46:55 2011 -0400
+++ b/yt/utilities/command_line.py	Mon Oct 17 18:16:17 2011 -0400
@@ -643,16 +643,18 @@
                            virial_quantities=['TotalMassMsun','RadiusMpc'])
 
         # Add profile fields.
-        hp.add_profile('CellVolume',weight_field=None,accumulation=True)
-        hp.add_profile('TotalMassMsun',weight_field=None,accumulation=True)
-        hp.add_profile('Density',weight_field=None,accumulation=False)
-        hp.add_profile('Temperature',weight_field='CellMassMsun',accumulation=False)
+        pf = hp.pf
+        all_fields = pf.h.field_list + pf.h.derived_field_list
+        for field, wv, acc in HP.standard_fields:
+            if field not in all_fields: continue
+            hp.add_profile(field, wv, acc)
         hp.make_profiles(filename="FilteredQuantities.out")
 
         # Add projection fields.
         hp.add_projection('Density',weight_field=None)
         hp.add_projection('Temperature',weight_field='Density')
-        hp.add_projection('Metallicity',weight_field='Density')
+        if "Metallicity" in all_fields:
+            hp.add_projection('Metallicity',weight_field='Density')
 
         # Make projections for all three axes using the filtered halo list and
         # save data to hdf5 files.
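
A sketch of the pattern the expanded command uses (hp and pf stand in for a HaloProfiler and its parameter file, and the helper name is made up): only fields the dataset actually provides are turned into profiles.

    def add_available_profiles(hp, pf, standard_fields):
        # standard_fields is the (field, weight_field, accumulation) list above
        all_fields = set(pf.h.field_list + pf.h.derived_field_list)
        for field, weight, accumulation in standard_fields:
            if field not in all_fields:
                continue
            hp.add_profile(field, weight, accumulation)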


http://bitbucket.org/yt_analysis/yt/changeset/b98516052e9a/
changeset:   b98516052e9a
branch:      yt
user:        MatthewTurk
date:        2011-10-18 16:55:34
summary:     Consolidating "_mpi_concatenate_array_*" operations into the _mpi_catarray
function.
affected #:  4 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Mon Oct 17 18:16:17 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 10:55:34 2011 -0400
@@ -1988,7 +1988,7 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._mpi_concatenate_array_on_root_double(my_points[0])
+        root_points = self._mpi_catarray(my_points[0])
         del my_points
         if mine == 0:
             root_points.shape = (tot_random, 3)


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Mon Oct 17 18:16:17 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 10:55:34 2011 -0400
@@ -683,8 +683,8 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._mpi_concatenate_array_double(self.densest_in_chain)
-        self.densest_in_chain_real_index = self._mpi_concatenate_array_long(self.densest_in_chain_real_index)
+        self.densest_in_chain = self._mpi_catarray(self.densest_in_chain)
+        self.densest_in_chain_real_index = self._mpi_catarray(self.densest_in_chain_real_index)
         yt_counters("global chain MPI stuff.")
         # Sort the chains by density here. This is an attempt to make it such
         # that the merging stuff in a few steps happens in the same order


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Mon Oct 17 18:16:17 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 10:55:34 2011 -0400
@@ -548,11 +548,11 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._mpi_concatenate_array_on_root_long(parent_IDs_tosend)
-        parent_masses_tosend = self._mpi_concatenate_array_on_root_double(parent_masses_tosend)
-        parent_halos_tosend = self._mpi_concatenate_array_on_root_int(parent_halos_tosend)
-        child_IDs_tosend = self._mpi_concatenate_array_on_root_long(child_IDs_tosend)
-        child_halos_tosend = self._mpi_concatenate_array_on_root_int(child_halos_tosend)
+        parent_IDs_tosend = self._mpi_catarray(parent_IDs_tosend)
+        parent_masses_tosend = self._mpi_catarray(parent_masses_tosend)
+        parent_halos_tosend = self._mpi_catarray(parent_halos_tosend)
+        child_IDs_tosend = self._mpi_catarray(child_IDs_tosend)
+        child_halos_tosend = self._mpi_catarray(child_halos_tosend)
 
         # Resort the received particles.
         Psort = parent_IDs_tosend.argsort()


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Mon Oct 17 18:16:17 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 10:55:34 2011 -0400
@@ -573,7 +573,6 @@
         ne = tasks[ne[0],ne[1],ne[2]]
         return ne
         
-        
     def _barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
@@ -735,117 +734,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_concatenate_array_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.concatenate((data, new_data))
-            size = data.size
-            del new_data
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Now we distribute the full array.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del data
-            data = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
-                data = na.concatenate((data, new_data))
-            size = data.size
-            del new_data
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
-        # Now we distribute the full array.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del data
-            data = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_int(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int32')
-                MPI.COMM_WORLD.Recv([new_data, MPI.INT], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.INT], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_minimum_array_long(self, data):
-        """
-        Specifically for parallelHOP. For the identical array on each task,
-        it merges the arrays together, taking the lower value at each index.
-        """
-        self._barrier()
-        size = data.size # They're all the same size, of course
-        if MPI.COMM_WORLD.rank == 0:
-            new_data = na.empty(size, dtype='int64')
-            for i in range(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.minimum(data, new_data)
-            del new_data
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Redistribute from root
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
     def _mpi_bcast_long_dict_unpickled(self, data):
         self._barrier()
         size = 0
@@ -1006,15 +894,6 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
-    def __mpi_recvlist(self, data):
-        # First we receive, then we make a new list.
-        data = ensure_list(data)
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = ensure_list(MPI.COMM_WORLD.recv(source=i, tag=0))
-            data += buf
-        return data
-
-    @parallel_passthrough
     def _mpi_catlist(self, data):
         self._barrier()
         if MPI.COMM_WORLD.rank == 0:
@@ -1027,16 +906,6 @@
         return data
 
     @parallel_passthrough
-    def __mpi_recvarrays(self, data):
-        # First we receive, then we make a new list.
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = _recv_array(source=i, tag=0)
-            if buf is not None:
-                if data is None: data = buf
-                else: data = na.concatenate([data, buf])
-        return data
-
-    @parallel_passthrough
     def _mpi_cat_na_array(self,data):
         self._barrier()
         comm = MPI.COMM_WORLD
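
A pickling-based sketch of what a generic _mpi_catarray has to accomplish (illustrative only; the real implementation sends typed buffers instead of pickling): every rank contributes an array and every rank receives the full concatenation, independent of dtype.

    from mpi4py import MPI
    import numpy as na

    def catarray(data, comm=MPI.COMM_WORLD):
        pieces = comm.allgather(data)    # list of per-rank arrays, on every rank
        return na.concatenate(pieces)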


http://bitbucket.org/yt_analysis/yt/changeset/fca586c7bf4b/
changeset:   fca586c7bf4b
branch:      yt
user:        MatthewTurk
date:        2011-10-18 16:58:57
summary:     Adding a dtype_names dictionary to translate dtypes to MPI types.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 10:55:34 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 10:58:57 2011 -0400
@@ -82,6 +82,23 @@
 else:
     parallel_capable = False
 
+
+# Set up translation table
+if parallel_capable:
+    dtype_names = dict(
+            float32 = MPI.FLOAT,
+            float64 = MPI.DOUBLE,
+            int32   = MPI.INT,
+            int64   = MPI.LONG
+    )
+else:
+    dtype_names = dict(
+            float32 = "MPI.FLOAT",
+            float64 = "MPI.DOUBLE",
+            int32   = "MPI.INT",
+            int64   = "MPI.LONG"
+    )
+
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then


http://bitbucket.org/yt_analysis/yt/changeset/be590d921374/
changeset:   be590d921374
branch:      yt
user:        MatthewTurk
date:        2011-10-18 17:14:52
summary:     Adding a get_mpi_type function
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 10:58:57 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:14:52 2011 -0400
@@ -99,6 +99,12 @@
             int64   = "MPI.LONG"
     )
 
+# Because the dtypes will == correctly but do not hash the same, we need this
+# function for dictionary access.
+def get_mpi_type(dtype):
+    for dt, val in dtype_names.items():
+        if dt == dtype: return val
+
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then
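
A quick illustration of why get_mpi_type() walks the table instead of indexing it (the string fallbacks stand in for the MPI type objects here): as the comment in the diff notes, a numpy dtype compares equal to its name but does not hash the same, so a plain dictionary lookup would miss.

    import numpy as na

    dtype_names = dict(float32="MPI.FLOAT", float64="MPI.DOUBLE",
                       int32="MPI.INT", int64="MPI.LONG")

    def get_mpi_type(dtype):
        for dt, val in dtype_names.items():
            if dt == dtype:
                return val

    print(get_mpi_type(na.dtype("float64")))    # -> MPI.DOUBLE
    # dtype_names[na.dtype("float64")] would raise KeyError despite the == match.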


http://bitbucket.org/yt_analysis/yt/changeset/6995f8ea8e5b/
changeset:   6995f8ea8e5b
branch:      yt
user:        MatthewTurk
date:        2011-10-18 17:27:23
summary:     Re-adding mpi_minimum_array_long for now
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:14:52 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:27:23 2011 -0400
@@ -609,6 +609,26 @@
         return None
 
     @parallel_passthrough
+    def _mpi_minimum_array_long(self, data):
+        """
+        Specifically for parallelHOP. For the identical array on each task,
+        it merges the arrays together, taking the lower value at each index.
+        """
+        self._barrier()
+        size = data.size # They're all the same size, of course
+        if MPI.COMM_WORLD.rank == 0:
+            new_data = na.empty(size, dtype='int64')
+            for i in range(1, MPI.COMM_WORLD.size):
+                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
+                data = na.minimum(data, new_data)
+            del new_data
+        else:
+            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
+        # Redistribute from root
+        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
+        return data
+
+    @parallel_passthrough
     def _mpi_catrgb(self, data):
         self._barrier()
         data, final = data


http://bitbucket.org/yt_analysis/yt/changeset/e5c43c073f47/
changeset:   e5c43c073f47
branch:      yt
user:        brittonsmith
date:        2011-10-18 17:30:23
summary:     Replaced mpi_Irecv_double and mpi_Irecv_long with mpi_nonblocking_recv.
affected #:  3 files (-1 bytes)

--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:14:52 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:30:23 2011 -0400
@@ -363,7 +363,7 @@
         for task in xrange(self.size):
             if task == self.mine: continue
             self.recv_done[task] = na.zeros(1, dtype='int64')
-            self.done_hooks.append(self._mpi_Irecv_long(self.recv_done[task], \
+            self.done_hooks.append(self._mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
     def _send_done_to_root(self):
@@ -418,11 +418,11 @@
         self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         self.recv_gen_array = na.zeros(self.size, dtype='int64')
-        self.recv_hooks.append(self._mpi_Irecv_double(self.recv_points, \
+        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
-        self.recv_hooks.append(self._mpi_Irecv_double(self.recv_fields_vals, \
+        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_fields_vals, \
             (self.mine-1)%self.size, tag=20))
-        self.recv_hooks.append(self._mpi_Irecv_long(self.recv_gen_array, \
+        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_gen_array, \
             (self.mine-1)%self.size, tag=40))
 
     def _send_arrays(self):


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:14:52 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:30:23 2011 -0400
@@ -91,11 +91,13 @@
                         size, p)
             proc_hooks[len(drecv_buffers)] = p
             drecv_buffers.append(self._create_buffer(requests[p]))
-            drecv_hooks.append(self._mpi_Irecv_double(drecv_buffers[-1], p, 1))
+            # does this work without specifying the type? (was double)
+            drecv_hooks.append(self._mpi_nonblocking_recv(drecv_buffers[-1], p, 1))
             recv_buffers.append(na.zeros(size, dtype='int64'))
             # Our index list goes on 0, our buffer goes on 1.  We know how big
             # the index list will be, now.
-            recv_hooks.append(self._mpi_Irecv_long(recv_buffers[-1], p, 0))
+            # does this work without specifying the type? (was long)
+            recv_hooks.append(self._mpi_nonblocking_recv(recv_buffers[-1], p, 0))
         # Send our index lists into the waiting buffers
         mylog.debug("Sending index lists")
         for p, ind_list in requests.items():


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:14:52 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:30:23 2011 -0400
@@ -1050,13 +1050,11 @@
     # Non-blocking stuff.
     ###
 
-    def _mpi_Irecv_long(self, data, source, tag=0):
+    def _mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.LONG], source, tag)
-
-    def _mpi_Irecv_double(self, data, source, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.DOUBLE], source, tag)
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)
 
     def _mpi_Isend_long(self, data, dest, tag=0):
         if not self._distributed: return -1


http://bitbucket.org/yt_analysis/yt/changeset/62fd8946f118/
changeset:   62fd8946f118
branch:      yt
user:        brittonsmith
date:        2011-10-18 17:30:34
summary:     Merged.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:30:23 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:30:34 2011 -0400
@@ -609,6 +609,26 @@
         return None
 
     @parallel_passthrough
+    def _mpi_minimum_array_long(self, data):
+        """
+        Specifically for parallelHOP. For the identical array on each task,
+        it merges the arrays together, taking the lower value at each index.
+        """
+        self._barrier()
+        size = data.size # They're all the same size, of course
+        if MPI.COMM_WORLD.rank == 0:
+            new_data = na.empty(size, dtype='int64')
+            for i in range(1, MPI.COMM_WORLD.size):
+                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
+                data = na.minimum(data, new_data)
+            del new_data
+        else:
+            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
+        # Redistribute from root
+        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
+        return data
+
+    @parallel_passthrough
     def _mpi_catrgb(self, data):
         self._barrier()
         data, final = data


http://bitbucket.org/yt_analysis/yt/changeset/cca33d7f9a54/
changeset:   cca33d7f9a54
branch:      yt
user:        brittonsmith
date:        2011-10-18 17:33:23
summary:     Replaced more Irecv calls.
affected #:  1 file (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:30:34 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:33:23 2011 -0400
@@ -284,9 +284,9 @@
         yt_counters("MPI stuff.")
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_double(recv_points[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_double(recv_mass[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
         # Let's wait here to be absolutely sure that all the receive buffers
         # have been created before any sending happens!
         self._barrier()
@@ -774,8 +774,8 @@
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(temp_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_long(temp_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure all the receive buffers are set before continuing.
         self._barrier()
         # Send padded particles to our neighbors.
@@ -943,8 +943,8 @@
         # Set up the receiving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_long(recv_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self._mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure the recv buffers are set before continuing.
         self._barrier()
         # Now we send them.


http://bitbucket.org/yt_analysis/yt/changeset/a294a46af717/
changeset:   a294a46af717
branch:      yt
user:        MatthewTurk
date:        2011-10-18 17:46:05
summary:     Removing unused functions and commented code.
affected #:  2 files (-1 bytes)

--- a/tests/runall.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/tests/runall.py	Tue Oct 18 11:46:05 2011 -0400
@@ -75,8 +75,7 @@
                       default=my_hash,
                       help = "The name we'll call this set of tests")
     parser.add_option("", "--parallel", dest="parallel",
-                      default=False,
-                      help = "Run in parallel?")
+                      default=False, help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         tests_to_run = []


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:46:05 2011 -0400
@@ -685,121 +685,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_joindict_unpickled_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.DOUBLE], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='float64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.DOUBLE], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.DOUBLE], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys, root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict_unpickled_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.LONG], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='int64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.LONG], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys,root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_bcast_long_dict_unpickled(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            size = len(data)
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        root_keys = na.empty(size, dtype='int64')
-        root_values = na.empty(size, dtype='int64')
-        if MPI.COMM_WORLD.rank == 0:
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            data = {}
-            for i,key in enumerate(root_keys):
-                data[key] = root_values[i]
-        return data
-
-    @parallel_passthrough
     def _mpi_maxdict(self, data):
         """
         For each key in data, find the maximum value across all tasks, and
@@ -869,61 +754,15 @@
                 top_keys = na.concatenate([top_keys, recv_top_keys])
                 bot_keys = na.concatenate([bot_keys, recv_bot_keys])
                 vals = na.concatenate([vals, recv_vals])
-#                 for j, top_key in enumerate(top_keys):
-#                     if j%1000 == 0: mylog.info(j)
-#                     # Make sure there's an entry for top_key in data
-#                     try:
-#                         test = data[top_key]
-#                     except KeyError:
-#                         data[top_key] = {}
-#                     try:
-#                         old_value = data[top_key][bot_keys[j]]
-#                     except KeyError:
-#                         # This guarantees the new value gets added.
-#                         old_value = None
-#                     if old_value < vals[j]:
-#                         data[top_key][bot_keys[j]] = vals[j]
         else:
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
             size = top_keys.size
             MPI.COMM_WORLD.send(size, dest=0, tag=0)
             MPI.COMM_WORLD.Send([top_keys, MPI.LONG], dest=0, tag=0)
             MPI.COMM_WORLD.Send([bot_keys, MPI.LONG], dest=0, tag=0)
             MPI.COMM_WORLD.Send([vals, MPI.DOUBLE], dest=0, tag=0)
-        # Getting ghetto here, we're going to decompose the dict into arrays,
-        # send that, and then reconstruct it. When data is too big the pickling
-        # of the dict fails.
+        # We're going to decompose the dict into arrays, send that, and then
+        # reconstruct it. When data is too big the pickling of the dict fails.
         if MPI.COMM_WORLD.rank == 0:
-#             data = defaultdict(dict)
-#             for i,top_key in enumerate(top_keys):
-#                 try:
-#                     old = data[top_key][bot_keys[i]]
-#                 except KeyError:
-#                     old = None
-#                 if old < vals[i]:
-#                     data[top_key][bot_keys[i]] = vals[i]
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             del data
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
             size = top_keys.size
         # Broadcast them using array methods
         size = MPI.COMM_WORLD.bcast(size, root=0)


http://bitbucket.org/yt_analysis/yt/changeset/4663bcc0d748/
changeset:   4663bcc0d748
branch:      yt
user:        samskillman
date:        2011-10-18 17:37:02
summary:     Adding a catch-all allsum; next, the old functions will be removed while preserving functionality.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:37:02 2011 -0400
@@ -1026,18 +1026,22 @@
         return data
 
     @parallel_passthrough
-    def _mpi_allsum(self, data):
-        #self._barrier()
-        # We use old-school pickling here on the assumption the arrays are
-        # relatively small ( < 1e7 elements )
+    def _mpi_allsum(self, data, dtype=None):
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
-            tr = na.zeros_like(data)
-            if not data.flags.c_contiguous: data = data.copy()
-            MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)
-            return tr
+            if dtype is None:
+                dtype = data.dtype
+            if dtype != data.dtype:
+                data = data.astype(dtype)
+            temp = data.copy()
+            MPI.COMM_WORLD.Allreduce([temp,dtype_names[dtype]], 
+                                     [data,dtype_names[dtype]], op=MPI.SUM)
+            return data
         else:
+            # We use old-school pickling here on the assumption the arrays are
+            # relatively small ( < 1e7 elements )
             return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
 
+
     @parallel_passthrough
     def _mpi_Allsum_double(self, data):
         self._barrier()


http://bitbucket.org/yt_analysis/yt/changeset/970380b7a595/
changeset:   970380b7a595
branch:      yt
user:        brittonsmith
date:        2011-10-18 17:43:14
summary:     Replaced mpi_Isend_long and mpi_Isend_double with mpi_nonblocking_send.
affected #:  4 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:43:14 2011 -0400
@@ -292,9 +292,9 @@
         self._barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_mass[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
         self._mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
@@ -780,8 +780,8 @@
         self._barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(self.uphill_chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()
@@ -949,8 +949,8 @@
         self._barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:43:14 2011 -0400
@@ -376,7 +376,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_Isend_long(self.send_done, \
+            self.done_hooks.append(self._mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -429,11 +429,11 @@
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_Isend_double(self.points,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_Isend_double(self.fields_vals,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_Isend_long(self.gen_array, \
+        self.send_hooks.append(self._mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:43:14 2011 -0400
@@ -104,7 +104,7 @@
             if p == m: continue
             if len(ind_list) == 0: continue
             # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
+            send_hooks.append(self._mpi_nonblocking_send(ind_list, p, 0))
         # Now we post receives for all of the data buffers.
         mylog.debug("Sending data")
         for i in self._mpi_Request_Waititer(recv_hooks):
@@ -115,8 +115,7 @@
             ind_list = recv_buffers[i]
             dsend_buffers.append(self._create_buffer(ind_list))
             self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_Isend_double(
-                dsend_buffers[-1], p, 1))
+            dsend_hooks.append(self._mpi_nonblocking_send(dsend_buffers[-1], p, 1))
         mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
         for i in self._mpi_Request_Waititer(drecv_hooks):
             mylog.debug("Unpacking from %s", proc_hooks[i])


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:33:23 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:43:14 2011 -0400
@@ -1076,6 +1076,12 @@
         mpi_type = get_mpi_type(dtype)
         return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)
 
+    def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+        if not self._distributed: return -1
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)
+
     def _mpi_Isend_long(self, data, dest, tag=0):
         if not self._distributed: return -1
         return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
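
A condensed sketch of the generic non-blocking pair these changesets introduce (the dtype table is collapsed to a two-way branch for brevity; the real code goes through get_mpi_type): the MPI datatype is inferred from the numpy buffer unless overridden, so one helper replaces each *_long/*_double pair.

    from mpi4py import MPI
    import numpy as na

    def nonblocking_recv(data, source, tag=0, dtype=None):
        if dtype is None:
            dtype = data.dtype
        mpi_type = MPI.DOUBLE if dtype == "float64" else MPI.LONG
        return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)

    def nonblocking_send(data, dest, tag=0, dtype=None):
        if dtype is None:
            dtype = data.dtype
        mpi_type = MPI.DOUBLE if dtype == "float64" else MPI.LONG
        return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)

    # e.g. a matching pair between two ranks (buffer shapes must agree):
    # req = nonblocking_recv(na.empty(10, dtype="int64"), source=1, tag=0)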


http://bitbucket.org/yt_analysis/yt/changeset/c34219c7bcd9/
changeset:   c34219c7bcd9
branch:      yt
user:        brittonsmith
date:        2011-10-18 17:43:23
summary:     Merged.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:43:14 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:43:23 2011 -0400
@@ -1026,18 +1026,22 @@
         return data
 
     @parallel_passthrough
-    def _mpi_allsum(self, data):
-        #self._barrier()
-        # We use old-school pickling here on the assumption the arrays are
-        # relatively small ( < 1e7 elements )
+    def _mpi_allsum(self, data, dtype=None):
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
-            tr = na.zeros_like(data)
-            if not data.flags.c_contiguous: data = data.copy()
-            MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)
-            return tr
+            if dtype is None:
+                dtype = data.dtype
+            if dtype != data.dtype:
+                data = data.astype(dtype)
+            temp = data.copy()
+            MPI.COMM_WORLD.Allreduce([temp,dtype_names[dtype]], 
+                                     [data,dtype_names[dtype]], op=MPI.SUM)
+            return data
         else:
+            # We use old-school pickling here on the assumption the arrays are
+            # relatively small ( < 1e7 elements )
             return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
 
+
     @parallel_passthrough
     def _mpi_Allsum_double(self, data):
         self._barrier()


http://bitbucket.org/yt_analysis/yt/changeset/26228751834d/
changeset:   26228751834d
branch:      yt
user:        MatthewTurk
date:        2011-10-18 17:46:18
summary:     Merging.
affected #:  4 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:46:05 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:46:18 2011 -0400
@@ -292,9 +292,9 @@
         self._barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_mass[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
         self._mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
@@ -780,8 +780,8 @@
         self._barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(self.uphill_chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()
@@ -949,8 +949,8 @@
         self._barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:46:05 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:46:18 2011 -0400
@@ -376,7 +376,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_Isend_long(self.send_done, \
+            self.done_hooks.append(self._mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -429,11 +429,11 @@
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_Isend_double(self.points,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_Isend_double(self.fields_vals,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_Isend_long(self.gen_array, \
+        self.send_hooks.append(self._mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:46:05 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:46:18 2011 -0400
@@ -104,7 +104,7 @@
             if p == m: continue
             if len(ind_list) == 0: continue
             # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
+            send_hooks.append(self._mpi_nonblocking_send(ind_list, p, 0))
         # Now we post receives for all of the data buffers.
         mylog.debug("Sending data")
         for i in self._mpi_Request_Waititer(recv_hooks):
@@ -115,8 +115,7 @@
             ind_list = recv_buffers[i]
             dsend_buffers.append(self._create_buffer(ind_list))
             self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_Isend_double(
-                dsend_buffers[-1], p, 1))
+            dsend_hooks.append(self._mpi_nonblocking_send(dsend_buffers[-1], p, 1))
         mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
         for i in self._mpi_Request_Waititer(drecv_hooks):
             mylog.debug("Unpacking from %s", proc_hooks[i])


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:46:05 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:46:18 2011 -0400
@@ -865,18 +865,22 @@
         return data
 
     @parallel_passthrough
-    def _mpi_allsum(self, data):
-        #self._barrier()
-        # We use old-school pickling here on the assumption the arrays are
-        # relatively small ( < 1e7 elements )
+    def _mpi_allsum(self, data, dtype=None):
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
-            tr = na.zeros_like(data)
-            if not data.flags.c_contiguous: data = data.copy()
-            MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)
-            return tr
+            if dtype is None:
+                dtype = data.dtype
+            if dtype != data.dtype:
+                data = data.astype(dtype)
+            temp = data.copy()
+            MPI.COMM_WORLD.Allreduce([temp,dtype_names[dtype]], 
+                                     [data,dtype_names[dtype]], op=MPI.SUM)
+            return data
         else:
+            # We use old-school pickling here on the assumption the arrays are
+            # relatively small ( < 1e7 elements )
             return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
 
+
     @parallel_passthrough
     def _mpi_Allsum_double(self, data):
         self._barrier()
@@ -915,6 +919,12 @@
         mpi_type = get_mpi_type(dtype)
         return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)
 
+    def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+        if not self._distributed: return -1
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)
+
     def _mpi_Isend_long(self, data, dest, tag=0):
         if not self._distributed: return -1
         return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)


http://bitbucket.org/yt_analysis/yt/changeset/c62f96cf6550/
changeset:   c62f96cf6550
branch:      yt
user:        samskillman
date:        2011-10-18 17:47:05
summary:     Removing Allsum_double and Allsum_long in favor of the general allsum function.
affected #:  3 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 11:37:02 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 11:47:05 2011 -0400
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_Allsum_double(self.mass_bins)
+        self.mass_bins = self._mpi_allsum(self.mass_bins)
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1479,7 +1479,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_Allsum_double(self.bulk_vel)
+        self.bulk_vel = self._mpi_allsum(self.bulk_vel)
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1501,7 +1501,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_Allsum_double(rms_vel_temp)
+        rms_vel_temp = self._mpi_allsum(rms_vel_temp)
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:37:02 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:47:05 2011 -0400
@@ -1330,7 +1330,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_Allsum_double(max_dens_point)
+        self.max_dens_point = self._mpi_allsum(max_dens_point)
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1385,9 +1385,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_Allsum_long(size)
-        CoM_M = self._mpi_Allsum_double(CoM_M)
-        self.Tot_M = self._mpi_Allsum_double(Tot_M)
+        self.group_sizes = self._mpi_allsum(size)
+        CoM_M = self._mpi_allsum(CoM_M)
+        self.Tot_M = self._mpi_allsum(Tot_M)
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:37:02 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:47:05 2011 -0400
@@ -1033,33 +1033,14 @@
             if dtype != data.dtype:
                 data = data.astype(dtype)
             temp = data.copy()
-            MPI.COMM_WORLD.Allreduce([temp,dtype_names[dtype]], 
-                                     [data,dtype_names[dtype]], op=MPI.SUM)
+            MPI.COMM_WORLD.Allreduce([temp,get_mpi_type(dtype)], 
+                                     [data,get_mpi_type(dtype)], op=MPI.SUM)
             return data
         else:
             # We use old-school pickling here on the assumption the arrays are
             # relatively small ( < 1e7 elements )
             return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
 
-
-    @parallel_passthrough
-    def _mpi_Allsum_double(self, data):
-        self._barrier()
-        # Non-pickling float allsum of a float array, data.
-        temp = data.copy()
-        MPI.COMM_WORLD.Allreduce([temp, MPI.DOUBLE], [data, MPI.DOUBLE], op=MPI.SUM)
-        del temp
-        return data
-
-    @parallel_passthrough
-    def _mpi_Allsum_long(self, data):
-        self._barrier()
-        # Non-pickling float allsum of an int array, data.
-        temp = data.copy()
-        MPI.COMM_WORLD.Allreduce([temp, MPI.LONG], [data, MPI.LONG], op=MPI.SUM)
-        del temp
-        return data
-
     @parallel_passthrough
     def _mpi_allmax(self, data):
         self._barrier()
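
For reference, a minimal sketch of the pattern the unified _mpi_allsum relies on: look the MPI datatype up from the array's dtype and do a single buffer-based Allreduce, instead of keeping one wrapper per type (yt's version additionally falls back to a pickled allreduce for non-array data). This assumes mpi4py and numpy; the mpi_types table and the mpi_allsum name below are illustrative stand-ins for get_mpi_type and _mpi_allsum, not the actual implementations.

    from mpi4py import MPI
    import numpy as na

    # Illustrative dtype -> MPI datatype table; yt keeps a similar mapping
    # and looks entries up through get_mpi_type(dtype).
    mpi_types = {na.dtype('float64'): MPI.DOUBLE,
                 na.dtype('int64'): MPI.LONG,
                 na.dtype('float32'): MPI.FLOAT,
                 na.dtype('int32'): MPI.INT}

    def mpi_allsum(data):
        # Non-pickling allsum of a numpy array, independent of its dtype.
        temp = data.copy()
        mpi_type = mpi_types[data.dtype]
        MPI.COMM_WORLD.Allreduce([temp, mpi_type], [data, mpi_type],
                                 op=MPI.SUM)
        return data

    # On N tasks, every task ends up printing N * [0., 1., 2., 3.].
    print mpi_allsum(na.arange(4, dtype='float64'))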


http://bitbucket.org/yt_analysis/yt/changeset/fe629585f52e/
changeset:   fe629585f52e
branch:      yt
user:        samskillman
date:        2011-10-18 17:47:17
summary:     Merging.
affected #:  5 files (-1 bytes)

--- a/tests/runall.py	Tue Oct 18 11:47:05 2011 -0400
+++ b/tests/runall.py	Tue Oct 18 11:47:17 2011 -0400
@@ -75,8 +75,7 @@
                       default=my_hash,
                       help = "The name we'll call this set of tests")
     parser.add_option("", "--parallel", dest="parallel",
-                      default=False,
-                      help = "Run in parallel?")
+                      default=False, help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         tests_to_run = []


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:47:05 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 11:47:17 2011 -0400
@@ -292,9 +292,9 @@
         self._barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_mass[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self._mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
         self._mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
@@ -780,8 +780,8 @@
         self._barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(self.uphill_chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()
@@ -949,8 +949,8 @@
         self._barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(chainIDs, neighbor))
+            hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self._mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
         self._mpi_Request_Waitall(hooks)
         self.__max_memory()


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:47:05 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:47:17 2011 -0400
@@ -376,7 +376,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_Isend_long(self.send_done, \
+            self.done_hooks.append(self._mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -429,11 +429,11 @@
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_Isend_double(self.points,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_Isend_double(self.fields_vals,\
+        self.send_hooks.append(self._mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_Isend_long(self.gen_array, \
+        self.send_hooks.append(self._mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:47:05 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 11:47:17 2011 -0400
@@ -104,7 +104,7 @@
             if p == m: continue
             if len(ind_list) == 0: continue
             # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
+            send_hooks.append(self._mpi_nonblocking_send(ind_list, p, 0))
         # Now we post receives for all of the data buffers.
         mylog.debug("Sending data")
         for i in self._mpi_Request_Waititer(recv_hooks):
@@ -115,8 +115,7 @@
             ind_list = recv_buffers[i]
             dsend_buffers.append(self._create_buffer(ind_list))
             self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_Isend_double(
-                dsend_buffers[-1], p, 1))
+            dsend_hooks.append(self._mpi_nonblocking_send(dsend_buffers[-1], p, 1))
         mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
         for i in self._mpi_Request_Waititer(drecv_hooks):
             mylog.debug("Unpacking from %s", proc_hooks[i])


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:47:05 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:47:17 2011 -0400
@@ -685,121 +685,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_joindict_unpickled_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.DOUBLE], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='float64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.DOUBLE], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.DOUBLE], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys, root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict_unpickled_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.LONG], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='int64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.LONG], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys,root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_bcast_long_dict_unpickled(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            size = len(data)
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        root_keys = na.empty(size, dtype='int64')
-        root_values = na.empty(size, dtype='int64')
-        if MPI.COMM_WORLD.rank == 0:
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            data = {}
-            for i,key in enumerate(root_keys):
-                data[key] = root_values[i]
-        return data
-
-    @parallel_passthrough
     def _mpi_maxdict(self, data):
         """
         For each key in data, find the maximum value across all tasks, and
@@ -869,61 +754,15 @@
                 top_keys = na.concatenate([top_keys, recv_top_keys])
                 bot_keys = na.concatenate([bot_keys, recv_bot_keys])
                 vals = na.concatenate([vals, recv_vals])
-#                 for j, top_key in enumerate(top_keys):
-#                     if j%1000 == 0: mylog.info(j)
-#                     # Make sure there's an entry for top_key in data
-#                     try:
-#                         test = data[top_key]
-#                     except KeyError:
-#                         data[top_key] = {}
-#                     try:
-#                         old_value = data[top_key][bot_keys[j]]
-#                     except KeyError:
-#                         # This guarantees the new value gets added.
-#                         old_value = None
-#                     if old_value < vals[j]:
-#                         data[top_key][bot_keys[j]] = vals[j]
         else:
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
             size = top_keys.size
             MPI.COMM_WORLD.send(size, dest=0, tag=0)
             MPI.COMM_WORLD.Send([top_keys, MPI.LONG], dest=0, tag=0)
             MPI.COMM_WORLD.Send([bot_keys, MPI.LONG], dest=0, tag=0)
             MPI.COMM_WORLD.Send([vals, MPI.DOUBLE], dest=0, tag=0)
-        # Getting ghetto here, we're going to decompose the dict into arrays,
-        # send that, and then reconstruct it. When data is too big the pickling
-        # of the dict fails.
+        # We're going to decompose the dict into arrays, send that, and then
+        # reconstruct it. When data is too big the pickling of the dict fails.
         if MPI.COMM_WORLD.rank == 0:
-#             data = defaultdict(dict)
-#             for i,top_key in enumerate(top_keys):
-#                 try:
-#                     old = data[top_key][bot_keys[i]]
-#                 except KeyError:
-#                     old = None
-#                 if old < vals[i]:
-#                     data[top_key][bot_keys[i]] = vals[i]
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             del data
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
             size = top_keys.size
         # Broadcast them using array methods
         size = MPI.COMM_WORLD.bcast(size, root=0)
@@ -1061,6 +900,12 @@
         mpi_type = get_mpi_type(dtype)
         return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)
 
+    def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+        if not self._distributed: return -1
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)
+
     def _mpi_Isend_long(self, data, dest, tag=0):
         if not self._distributed: return -1
         return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
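
The helpers removed above and the comment kept in the dict-reduction routine rely on the same trick: flatten an {int64: float64} dict into a key array and a value array so it moves through MPI's buffer interface instead of being pickled, which fails when the dict is very large. A minimal sketch of the root-to-all half of that pattern, assuming mpi4py and numpy; the function name is illustrative, not one of yt's helpers.

    from mpi4py import MPI
    import numpy as na

    def bcast_long_double_dict(data):
        # Broadcast an {int64: float64} dict from rank 0 without pickling it:
        # flatten to a key array and a value array, Bcast those, then rebuild.
        comm = MPI.COMM_WORLD
        size = comm.bcast(len(data) if comm.rank == 0 else None, root=0)
        keys = na.empty(size, dtype='int64')
        values = na.empty(size, dtype='float64')
        if comm.rank == 0:
            for i, key in enumerate(data):
                keys[i] = key
                values[i] = data[key]
        comm.Bcast([keys, MPI.LONG], root=0)
        comm.Bcast([values, MPI.DOUBLE], root=0)
        return dict(zip(keys, values))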


http://bitbucket.org/yt_analysis/yt/changeset/a80c5d1158da/
changeset:   a80c5d1158da
branch:      yt
user:        brittonsmith
date:        2011-10-18 17:58:16
summary:     Removing Isend_long and Isend_double.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:47:17 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:58:16 2011 -0400
@@ -906,14 +906,6 @@
         mpi_type = get_mpi_type(dtype)
         return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)
 
-    def _mpi_Isend_long(self, data, dest, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
-
-    def _mpi_Isend_double(self, data, dest, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.DOUBLE], dest, tag)
-
     def _mpi_Request_Waitall(self, hooks):
         if not self._distributed: return
         MPI.Request.Waitall(hooks)
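
With the typed wrappers gone, callers pair the generic _mpi_nonblocking_send with a matching non-blocking receive and collect the returned requests ("hooks") for a single Waitall, as in the hunks above. A minimal sketch of that Isend/Irecv/Waitall pattern in bare mpi4py, shifting an array one task to the right around a ring; the buffer size and tag are arbitrary.

    from mpi4py import MPI
    import numpy as na

    comm = MPI.COMM_WORLD
    right = (comm.rank + 1) % comm.size
    left = (comm.rank - 1) % comm.size

    send_buf = na.arange(8, dtype='float64') * comm.rank
    recv_buf = na.empty(8, dtype='float64')

    # Post the receive and the send, then wait on all the handles at once,
    # mirroring the hooks.append(...) / _mpi_Request_Waitall(hooks) pattern.
    hooks = [comm.Irecv([recv_buf, MPI.DOUBLE], source=left, tag=10),
             comm.Isend([send_buf, MPI.DOUBLE], dest=right, tag=10)]
    MPI.Request.Waitall(hooks)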


http://bitbucket.org/yt_analysis/yt/changeset/87c385ac3c64/
changeset:   87c385ac3c64
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:00:05
summary:     Removing mpi_find_neighbor_3d
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 11:58:16 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:00:05 2011 -0400
@@ -82,7 +82,6 @@
 else:
     parallel_capable = False
 
-
 # Set up translation table
 if parallel_capable:
     dtype_names = dict(
@@ -561,41 +560,6 @@
         return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
             self.hierarchy.region_strict(self.center, my_LE, my_RE)
 
-    def _mpi_find_neighbor_3d(self, shift):
-        """ Given a shift array, 1x3 long, find the task ID
-        of that neighbor. For example, shift=[1,0,0] finds the neighbor
-        immediately to the right in the positive x direction. Each task
-        has 26 neighbors, of which some may be itself depending on the number
-        and arrangement of tasks.
-        """
-        if not self._distributed: return 0
-        shift = na.array(shift)
-        cc = na.array(MPI.Compute_dims(MPI.COMM_WORLD.size, 3))
-        mi = MPI.COMM_WORLD.rank
-        si = MPI.COMM_WORLD.size
-        # store some facts about myself
-        mi_cx,mi_cy,mi_cz = na.unravel_index(mi,cc)
-        mi_ar = na.array([mi_cx,mi_cy,mi_cz])
-        # these are identical on all tasks
-        # should these be calculated once and stored?
-        #dLE = na.empty((si,3), dtype='float64') # positions not needed yet...
-        #dRE = na.empty((si,3), dtype='float64')
-        tasks = na.empty((cc[0],cc[1],cc[2]), dtype='int64')
-        
-        for i in range(si):
-            cx,cy,cz = na.unravel_index(i,cc)
-            tasks[cx,cy,cz] = i
-            #x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-            #y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-            #z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-            #dLE[i, :] = na.array([x[0], y[0], z[0]], dtype='float64')
-            #dRE[i, :] = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # find the neighbor
-        ne = (mi_ar + shift) % cc
-        ne = tasks[ne[0],ne[1],ne[2]]
-        return ne
-        
     def _barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
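
The removed _mpi_find_neighbor_3d captured a small, self-contained technique: lay the tasks out on a periodic 3-d grid with MPI.Compute_dims, convert a rank to grid coordinates with unravel_index, and wrap the shifted coordinates to get the neighbor. A minimal sketch of that lookup, with numpy's ravel_multi_index standing in for the explicit tasks table the original built.

    from mpi4py import MPI
    import numpy as na

    def find_neighbor_3d(shift):
        # Task ID of the neighbor reached by a [dx, dy, dz] shift on the
        # periodic 3-d task grid laid out by Compute_dims.
        cc = na.array(MPI.Compute_dims(MPI.COMM_WORLD.size, 3))
        my_coords = na.array(na.unravel_index(MPI.COMM_WORLD.rank, cc))
        neighbor_coords = (my_coords + na.array(shift)) % cc
        return int(na.ravel_multi_index(neighbor_coords, cc))

    # e.g. find_neighbor_3d([1, 0, 0]) is the task one step along +x.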


http://bitbucket.org/yt_analysis/yt/changeset/054c08ceac00/
changeset:   054c08ceac00
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:08:05
summary:     Removing an unused function and the software_sampler file.
affected #:  3 files (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:08:05 2011 -0400
@@ -752,19 +752,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_cat_na_array(self,data):
-        self._barrier()
-        comm = MPI.COMM_WORLD
-        if comm.rank == 0:
-            for i in range(1,comm.size):
-                buf = comm.recv(source=i, tag=0)
-                data = na.concatenate([data,buf])
-        else:
-            comm.send(data, 0, tag = 0)
-        data = comm.bcast(data, root=0)
-        return data
-
-    @parallel_passthrough
     def _mpi_catarray(self, data):
         if data is None:
             ncols = -1


--- a/yt/visualization/volume_rendering/api.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/visualization/volume_rendering/api.py	Tue Oct 18 12:08:05 2011 -0400
@@ -40,6 +40,5 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
-from software_sampler import VolumeRendering
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection


--- a/yt/visualization/volume_rendering/software_sampler.py	Tue Oct 18 12:00:05 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-"""
-Import the components of the volume rendering extension
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import h5py
-import numpy as na
-
-from yt.funcs import *
-
-from yt.data_objects.data_containers import data_object_registry
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-from yt.visualization.volume_rendering.grid_partitioner import \
-    HomogenizedBrickCollection
-
-# We're going to register this class, but it does not directly inherit from
-# AMRData.
-class VolumeRendering(ParallelAnalysisInterface):
-    bricks = None
-    def __init__(self, normal_vector, width, center,
-                 resolution, transfer_function,
-                 fields = None, whole_box = False,
-                 sub_samples = 5, north_vector = None,
-                 pf = None):
-        # Now we replicate some of the 'cutting plane' logic
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        self.resolution = resolution
-        self.sub_samples = sub_samples
-        if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
-        self.width = width
-        self.center = center
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.transfer_function = transfer_function
-
-        # Now we set up our  various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.unit_vectors = [north_vector, east_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = center - 0.5*width[0]*self.unit_vectors[0] \
-                             - 0.5*width[1]*self.unit_vectors[1] \
-                             - 0.5*width[2]*self.unit_vectors[2]
-        self.back_center = center - 0.5*width[0]*self.unit_vectors[2]
-        self.front_center = center + 0.5*width[0]*self.unit_vectors[2]
-
-        self._initialize_source()
-        self._construct_vector_array()
-
-    def _initialize_source(self):
-        check, source, rf = self._partition_hierarchy_2d_inclined(
-                self.unit_vectors, self.origin, self.width, self.box_vectors)
-        if check:
-            self._base_source = self.pf.h.inclined_box(
-                self.origin, self.box_vectors)
-        else:
-            # To avoid doubling-up
-            self._base_source = source
-        self.source = source
-        self.res_fac = rf
-        # Note that if we want to do this in parallel, with 3D domain decomp
-        # for the grid/bricks, we can supply self._base_source here.  But,
-        # _distributed can't be overridden in that case.
-        self._brick_collection = HomogenizedBrickCollection(self.source)
-
-    def ray_cast(self, finalize=True):
-        if self.bricks is None: self.partition_grids()
-        # Now we order our bricks
-        total_cells, LE, RE = 0, [], []
-        for b in self.bricks:
-            LE.append(b.LeftEdge)
-            RE.append(b.RightEdge)
-            total_cells += na.prod(b.my_data[0].shape)
-        LE = na.array(LE) - self.back_center
-        RE = na.array(RE) - self.back_center
-        LE = na.sum(LE * self.unit_vectors[2], axis=1)
-        RE = na.sum(RE * self.unit_vectors[2], axis=1)
-        dist = na.minimum(LE, RE)
-        ind = na.argsort(dist)
-        pbar = get_pbar("Ray casting ", total_cells)
-        total_cells = 0
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        for i, b in enumerate(self.bricks[ind]):
-            pos = b.cast_plane(tfp, self.vector_plane)
-            total_cells += na.prod(b.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        if finalize: self._finalize()
-
-    def _finalize(self):
-        #im = self._mpi_catdict(dict(image=self.image)).pop('image')
-        im, f = self._mpi_catrgb((self.image, self.resolution))
-        self.image = im
-
-    def dump_image(self, prefix):
-        fn = "%s.h5" % (self._get_filename(prefix))
-        mylog.info("Saving to %s", fn)
-        f = h5py.File(fn, "w")
-        f.create_dataset("/image", data=self.image)
-
-    def load_bricks(self, fn):
-        self.bricks = import_partitioned_grids(fn)
-
-    def save_bricks(self, fn):
-        # This will need to be modified for parallel
-        export_partitioned_grids(self.bricks, fn)
-
-    def save_image(self, prefix = None, norm = 1.0):
-        if norm is not None:
-            mi, ma = self.image.min(), norm*self.image.max()
-            print "Normalizing with ", mi, ma
-            image = (na.clip(self.image, mi, ma) - mi)/(ma - mi)
-        else:
-            image = self.image
-        if prefix is None: prefix = "%s_volume_rendering" % (self.pf)
-        plot_rgb(image, prefix)
-
-    def partition_grids(self):
-        log_field = []
-        for field in self.fields:
-            log_field.append(field in self.pf.field_info and 
-                             self.pf.field_info[field].take_log)
-        self._brick_collection._partition_local_grids(self.fields, log_field)
-        # UNCOMMENT FOR PARALLELISM
-        #self._brick_collection._collect_bricks(self.source)
-        self.bricks = self._brick_collection.bricks
-
-    def _construct_vector_array(self):
-        rx = self.resolution[0] * self.res_fac[0]
-        ry = self.resolution[1] * self.res_fac[1]
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        self.image = na.zeros((rx,ry,3), dtype='float64', order='C')
-        # We might have a different width and back_center
-        bl = self.source.box_lengths
-        px = na.linspace(-bl[0]/2.0, bl[0]/2.0, rx)[:,None]
-        py = na.linspace(-bl[1]/2.0, bl[1]/2.0, ry)[None,:]
-        inv_mat = self.source._inv_mat
-        bc = self.source.origin + 0.5*self.source.box_vectors[0] \
-                                + 0.5*self.source.box_vectors[1]
-        vectors = na.zeros((rx, ry, 3),
-                            dtype='float64', order='C')
-        vectors[:,:,0] = inv_mat[0,0]*px + inv_mat[0,1]*py + bc[0]
-        vectors[:,:,1] = inv_mat[1,0]*px + inv_mat[1,1]*py + bc[1]
-        vectors[:,:,2] = inv_mat[2,0]*px + inv_mat[2,1]*py + bc[2]
-        bounds = (px.min(), px.max(), py.min(), py.max())
-        self.vector_plane = VectorPlane(vectors, self.box_vectors[2],
-                                    bc, bounds, self.image,
-                                    self.source._x_vec, self.source._y_vec)
-        self.vp_bounds = bounds
-        self.vectors = vectors
-
-data_object_registry["volume_rendering"] = VolumeRendering


http://bitbucket.org/yt_analysis/yt/changeset/ce6b206297d5/
changeset:   ce6b206297d5
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:10:59
summary:     More unused function removal.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:08:05 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:10:59 2011 -0400
@@ -319,35 +319,6 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_2d_inclined(self, unit_vectors, origin, widths,
-                                         box_vectors, resolution = (1.0, 1.0)):
-        if not self._distributed:
-            ib = self.hierarchy.inclined_box(origin, box_vectors)
-            return False, ib, resolution
-        # We presuppose that unit_vectors is already unitary.  If it's not,
-        # caveat emptor.
-        uv = na.array(unit_vectors)
-        inv_mat = na.linalg.pinv(uv)
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-        mi = MPI.COMM_WORLD.rank
-        cx, cy = na.unravel_index(mi, cc)
-        resolution = (1.0/cc[0], 1.0/cc[1])
-        # We are rotating with respect to the *origin*, not the back center,
-        # so we go from 0 .. width.
-        px = na.mgrid[0.0:1.0:(cc[0]+1)*1j][cx] * widths[0]
-        py = na.mgrid[0.0:1.0:(cc[1]+1)*1j][cy] * widths[1]
-        nxo = inv_mat[0,0]*px + inv_mat[0,1]*py + origin[0]
-        nyo = inv_mat[1,0]*px + inv_mat[1,1]*py + origin[1]
-        nzo = inv_mat[2,0]*px + inv_mat[2,1]*py + origin[2]
-        nbox_vectors = na.array(
-                       [unit_vectors[0] * widths[0]/cc[0],
-                        unit_vectors[1] * widths[1]/cc[1],
-                        unit_vectors[2] * widths[2]],
-                        dtype='float64')
-        norigin = na.array([nxo, nyo, nzo])
-        box = self.hierarchy.inclined_box(norigin, nbox_vectors)
-        return True, box, resolution
-        
     def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
         LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
@@ -463,102 +434,6 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-        
-
-    def _partition_hierarchy_3d_bisection(self, axis, bins, counts, top_bounds = None,\
-        old_group = None, old_comm = None, cut=None, old_cc=None):
-        """
-        Partition the volume into evenly weighted subvolumes using the distribution
-        in counts. The bisection happens in the MPI communicator group old_group.
-        You may need to set "MPI_COMM_MAX" and "MPI_GROUP_MAX" environment 
-        variables.
-        """
-        counts = counts.astype('int64')
-        if not self._distributed:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-            return False, LE, RE, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
-        
-        # First time through the world is the current group.
-        if old_group == None or old_comm == None:
-            old_group = MPI.COMM_WORLD.Get_group()
-            old_comm = MPI.COMM_WORLD
-        
-        # Figure out the gridding based on the deepness of cuts.
-        if old_cc is None:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        else:
-            cc = old_cc
-        cc[cut[0]] /= cut[1]
-        # Set the boundaries of the full bounding box for this group.
-        if top_bounds == None:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        else:
-            LE, RE = top_bounds
-
-        ra = old_group.Get_rank() # In this group, not WORLD, unless it's the first time.
-        
-        # First find the total number of particles in my group.
-        parts = old_comm.allreduce(int(counts.sum()), op=MPI.SUM)
-        # Now the full sum in the bins along this axis in this group.
-        full_counts = na.empty(counts.size, dtype='int64')
-        old_comm.Allreduce([counts, MPI.LONG], [full_counts, MPI.LONG], op=MPI.SUM)
-        # Find the bin that passes the cut points.
-        midpoints = [LE[axis]]
-        sum = 0
-        bin = 0
-        for step in xrange(1,cut[1]):
-            while sum < ((parts*step)/cut[1]):
-                lastsum = sum
-                sum += full_counts[bin]
-                bin += 1
-            # Bin edges
-            left_edge = bins[bin-1]
-            right_edge = bins[bin]
-            # Find a better approx of the midpoint cut line using a linear approx.
-            a = float(sum - lastsum) / (right_edge - left_edge)
-            midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
-            #midpoint = (left_edge + right_edge) / 2.
-        midpoints.append(RE[axis])
-        # Now we need to split the members of this group into chunks. 
-        # The values that go into the _ranks are the ranks of the tasks
-        # in *this* communicator group, which go zero to size - 1. They are not
-        # the same as the global ranks!
-        groups = {}
-        ranks = {}
-        old_group_size = old_group.Get_size()
-        for step in xrange(cut[1]):
-            groups[step] = na.arange(step*old_group_size/cut[1], (step+1)*old_group_size/cut[1])
-            # [ (start, stop, step), ]
-            ranks[step] = [ (groups[step][0], groups[step][-1], 1), ] 
-        
-        # Based on where we are, adjust our LE or RE, depending on axis. At the
-        # same time assign the new MPI group membership.
-        for step in xrange(cut[1]):
-            if ra in groups[step]:
-                LE[axis] = midpoints[step]
-                RE[axis] = midpoints[step+1]
-                new_group = old_group.Range_incl(ranks[step])
-                new_comm = old_comm.Create(new_group)
-        
-        if old_cc is not None:
-            old_group.Free()
-            old_comm.Free()
-        
-        new_top_bounds = (LE,RE)
-        
-        # Using the new boundaries, regrid.
-        mi = new_comm.rank
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
-        my_LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        my_RE = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # Return a new subvolume and associated stuff.
-        return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
-            self.hierarchy.region_strict(self.center, my_LE, my_RE)
 
     def _barrier(self):
         if not self._distributed: return
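
The core of the removed bisection partitioner is finding, from per-task histograms of particle counts along an axis, the bin where the global cumulative count crosses an even split. A simplified sketch of that step, without the communicator-group bookkeeping or the linear refinement of the cut position; the bins and counts below are made up for illustration.

    from mpi4py import MPI
    import numpy as na

    comm = MPI.COMM_WORLD
    bins = na.linspace(0.0, 1.0, 65)                    # 65 edges, 64 bins
    counts = na.random.randint(0, 100, 64).astype('int64')  # my local counts

    # Sum the histograms over all tasks, then locate the half-count bin.
    full_counts = na.empty_like(counts)
    comm.Allreduce([counts, MPI.LONG], [full_counts, MPI.LONG], op=MPI.SUM)
    cumulative = na.cumsum(full_counts)
    cut_bin = na.searchsorted(cumulative, full_counts.sum() // 2)
    midpoint = 0.5 * (bins[cut_bin] + bins[cut_bin + 1])
    if comm.rank == 0:
        print "Splitting the axis at", midpoint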


http://bitbucket.org/yt_analysis/yt/changeset/85aec0763886/
changeset:   85aec0763886
branch:      yt
user:        samskillman
date:        2011-10-18 18:00:57
summary:     Getting rid of a few more Allsums.
affected #:  3 files (-1 bytes)

--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 11:58:16 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:00:57 2011 -0400
@@ -599,7 +599,7 @@
             (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
 
         # Now we sum up the contributions globally.
-        self.child_mass_arr = self._mpi_Allsum_double(self.child_mass_arr)
+        self.child_mass_arr = self._mpi_allsum(self.child_mass_arr)
         
         # Turn these Msol masses into percentages of the parent.
         line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 11:58:16 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:00:57 2011 -0400
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_Allsum_long(fset.length_bin_hits[length])
+                    self._mpi_allsum(fset.length_bin_hits[length])
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


--- a/yt/data_objects/data_containers.py	Tue Oct 18 11:58:16 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 12:00:57 2011 -0400
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_Allsum_double(self[field])
+            self[field] = self._mpi_allsum(self[field])
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


http://bitbucket.org/yt_analysis/yt/changeset/cde1baae3e45/
changeset:   cde1baae3e45
branch:      yt
user:        samskillman
date:        2011-10-18 18:01:10
summary:     Merging.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:00:57 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:01:10 2011 -0400
@@ -82,7 +82,6 @@
 else:
     parallel_capable = False
 
-
 # Set up translation table
 if parallel_capable:
     dtype_names = dict(
@@ -561,41 +560,6 @@
         return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
             self.hierarchy.region_strict(self.center, my_LE, my_RE)
 
-    def _mpi_find_neighbor_3d(self, shift):
-        """ Given a shift array, 1x3 long, find the task ID
-        of that neighbor. For example, shift=[1,0,0] finds the neighbor
-        immediately to the right in the positive x direction. Each task
-        has 26 neighbors, of which some may be itself depending on the number
-        and arrangement of tasks.
-        """
-        if not self._distributed: return 0
-        shift = na.array(shift)
-        cc = na.array(MPI.Compute_dims(MPI.COMM_WORLD.size, 3))
-        mi = MPI.COMM_WORLD.rank
-        si = MPI.COMM_WORLD.size
-        # store some facts about myself
-        mi_cx,mi_cy,mi_cz = na.unravel_index(mi,cc)
-        mi_ar = na.array([mi_cx,mi_cy,mi_cz])
-        # these are identical on all tasks
-        # should these be calculated once and stored?
-        #dLE = na.empty((si,3), dtype='float64') # positions not needed yet...
-        #dRE = na.empty((si,3), dtype='float64')
-        tasks = na.empty((cc[0],cc[1],cc[2]), dtype='int64')
-        
-        for i in range(si):
-            cx,cy,cz = na.unravel_index(i,cc)
-            tasks[cx,cy,cz] = i
-            #x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-            #y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-            #z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-            #dLE[i, :] = na.array([x[0], y[0], z[0]], dtype='float64')
-            #dRE[i, :] = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # find the neighbor
-        ne = (mi_ar + shift) % cc
-        ne = tasks[ne[0],ne[1],ne[2]]
-        return ne
-        
     def _barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)


http://bitbucket.org/yt_analysis/yt/changeset/b7499df4c269/
changeset:   b7499df4c269
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:11:10
summary:     Merge
affected #:  3 files (-1 bytes)

--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:10:59 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:11:10 2011 -0400
@@ -599,7 +599,7 @@
             (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
 
         # Now we sum up the contributions globally.
-        self.child_mass_arr = self._mpi_Allsum_double(self.child_mass_arr)
+        self.child_mass_arr = self._mpi_allsum(self.child_mass_arr)
         
         # Turn these Msol masses into percentages of the parent.
         line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:10:59 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:11:10 2011 -0400
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_Allsum_long(fset.length_bin_hits[length])
+                    self._mpi_allsum(fset.length_bin_hits[length])
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


--- a/yt/data_objects/data_containers.py	Tue Oct 18 12:10:59 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 12:11:10 2011 -0400
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_Allsum_double(self[field])
+            self[field] = self._mpi_allsum(self[field])
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


http://bitbucket.org/yt_analysis/yt/changeset/a7038c49a265/
changeset:   a7038c49a265
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:18:46
summary:     Removing the parallel option from argparse, mandating usage of
(for instance) python2.7-mpi.
affected #:  1 file (-1 bytes)

--- a/tests/runall.py	Tue Oct 18 12:11:10 2011 -0400
+++ b/tests/runall.py	Tue Oct 18 12:18:46 2011 -0400
@@ -74,8 +74,6 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
-    parser.add_option("", "--parallel", dest="parallel",
-                      default=False, help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         tests_to_run = []


http://bitbucket.org/yt_analysis/yt/changeset/0313b65dd839/
changeset:   0313b65dd839
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:27:31
summary:     Removing mpi_catrgb.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:18:46 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:27:31 2011 -0400
@@ -468,30 +468,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_catrgb(self, data):
-        self._barrier()
-        data, final = data
-        if MPI.COMM_WORLD.rank == 0:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-            nsize = final[0]/cc[0], final[1]/cc[1]
-            new_image = na.zeros((final[0], final[1], 6), dtype='float64')
-            new_image[0:nsize[0],0:nsize[1],:] = data[:]
-            for i in range(1,MPI.COMM_WORLD.size):
-                cy, cx = na.unravel_index(i, cc)
-                mylog.debug("Receiving image from % into bits %s:%s, %s:%s",
-                    i, nsize[0]*cx,nsize[0]*(cx+1),
-                       nsize[1]*cy,nsize[1]*(cy+1))
-                buf = _recv_array(source=i, tag=0).reshape(
-                    (nsize[0],nsize[1],6))
-                new_image[nsize[0]*cy:nsize[0]*(cy+1),
-                          nsize[1]*cx:nsize[1]*(cx+1),:] = buf[:]
-            data = new_image
-        else:
-            _send_array(data.ravel(), dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data)
-        return (data, final)
-
-    @parallel_passthrough
     def _mpi_catdict(self, data):
         field_keys = data.keys()
         field_keys.sort()


http://bitbucket.org/yt_analysis/yt/changeset/e09c665d3cfd/
changeset:   e09c665d3cfd
branch:      yt
user:        brittonsmith
date:        2011-10-18 18:25:45
summary:     Replaced _mpi_get_size() with _par_size property and
_mpi_get_rank() with _par_rank property.
affected #:  11 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 12:25:45 2011 -0400
@@ -1855,20 +1855,20 @@
         # analyzing a subvolume.
         ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
         if ytcfg.getboolean("yt","inline") == False and \
-            resize and self._mpi_get_size() != 1 and subvolume is None:
-            random.seed(self._mpi_get_rank())
+            resize and self._par_size != 1 and subvolume is None:
+            random.seed(self._par_rank)
             cut_list = self._partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
-            if self._mpi_get_rank() == 0:
+            if self._par_rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
             self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
-            my_bounds = self.bucket_bounds[self._mpi_get_rank()]
+            my_bounds = self.bucket_bounds[self._par_rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
-        if self._mpi_get_size() == 1:
+        if self._par_size == 1:
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
@@ -1964,8 +1964,8 @@
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self._mpi_get_size())
-        n_random = int(adjust * float(random_points) / self._mpi_get_size())
+        adjust = float(local_parts) / ( float(n_parts) / self._par_size)
+        n_random = int(adjust * float(random_points) / self._par_size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 12:25:45 2011 -0400
@@ -1155,7 +1155,7 @@
         Set_list = []
         # We only want the holes that are modulo mine.
         keys = na.arange(groupID, dtype='int64')
-        size = self._mpi_get_size()
+        size = self._par_size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
         mine_groupIDs = set([]) # Records only ones modulo mine.


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:25:45 2011 -0400
@@ -168,10 +168,10 @@
         if self.sleep <= 0.:
             self.sleep = 5
         # MPI stuff
-        self.mine = self._mpi_get_rank()
+        self.mine = self._par_rank
         if self.mine is None:
             self.mine = 0
-        self.size = self._mpi_get_size()
+        self.size = self._par_size
         if self.size is None:
             self.size = 1
         # Get to work.


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:25:45 2011 -0400
@@ -107,8 +107,8 @@
         self.constant_theta = theta
         self.constant_phi = phi
         # MPI stuff.
-        self.size = self._mpi_get_size()
-        self.mine = self._mpi_get_rank()
+        self.size = self._par_size
+        self.mine = self._par_rank
         self.vol_ratio = vol_ratio
         if self.vol_ratio == -1:
             self.vol_ratio = self.size


--- a/yt/frontends/enzo/data_structures.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Tue Oct 18 12:25:45 2011 -0400
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self._mpi_get_rank() == 0 or self._mpi_get_rank() == None:
+        if self._par_rank == 0 or self._par_rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")
@@ -589,7 +589,7 @@
             self.derived_field_list = self.__class__._cached_derived_field_list
 
     def _generate_random_grids(self):
-        my_rank = self._mpi_get_rank()
+        my_rank = self._par_rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
             starter = na.random.randint(0, 20)


--- a/yt/utilities/command_line.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/utilities/command_line.py	Tue Oct 18 12:25:45 2011 -0400
@@ -1570,7 +1570,7 @@
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
             save_name += '.png'
-        if cam._mpi_get_rank() != -1:
+        if cam._par_rank != -1:
             write_bitmap(image,save_name)
         
 


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 12:25:45 2011 -0400
@@ -53,8 +53,8 @@
         owners = self._object_owners[desired_indices]
         mylog.debug("Owner list: %s", na.unique1d(owners))
         # Even if we have a million bricks, this should not take long.
-        s = self._mpi_get_size()
-        m = self._mpi_get_rank()
+        s = self._par_size
+        m = self._par_rank
         requests = dict( ( (i, []) for i in xrange(s) ) )
         for i, p in izip(desired_indices, owners):
             requests[p].append(i)


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:25:45 2011 -0400
@@ -891,11 +891,17 @@
     # End non-blocking stuff.
     ###
 
-    def _mpi_get_size(self):
+    ###
+    # Parallel rank and size properties.
+    ###
+
+    @property
+    def _par_size(self):
         if not self._distributed: return 1
         return MPI.COMM_WORLD.size
 
-    def _mpi_get_rank(self):
+    @property
+    def _par_rank(self):
         if not self._distributed: return 0
         return MPI.COMM_WORLD.rank
 


--- a/yt/visualization/streamlines.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/visualization/streamlines.py	Tue Oct 18 12:25:45 2011 -0400
@@ -124,8 +124,8 @@
             self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
-        nprocs = self._mpi_get_size()
-        my_rank = self._mpi_get_rank()
+        nprocs = self._par_size
+        my_rank = self._par_rank
         self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
 
         pbar = get_pbar("Streamlining", self.N)


--- a/yt/visualization/volume_rendering/camera.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Tue Oct 18 12:25:45 2011 -0400
@@ -356,7 +356,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank is 0 and fn is not None:
             if clip_ratio is not None:
                 write_bitmap(image, fn, clip_ratio*image.std())
             else:
@@ -623,7 +623,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 12:00:05 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 12:25:45 2011 -0400
@@ -275,7 +275,7 @@
         self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
         self.brick_parents = na.zeros( NB, dtype='int64')
         self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
+        self.brick_owners = na.ones(NB, dtype='int32') * self._par_rank
         self._object_owners = self.brick_owners
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
@@ -307,7 +307,7 @@
         bricks = self.bricks
         self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
         # Copy our bricks back in
-        self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
+        self.bricks[self.brick_owners == self._par_rank] = bricks[:]
 
     def _create_buffer(self, ind_list):
         # Note that we have vertex-centered data, so we add one before taking
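
The property form reads more naturally at the call sites (self._par_rank instead of self._mpi_get_rank()) while keeping the serial fallbacks. A minimal sketch of the pattern outside of yt, assuming mpi4py; the class name is illustrative.

    from mpi4py import MPI

    class ParallelThing(object):
        # Expose rank and size as read-only properties that fall back to
        # serial values when not actually running distributed.
        _distributed = MPI.COMM_WORLD.size > 1

        @property
        def _par_size(self):
            if not self._distributed: return 1
            return MPI.COMM_WORLD.size

        @property
        def _par_rank(self):
            if not self._distributed: return 0
            return MPI.COMM_WORLD.rank

    obj = ParallelThing()
    if obj._par_rank == 0:
        print "Running on %d task(s)" % obj._par_size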


http://bitbucket.org/yt_analysis/yt/changeset/173dd8246984/
changeset:   173dd8246984
branch:      yt
user:        brittonsmith
date:        2011-10-18 18:25:56
summary:     Merged.
affected #:  7 files (-1 bytes)

--- a/tests/runall.py	Tue Oct 18 12:25:45 2011 -0400
+++ b/tests/runall.py	Tue Oct 18 12:25:56 2011 -0400
@@ -74,8 +74,6 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
-    parser.add_option("", "--parallel", dest="parallel",
-                      default=False, help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         tests_to_run = []


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:25:45 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:25:56 2011 -0400
@@ -599,7 +599,7 @@
             (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
 
         # Now we sum up the contributions globally.
-        self.child_mass_arr = self._mpi_Allsum_double(self.child_mass_arr)
+        self.child_mass_arr = self._mpi_allsum(self.child_mass_arr)
         
         # Turn these Msol masses into percentages of the parent.
         line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:25:45 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:25:56 2011 -0400
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_Allsum_long(fset.length_bin_hits[length])
+                    self._mpi_allsum(fset.length_bin_hits[length])
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


--- a/yt/data_objects/data_containers.py	Tue Oct 18 12:25:45 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 12:25:56 2011 -0400
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_Allsum_double(self[field])
+            self[field] = self._mpi_allsum(self[field])
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:25:45 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:25:56 2011 -0400
@@ -319,35 +319,6 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_2d_inclined(self, unit_vectors, origin, widths,
-                                         box_vectors, resolution = (1.0, 1.0)):
-        if not self._distributed:
-            ib = self.hierarchy.inclined_box(origin, box_vectors)
-            return False, ib, resolution
-        # We presuppose that unit_vectors is already unitary.  If it's not,
-        # caveat emptor.
-        uv = na.array(unit_vectors)
-        inv_mat = na.linalg.pinv(uv)
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-        mi = MPI.COMM_WORLD.rank
-        cx, cy = na.unravel_index(mi, cc)
-        resolution = (1.0/cc[0], 1.0/cc[1])
-        # We are rotating with respect to the *origin*, not the back center,
-        # so we go from 0 .. width.
-        px = na.mgrid[0.0:1.0:(cc[0]+1)*1j][cx] * widths[0]
-        py = na.mgrid[0.0:1.0:(cc[1]+1)*1j][cy] * widths[1]
-        nxo = inv_mat[0,0]*px + inv_mat[0,1]*py + origin[0]
-        nyo = inv_mat[1,0]*px + inv_mat[1,1]*py + origin[1]
-        nzo = inv_mat[2,0]*px + inv_mat[2,1]*py + origin[2]
-        nbox_vectors = na.array(
-                       [unit_vectors[0] * widths[0]/cc[0],
-                        unit_vectors[1] * widths[1]/cc[1],
-                        unit_vectors[2] * widths[2]],
-                        dtype='float64')
-        norigin = na.array([nxo, nyo, nzo])
-        box = self.hierarchy.inclined_box(norigin, nbox_vectors)
-        return True, box, resolution
-        
     def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
         LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
@@ -463,102 +434,6 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-        
-
-    def _partition_hierarchy_3d_bisection(self, axis, bins, counts, top_bounds = None,\
-        old_group = None, old_comm = None, cut=None, old_cc=None):
-        """
-        Partition the volume into evenly weighted subvolumes using the distribution
-        in counts. The bisection happens in the MPI communicator group old_group.
-        You may need to set "MPI_COMM_MAX" and "MPI_GROUP_MAX" environment 
-        variables.
-        """
-        counts = counts.astype('int64')
-        if not self._distributed:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-            return False, LE, RE, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
-        
-        # First time through the world is the current group.
-        if old_group == None or old_comm == None:
-            old_group = MPI.COMM_WORLD.Get_group()
-            old_comm = MPI.COMM_WORLD
-        
-        # Figure out the gridding based on the deepness of cuts.
-        if old_cc is None:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        else:
-            cc = old_cc
-        cc[cut[0]] /= cut[1]
-        # Set the boundaries of the full bounding box for this group.
-        if top_bounds == None:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        else:
-            LE, RE = top_bounds
-
-        ra = old_group.Get_rank() # In this group, not WORLD, unless it's the first time.
-        
-        # First find the total number of particles in my group.
-        parts = old_comm.allreduce(int(counts.sum()), op=MPI.SUM)
-        # Now the full sum in the bins along this axis in this group.
-        full_counts = na.empty(counts.size, dtype='int64')
-        old_comm.Allreduce([counts, MPI.LONG], [full_counts, MPI.LONG], op=MPI.SUM)
-        # Find the bin that passes the cut points.
-        midpoints = [LE[axis]]
-        sum = 0
-        bin = 0
-        for step in xrange(1,cut[1]):
-            while sum < ((parts*step)/cut[1]):
-                lastsum = sum
-                sum += full_counts[bin]
-                bin += 1
-            # Bin edges
-            left_edge = bins[bin-1]
-            right_edge = bins[bin]
-            # Find a better approx of the midpoint cut line using a linear approx.
-            a = float(sum - lastsum) / (right_edge - left_edge)
-            midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
-            #midpoint = (left_edge + right_edge) / 2.
-        midpoints.append(RE[axis])
-        # Now we need to split the members of this group into chunks. 
-        # The values that go into the _ranks are the ranks of the tasks
-        # in *this* communicator group, which go zero to size - 1. They are not
-        # the same as the global ranks!
-        groups = {}
-        ranks = {}
-        old_group_size = old_group.Get_size()
-        for step in xrange(cut[1]):
-            groups[step] = na.arange(step*old_group_size/cut[1], (step+1)*old_group_size/cut[1])
-            # [ (start, stop, step), ]
-            ranks[step] = [ (groups[step][0], groups[step][-1], 1), ] 
-        
-        # Based on where we are, adjust our LE or RE, depending on axis. At the
-        # same time assign the new MPI group membership.
-        for step in xrange(cut[1]):
-            if ra in groups[step]:
-                LE[axis] = midpoints[step]
-                RE[axis] = midpoints[step+1]
-                new_group = old_group.Range_incl(ranks[step])
-                new_comm = old_comm.Create(new_group)
-        
-        if old_cc is not None:
-            old_group.Free()
-            old_comm.Free()
-        
-        new_top_bounds = (LE,RE)
-        
-        # Using the new boundaries, regrid.
-        mi = new_comm.rank
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
-        my_LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        my_RE = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # Return a new subvolume and associated stuff.
-        return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
-            self.hierarchy.region_strict(self.center, my_LE, my_RE)
 
     def _barrier(self):
         if not self._distributed: return
@@ -752,19 +627,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_cat_na_array(self,data):
-        self._barrier()
-        comm = MPI.COMM_WORLD
-        if comm.rank == 0:
-            for i in range(1,comm.size):
-                buf = comm.recv(source=i, tag=0)
-                data = na.concatenate([data,buf])
-        else:
-            comm.send(data, 0, tag = 0)
-        data = comm.bcast(data, root=0)
-        return data
-
-    @parallel_passthrough
     def _mpi_catarray(self, data):
         if data is None:
             ncols = -1


--- a/yt/visualization/volume_rendering/api.py	Tue Oct 18 12:25:45 2011 -0400
+++ b/yt/visualization/volume_rendering/api.py	Tue Oct 18 12:25:56 2011 -0400
@@ -40,6 +40,5 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
-from software_sampler import VolumeRendering
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection


--- a/yt/visualization/volume_rendering/software_sampler.py	Tue Oct 18 12:25:45 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-"""
-Import the components of the volume rendering extension
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import h5py
-import numpy as na
-
-from yt.funcs import *
-
-from yt.data_objects.data_containers import data_object_registry
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-from yt.visualization.volume_rendering.grid_partitioner import \
-    HomogenizedBrickCollection
-
-# We're going to register this class, but it does not directly inherit from
-# AMRData.
-class VolumeRendering(ParallelAnalysisInterface):
-    bricks = None
-    def __init__(self, normal_vector, width, center,
-                 resolution, transfer_function,
-                 fields = None, whole_box = False,
-                 sub_samples = 5, north_vector = None,
-                 pf = None):
-        # Now we replicate some of the 'cutting plane' logic
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        self.resolution = resolution
-        self.sub_samples = sub_samples
-        if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
-        self.width = width
-        self.center = center
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.transfer_function = transfer_function
-
-        # Now we set up our  various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.unit_vectors = [north_vector, east_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = center - 0.5*width[0]*self.unit_vectors[0] \
-                             - 0.5*width[1]*self.unit_vectors[1] \
-                             - 0.5*width[2]*self.unit_vectors[2]
-        self.back_center = center - 0.5*width[0]*self.unit_vectors[2]
-        self.front_center = center + 0.5*width[0]*self.unit_vectors[2]
-
-        self._initialize_source()
-        self._construct_vector_array()
-
-    def _initialize_source(self):
-        check, source, rf = self._partition_hierarchy_2d_inclined(
-                self.unit_vectors, self.origin, self.width, self.box_vectors)
-        if check:
-            self._base_source = self.pf.h.inclined_box(
-                self.origin, self.box_vectors)
-        else:
-            # To avoid doubling-up
-            self._base_source = source
-        self.source = source
-        self.res_fac = rf
-        # Note that if we want to do this in parallel, with 3D domain decomp
-        # for the grid/bricks, we can supply self._base_source here.  But,
-        # _distributed can't be overridden in that case.
-        self._brick_collection = HomogenizedBrickCollection(self.source)
-
-    def ray_cast(self, finalize=True):
-        if self.bricks is None: self.partition_grids()
-        # Now we order our bricks
-        total_cells, LE, RE = 0, [], []
-        for b in self.bricks:
-            LE.append(b.LeftEdge)
-            RE.append(b.RightEdge)
-            total_cells += na.prod(b.my_data[0].shape)
-        LE = na.array(LE) - self.back_center
-        RE = na.array(RE) - self.back_center
-        LE = na.sum(LE * self.unit_vectors[2], axis=1)
-        RE = na.sum(RE * self.unit_vectors[2], axis=1)
-        dist = na.minimum(LE, RE)
-        ind = na.argsort(dist)
-        pbar = get_pbar("Ray casting ", total_cells)
-        total_cells = 0
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        for i, b in enumerate(self.bricks[ind]):
-            pos = b.cast_plane(tfp, self.vector_plane)
-            total_cells += na.prod(b.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        if finalize: self._finalize()
-
-    def _finalize(self):
-        #im = self._mpi_catdict(dict(image=self.image)).pop('image')
-        im, f = self._mpi_catrgb((self.image, self.resolution))
-        self.image = im
-
-    def dump_image(self, prefix):
-        fn = "%s.h5" % (self._get_filename(prefix))
-        mylog.info("Saving to %s", fn)
-        f = h5py.File(fn, "w")
-        f.create_dataset("/image", data=self.image)
-
-    def load_bricks(self, fn):
-        self.bricks = import_partitioned_grids(fn)
-
-    def save_bricks(self, fn):
-        # This will need to be modified for parallel
-        export_partitioned_grids(self.bricks, fn)
-
-    def save_image(self, prefix = None, norm = 1.0):
-        if norm is not None:
-            mi, ma = self.image.min(), norm*self.image.max()
-            print "Normalizing with ", mi, ma
-            image = (na.clip(self.image, mi, ma) - mi)/(ma - mi)
-        else:
-            image = self.image
-        if prefix is None: prefix = "%s_volume_rendering" % (self.pf)
-        plot_rgb(image, prefix)
-
-    def partition_grids(self):
-        log_field = []
-        for field in self.fields:
-            log_field.append(field in self.pf.field_info and 
-                             self.pf.field_info[field].take_log)
-        self._brick_collection._partition_local_grids(self.fields, log_field)
-        # UNCOMMENT FOR PARALLELISM
-        #self._brick_collection._collect_bricks(self.source)
-        self.bricks = self._brick_collection.bricks
-
-    def _construct_vector_array(self):
-        rx = self.resolution[0] * self.res_fac[0]
-        ry = self.resolution[1] * self.res_fac[1]
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        self.image = na.zeros((rx,ry,3), dtype='float64', order='C')
-        # We might have a different width and back_center
-        bl = self.source.box_lengths
-        px = na.linspace(-bl[0]/2.0, bl[0]/2.0, rx)[:,None]
-        py = na.linspace(-bl[1]/2.0, bl[1]/2.0, ry)[None,:]
-        inv_mat = self.source._inv_mat
-        bc = self.source.origin + 0.5*self.source.box_vectors[0] \
-                                + 0.5*self.source.box_vectors[1]
-        vectors = na.zeros((rx, ry, 3),
-                            dtype='float64', order='C')
-        vectors[:,:,0] = inv_mat[0,0]*px + inv_mat[0,1]*py + bc[0]
-        vectors[:,:,1] = inv_mat[1,0]*px + inv_mat[1,1]*py + bc[1]
-        vectors[:,:,2] = inv_mat[2,0]*px + inv_mat[2,1]*py + bc[2]
-        bounds = (px.min(), px.max(), py.min(), py.max())
-        self.vector_plane = VectorPlane(vectors, self.box_vectors[2],
-                                    bc, bounds, self.image,
-                                    self.source._x_vec, self.source._y_vec)
-        self.vp_bounds = bounds
-        self.vectors = vectors
-
-data_object_registry["volume_rendering"] = VolumeRendering
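
Among the routines dropped in this merge, _partition_hierarchy_3d_bisection built a
per-subvolume communicator out of MPI groups (Get_group / Range_incl / Create).  For
reference, a minimal standalone sketch of that group/communicator pattern with
mpi4py (illustrative only, written for Python 3; it splits COMM_WORLD into two fixed
halves, where the removed code split along particle-count bisections):

    from mpi4py import MPI

    world = MPI.COMM_WORLD
    group = world.Get_group()
    half = world.size // 2

    # Ranks are given as inclusive (first, last, stride) triples.
    if world.rank < half:
        my_group = group.Range_incl([(0, half - 1, 1)])
    else:
        my_group = group.Range_incl([(half, world.size - 1, 1)])

    # Every member of a given half passes the same (disjoint) group, so each
    # half gets its own communicator.  Comm.Split is the simpler modern route;
    # Group/Create mirrors what the removed code did.
    sub_comm = world.Create(my_group)
    print("world rank %d -> sub rank %d of %d"
          % (world.rank, sub_comm.rank, sub_comm.size))

    my_group.Free()
    sub_comm.Free()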


http://bitbucket.org/yt_analysis/yt/changeset/d650557353c8/
changeset:   d650557353c8
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:27:44
summary:     Merging
affected #:  11 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 12:27:44 2011 -0400
@@ -1855,20 +1855,20 @@
         # analyzing a subvolume.
         ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
         if ytcfg.getboolean("yt","inline") == False and \
-            resize and self._mpi_get_size() != 1 and subvolume is None:
-            random.seed(self._mpi_get_rank())
+            resize and self._par_size != 1 and subvolume is None:
+            random.seed(self._par_rank)
             cut_list = self._partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
-            if self._mpi_get_rank() == 0:
+            if self._par_rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
             self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
-            my_bounds = self.bucket_bounds[self._mpi_get_rank()]
+            my_bounds = self.bucket_bounds[self._par_rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
-        if self._mpi_get_size() == 1:
+        if self._par_size == 1:
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
@@ -1964,8 +1964,8 @@
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self._mpi_get_size())
-        n_random = int(adjust * float(random_points) / self._mpi_get_size())
+        adjust = float(local_parts) / ( float(n_parts) / self._par_size)
+        n_random = int(adjust * float(random_points) / self._par_size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 12:27:44 2011 -0400
@@ -1155,7 +1155,7 @@
         Set_list = []
         # We only want the holes that are modulo mine.
         keys = na.arange(groupID, dtype='int64')
-        size = self._mpi_get_size()
+        size = self._par_size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
         mine_groupIDs = set([]) # Records only ones modulo mine.


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 12:27:44 2011 -0400
@@ -168,10 +168,10 @@
         if self.sleep <= 0.:
             self.sleep = 5
         # MPI stuff
-        self.mine = self._mpi_get_rank()
+        self.mine = self._par_rank
         if self.mine is None:
             self.mine = 0
-        self.size = self._mpi_get_size()
+        self.size = self._par_size
         if self.size is None:
             self.size = 1
         # Get to work.


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:27:44 2011 -0400
@@ -107,8 +107,8 @@
         self.constant_theta = theta
         self.constant_phi = phi
         # MPI stuff.
-        self.size = self._mpi_get_size()
-        self.mine = self._mpi_get_rank()
+        self.size = self._par_size
+        self.mine = self._par_rank
         self.vol_ratio = vol_ratio
         if self.vol_ratio == -1:
             self.vol_ratio = self.size


--- a/yt/frontends/enzo/data_structures.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Tue Oct 18 12:27:44 2011 -0400
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self._mpi_get_rank() == 0 or self._mpi_get_rank() == None:
+        if self._par_rank == 0 or self._par_rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")
@@ -589,7 +589,7 @@
             self.derived_field_list = self.__class__._cached_derived_field_list
 
     def _generate_random_grids(self):
-        my_rank = self._mpi_get_rank()
+        my_rank = self._par_rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
             starter = na.random.randint(0, 20)


--- a/yt/utilities/command_line.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/utilities/command_line.py	Tue Oct 18 12:27:44 2011 -0400
@@ -1570,7 +1570,7 @@
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
             save_name += '.png'
-        if cam._mpi_get_rank() != -1:
+        if cam._par_rank != -1:
             write_bitmap(image,save_name)
         
 


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 12:27:44 2011 -0400
@@ -53,8 +53,8 @@
         owners = self._object_owners[desired_indices]
         mylog.debug("Owner list: %s", na.unique1d(owners))
         # Even if we have a million bricks, this should not take long.
-        s = self._mpi_get_size()
-        m = self._mpi_get_rank()
+        s = self._par_size
+        m = self._par_rank
         requests = dict( ( (i, []) for i in xrange(s) ) )
         for i, p in izip(desired_indices, owners):
             requests[p].append(i)


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:27:44 2011 -0400
@@ -729,11 +729,17 @@
     # End non-blocking stuff.
     ###
 
-    def _mpi_get_size(self):
+    ###
+    # Parallel rank and size properties.
+    ###
+
+    @property
+    def _par_size(self):
         if not self._distributed: return 1
         return MPI.COMM_WORLD.size
 
-    def _mpi_get_rank(self):
+    @property
+    def _par_rank(self):
         if not self._distributed: return 0
         return MPI.COMM_WORLD.rank
 


--- a/yt/visualization/streamlines.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/visualization/streamlines.py	Tue Oct 18 12:27:44 2011 -0400
@@ -124,8 +124,8 @@
             self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
-        nprocs = self._mpi_get_size()
-        my_rank = self._mpi_get_rank()
+        nprocs = self._par_size
+        my_rank = self._par_rank
         self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
 
         pbar = get_pbar("Streamlining", self.N)


--- a/yt/visualization/volume_rendering/camera.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Tue Oct 18 12:27:44 2011 -0400
@@ -356,7 +356,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank is 0 and fn is not None:
             if clip_ratio is not None:
                 write_bitmap(image, fn, clip_ratio*image.std())
             else:
@@ -623,7 +623,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 12:27:31 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 12:27:44 2011 -0400
@@ -275,7 +275,7 @@
         self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
         self.brick_parents = na.zeros( NB, dtype='int64')
         self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
+        self.brick_owners = na.ones(NB, dtype='int32') * self._par_rank
         self._object_owners = self.brick_owners
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
@@ -307,7 +307,7 @@
         bricks = self.bricks
         self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
         # Copy our bricks back in
-        self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
+        self.bricks[self.brick_owners == self._par_rank] = bricks[:]
 
     def _create_buffer(self, ind_list):
         # Note that we have vertex-centered data, so we add one before taking
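
The _mpi_get_rank()/_mpi_get_size() calls swapped out above become the two read-only
properties added to parallel_analysis_interface.py.  In isolation the pattern looks
like the sketch below (ParallelMixin and its _distributed flag are illustrative
stand-ins for the real ParallelAnalysisInterface machinery; Python 3 with mpi4py):

    from mpi4py import MPI

    class ParallelMixin(object):
        # In yt this flag is managed elsewhere; here it just checks the task count.
        _distributed = MPI.COMM_WORLD.size > 1

        @property
        def _par_size(self):
            # Number of MPI tasks, or 1 when running serially.
            if not self._distributed:
                return 1
            return MPI.COMM_WORLD.size

        @property
        def _par_rank(self):
            # This task's rank, or 0 when running serially.
            if not self._distributed:
                return 0
            return MPI.COMM_WORLD.rank

    if __name__ == "__main__":
        obj = ParallelMixin()
        print("rank %d of %d" % (obj._par_rank, obj._par_size))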


http://bitbucket.org/yt_analysis/yt/changeset/1fd3966203ea/
changeset:   1fd3966203ea
branch:      yt
user:        MatthewTurk
date:        2011-10-18 18:59:25
summary:     Removing mpi_maxdict
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:27:44 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:59:25 2011 -0400
@@ -500,29 +500,6 @@
         return data
 
     @parallel_passthrough
-    def _mpi_maxdict(self, data):
-        """
-        For each key in data, find the maximum value across all tasks, and
-        then broadcast it back.
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                temp_data = MPI.COMM_WORLD.recv(source=i, tag=0)
-                for key in temp_data:
-                    try:
-                        old_value = data[key]
-                    except KeyError:
-                        # This guarantees the new value gets added.
-                        old_value = None
-                    if old_value < temp_data[key]:
-                        data[key] = temp_data[key]
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
     def _mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
@@ -634,7 +611,6 @@
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
-        #self._barrier()
         data = MPI.COMM_WORLD.bcast(data, root=0)
         return data
 


http://bitbucket.org/yt_analysis/yt/changeset/fa165f1d44c1/
changeset:   fa165f1d44c1
branch:      yt
user:        samskillman
date:        2011-10-18 18:51:06
summary:     Intermediate step to remove the various MPI reduction operations.  Functionality is currently preserved; all of the _mpi_all[operation] helpers will be removed next.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:27:44 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:51:06 2011 -0400
@@ -204,9 +204,9 @@
     output; otherwise, the function gets called.  Used as a decorator.
     """
     @wraps(func)
-    def passage(self, data):
+    def passage(self, data, **kwargs):
         if not self._distributed: return data
-        return func(self, data)
+        return func(self, data, **kwargs)
     return passage
 
 def parallel_blocking_call(func):
@@ -447,25 +447,25 @@
             raise RuntimeError("Fatal error. Exiting.")
         return None
 
-    @parallel_passthrough
     def _mpi_minimum_array_long(self, data):
-        """
-        Specifically for parallelHOP. For the identical array on each task,
-        it merges the arrays together, taking the lower value at each index.
-        """
-        self._barrier()
-        size = data.size # They're all the same size, of course
-        if MPI.COMM_WORLD.rank == 0:
-            new_data = na.empty(size, dtype='int64')
-            for i in range(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.minimum(data, new_data)
-            del new_data
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Redistribute from root
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
+        return self._mpi_reduce(data, op=MPI.MIN)
+#         """
+#         Specifically for parallelHOP. For the identical array on each task,
+#         it merges the arrays together, taking the lower value at each index.
+#         """
+#         self._barrier()
+#         size = data.size # They're all the same size, of course
+#         if MPI.COMM_WORLD.rank == 0:
+#             new_data = na.empty(size, dtype='int64')
+#             for i in range(1, MPI.COMM_WORLD.size):
+#                 MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
+#                 data = na.minimum(data, new_data)
+#             del new_data
+#         else:
+#             MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
+#         # Redistribute from root
+#         MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
+#         return data
 
     @parallel_passthrough
     def _mpi_catdict(self, data):
@@ -649,25 +649,25 @@
         if not self._distributed: return
         io_handler.preload(grids, fields)
 
-    @parallel_passthrough
     def _mpi_double_array_max(self,data):
-        """
-        Finds the na.maximum of a distributed array and returns the result
-        back to all. The array should be the same length on all tasks!
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            recv_data = na.empty(data.size, dtype='float64')
-            for i in xrange(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
-                data = na.maximum(data, recv_data)
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
+        return self._mpi_reduce(data, op=MPI.MAX)
+#         """
+#         Finds the na.maximum of a distributed array and returns the result
+#         back to all. The array should be the same length on all tasks!
+#         """
+#         self._barrier()
+#         if MPI.COMM_WORLD.rank == 0:
+#             recv_data = na.empty(data.size, dtype='float64')
+#             for i in xrange(1, MPI.COMM_WORLD.size):
+#                 MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
+#                 data = na.maximum(data, recv_data)
+#         else:
+#             MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
+#         MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
+#         return data
 
     @parallel_passthrough
-    def _mpi_allsum(self, data, dtype=None):
+    def _mpi_reduce(self, data, dtype=None, op=MPI.SUM):
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
             if dtype is None:
                 dtype = data.dtype
@@ -675,22 +675,21 @@
                 data = data.astype(dtype)
             temp = data.copy()
             MPI.COMM_WORLD.Allreduce([temp,get_mpi_type(dtype)], 
-                                     [data,get_mpi_type(dtype)], op=MPI.SUM)
+                                     [data,get_mpi_type(dtype)], op)
             return data
         else:
             # We use old-school pickling here on the assumption the arrays are
             # relatively small ( < 1e7 elements )
-            return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
+            return MPI.COMM_WORLD.allreduce(data, op)
 
-    @parallel_passthrough
     def _mpi_allmax(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MAX)
+        return self._mpi_reduce(data, op=MPI.MAX)
+    
+    def _mpi_allmin(self, data):
+        return self._mpi_reduce(data, op=MPI.MIN)
 
-    @parallel_passthrough
-    def _mpi_allmin(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MIN)
+    def _mpi_allsum(self, data):
+        return self._mpi_reduce(data, op=MPI.SUM)
 
     ###
     # Non-blocking stuff.
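
In isolation, the consolidation this changeset starts looks roughly like the sketch
below (illustrative names, Python 3, assuming mpi4py and numpy; not yt's actual
class): one generic all-reduce with a buffer fast path for numpy arrays and a
pickle path for everything else, plus the old per-operation helpers reduced to
one-line wrappers.

    import numpy as np
    from mpi4py import MPI

    def mpi_reduce(data, op=MPI.SUM):
        if isinstance(data, np.ndarray) and data.dtype != np.bool_:
            # Fast path: let mpi4py infer the MPI datatype from the buffer.
            recv = np.empty_like(data)
            MPI.COMM_WORLD.Allreduce(data, recv, op=op)
            return recv
        # Slow path: pickle-based reduction for scalars and small objects.
        return MPI.COMM_WORLD.allreduce(data, op=op)

    def mpi_allsum(data):
        return mpi_reduce(data, op=MPI.SUM)

    def mpi_allmax(data):
        return mpi_reduce(data, op=MPI.MAX)

    def mpi_allmin(data):
        return mpi_reduce(data, op=MPI.MIN)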


http://bitbucket.org/yt_analysis/yt/changeset/659da3ca467b/
changeset:   659da3ca467b
branch:      yt
user:        MatthewTurk
date:        2011-10-18 19:34:21
summary:     Coalescing parallel routines into par_combine_object
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:59:25 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:34:21 2011 -0400
@@ -469,35 +469,11 @@
 
     @parallel_passthrough
     def _mpi_catdict(self, data):
-        field_keys = data.keys()
-        field_keys.sort()
-        size = data[field_keys[0]].shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        for key in field_keys:
-            dd = data[key]
-            rv = _alltoallv_array(dd, arr_size, offsets, sizes)
-            data[key] = rv
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_joindict(self, data):
-        #self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        #self._barrier()
-        return data
+        self._par_combine_object(data, op = "join")
 
     @parallel_passthrough
     def _mpi_maxdict_dict(self, data):
@@ -568,46 +544,85 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
+    def _par_combine_object(self, data, op):
+        # op can be chosen from:
+        #   cat
+        #   join
+        # data is selected to be of types:
+        #   na.ndarray
+        #   dict
+        #   data field dict
+        if isinstance(data, types.DictType) and op == "join":
+            if MPI.COMM_WORLD.rank == 0:
+                for i in range(1,MPI.COMM_WORLD.size):
+                    data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        elif isinstance(data, types.DictType) and op == "cat":
+            field_keys = data.keys()
+            field_keys.sort()
+            size = data[field_keys[0]].shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            for key in field_keys:
+                dd = data[key]
+                rv = _alltoallv_array(dd, arr_size, offsets, sizes)
+                data[key] = rv
+            return data
+        elif isinstance(data, na.ndarray) and op == "cat":
+            if data is None:
+                ncols = -1
+                size = 0
+            else:
+                if len(data) == 0:
+                    ncols = -1
+                    size = 0
+                elif len(data.shape) == 1:
+                    ncols = 1
+                    size = data.shape[0]
+                else:
+                    ncols, size = data.shape
+            ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
+            if size == 0:
+                data = na.zeros((ncols,0), dtype='float64') # This only works for
+            size = data.shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            data = _alltoallv_array(data, arr_size, offsets, sizes)
+            return data
+        elif isinstance(data, types.ListType) and op == "cat":
+            if MPI.COMM_WORLD.rank == 0:
+                data = self.__mpi_recvlist(data)
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        raise NotImplementedError
+
+    @parallel_passthrough
     def _mpi_catlist(self, data):
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvlist(data)
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_catarray(self, data):
-        if data is None:
-            ncols = -1
-            size = 0
-        else:
-            if len(data) == 0:
-                ncols = -1
-                size = 0
-            elif len(data.shape) == 1:
-                ncols = 1
-                size = data.shape[0]
-            else:
-                ncols, size = data.shape
-        ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
-        if size == 0:
-            data = na.zeros((ncols,0), dtype='float64') # This only works for
-        size = data.shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        data = _alltoallv_array(data, arr_size, offsets, sizes)
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
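
The idea behind _par_combine_object is a single entry point that picks a combine
strategy from the type of the data and the requested operation.  A compact
standalone sketch of that dispatch, using plain allgather-based combines rather
than yt's Alltoallv machinery (illustrative only; the field-dict "cat" case is
omitted):

    import numpy as np
    from mpi4py import MPI

    def par_combine(data, op):
        comm = MPI.COMM_WORLD
        if isinstance(data, dict) and op == "join":
            # Merge key/value pairs from every rank (later ranks win ties).
            merged = {}
            for d in comm.allgather(data):
                merged.update(d)
            return merged
        if isinstance(data, list) and op == "cat":
            # Concatenate the per-rank lists in rank order.
            return sum(comm.allgather(data), [])
        if isinstance(data, np.ndarray) and op == "cat":
            # Concatenate along the last axis, as the yt version does.
            return np.concatenate(comm.allgather(data), axis=-1)
        raise NotImplementedError((type(data), op))

    if __name__ == "__main__":
        rank = MPI.COMM_WORLD.rank
        print(par_combine({rank: rank ** 2}, "join"),
              par_combine([rank], "cat"),
              par_combine(np.array([rank, rank]), "cat"))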


http://bitbucket.org/yt_analysis/yt/changeset/0c7905cf5c76/
changeset:   0c7905cf5c76
branch:      yt
user:        samskillman
date:        2011-10-18 19:27:48
summary:     Removing all data- and type-specific reductions in favor of a catch-all _mpi_allreduce
affected #:  7 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 13:27:48 2011 -0400
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allmax(self._max_dens[self.id][0])
+        max = self._mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allsum(value)
+        value = self._mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allsum(my_mass)
-        global_com = self._mpi_allsum(my_com)
+        global_mass = self._mpi_allreduce(my_mass, op='sum')
+        global_com = self._mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allsum(float(my_mass))
+        global_mass = self._mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allsum(bv)
+        global_bv = self._mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allsum(ss)
+        global_ss = self._mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allmax(my_max)
+        return self._mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allsum(my_size)
+        global_size = self._mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allmin(dist_min)
-        dist_max = self._mpi_allmax(dist_max)
+        dist_min = self._mpi_allreduce(dist_min, op='min')
+        dist_max = self._mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_allsum(self.mass_bins)
+        self.mass_bins = self._mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1479,7 +1479,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_allsum(self.bulk_vel)
+        self.bulk_vel = self._mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1501,7 +1501,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_allsum(rms_vel_temp)
+        rms_vel_temp = self._mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1934,10 +1934,8 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        if ytcfg.getboolean("yt","inline") == False:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
-        else:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
+        total_mass = self._mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+                                         op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
         # If we're using a subvolume, we now re-divide.
@@ -1959,7 +1957,7 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allsum(xp.size)
+        n_parts = self._mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
@@ -2112,9 +2110,9 @@
         if dm_only:
             select = self._get_dm_indices()
             total_mass = \
-                self._mpi_allsum((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'))
+                self._mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
         else:
-            total_mass = self._mpi_allsum(self._data_source["ParticleMassMsun"].sum(dtype='float64'))
+            total_mass = self._mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2194,7 +2192,7 @@
             self._partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allsum(self._data_source["particle_position_x"].size)
+            n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 13:27:48 2011 -0400
@@ -878,7 +878,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_minimum_array_long(chainID_translate_map_local)
+            self._mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -1202,7 +1202,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_minimum_array_long(lookup)
+        lookup = self._mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1330,7 +1330,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_allsum(max_dens_point)
+        self.max_dens_point = self._mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1385,9 +1385,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_allsum(size)
-        CoM_M = self._mpi_allsum(CoM_M)
-        self.Tot_M = self._mpi_allsum(Tot_M)
+        self.group_sizes = self._mpi_allreduce(size, op='sum')
+        CoM_M = self._mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self._mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1405,7 +1405,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_double_array_max(max_radius)
+        self.max_radius = self._mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 13:27:48 2011 -0400
@@ -441,8 +441,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allsum(fset.too_low)
-            fset.too_high = self._mpi_allsum(fset.too_high)
+            fset.too_low = self._mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self._mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_allsum(fset.length_bin_hits[length])
+                    self._mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


--- a/yt/data_objects/data_containers.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 13:27:48 2011 -0400
@@ -1432,8 +1432,8 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allsum(\
-                self[field]).reshape([self.dims]*2).transpose()
+            self[field] = self._mpi_allreduce(\
+                self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
         pass
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_allsum(self[field])
+            self[field] = self._mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


--- a/yt/data_objects/profiles.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/data_objects/profiles.py	Tue Oct 18 13:27:48 2011 -0400
@@ -119,10 +119,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allsum(self.__data[key])
+            self.__data[key] = self._mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allsum(self.__weight_data[key])
-        self.__used = self._mpi_allsum(self.__used)
+            self.__weight_data[key] = self._mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self._mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:27:48 2011 -0400
@@ -90,6 +90,12 @@
             int32   = MPI.INT,
             int64   = MPI.LONG
     )
+    op_names = dict(
+        sum = MPI.SUM,
+        min = MPI.MIN,
+        max = MPI.MAX
+    )
+
 else:
     dtype_names = dict(
             float32 = "MPI.FLOAT",
@@ -97,6 +103,11 @@
             int32   = "MPI.INT",
             int64   = "MPI.LONG"
     )
+    op_names = dict(
+            sum = "MPI.SUM",
+            min = "MPI.MIN",
+            max = "MPI.MAX"
+    )
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.
@@ -447,26 +458,6 @@
             raise RuntimeError("Fatal error. Exiting.")
         return None
 
-    def _mpi_minimum_array_long(self, data):
-        return self._mpi_reduce(data, op=MPI.MIN)
-#         """
-#         Specifically for parallelHOP. For the identical array on each task,
-#         it merges the arrays together, taking the lower value at each index.
-#         """
-#         self._barrier()
-#         size = data.size # They're all the same size, of course
-#         if MPI.COMM_WORLD.rank == 0:
-#             new_data = na.empty(size, dtype='int64')
-#             for i in range(1, MPI.COMM_WORLD.size):
-#                 MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-#                 data = na.minimum(data, new_data)
-#             del new_data
-#         else:
-#             MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-#         # Redistribute from root
-#         MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-#         return data
-
     @parallel_passthrough
     def _mpi_catdict(self, data):
         field_keys = data.keys()
@@ -649,25 +640,9 @@
         if not self._distributed: return
         io_handler.preload(grids, fields)
 
-    def _mpi_double_array_max(self,data):
-        return self._mpi_reduce(data, op=MPI.MAX)
-#         """
-#         Finds the na.maximum of a distributed array and returns the result
-#         back to all. The array should be the same length on all tasks!
-#         """
-#         self._barrier()
-#         if MPI.COMM_WORLD.rank == 0:
-#             recv_data = na.empty(data.size, dtype='float64')
-#             for i in xrange(1, MPI.COMM_WORLD.size):
-#                 MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
-#                 data = na.maximum(data, recv_data)
-#         else:
-#             MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
-#         MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-#         return data
-
     @parallel_passthrough
-    def _mpi_reduce(self, data, dtype=None, op=MPI.SUM):
+    def _mpi_allreduce(self, data, dtype=None, op='sum'):
+        op = op_names[op]
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
             if dtype is None:
                 dtype = data.dtype
@@ -682,14 +657,20 @@
             # relatively small ( < 1e7 elements )
             return MPI.COMM_WORLD.allreduce(data, op)
 
-    def _mpi_allmax(self, data):
-        return self._mpi_reduce(data, op=MPI.MAX)
+#     def _mpi_double_array_max(self,data):
+#         return self._mpi_allreduce(data, op='max')
+
+#     def _mpi_minimum_array_long(self, data):
+#         return self._mpi_allreduce(data, op='min')
+
+#     def _mpi_allmax(self, data):
+#         return self._mpi_allreduce(data, op='max')
     
-    def _mpi_allmin(self, data):
-        return self._mpi_reduce(data, op=MPI.MIN)
+#     def _mpi_allmin(self, data):
+#         return self._mpi_allreduce(data, op='min')
 
-    def _mpi_allsum(self, data):
-        return self._mpi_reduce(data, op=MPI.SUM)
+#     def _mpi_allsum(self, data):
+#         return self._mpi_allreduce(data, op='sum')
 
     ###
     # Non-blocking stuff.


--- a/yt/visualization/streamlines.py	Tue Oct 18 12:51:06 2011 -0400
+++ b/yt/visualization/streamlines.py	Tue Oct 18 13:27:48 2011 -0400
@@ -144,8 +144,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allsum(self.streamlines)
-        self.magnitudes = self._mpi_allsum(self.magnitudes)
+        self.streamlines = self._mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self._mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
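
This changeset and the ones that follow replace the specialized reduction helpers (_mpi_allsum, _mpi_allmax, _mpi_allmin, _mpi_double_array_max, _mpi_minimum_array_long) with a single _mpi_allreduce keyed by an op string that op_names maps onto MPI.SUM/MPI.MIN/MPI.MAX. A minimal sketch of that dispatch pattern, assuming mpi4py and numpy and not taken from the yt source:

    import numpy as np
    from mpi4py import MPI

    op_names = dict(sum=MPI.SUM, min=MPI.MIN, max=MPI.MAX)

    def allreduce(data, op='sum'):
        """Reduce data across all ranks; op is one of 'sum', 'min', 'max'."""
        mpi_op = op_names[op]
        if isinstance(data, np.ndarray):
            # Buffer-based reduction for arrays; every rank gets the result.
            result = np.empty_like(data)
            MPI.COMM_WORLD.Allreduce(data, result, op=mpi_op)
            return result
        # Scalars and small picklable objects go through the lowercase API.
        return MPI.COMM_WORLD.allreduce(data, op=mpi_op)

    # Callers then read like the replacements in the diffs below, e.g.:
    #     total_mass = allreduce(local_mass, op='sum')
    #     dist_max   = allreduce(local_max,  op='max')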


http://bitbucket.org/yt_analysis/yt/changeset/42d5cec44273/
changeset:   42d5cec44273
branch:      yt
user:        samskillman
date:        2011-10-18 19:31:37
summary:     Removing commented lines.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:27:48 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:31:37 2011 -0400
@@ -657,21 +657,6 @@
             # relatively small ( < 1e7 elements )
             return MPI.COMM_WORLD.allreduce(data, op)
 
-#     def _mpi_double_array_max(self,data):
-#         return self._mpi_allreduce(data, op='max')
-
-#     def _mpi_minimum_array_long(self, data):
-#         return self._mpi_allreduce(data, op='min')
-
-#     def _mpi_allmax(self, data):
-#         return self._mpi_allreduce(data, op='max')
-    
-#     def _mpi_allmin(self, data):
-#         return self._mpi_allreduce(data, op='min')
-
-#     def _mpi_allsum(self, data):
-#         return self._mpi_allreduce(data, op='sum')
-
     ###
     # Non-blocking stuff.
     ###


http://bitbucket.org/yt_analysis/yt/changeset/160ad5b4ed39/
changeset:   160ad5b4ed39
branch:      yt
user:        MatthewTurk
date:        2011-10-18 19:34:34
summary:     Merge
affected #:  7 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 13:34:34 2011 -0400
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allmax(self._max_dens[self.id][0])
+        max = self._mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allsum(value)
+        value = self._mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allsum(my_mass)
-        global_com = self._mpi_allsum(my_com)
+        global_mass = self._mpi_allreduce(my_mass, op='sum')
+        global_com = self._mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allsum(float(my_mass))
+        global_mass = self._mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allsum(bv)
+        global_bv = self._mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allsum(ss)
+        global_ss = self._mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allmax(my_max)
+        return self._mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allsum(my_size)
+        global_size = self._mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allmin(dist_min)
-        dist_max = self._mpi_allmax(dist_max)
+        dist_min = self._mpi_allreduce(dist_min, op='min')
+        dist_max = self._mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_allsum(self.mass_bins)
+        self.mass_bins = self._mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1479,7 +1479,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_allsum(self.bulk_vel)
+        self.bulk_vel = self._mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1501,7 +1501,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_allsum(rms_vel_temp)
+        rms_vel_temp = self._mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1934,10 +1934,8 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        if ytcfg.getboolean("yt","inline") == False:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
-        else:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
+        total_mass = self._mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+                                         op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
         # If we're using a subvolume, we now re-divide.
@@ -1959,7 +1957,7 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allsum(xp.size)
+        n_parts = self._mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
@@ -2112,9 +2110,9 @@
         if dm_only:
             select = self._get_dm_indices()
             total_mass = \
-                self._mpi_allsum((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'))
+                self._mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
         else:
-            total_mass = self._mpi_allsum(self._data_source["ParticleMassMsun"].sum(dtype='float64'))
+            total_mass = self._mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2194,7 +2192,7 @@
             self._partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allsum(self._data_source["particle_position_x"].size)
+            n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 13:34:34 2011 -0400
@@ -878,7 +878,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_minimum_array_long(chainID_translate_map_local)
+            self._mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -1202,7 +1202,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_minimum_array_long(lookup)
+        lookup = self._mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1330,7 +1330,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_allsum(max_dens_point)
+        self.max_dens_point = self._mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1385,9 +1385,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_allsum(size)
-        CoM_M = self._mpi_allsum(CoM_M)
-        self.Tot_M = self._mpi_allsum(Tot_M)
+        self.group_sizes = self._mpi_allreduce(size, op='sum')
+        CoM_M = self._mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self._mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1405,7 +1405,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_double_array_max(max_radius)
+        self.max_radius = self._mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 13:34:34 2011 -0400
@@ -441,8 +441,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allsum(fset.too_low)
-            fset.too_high = self._mpi_allsum(fset.too_high)
+            fset.too_low = self._mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self._mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_allsum(fset.length_bin_hits[length])
+                    self._mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


--- a/yt/data_objects/data_containers.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 13:34:34 2011 -0400
@@ -1432,8 +1432,8 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allsum(\
-                self[field]).reshape([self.dims]*2).transpose()
+            self[field] = self._mpi_allreduce(\
+                self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
         pass
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_allsum(self[field])
+            self[field] = self._mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


--- a/yt/data_objects/profiles.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/data_objects/profiles.py	Tue Oct 18 13:34:34 2011 -0400
@@ -119,10 +119,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allsum(self.__data[key])
+            self.__data[key] = self._mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allsum(self.__weight_data[key])
-        self.__used = self._mpi_allsum(self.__used)
+            self.__weight_data[key] = self._mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self._mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:34:34 2011 -0400
@@ -90,6 +90,12 @@
             int32   = MPI.INT,
             int64   = MPI.LONG
     )
+    op_names = dict(
+        sum = MPI.SUM,
+        min = MPI.MIN,
+        max = MPI.MAX
+    )
+
 else:
     dtype_names = dict(
             float32 = "MPI.FLOAT",
@@ -97,6 +103,11 @@
             int32   = "MPI.INT",
             int64   = "MPI.LONG"
     )
+    op_names = dict(
+            sum = "MPI.SUM",
+            min = "MPI.MIN",
+            max = "MPI.MAX"
+    )
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.
@@ -204,9 +215,9 @@
     output; otherwise, the function gets called.  Used as a decorator.
     """
     @wraps(func)
-    def passage(self, data):
+    def passage(self, data, **kwargs):
         if not self._distributed: return data
-        return func(self, data)
+        return func(self, data, **kwargs)
     return passage
 
 def parallel_blocking_call(func):
@@ -448,26 +459,6 @@
         return None
 
     @parallel_passthrough
-    def _mpi_minimum_array_long(self, data):
-        """
-        Specifically for parallelHOP. For the identical array on each task,
-        it merges the arrays together, taking the lower value at each index.
-        """
-        self._barrier()
-        size = data.size # They're all the same size, of course
-        if MPI.COMM_WORLD.rank == 0:
-            new_data = na.empty(size, dtype='int64')
-            for i in range(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.minimum(data, new_data)
-            del new_data
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Redistribute from root
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
     def _mpi_catdict(self, data):
         self._par_combine_object(data, op = "cat")
 
@@ -641,24 +632,8 @@
         io_handler.preload(grids, fields)
 
     @parallel_passthrough
-    def _mpi_double_array_max(self,data):
-        """
-        Finds the na.maximum of a distributed array and returns the result
-        back to all. The array should be the same length on all tasks!
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            recv_data = na.empty(data.size, dtype='float64')
-            for i in xrange(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
-                data = na.maximum(data, recv_data)
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_allsum(self, data, dtype=None):
+    def _mpi_allreduce(self, data, dtype=None, op='sum'):
+        op = op_names[op]
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
             if dtype is None:
                 dtype = data.dtype
@@ -666,22 +641,12 @@
                 data = data.astype(dtype)
             temp = data.copy()
             MPI.COMM_WORLD.Allreduce([temp,get_mpi_type(dtype)], 
-                                     [data,get_mpi_type(dtype)], op=MPI.SUM)
+                                     [data,get_mpi_type(dtype)], op)
             return data
         else:
             # We use old-school pickling here on the assumption the arrays are
             # relatively small ( < 1e7 elements )
-            return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
-
-    @parallel_passthrough
-    def _mpi_allmax(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MAX)
-
-    @parallel_passthrough
-    def _mpi_allmin(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MIN)
+            return MPI.COMM_WORLD.allreduce(data, op)
 
     ###
     # Non-blocking stuff.


--- a/yt/visualization/streamlines.py	Tue Oct 18 13:34:21 2011 -0400
+++ b/yt/visualization/streamlines.py	Tue Oct 18 13:34:34 2011 -0400
@@ -144,8 +144,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allsum(self.streamlines)
-        self.magnitudes = self._mpi_allsum(self.magnitudes)
+        self.streamlines = self._mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self._mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
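
Part of what makes the op keyword usable everywhere is the small change to parallel_passthrough in the merged diff above: the wrapper now accepts and forwards **kwargs, so a call like self._mpi_allreduce(data, op='sum') works both in serial runs (where the passthrough short-circuits and the keyword is simply ignored) and in parallel (where it reaches the real method). A self-contained sketch of that decorator, with a toy class standing in for the yt mixin:

    from functools import wraps

    def parallel_passthrough(func):
        """Hand data back untouched in serial runs; otherwise call func, forwarding kwargs."""
        @wraps(func)
        def passage(self, data, **kwargs):
            if not self._distributed:
                return data
            return func(self, data, **kwargs)
        return passage

    class _Demo(object):
        _distributed = False          # serial: decorated methods become no-ops

        @parallel_passthrough
        def _mpi_allreduce(self, data, op='sum'):
            raise RuntimeError("not reached in a serial run")

    assert _Demo()._mpi_allreduce([1, 2, 3], op='sum') == [1, 2, 3]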


http://bitbucket.org/yt_analysis/yt/changeset/edfdb1776f3b/
changeset:   edfdb1776f3b
branch:      yt
user:        MatthewTurk
date:        2011-10-18 19:40:36
summary:     Attempting to make datatypes and whatnot more specific.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:34:34 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:40:36 2011 -0400
@@ -460,11 +460,11 @@
 
     @parallel_passthrough
     def _mpi_catdict(self, data):
-        self._par_combine_object(data, op = "cat")
+        self._par_combine_object(data, datatype = "dict", op = "cat")
 
     @parallel_passthrough
     def _mpi_joindict(self, data):
-        self._par_combine_object(data, op = "join")
+        self._par_combine_object(data, datatype = "dict", op = "join")
 
     @parallel_passthrough
     def _mpi_maxdict_dict(self, data):
@@ -535,7 +535,7 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
-    def _par_combine_object(self, data, op):
+    def _par_combine_object(self, data, op, datatype = None):
         # op can be chosen from:
         #   cat
         #   join
@@ -543,7 +543,16 @@
         #   na.ndarray
         #   dict
         #   data field dict
-        if isinstance(data, types.DictType) and op == "join":
+        if datatype is not None:
+            pass
+        elif isinstance(data, types.DictType):
+            datatype == "dict"
+        elif isinstance(data, na.ndarray):
+            datatype == "array"
+        elif isinstance(data, types.ListType):
+            datatype == "list"
+        # Now we have our datatype, and we conduct our operation
+        if datatype == "dict" and op == "join":
             if MPI.COMM_WORLD.rank == 0:
                 for i in range(1,MPI.COMM_WORLD.size):
                     data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
@@ -551,7 +560,7 @@
                 MPI.COMM_WORLD.send(data, dest=0, tag=0)
             data = MPI.COMM_WORLD.bcast(data, root=0)
             return data
-        elif isinstance(data, types.DictType) and op == "cat":
+        elif datatype == "dict" and op == "cat":
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
@@ -569,7 +578,7 @@
                 rv = _alltoallv_array(dd, arr_size, offsets, sizes)
                 data[key] = rv
             return data
-        elif isinstance(data, na.ndarray) and op == "cat":
+        elif datatype == "array" and op == "cat":
             if data is None:
                 ncols = -1
                 size = 0
@@ -597,7 +606,7 @@
             arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
             data = _alltoallv_array(data, arr_size, offsets, sizes)
             return data
-        elif isinstance(data, types.ListType) and op == "cat":
+        elif datatype == "list" and op == "cat":
             if MPI.COMM_WORLD.rank == 0:
                 data = self.__mpi_recvlist(data)
             else:
@@ -609,11 +618,11 @@
 
     @parallel_passthrough
     def _mpi_catlist(self, data):
-        self._par_combine_object(data, op = "cat")
+        self._par_combine_object(data, datatype = "list", op = "cat")
 
     @parallel_passthrough
     def _mpi_catarray(self, data):
-        self._par_combine_object(data, op = "cat")
+        self._par_combine_object(data, datatype = "array", op = "cat")
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
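
This changeset threads an explicit datatype hint ("dict", "array", or "list") from the _mpi_cat*/_mpi_joindict wrappers into _par_combine_object, with isinstance checks as a fallback when no hint is given. In the fallback branch the hunk uses `==`, which compares and discards the result rather than assigning; assignment looks like the intent, and the sketch below (not the yt code) assumes that:

    # Sketch of the datatype inference presumably intended above; `=` is
    # assumed where the diff shows `==`.
    import numpy as np

    def infer_datatype(data, datatype=None):
        if datatype is not None:
            return datatype            # explicit hint from the caller wins
        if isinstance(data, dict):
            return "dict"
        if isinstance(data, np.ndarray):
            return "array"
        if isinstance(data, list):
            return "list"
        raise NotImplementedError(type(data))

    assert infer_datatype({}) == "dict"
    assert infer_datatype([], datatype="array") == "array"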


http://bitbucket.org/yt_analysis/yt/changeset/2e3577f4339d/
changeset:   2e3577f4339d
branch:      yt
user:        MatthewTurk
date:        2011-10-18 19:49:59
summary:     This return statement should fix the issues.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:40:36 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:49:59 2011 -0400
@@ -460,11 +460,11 @@
 
     @parallel_passthrough
     def _mpi_catdict(self, data):
-        self._par_combine_object(data, datatype = "dict", op = "cat")
+        return self._par_combine_object(data, datatype = "dict", op = "cat")
 
     @parallel_passthrough
     def _mpi_joindict(self, data):
-        self._par_combine_object(data, datatype = "dict", op = "join")
+        return self._par_combine_object(data, datatype = "dict", op = "join")
 
     @parallel_passthrough
     def _mpi_maxdict_dict(self, data):
@@ -618,11 +618,11 @@
 
     @parallel_passthrough
     def _mpi_catlist(self, data):
-        self._par_combine_object(data, datatype = "list", op = "cat")
+        return self._par_combine_object(data, datatype = "list", op = "cat")
 
     @parallel_passthrough
     def _mpi_catarray(self, data):
-        self._par_combine_object(data, datatype = "array", op = "cat")
+        return self._par_combine_object(data, datatype = "array", op = "cat")
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
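
The fix here is that the thin wrappers were delegating to _par_combine_object but dropping its result, so callers received None. A toy illustration (combine is just a stand-in for _par_combine_object):

    def combine(data):
        # Stand-in for _par_combine_object: here it just sorts.
        return sorted(data)

    def broken_wrapper(data):
        combine(data)            # result discarded; Python returns None implicitly

    def fixed_wrapper(data):
        return combine(data)     # the combined object reaches the caller

    assert broken_wrapper([3, 1]) is None
    assert fixed_wrapper([3, 1]) == [1, 3]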


http://bitbucket.org/yt_analysis/yt/changeset/dd9ffd2377d1/
changeset:   dd9ffd2377d1
branch:      yt
user:        chummels
date:        2011-10-18 19:50:22
summary:     Modified halo finder tests to compare only the number of halos detected in the various modes of halo finding.  It seems that comparing the contents of the halos does not behave reproducibly in parallel with different numbers of nodes.
affected #:  2 files (-1 bytes)

--- a/tests/halos.py	Tue Oct 18 12:11:10 2011 -0400
+++ b/tests/halos.py	Tue Oct 18 13:50:22 2011 -0400
@@ -1,11 +1,10 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCompositionHashHOP, TestHaloCompositionHashFOF, \
-    TestHaloCompositionHashPHOP 
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
 
-create_test(TestHaloCompositionHashHOP, "halo_composition_test_hash_HOP", threshold=80.0)
+create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
 
-create_test(TestHaloCompositionHashFOF, "halo_composition_test_hash_FOF", threshold=80.0)
+create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
 
-create_test(TestHaloCompositionHashPHOP, "halo_composition_test_hash_PHOP", threshold=80.0)
+create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)


--- a/yt/utilities/answer_testing/halo_tests.py	Tue Oct 18 12:11:10 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Tue Oct 18 13:50:22 2011 -0400
@@ -23,8 +23,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountHOP, "halo_count_test_HOP", threshold=80.0)
-
 # Tests the number of halos returned by the FOF halo finder on a dataset
 class TestHaloCountFOF(YTStaticOutputTest):
     link = 0.2
@@ -44,8 +42,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountFOF, "halo_count_test_FOF", threshold=80.0)
-
 # Tests the number of halos returned by the Parallel HOP halo finder on a 
 # dataset
 class TestHaloCountPHOP(YTStaticOutputTest):
@@ -64,8 +60,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountPHOP, "halo_count_test_PHOP", threshold=80.0)
-
 class TestHaloComposition(YTStaticOutputTest):
     threshold=80.0
     
@@ -87,11 +81,9 @@
                 return False
         return True
     
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
-
 # Tests the content of the halos returned by the HOP halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
-# halo.
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashHOP(YTStaticOutputTest):
     threshold=80.0
     
@@ -103,7 +95,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
@@ -118,7 +110,7 @@
 
 # Tests the content of the halos returned by the FOF halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
-# halo.
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashFOF(YTStaticOutputTest):
     link = 0.2
     padding = 0.02
@@ -132,7 +124,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
@@ -147,7 +139,7 @@
 
 # Tests the content of the halos returned by the Parallel HOP halo finder on a 
 # dataset by comparing the hash of the arrays of all the particles contained 
-# in each halo.
+# in each halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashPHOP(YTStaticOutputTest):
     threshold=80.0
     
@@ -159,7 +151,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
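
Besides switching the registered tests from composition hashes to halo counts, the diff simplifies np.array(np.concatenate((IDs))) to np.concatenate(IDs): concatenate already accepts a list of arrays and returns an ndarray, so the extra wrapping was redundant. For example:

    import numpy as np

    # A list of per-halo particle index arrays, as built in the tests above.
    per_halo_ids = [np.array([4, 7]), np.array([1]), np.array([9, 2, 5])]

    old_style = np.array(np.concatenate((per_halo_ids)))
    new_style = np.concatenate(per_halo_ids)

    assert np.array_equal(old_style, new_style)   # same result, less wrapping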


http://bitbucket.org/yt_analysis/yt/changeset/e883666c4d8e/
changeset:   e883666c4d8e
branch:      yt
user:        chummels
date:        2011-10-18 19:50:31
summary:     Merging.
affected #:  14 files (-1 bytes)

--- a/tests/runall.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/tests/runall.py	Tue Oct 18 13:50:31 2011 -0400
@@ -74,8 +74,6 @@
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
                       help = "The name we'll call this set of tests")
-    parser.add_option("", "--parallel", dest="parallel",
-                      default=False, help = "Run in parallel?")
     opts, args = parser.parse_args()
     if opts.list_tests:
         tests_to_run = []


--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 13:50:31 2011 -0400
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allmax(self._max_dens[self.id][0])
+        max = self._mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allsum(value)
+        value = self._mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allsum(my_mass)
-        global_com = self._mpi_allsum(my_com)
+        global_mass = self._mpi_allreduce(my_mass, op='sum')
+        global_com = self._mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allsum(float(my_mass))
+        global_mass = self._mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allsum(bv)
+        global_bv = self._mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allsum(ss)
+        global_ss = self._mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allmax(my_max)
+        return self._mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allsum(my_size)
+        global_size = self._mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allmin(dist_min)
-        dist_max = self._mpi_allmax(dist_max)
+        dist_min = self._mpi_allreduce(dist_min, op='min')
+        dist_max = self._mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_allsum(self.mass_bins)
+        self.mass_bins = self._mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1479,7 +1479,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_allsum(self.bulk_vel)
+        self.bulk_vel = self._mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1501,7 +1501,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_allsum(rms_vel_temp)
+        rms_vel_temp = self._mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1855,20 +1855,20 @@
         # analyzing a subvolume.
         ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
         if ytcfg.getboolean("yt","inline") == False and \
-            resize and self._mpi_get_size() != 1 and subvolume is None:
-            random.seed(self._mpi_get_rank())
+            resize and self._par_size != 1 and subvolume is None:
+            random.seed(self._par_rank)
             cut_list = self._partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
-            if self._mpi_get_rank() == 0:
+            if self._par_rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
             self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
-            my_bounds = self.bucket_bounds[self._mpi_get_rank()]
+            my_bounds = self.bucket_bounds[self._par_rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
-        if self._mpi_get_size() == 1:
+        if self._par_size == 1:
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
@@ -1934,10 +1934,8 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        if ytcfg.getboolean("yt","inline") == False:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
-        else:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
+        total_mass = self._mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+                                         op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
         # If we're using a subvolume, we now re-divide.
@@ -1959,13 +1957,13 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allsum(xp.size)
+        n_parts = self._mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self._mpi_get_size())
-        n_random = int(adjust * float(random_points) / self._mpi_get_size())
+        adjust = float(local_parts) / ( float(n_parts) / self._par_size)
+        n_random = int(adjust * float(random_points) / self._par_size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')
@@ -2112,9 +2110,9 @@
         if dm_only:
             select = self._get_dm_indices()
             total_mass = \
-                self._mpi_allsum((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'))
+                self._mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
         else:
-            total_mass = self._mpi_allsum(self._data_source["ParticleMassMsun"].sum(dtype='float64'))
+            total_mass = self._mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2194,7 +2192,7 @@
             self._partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allsum(self._data_source["particle_position_x"].size)
+            n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 13:50:31 2011 -0400
@@ -878,7 +878,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_minimum_array_long(chainID_translate_map_local)
+            self._mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -1155,7 +1155,7 @@
         Set_list = []
         # We only want the holes that are modulo mine.
         keys = na.arange(groupID, dtype='int64')
-        size = self._mpi_get_size()
+        size = self._par_size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
         mine_groupIDs = set([]) # Records only ones modulo mine.
@@ -1202,7 +1202,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_minimum_array_long(lookup)
+        lookup = self._mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1330,7 +1330,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_allsum(max_dens_point)
+        self.max_dens_point = self._mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1385,9 +1385,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_allsum(size)
-        CoM_M = self._mpi_allsum(CoM_M)
-        self.Tot_M = self._mpi_allsum(Tot_M)
+        self.group_sizes = self._mpi_allreduce(size, op='sum')
+        CoM_M = self._mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self._mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1405,7 +1405,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_double_array_max(max_radius)
+        self.max_radius = self._mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 13:50:31 2011 -0400
@@ -168,10 +168,10 @@
         if self.sleep <= 0.:
             self.sleep = 5
         # MPI stuff
-        self.mine = self._mpi_get_rank()
+        self.mine = self._par_rank
         if self.mine is None:
             self.mine = 0
-        self.size = self._mpi_get_size()
+        self.size = self._par_size
         if self.size is None:
             self.size = 1
         # Get to work.


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 13:50:31 2011 -0400
@@ -107,8 +107,8 @@
         self.constant_theta = theta
         self.constant_phi = phi
         # MPI stuff.
-        self.size = self._mpi_get_size()
-        self.mine = self._mpi_get_rank()
+        self.size = self._par_size
+        self.mine = self._par_rank
         self.vol_ratio = vol_ratio
         if self.vol_ratio == -1:
             self.vol_ratio = self.size
@@ -441,8 +441,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allsum(fset.too_low)
-            fset.too_high = self._mpi_allsum(fset.too_high)
+            fset.too_low = self._mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self._mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -452,7 +452,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_allsum(fset.length_bin_hits[length])
+                    self._mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.


--- a/yt/data_objects/data_containers.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 13:50:31 2011 -0400
@@ -1432,8 +1432,8 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allsum(\
-                self[field]).reshape([self.dims]*2).transpose()
+            self[field] = self._mpi_allreduce(\
+                self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
         pass
@@ -2221,7 +2221,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_allsum(self[field])
+            self[field] = self._mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


--- a/yt/data_objects/profiles.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/data_objects/profiles.py	Tue Oct 18 13:50:31 2011 -0400
@@ -119,10 +119,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allsum(self.__data[key])
+            self.__data[key] = self._mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allsum(self.__weight_data[key])
-        self.__used = self._mpi_allsum(self.__used)
+            self.__weight_data[key] = self._mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self._mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:


--- a/yt/frontends/enzo/data_structures.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Tue Oct 18 13:50:31 2011 -0400
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self._mpi_get_rank() == 0 or self._mpi_get_rank() == None:
+        if self._par_rank == 0 or self._par_rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")
@@ -589,7 +589,7 @@
             self.derived_field_list = self.__class__._cached_derived_field_list
 
     def _generate_random_grids(self):
-        my_rank = self._mpi_get_rank()
+        my_rank = self._par_rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
             starter = na.random.randint(0, 20)


--- a/yt/utilities/command_line.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/utilities/command_line.py	Tue Oct 18 13:50:31 2011 -0400
@@ -1570,7 +1570,7 @@
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
             save_name += '.png'
-        if cam._mpi_get_rank() != -1:
+        if cam._par_rank != -1:
             write_bitmap(image,save_name)
         
 


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 13:50:31 2011 -0400
@@ -53,8 +53,8 @@
         owners = self._object_owners[desired_indices]
         mylog.debug("Owner list: %s", na.unique1d(owners))
         # Even if we have a million bricks, this should not take long.
-        s = self._mpi_get_size()
-        m = self._mpi_get_rank()
+        s = self._par_size
+        m = self._par_rank
         requests = dict( ( (i, []) for i in xrange(s) ) )
         for i, p in izip(desired_indices, owners):
             requests[p].append(i)


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:50:31 2011 -0400
@@ -90,6 +90,12 @@
             int32   = MPI.INT,
             int64   = MPI.LONG
     )
+    op_names = dict(
+        sum = MPI.SUM,
+        min = MPI.MIN,
+        max = MPI.MAX
+    )
+
 else:
     dtype_names = dict(
             float32 = "MPI.FLOAT",
@@ -97,6 +103,11 @@
             int32   = "MPI.INT",
             int64   = "MPI.LONG"
     )
+    op_names = dict(
+            sum = "MPI.SUM",
+            min = "MPI.MIN",
+            max = "MPI.MAX"
+    )
 
 # Because the dtypes will == correctly but do not hash the same, we need this
 # function for dictionary access.
@@ -204,9 +215,9 @@
     output; otherwise, the function gets called.  Used as a decorator.
     """
     @wraps(func)
-    def passage(self, data):
+    def passage(self, data, **kwargs):
         if not self._distributed: return data
-        return func(self, data)
+        return func(self, data, **kwargs)
     return passage
 
 def parallel_blocking_call(func):
@@ -448,105 +459,14 @@
         return None
 
     @parallel_passthrough
-    def _mpi_minimum_array_long(self, data):
-        """
-        Specifically for parallelHOP. For the identical array on each task,
-        it merges the arrays together, taking the lower value at each index.
-        """
-        self._barrier()
-        size = data.size # They're all the same size, of course
-        if MPI.COMM_WORLD.rank == 0:
-            new_data = na.empty(size, dtype='int64')
-            for i in range(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.minimum(data, new_data)
-            del new_data
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Redistribute from root
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_catrgb(self, data):
-        self._barrier()
-        data, final = data
-        if MPI.COMM_WORLD.rank == 0:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-            nsize = final[0]/cc[0], final[1]/cc[1]
-            new_image = na.zeros((final[0], final[1], 6), dtype='float64')
-            new_image[0:nsize[0],0:nsize[1],:] = data[:]
-            for i in range(1,MPI.COMM_WORLD.size):
-                cy, cx = na.unravel_index(i, cc)
-                mylog.debug("Receiving image from % into bits %s:%s, %s:%s",
-                    i, nsize[0]*cx,nsize[0]*(cx+1),
-                       nsize[1]*cy,nsize[1]*(cy+1))
-                buf = _recv_array(source=i, tag=0).reshape(
-                    (nsize[0],nsize[1],6))
-                new_image[nsize[0]*cy:nsize[0]*(cy+1),
-                          nsize[1]*cx:nsize[1]*(cx+1),:] = buf[:]
-            data = new_image
-        else:
-            _send_array(data.ravel(), dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data)
-        return (data, final)
-
-    @parallel_passthrough
     def _mpi_catdict(self, data):
-        field_keys = data.keys()
-        field_keys.sort()
-        size = data[field_keys[0]].shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        for key in field_keys:
-            dd = data[key]
-            rv = _alltoallv_array(dd, arr_size, offsets, sizes)
-            data[key] = rv
-        return data
+        self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_joindict(self, data):
-        #self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        #self._barrier()
-        return data
+        self._par_combine_object(data, op = "join")
 
     @parallel_passthrough
-    def _mpi_maxdict(self, data):
-        """
-        For each key in data, find the maximum value across all tasks, and
-        then broadcast it back.
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                temp_data = MPI.COMM_WORLD.recv(source=i, tag=0)
-                for key in temp_data:
-                    try:
-                        old_value = data[key]
-                    except KeyError:
-                        # This guarantees the new value gets added.
-                        old_value = None
-                    if old_value < temp_data[key]:
-                        data[key] = temp_data[key]
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
     def _mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
@@ -615,50 +535,88 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
+    def _par_combine_object(self, data, op):
+        # op can be chosen from:
+        #   cat
+        #   join
+        # data is selected to be of types:
+        #   na.ndarray
+        #   dict
+        #   data field dict
+        if isinstance(data, types.DictType) and op == "join":
+            if MPI.COMM_WORLD.rank == 0:
+                for i in range(1,MPI.COMM_WORLD.size):
+                    data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        elif isinstance(data, types.DictType) and op == "cat":
+            field_keys = data.keys()
+            field_keys.sort()
+            size = data[field_keys[0]].shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            for key in field_keys:
+                dd = data[key]
+                rv = _alltoallv_array(dd, arr_size, offsets, sizes)
+                data[key] = rv
+            return data
+        elif isinstance(data, na.ndarray) and op == "cat":
+            if data is None:
+                ncols = -1
+                size = 0
+            else:
+                if len(data) == 0:
+                    ncols = -1
+                    size = 0
+                elif len(data.shape) == 1:
+                    ncols = 1
+                    size = data.shape[0]
+                else:
+                    ncols, size = data.shape
+            ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
+            if size == 0:
+                data = na.zeros((ncols,0), dtype='float64') # This only works for
+            size = data.shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            data = _alltoallv_array(data, arr_size, offsets, sizes)
+            return data
+        elif isinstance(data, types.ListType) and op == "cat":
+            if MPI.COMM_WORLD.rank == 0:
+                data = self.__mpi_recvlist(data)
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        raise NotImplementedError
+
+    @parallel_passthrough
     def _mpi_catlist(self, data):
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvlist(data)
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
+        return self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_catarray(self, data):
-        if data is None:
-            ncols = -1
-            size = 0
-        else:
-            if len(data) == 0:
-                ncols = -1
-                size = 0
-            elif len(data.shape) == 1:
-                ncols = 1
-                size = data.shape[0]
-            else:
-                ncols, size = data.shape
-        ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
-        if size == 0:
-            data = na.zeros((ncols,0), dtype='float64') # This only works for
-        size = data.shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        data = _alltoallv_array(data, arr_size, offsets, sizes)
-        return data
+        return self._par_combine_object(data, op = "cat")
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
-        #self._barrier()
         data = MPI.COMM_WORLD.bcast(data, root=0)
         return data
 
@@ -674,24 +632,8 @@
         io_handler.preload(grids, fields)
 
     @parallel_passthrough
-    def _mpi_double_array_max(self,data):
-        """
-        Finds the na.maximum of a distributed array and returns the result
-        back to all. The array should be the same length on all tasks!
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            recv_data = na.empty(data.size, dtype='float64')
-            for i in xrange(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
-                data = na.maximum(data, recv_data)
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_allsum(self, data, dtype=None):
+    def _mpi_allreduce(self, data, dtype=None, op='sum'):
+        op = op_names[op]
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
             if dtype is None:
                 dtype = data.dtype
@@ -699,22 +641,12 @@
                 data = data.astype(dtype)
             temp = data.copy()
             MPI.COMM_WORLD.Allreduce([temp,get_mpi_type(dtype)], 
-                                     [data,get_mpi_type(dtype)], op=MPI.SUM)
+                                     [data,get_mpi_type(dtype)], op)
             return data
         else:
             # We use old-school pickling here on the assumption the arrays are
             # relatively small ( < 1e7 elements )
-            return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
-
-    @parallel_passthrough
-    def _mpi_allmax(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MAX)
-
-    @parallel_passthrough
-    def _mpi_allmin(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MIN)
+            return MPI.COMM_WORLD.allreduce(data, op)
 
     ###
     # Non-blocking stuff.
@@ -753,11 +685,17 @@
     # End non-blocking stuff.
     ###
 
-    def _mpi_get_size(self):
+    ###
+    # Parallel rank and size properties.
+    ###
+
+    @property
+    def _par_size(self):
         if not self._distributed: return 1
         return MPI.COMM_WORLD.size
 
-    def _mpi_get_rank(self):
+    @property
+    def _par_rank(self):
         if not self._distributed: return 0
         return MPI.COMM_WORLD.rank
 


--- a/yt/visualization/streamlines.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/visualization/streamlines.py	Tue Oct 18 13:50:31 2011 -0400
@@ -124,8 +124,8 @@
             self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
-        nprocs = self._mpi_get_size()
-        my_rank = self._mpi_get_rank()
+        nprocs = self._par_size
+        my_rank = self._par_rank
         self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
 
         pbar = get_pbar("Streamlining", self.N)
@@ -144,8 +144,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allsum(self.streamlines)
-        self.magnitudes = self._mpi_allsum(self.magnitudes)
+        self.streamlines = self._mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self._mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
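
The streamlines hunk above shows the typical caller-side translation: _mpi_allsum(x) becomes _mpi_allreduce(x, op='sum').  Below is a minimal standalone mpi4py sketch of the same buffered reduction; the op_names mapping is an assumption (the real dict is defined elsewhere in parallel_analysis_interface.py and is not part of this hunk), and the helper name is hypothetical.

    # Run with e.g. `mpirun -np 4 python allreduce_sketch.py`; illustrative only.
    from mpi4py import MPI
    import numpy as na

    op_names = {'sum': MPI.SUM, 'min': MPI.MIN, 'max': MPI.MAX}   # assumed mapping

    def allreduce(data, op='sum'):
        temp = data.copy()                    # reduce a scratch copy into `data`
        MPI.COMM_WORLD.Allreduce([temp, MPI.DOUBLE],
                                 [data, MPI.DOUBLE], op_names[op])
        return data

    magnitudes = na.ones(8, dtype='float64') * MPI.COMM_WORLD.rank
    print(allreduce(magnitudes, op='sum'))    # identical on every rank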


--- a/yt/visualization/volume_rendering/camera.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Tue Oct 18 13:50:31 2011 -0400
@@ -356,7 +356,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank is 0 and fn is not None:
             if clip_ratio is not None:
                 write_bitmap(image, fn, clip_ratio*image.std())
             else:
@@ -623,7 +623,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self._par_rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
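
Both camera changes follow the same root-only I/O pattern: every rank participates in the render, but only rank 0 writes the result.  A minimal sketch of that gate with plain mpi4py (the filename and payload are hypothetical); note the sketch compares with == 0, since `rank is 0` only happens to work because CPython caches small integers.

    from mpi4py import MPI

    status = "image written by rank 0 only\n"        # stand-in for the real output
    if MPI.COMM_WORLD.rank == 0:
        with open("render_status.txt", "w") as f:    # hypothetical filename
            f.write(status)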


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 13:50:22 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 13:50:31 2011 -0400
@@ -275,7 +275,7 @@
         self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
         self.brick_parents = na.zeros( NB, dtype='int64')
         self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
+        self.brick_owners = na.ones(NB, dtype='int32') * self._par_rank
         self._object_owners = self.brick_owners
         for i,b in enumerate(bricks):
             self.brick_left_edges[i,:] = b.LeftEdge
@@ -307,7 +307,7 @@
         bricks = self.bricks
         self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
         # Copy our bricks back in
-        self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
+        self.bricks[self.brick_owners == self._par_rank] = bricks[:]
 
     def _create_buffer(self, ind_list):
         # Note that we have vertex-centered data, so we add one before taking
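
The grid_partitioner change keeps an ownership array indexed by rank, so restoring locally built bricks after a global exchange is a single boolean mask.  A small NumPy sketch with made-up owners and bricks:

    import numpy as na

    brick_owners = na.array([0, 1, 0, 2, 1], dtype='int32')   # hypothetical owners
    my_rank = 1
    local_bricks = na.array(["b1", "b4"], dtype='object')     # bricks this rank built

    bricks = na.array([None] * brick_owners.size, dtype='object')
    bricks[brick_owners == my_rank] = local_bricks   # rank 1's bricks fill slots 1 and 4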


http://bitbucket.org/yt_analysis/yt/changeset/63c3250e30aa/
changeset:   63c3250e30aa
branch:      yt
user:        MatthewTurk
date:        2011-10-18 19:50:57
summary:     Merging
affected #:  2 files (-1 bytes)

--- a/tests/halos.py	Tue Oct 18 13:49:59 2011 -0400
+++ b/tests/halos.py	Tue Oct 18 13:50:57 2011 -0400
@@ -1,11 +1,10 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCompositionHashHOP, TestHaloCompositionHashFOF, \
-    TestHaloCompositionHashPHOP 
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
 
-create_test(TestHaloCompositionHashHOP, "halo_composition_test_hash_HOP", threshold=80.0)
+create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
 
-create_test(TestHaloCompositionHashFOF, "halo_composition_test_hash_FOF", threshold=80.0)
+create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
 
-create_test(TestHaloCompositionHashPHOP, "halo_composition_test_hash_PHOP", threshold=80.0)
+create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)


--- a/yt/utilities/answer_testing/halo_tests.py	Tue Oct 18 13:49:59 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Tue Oct 18 13:50:57 2011 -0400
@@ -23,8 +23,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountHOP, "halo_count_test_HOP", threshold=80.0)
-
 # Tests the number of halos returned by the FOF halo finder on a dataset
 class TestHaloCountFOF(YTStaticOutputTest):
     link = 0.2
@@ -44,8 +42,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountFOF, "halo_count_test_FOF", threshold=80.0)
-
 # Tests the number of halos returned by the Parallel HOP halo finder on a 
 # dataset
 class TestHaloCountPHOP(YTStaticOutputTest):
@@ -64,8 +60,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountPHOP, "halo_count_test_PHOP", threshold=80.0)
-
 class TestHaloComposition(YTStaticOutputTest):
     threshold=80.0
     
@@ -87,11 +81,9 @@
                 return False
         return True
     
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
-
 # Tests the content of the halos returned by the HOP halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
-# halo.
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashHOP(YTStaticOutputTest):
     threshold=80.0
     
@@ -103,7 +95,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
@@ -118,7 +110,7 @@
 
 # Tests the content of the halos returned by the FOF halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
-# halo.
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashFOF(YTStaticOutputTest):
     link = 0.2
     padding = 0.02
@@ -132,7 +124,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
@@ -147,7 +139,7 @@
 
 # Tests the content of the halos returned by the Parallel HOP halo finder on a 
 # dataset by comparing the hash of the arrays of all the particles contained 
-# in each halo.
+# in each halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashPHOP(YTStaticOutputTest):
     threshold=80.0
     
@@ -159,7 +151,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
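
The IDs change in the three hash tests relies on np.concatenate accepting a list of 1-D arrays directly; the extra tuple and np.array wrapper in the old lines were redundant.  A quick check (array contents are made up):

    import numpy as np

    IDs = [np.array([3, 1]), np.array([7]), np.array([2, 5, 4])]
    assert (np.concatenate(IDs) == np.array(np.concatenate((IDs)))).all()
    print(np.concatenate(IDs))   # [3 1 7 2 5 4]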


http://bitbucket.org/yt_analysis/yt/changeset/c756684a7a9e/
changeset:   c756684a7a9e
branch:      yt
user:        MatthewTurk
date:        2011-10-18 20:21:07
summary:     Removing unused stuff, coalescing almost all of the mpi_[op][type] usages.
Just catarray left.
affected #:  8 files (-1 bytes)
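
Every hunk below applies the same translation: a specialized helper such as _mpi_joindict(d), _mpi_catdict(d) or _mpi_catlist(l) becomes a call to _par_combine_object(data, datatype=..., op=...).  For reference, here is a standalone mpi4py sketch of the dict/"join" path, mirroring the gather-merge-broadcast logic introduced earlier in this series; it is illustrative only, not yt code.

    # Run with e.g. `mpirun -np 4 python join_sketch.py`.
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    data = {comm.rank: comm.rank ** 2}        # each rank contributes one entry

    if comm.rank == 0:
        for i in range(1, comm.size):
            data.update(comm.recv(source=i, tag=0))
    else:
        comm.send(data, dest=0, tag=0)
    data = comm.bcast(data, root=0)           # every rank ends with the merged dict
    print("%d %s" % (comm.rank, sorted(data.items())))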

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:21:07 2011 -0400
@@ -837,7 +837,8 @@
         # Now we make a global dict of how many particles each task is
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
-        self.global_padded_count = self._mpi_joindict(self.global_padded_count)
+        self.global_padded_count = self._par_object_combine(
+                self.global_padded_count, datatype = "dict", opt = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()
         del self.global_padded_count
@@ -932,7 +933,8 @@
         # but there's so many places in this that need to be globally synched
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
-        global_annulus_count = self._mpi_joindict(global_annulus_count)
+        self.global_annulus_count = self._par_combine_object(
+                self.global_annulus_count, datatype = "dict", opt = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)
         recv_chainIDs = dict.fromkeys(self.neighbors)


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 14:21:07 2011 -0400
@@ -494,11 +494,13 @@
             updated_halos.append(halo)
         
         # And here is where we bring it all together.
-        updated_halos = self._mpi_catlist(updated_halos)
+        updated_halos = self._par_combine_object(updated_halos,
+                            datatype="list", op="cat")
         updated_halos.sort(key = lambda a:a['id'])
         self.all_halos = updated_halos
 
-        self.filtered_halos = self._mpi_catlist(self.filtered_halos)
+        self.filtered_halos = self._par_combine_object(self.filtered_halos,
+                            datatype="list", op="cat")
         self.filtered_halos.sort(key = lambda a:a['id'])
 
         if filename is not None:


--- a/yt/data_objects/data_containers.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 14:21:07 2011 -0400
@@ -804,7 +804,8 @@
             self[field] = temp_data[field] 
         # We finalize
         if temp_data != {}:
-            temp_data = self._mpi_catdict(temp_data)
+            temp_data = self._par_combine_object(temp_data,
+                    datatype='dict', op='cat')
         # And set, for the next group
         for field in temp_data.keys():
             self[field] = temp_data[field]
@@ -2035,7 +2036,7 @@
         data['pdy'] *= 0.5
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._mpi_catdict(data)
+        data = self._par_combine_object(data, datatype='dict', op='cat')
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Tue Oct 18 14:21:07 2011 -0400
@@ -770,7 +770,8 @@
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
     def merge_trees(self):
-        self.tree_dict = self._mpi_joindict(self.tree_dict)
+        self.tree_dict = self._par_combine_object(self.tree_dict,
+                            datatype = "dict", op = "join")
 
     def rebuild_references(self):
         self.tree = self.tree_dict[0]


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 13:50:57 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,139 +0,0 @@
-"""
-A simple distributed object mechanism, for storing array-heavy objects.
-Meant to be subclassed.
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from itertools import izip
-
-import numpy as na
-
-from yt.funcs import *
-
-from .parallel_analysis_interface import ParallelAnalysisInterface
-
-class DistributedObjectCollection(ParallelAnalysisInterface):
-    valid = True
-
-    def _get_object_info(self):
-        pass
-
-    def _set_object_info(self):
-        pass
-
-    def join_lists(self):
-        info_dict = self._get_object_info()
-        info_dict = self._mpi_catdict(info_dict)
-        self._set_object_info(info_dict)
-
-    def _collect_objects(self, desired_indices):
-        # We figure out which indices belong to which processor,
-        # then we pack them up, and we send a list to each processor.
-        request_count = []
-        owners = self._object_owners[desired_indices]
-        mylog.debug("Owner list: %s", na.unique1d(owners))
-        # Even if we have a million bricks, this should not take long.
-        s = self._par_size
-        m = self._par_rank
-        requests = dict( ( (i, []) for i in xrange(s) ) )
-        for i, p in izip(desired_indices, owners):
-            requests[p].append(i)
-        for p in sorted(requests):
-            requests[p] = na.array(requests[p], dtype='int64')
-            request_count.append(len(requests[p]))
-        size = len(request_count)
-        mylog.debug("Requesting: %s", request_count)
-        request_count = na.array(request_count, dtype='int64')
-        # Now we distribute our requests to all the processors.
-        # This is two-pass.  One to get the length of the arrays.  The second
-        # pass is to get the actual indices themselves.
-        request_count = self._mpi_joindict({m : request_count})
-        # Now we have our final array of requests, with arrangement
-        # (Nproc,Nproc).  First index corresponds to requesting proc, second to
-        # sending.  So [them,us] = 5 means we owe 5, whereas [us, them] means
-        # we are owed.
-        send_hooks = []
-        dsend_buffers, dsend_hooks = [], []
-        recv_hooks, recv_buffers = [], []
-        drecv_buffers, drecv_hooks = [], []
-        # We post our index-list and data receives from each processor.
-        mylog.debug("Posting data buffer receives")
-        proc_hooks = {}
-        for p, request_from in request_count.items():
-            if p == m: continue
-            size = request_from[m]
-            #if size == 0: continue
-            # We post receives of the grids we *asked* for.
-            # Note that indices into this are not necessarily processor ids.
-            # So we store.  This has to go before the appends or it's an
-            # off-by-one.
-            mylog.debug("Setting up index buffer of size %s for receive from %s",
-                        size, p)
-            proc_hooks[len(drecv_buffers)] = p
-            drecv_buffers.append(self._create_buffer(requests[p]))
-            # does this work without specifying the type? (was double)
-            drecv_hooks.append(self._mpi_nonblocking_recv(drecv_buffers[-1], p, 1))
-            recv_buffers.append(na.zeros(size, dtype='int64'))
-            # Our index list goes on 0, our buffer goes on 1.  We know how big
-            # the index list will be, now.
-            # does this work without specifying the type? (was long)
-            recv_hooks.append(self._mpi_nonblocking_recv(recv_buffers[-1], p, 0))
-        # Send our index lists into hte waiting buffers
-        mylog.debug("Sending index lists")
-        for p, ind_list in requests.items():
-            if p == m: continue
-            if len(ind_list) == 0: continue
-            # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_nonblocking_send(ind_list, p, 0))
-        # Now we post receives for all of the data buffers.
-        mylog.debug("Sending data")
-        for i in self._mpi_Request_Waititer(recv_hooks):
-            # We get back the index, which here is identical to the processor
-            # number doing the send.  At this point, we can post our receives.
-            p = proc_hooks[i]
-            mylog.debug("Processing from %s", p)
-            ind_list = recv_buffers[i]
-            dsend_buffers.append(self._create_buffer(ind_list))
-            self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_nonblocking_send(dsend_buffers[-1], p, 1))
-        mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
-        for i in self._mpi_Request_Waititer(drecv_hooks):
-            mylog.debug("Unpacking from %s", proc_hooks[i])
-            # Now we have to unpack our buffers
-            # Our key into this is actually the request for the processor
-            # number.
-            p = proc_hooks[i]
-            self._unpack_buffer(requests[p], drecv_buffers[i])
-        mylog.debug("Finalizing sends: %s", len(dsend_hooks))
-        for i in self._mpi_Request_Waititer(dsend_hooks):
-            continue
-
-    def _create_buffer(self, ind_list):
-        pass
-
-    def _pack_buffer(self, ind_list):
-        pass
-
-    def _unpack_buffer(self, ind_list, my_buffer):
-        pass
-


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 14:21:07 2011 -0400
@@ -459,14 +459,6 @@
         return None
 
     @parallel_passthrough
-    def _mpi_catdict(self, data):
-        return self._par_combine_object(data, datatype = "dict", op = "cat")
-
-    @parallel_passthrough
-    def _mpi_joindict(self, data):
-        return self._par_combine_object(data, datatype = "dict", op = "join")
-
-    @parallel_passthrough
     def _mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
@@ -617,10 +609,6 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def _mpi_catlist(self, data):
-        return self._par_combine_object(data, datatype = "list", op = "cat")
-
-    @parallel_passthrough
     def _mpi_catarray(self, data):
         return self._par_combine_object(data, datatype = "array", op = "cat")
 


--- a/yt/visualization/volume_rendering/api.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/visualization/volume_rendering/api.py	Tue Oct 18 14:21:07 2011 -0400
@@ -35,7 +35,6 @@
 from yt.utilities.amr_utils import PartitionedGrid, VectorPlane, \
     TransferFunctionProxy
 from grid_partitioner import HomogenizedVolume, \
-                             HomogenizedBrickCollection, \
                              export_partitioned_grids, \
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 13:50:57 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 14:21:07 2011 -0400
@@ -29,8 +29,6 @@
 
 from yt.utilities.amr_utils import PartitionedGrid, ProtoPrism, GridFace, \
     grid_points_in_volume, find_grids_in_inclined_box
-from yt.utilities.parallel_tools.distributed_object_collection import \
-    DistributedObjectCollection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_root_only
 
@@ -201,176 +199,6 @@
     def reset_cast(self):
         pass
 
-class HomogenizedBrickCollection(DistributedObjectCollection):
-    def __init__(self, source):
-        # The idea here is that we have two sources -- the global_domain
-        # source, which would be a decomposition of the 3D domain, and a
-        # local_domain source, which is the set of bricks we want at the end.
-        self.source = source
-        self.pf = source.pf
-
-    @classmethod
-    def load_bricks(self, base_filename):
-        pass
-
-    def write_my_bricks(self, base_filename):
-        pass
-
-    def store_bricks(self, base_filename):
-        pass
-    
-    @parallel_root_only
-    def write_hierarchy(self, base_filename):
-        pass
-    
-    def _partition_grid(self, grid, fields, log_field = None):
-        fields = ensure_list(fields)
-        if log_field is None: log_field = [True] * len(fields)
-
-        # This is not super efficient, as it re-fills the regions once for each
-        # field.
-        vcds = []
-        for i,field in enumerate(fields):
-            vcd = grid.get_vertex_centered_data(field).astype('float64')
-            if log_field[i]: vcd = na.log10(vcd)
-            vcds.append(vcd)
-
-        GF = GridFaces(grid.Children + [grid])
-        PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
-
-        pgs = []
-        for P in PP.sweep(0):
-            sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
-            if len(sl) == 0: continue
-            dd = [d[sl[0][0]:sl[0][1]+1,
-                    sl[1][0]:sl[1][1]+1,
-                    sl[2][0]:sl[2][1]+1].copy() for d in vcds]
-            pgs.append(PartitionedGrid(grid.id, len(fields), dd,
-                        P.LeftEdge, P.RightEdge, sl[-1]))
-        return pgs
-
-    def _partition_local_grids(self, fields = "Density", log_field = None):
-        fields = ensure_list(fields)
-        bricks = []
-        # We preload.
-        # UNCOMMENT FOR PARALLELISM
-        #grid_list = list(self._get_grid_objs())
-        grid_list = list(self.source._grids)
-        self._preload(grid_list, fields, self.pf.h.io)
-        pbar = get_pbar("Partitioning ", len(grid_list))
-        # UNCOMMENT FOR PARALLELISM
-        #for i, g in enumerate(self._get_grids()):
-        print "THIS MANY GRIDS!", len(grid_list)
-        for i, g in enumerate(self.source._grids):
-            pbar.update(i)
-            bricks += self._partition_grid(g, fields, log_field)
-        pbar.finish()
-        bricks = na.array(bricks, dtype='object')
-        NB = len(bricks)
-        # Now we set up our (local for now) hierarchy.  Note that to calculate
-        # intersection, we only need to do the left edge & right edge.
-        #
-        # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._par_rank
-        self._object_owners = self.brick_owners
-        for i,b in enumerate(bricks):
-            self.brick_left_edges[i,:] = b.LeftEdge
-            self.brick_right_edges[i,:] = b.RightEdge
-            self.brick_parents[i] = b.parent_grid_id
-            self.brick_dimensions[i,:] = b.my_data[0].shape
-        # Vertex-centered means we subtract one from the shape
-        self.brick_dimensions -= 1
-        self.bricks = na.array(bricks, dtype='object')
-        # UNCOMMENT FOR PARALLELISM
-        #self.join_lists()
-
-    def _get_object_info(self):
-        # We transpose here for the catdict operation
-        info_dict = dict(left_edges = self.brick_left_edges.transpose(),
-                         right_edges = self.brick_right_edges.transpose(),
-                         parents = self.brick_parents,
-                         owners = self.brick_owners,
-                         dimensions = self.brick_dimensions.transpose(),)
-        return info_dict
-
-    def _set_object_info(self, info_dict):
-        self.brick_left_edges = info_dict.pop("left_edges").transpose()
-        self.brick_right_edges = info_dict.pop("right_edges").transpose()
-        self.brick_parents = info_dict.pop("parents")
-        self.brick_dimensions = info_dict.pop("dimensions").transpose()
-        self.brick_owners = info_dict.pop("owners")
-        self._object_owners = self.brick_owners
-        bricks = self.bricks
-        self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
-        # Copy our bricks back in
-        self.bricks[self.brick_owners == self._par_rank] = bricks[:]
-
-    def _create_buffer(self, ind_list):
-        # Note that we have vertex-centered data, so we add one before taking
-        # the prod and the sum
-        total_size = (self.brick_dimensions[ind_list,:] + 1).prod(axis=1).sum()
-        mylog.debug("Creating buffer for %s bricks (%s)",
-                    len(ind_list), total_size)
-        my_buffer = na.zeros(total_size, dtype='float64')
-        return my_buffer
-
-    def _pack_buffer(self, ind_list, my_buffer):
-        si = 0
-        for index in ind_list:
-            d = self.bricks[index].my_data.ravel()
-            my_buffer[si:si+d.size] = d[:]
-            si += d.size
-
-    def _unpack_buffer(self, ind_list, my_buffer):
-        si = 0
-        for index in ind_list:
-            pgi = self.brick_parents[index]
-            LE = self.brick_left_edges[index,:].copy()
-            RE = self.brick_right_edges[index,:].copy()
-            dims = self.brick_dimensions[index,:].copy()
-            size = (dims + 1).prod()
-            data = my_buffer[si:si+size].reshape(dims + 1)
-            self.bricks[index] = PartitionedGrid(
-                    pgi, data, LE, RE, dims)
-            si += size
-
-    def _wipe_objects(self, indices):
-        self.bricks[indices] = None
-
-    def _collect_bricks(self, intersection_source):
-        if not self._distributed: return
-        # This entire routine should instead be set up to do:
-        #   alltoall broadcast of the *number* of requested bricks
-        #   non-blocking receives posted for int arrays
-        #   sizes of data calculated
-        #   concatenated data receives posted
-        #   send all data
-        #   get bricks back
-        # This presupposes that we are using the AMRInclinedBox as a data
-        # source.  If we're not, we ought to be.
-        needed_brick_i = find_grids_in_inclined_box(
-            intersection_source.box_vectors, intersection_source.center,
-            self.brick_left_edges, self.brick_right_edges)
-        needed_brick_i = na.where(needed_brick_i)[0]
-        self._collect_objects(needed_brick_i)
-
-    def _initialize_parallel(self):
-        pass
-
-    def _finalize_parallel(self):
-        pass
-
-    def get_brick(self, brick_id):
-        pass
-
-    @property
-    def _grids(self):
-        return self.source._grids
-
 class GridFaces(object):
     def __init__(self, grids):
         self.faces = [ [], [], [] ]


http://bitbucket.org/yt_analysis/yt/changeset/bcb7478b3dae/
changeset:   bcb7478b3dae
branch:      yt
user:        MatthewTurk
date:        2011-10-18 20:22:16
summary:     Fixing typo
affected #:  1 file (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:21:07 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:22:16 2011 -0400
@@ -837,7 +837,7 @@
         # Now we make a global dict of how many particles each task is
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
-        self.global_padded_count = self._par_object_combine(
+        self.global_padded_count = self._par_combine_object(
                 self.global_padded_count, datatype = "dict", opt = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()


http://bitbucket.org/yt_analysis/yt/changeset/6971ac47ce95/
changeset:   6971ac47ce95
branch:      yt
user:        MatthewTurk
date:        2011-10-18 20:31:49
summary:     A couple fixes, but still breaking in PHOP.
affected #:  6 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 14:22:16 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 14:31:49 2011 -0400
@@ -1986,7 +1986,8 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._mpi_catarray(my_points[0])
+        root_points = self._par_combine_object(my_points[0],
+                datatype="array", op="cat")
         del my_points
         if mine == 0:
             root_points.shape = (tot_random, 3)


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:22:16 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:31:49 2011 -0400
@@ -683,8 +683,11 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._mpi_catarray(self.densest_in_chain)
-        self.densest_in_chain_real_index = self._mpi_catarray(self.densest_in_chain_real_index)
+        self.densest_in_chain = self._par_combine_object(self.densest_in_chain,
+                datatype="array", op="cat")
+        self.densest_in_chain_real_index = self._par_combine_object(
+                self.densest_in_chain_real_index,
+                datatype="array", op="cat")
         yt_counters("global chain MPI stuff.")
         # Sort the chains by density here. This is an attempt to make it such
         # that the merging stuff in a few steps happens in the same order
@@ -838,7 +841,7 @@
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
         self.global_padded_count = self._par_combine_object(
-                self.global_padded_count, datatype = "dict", opt = "join")
+                self.global_padded_count, datatype = "dict", op = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()
         del self.global_padded_count
@@ -934,7 +937,7 @@
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
         self.global_annulus_count = self._par_combine_object(
-                self.global_annulus_count, datatype = "dict", opt = "join")
+                global_annulus_count, datatype = "dict", op = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)
         recv_chainIDs = dict.fromkeys(self.neighbors)


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 14:22:16 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 14:31:49 2011 -0400
@@ -548,11 +548,16 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._mpi_catarray(parent_IDs_tosend)
-        parent_masses_tosend = self._mpi_catarray(parent_masses_tosend)
-        parent_halos_tosend = self._mpi_catarray(parent_halos_tosend)
-        child_IDs_tosend = self._mpi_catarray(child_IDs_tosend)
-        child_halos_tosend = self._mpi_catarray(child_halos_tosend)
+        parent_IDs_tosend = self._par_combine_object(parent_IDs_tosend,
+                datatype="array", op="cat")
+        parent_masses_tosend = self._par_combine_object(parent_masses_tosend,
+                datatype="array", op="cat")
+        parent_halos_tosend = self._par_combine_object(parent_halos_tosend,
+                datatype="array", op="cat")
+        child_IDs_tosend = self._par_combine_object(child_IDs_tosend,
+                datatype="array", op="cat")
+        child_halos_tosend = self._par_combine_object(child_halos_tosend,
+                datatype="array", op="cat")
 
         # Resort the received particles.
         Psort = parent_IDs_tosend.argsort()
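
The context line above (Psort = parent_IDs_tosend.argsort()) shows the idiom the concatenation feeds into: sort one of the combined arrays and reuse the same permutation on its companions.  A tiny NumPy illustration with made-up values:

    import numpy as na

    parent_IDs = na.array([42, 7, 19])
    parent_masses = na.array([1.0, 2.0, 3.0])

    order = parent_IDs.argsort()       # [1 2 0]
    print(parent_IDs[order])           # [ 7 19 42]
    print(parent_masses[order])        # [ 2.  3.  1.]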


--- a/yt/data_objects/data_containers.py	Tue Oct 18 14:22:16 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 14:31:49 2011 -0400
@@ -993,12 +993,14 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0:
             points = None
-            t = self._mpi_catarray(None)
+            t = self._par_combine_object(None, datatype="array", op="cat")
         else:
             points = na.concatenate(points)
-            # We have to transpose here so that _mpi_catarray works properly, as
-            # it and the alltoall assume the long axis is the last one.
-            t = self._mpi_catarray(points.transpose())
+            # We have to transpose here so that _par_combine_object works
+            # properly, as it and the alltoall assume the long axis is the last
+            # one.
+            t = self._par_combine_object(points.transpose(),
+                        datatype="array", op="cat")
         self['px'] = t[0,:]
         self['py'] = t[1,:]
         self['pz'] = t[2,:]
@@ -1213,7 +1215,7 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
         else: points = na.concatenate(points)
-        t = self._mpi_catarray(points)
+        t = self._par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
         self['px'] = na.dot(pos, self._x_vec)
         self['py'] = na.dot(pos, self._y_vec)
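
The transpose comments in the hunks above encode a convention: _par_combine_object concatenates along the *last* axis, so (N, 3) coordinate arrays are flipped to (3, N) before combining and then read back row by row.  A plain-NumPy illustration, with two arrays standing in for two ranks:

    import numpy as na

    points_rank0 = na.arange(6.0).reshape(2, 3)    # 2 points "owned" by rank 0
    points_rank1 = na.arange(9.0).reshape(3, 3)    # 3 points "owned" by rank 1

    t = na.concatenate([points_rank0.transpose(),
                        points_rank1.transpose()], axis=-1)
    print(t.shape)                            # (3, 5): one row per coordinate
    px, py, pz = t[0, :], t[1, :], t[2, :]    # cf. self['px'] = t[0,:] above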


--- a/yt/data_objects/derived_quantities.py	Tue Oct 18 14:22:16 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Tue Oct 18 14:31:49 2011 -0400
@@ -103,13 +103,14 @@
 
     def _finalize_parallel(self):
         # Note that we do some fancy footwork here.
-        # _mpi_catarray and its affiliated alltoall function
+        # _par_combine_object and its affiliated alltoall function
         # assume that the *long* axis is the last one.  However,
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            rv.append(self._mpi_catarray(data).transpose())
+            rv.append(self._par_combine_object(data,
+                        datatype="array", op="cat").transpose())
         self.retvals = rv
         
     def _call_func_unlazy(self, args, kwargs):


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 14:22:16 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 14:31:49 2011 -0400
@@ -609,10 +609,6 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def _mpi_catarray(self, data):
-        return self._par_combine_object(data, datatype = "array", op = "cat")
-
-    @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
         data = MPI.COMM_WORLD.bcast(data, root=0)
         return data


http://bitbucket.org/yt_analysis/yt/changeset/56f952029a0a/
changeset:   56f952029a0a
branch:      yt
user:        MatthewTurk
date:        2011-10-18 20:32:59
summary:     Fixed PHOP
affected #:  1 file (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:31:49 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:32:59 2011 -0400
@@ -936,7 +936,7 @@
         # but there's so many places in this that need to be globally synched
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
-        self.global_annulus_count = self._par_combine_object(
+        global_annulus_count = self._par_combine_object(
                 global_annulus_count, datatype = "dict", op = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)


http://bitbucket.org/yt_analysis/yt/changeset/8ca992c16f39/
changeset:   8ca992c16f39
branch:      yt
user:        brittonsmith
date:        2011-10-18 19:48:26
summary:     Changed all ._data to .field_data and they are now defined as
instances of YTFieldData.
affected #:  2 files (-1 bytes)

--- a/yt/data_objects/data_containers.py	Tue Oct 18 13:31:37 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 13:48:26 2011 -0400
@@ -120,6 +120,12 @@
         return self._vc_data[field][grid.id]
     return check_cache
 
+class YTFieldData(dict):
+    """
+    A Container object for field data, instead of just having it be a dict.
+    """
+    pass
+
 class FakeGridForParticles(object):
     """
     Mock up a grid to insert particle positions and radii
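
YTFieldData is deliberately a thin dict subclass: code that treated the old ._data attribute as a plain dict keeps working, while field storage now has a distinct, greppable type that can grow behavior later.  A minimal demonstration (field name and values are made up):

    class YTFieldData(dict):
        """
        A Container object for field data, instead of just having it be a dict.
        """
        pass

    fd = YTFieldData()
    fd["Density"] = [1.0, 2.0, 3.0]     # hypothetical field values
    assert isinstance(fd, dict)
    assert list(fd.keys()) == ["Density"]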


--- a/yt/data_objects/profiles.py	Tue Oct 18 13:31:37 2011 -0400
+++ b/yt/data_objects/profiles.py	Tue Oct 18 13:48:26 2011 -0400
@@ -30,6 +30,7 @@
 
 from yt.funcs import *
 
+from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.data_point_utilities import \
     Bin1DProfile, Bin2DProfile, Bin3DProfile
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -65,7 +66,7 @@
     def __init__(self, data_source, lazy_reader):
         self._data_source = data_source
         self.pf = data_source.pf
-        self._data = {}
+        self.field_data = YTFieldData()
         self._pdata = {}
         self._lazy_reader = lazy_reader
 
@@ -148,18 +149,18 @@
             self._unlazy_add_fields(fields, weight, accumulation)
         if fractional:
             for field in fields:
-                self._data[field] /= self._data[field].sum()
+                self.field_data[field] /= self.field_data[field].sum()
 
     def keys(self):
-        return self._data.keys()
+        return self.field_data.keys()
 
     def __getitem__(self, key):
         # This raises a KeyError if it doesn't exist
         # This is because we explicitly want to add all fields
-        return self._data[key]
+        return self.field_data[key]
 
     def __setitem__(self, key, value):
-        self._data[key] = value
+        self.field_data[key] = value
 
     def _get_field(self, source, this_field, check_cut):
         # This is where we will iterate to get all contributions to a field
@@ -288,7 +289,7 @@
         # both: 0...N, left: 0...N-1, right: 1...N 
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
-        x = self._data[self.bin_field]
+        x = self.field_data[self.bin_field]
         if bin_style is 'both': pass
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
@@ -307,15 +308,15 @@
         *bin_style* (left, right, center, both).
         '''
         fid = open(filename,"w")
-        fields = [field for field in sorted(self._data.keys()) if field != "UsedBins"]
+        fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
         field_data = na.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self._data[field] for field in fields]), axis=0)
+            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self._data[field][:-1] for field in fields]), axis=0)
+            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -334,7 +335,7 @@
         *bin_style* (left, right, center, both).
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self._data.keys()) if (field != "UsedBins" and field != self.bin_field)]
+        fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.bin_field)]
         if group_prefix is None:
             name = "%s-1d" % (self.bin_field)
         else:
@@ -346,7 +347,7 @@
         group = fid.create_group(name)
         group.attrs["x-axis-%s" % self.bin_field] = self.choose_bins(bin_style)
         for field in fields:
-            dset = group.create_dataset("%s" % field, data=self._data[field][:-1])
+            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1])
         fid.close()
 
     def _get_bin_fields(self):
@@ -467,8 +468,8 @@
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
 
-        x = self._data[self.x_bin_field]
-        y = self._data[self.y_bin_field]
+        x = self.field_data[self.x_bin_field]
+        y = self.field_data[self.y_bin_field]
         if bin_style is 'both':
             pass
         elif bin_style is 'left':
@@ -498,17 +499,17 @@
         both).
         """
         fid = open(filename,"w")
-        fields = [field for field in sorted(self._data.keys()) if field != "UsedBins"]
+        fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
         x,y = na.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
-            field_data += [self._data[field][:-1,:-1].ravel() for field in fields
+            field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
         else:
-            field_data += [self._data[field].ravel() for field in fields
+            field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
         field_data = na.array(field_data)
@@ -529,7 +530,7 @@
         right, center, both).
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self._data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
+        fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
         if group_prefix is None:
             name = "%s-%s-2d" % (self.y_bin_field, self.x_bin_field)
         else:
@@ -543,7 +544,7 @@
         group.attrs["x-axis-%s" % self.x_bin_field] = xbins
         group.attrs["y-axis-%s" % self.y_bin_field] = ybins
         for field in fields:
-            dset = group.create_dataset("%s" % field, data=self._data[field][:-1,:-1])
+            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1])
         fid.close()
 
     def _get_bin_fields(self):
@@ -727,9 +728,9 @@
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
 
-        x = self._data[self.x_bin_field]
-        y = self._data[self.y_bin_field]
-        z = self._data[self.z_bin_field]
+        x = self.field_data[self.x_bin_field]
+        y = self.field_data[self.y_bin_field]
+        z = self.field_data[self.z_bin_field]
         if bin_style is 'both':
             pass
         elif bin_style is 'left':
@@ -770,7 +771,7 @@
         attributes.
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self._data.keys()) 
+        fields = [field for field in sorted(self.field_data.keys()) 
                   if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field and field != self.z_bin_field)]
         if group_prefix is None:
             name = "%s-%s-%s-3d" % (self.z_bin_field, self.y_bin_field, self.x_bin_field)
@@ -788,7 +789,7 @@
         group.attrs["z-axis-%s" % self.z_bin_field] = zbins
         
         for field in fields:
-            dset = group.create_dataset("%s" % field, data=self._data[field][:-1,:-1,:-1])
+            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1,:-1])
         fid.close()
 
 
@@ -818,7 +819,7 @@
                               self[self.z_bin_field].size),
                     'field_order':order }
         values = []
-        for field in self._data:
+        for field in self.field_data:
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
@@ -832,7 +833,7 @@
         Given a *pf* parameterfile and the *name* of a stored profile, retrieve
         it into a read-only data structure.
         """
-        self._data = {}
+        self.field_data = YTFieldData()
         prof_arr = pf.h.get_data("/Profiles", name)
         if prof_arr is None: raise KeyError("No such array")
         for ax in 'xyz':
@@ -840,11 +841,11 @@
                 setattr(self, base % ax, prof_arr.getAttr(base % ax))
         for ax in 'xyz':
             fn = getattr(self, '%s_bin_field' % ax)
-            self._data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
+            self.field_data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
         shape = prof_arr.getAttr('shape')
         for fn, fd in zip(prof_arr.getAttr('field_order'),
                           prof_arr.read().transpose()):
-            self._data[fn] = fd.reshape(shape)
+            self.field_data[fn] = fd.reshape(shape)
 
     def add_fields(self, *args, **kwargs):
         raise RuntimeError("Sorry, you can't add to a stored profile.")


http://bitbucket.org/yt_analysis/yt/changeset/e4c9e27657e0/
changeset:   e4c9e27657e0
branch:      yt
user:        brittonsmith
date:        2011-10-18 19:51:06
summary:     Changing ._data to .field_data in tests.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 13:48:26 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 13:51:06 2011 -0400
@@ -171,7 +171,7 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data._data
+        self.result = p.data.field_data
                     
     def compare(self, old_result):
         self.compare_data_arrays(
@@ -201,5 +201,5 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data._data
+        self.result = p.data.field_data
 


http://bitbucket.org/yt_analysis/yt/changeset/42c93eab28c4/
changeset:   42c93eab28c4
branch:      yt
user:        brittonsmith
date:        2011-10-18 19:58:34
summary:     Changed .data to .field_data and made them instances of YTFieldData.
affected #:  1 file (-1 bytes)

--- a/yt/data_objects/data_containers.py	Tue Oct 18 13:51:06 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 13:58:34 2011 -0400
@@ -36,6 +36,7 @@
 
 from yt.funcs import *
 
+from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.derived_quantities import GridChildMaskWrapper
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
@@ -70,11 +71,11 @@
     """
     def save_state(self, grid, field=None):
         old_params = grid.field_parameters
-        old_keys = grid.data.keys()
+        old_keys = grid.field_data.keys()
         grid.field_parameters = self.field_parameters
         tr = func(self, grid, field)
         grid.field_parameters = old_params
-        grid.data = dict( [(k, grid.data[k]) for k in old_keys] )
+        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
         return tr
     return save_state
 
@@ -134,20 +135,20 @@
     def __init__(self, grid):
         self._corners = grid._corners
         self.field_parameters = {}
-        self.data = {'x':grid['particle_position_x'],
-                     'y':grid['particle_position_y'],
-                     'z':grid['particle_position_z'],
-                     'dx':grid['dx'],
-                     'dy':grid['dy'],
-                     'dz':grid['dz']}
+        self.field_data = YTFieldData({'x':grid['particle_position_x'],
+                                       'y':grid['particle_position_y'],
+                                       'z':grid['particle_position_z'],
+                                       'dx':grid['dx'],
+                                       'dy':grid['dy'],
+                                       'dz':grid['dz']})
         self.dds = grid.dds.copy()
         self.real_grid = grid
         self.child_mask = 1
-        self.ActiveDimensions = self.data['x'].shape
+        self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
         
     def __getitem__(self, field):
-        if field not in self.data.keys():
+        if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
                 tempx = na.abs(self['x'] - center[0])
@@ -159,7 +160,7 @@
                 tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
-        else: tr = self.data[field]
+        else: tr = self.field_data[field]
         return tr
 
 class AMRData(object):
@@ -193,7 +194,7 @@
         mylog.debug("Appending object to %s (type: %s)", self.pf, type(self))
         if fields == None: fields = []
         self.fields = ensure_list(fields)[:]
-        self.data = {}
+        self.field_data = YTFieldData()
         self.field_parameters = {}
         self.__set_default_field_parameters()
         self._cut_masks = {}
@@ -255,7 +256,7 @@
         """
         Clears out all data from the AMRData instance, freeing memory.
         """
-        self.data.clear()
+        self.field_data.clear()
         if self._grids is not None:
             for grid in self._grids: grid.clear_data()
 
@@ -272,7 +273,7 @@
         """
         Checks if a data field already exists.
         """
-        return self.data.has_key(key)
+        return self.field_data.has_key(key)
 
     def _refresh_data(self):
         """
@@ -282,24 +283,24 @@
         self.get_data()
 
     def keys(self):
-        return self.data.keys()
+        return self.field_data.keys()
 
     def __getitem__(self, key):
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.data.has_key(key):
+        if not self.field_data.has_key(key):
             if key not in self.fields:
                 self.fields.append(key)
             self.get_data(key)
-        return self.data[key]
+        return self.field_data[key]
 
     def __setitem__(self, key, val):
         """
         Sets a field to be some other value.
         """
         if key not in self.fields: self.fields.append(key)
-        self.data[key] = val
+        self.field_data[key] = val
 
     def __delitem__(self, key):
         """
@@ -309,21 +310,21 @@
             del self.fields[self.fields.index(key)]
         except ValueError:
             pass
-        del self.data[key]
+        del self.field_data[key]
 
     def _generate_field_in_grids(self, fieldName):
         pass
 
     _key_fields = None
     def write_out(self, filename, fields=None, format="%0.16e"):
-        if fields is None: fields=sorted(self.data.keys())
+        if fields is None: fields=sorted(self.field_data.keys())
         if self._key_fields is None: raise ValueError
         field_order = self._key_fields[:]
         for field in field_order: self[field]
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.data[field] for field in field_order])
+        field_data = na.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -471,11 +472,11 @@
         else:
             fields_to_get = ensure_list(fields)
         if not self.sort_by in fields_to_get and \
-            self.sort_by not in self.data:
+            self.sort_by not in self.field_data:
             fields_to_get.insert(0, self.sort_by)
         mylog.debug("Going to obtain %s", fields_to_get)
         for field in fields_to_get:
-            if self.data.has_key(field):
+            if self.field_data.has_key(field):
                 continue
             mylog.info("Getting field %s from %s", field, len(self._grids))
             if field not in self.hierarchy.field_list and not in_grids:
@@ -484,7 +485,7 @@
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
-            if not self.data.has_key(field):
+            if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
                 self._sortkey = na.argsort(self[self.sort_by])
@@ -795,7 +796,7 @@
             fields_to_get = ensure_list(fields)
         temp_data = {}
         for field in fields_to_get:
-            if self.data.has_key(field): continue
+            if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
                 if self._generate_field(field):
                     continue # A "True" return means we did it
@@ -1429,7 +1430,7 @@
         temp_data = {}
         _size = self.dims * self.dims
         for field in fields_to_get:
-            if self.data.has_key(field): continue
+            if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
                 if self._generate_field(field):
                     continue # A "True" return means we did it
@@ -1598,7 +1599,7 @@
         else: fields = ensure_list(fields)
         # We need a new tree for every single set of fields we add
         self._obtain_fields(fields, self._node_name)
-        fields = [f for f in fields if f not in self.data]
+        fields = [f for f in fields if f not in self.field_data]
         if len(fields) == 0: return
         tree = self._get_tree(len(fields))
         coord_data = []
@@ -1988,7 +1989,7 @@
         if fields is None: fields = ensure_list(self.fields)[:]
         else: fields = ensure_list(fields)
         self._obtain_fields(fields, self._node_name)
-        fields = [f for f in fields if f not in self.data]
+        fields = [f for f in fields if f not in self.field_data]
         if len(fields) == 0: return
         coord_data = []
         field_data = []
@@ -2310,7 +2311,7 @@
             fields_to_get = ensure_list(fields)
         mylog.debug("Going to obtain %s", fields_to_get)
         for field in fields_to_get:
-            if self.data.has_key(field):
+            if self.field_data.has_key(field):
                 continue
             if field not in self.hierarchy.field_list and not in_grids:
                 if self._generate_field(field):
@@ -2322,14 +2323,14 @@
                self.pf.field_info[field].particle_type and \
                self.pf.h.io._particle_reader:
                 self.particles.get_data(field)
-                if field not in self.data:
+                if field not in self.field_data:
                     if self._generate_field(field): continue
             mylog.info("Getting field %s from %s", field, len(self._grids))
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
-            if not self.data.has_key(field):
+            if not self.field_data.has_key(field):
                 continue
             self[field] = self[field]
 
@@ -3202,7 +3203,7 @@
             fields = ensure_list(fields)
         obtain_fields = []
         for field in fields:
-            if self.data.has_key(field): continue
+            if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
                 try:
                     #print "Generating", field
@@ -3315,7 +3316,7 @@
             fields_to_get = ensure_list(field)
         for field in fields_to_get:
             grid_count = 0
-            if self.data.has_key(field):
+            if self.field_data.has_key(field):
                 continue
             mylog.debug("Getting field %s from %s possible grids",
                        field, len(self._grids))
@@ -3347,9 +3348,9 @@
 
     def _update_level_state(self, level, field = None):
         dx = self._base_dx / self.pf.refine_by**level
-        self.data['cdx'] = dx[0]
-        self.data['cdy'] = dx[1]
-        self.data['cdz'] = dx[2]
+        self.field_data['cdx'] = dx[0]
+        self.field_data['cdy'] = dx[1]
+        self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
         self.global_startindex = na.rint(LL / dx).astype('int64') - 1
@@ -3358,13 +3359,13 @@
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
-            self.data[field] = na.zeros(idims,dtype='float64')-999
+            self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
-            self.data[field] = na.zeros(idims,dtype='float64')-999
+            self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, field):
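
To summarize the pattern this changeset is converging on: field storage moves from a bare .data dict to a dedicated field_data container, with lazy population through __getitem__. Below is a minimal, self-contained sketch of that pattern; FieldData and MiniDataObject are made-up stand-ins, not yt's actual classes.

import numpy as na  # the codebase aliases numpy as "na"

class FieldData(dict):
    """Stand-in for YTFieldData: a plain dict with a distinct type."""
    pass

class MiniDataObject(object):
    """Hypothetical analogue of the field handling shown in the diff above."""
    def __init__(self, fields=None):
        self.fields = list(fields or [])
        self.field_data = FieldData()

    def get_data(self, key):
        # Real yt reads from grids or generates derived fields here.
        self.field_data[key] = na.zeros(1, dtype='float64')

    def __getitem__(self, key):
        if key not in self.field_data:
            if key not in self.fields:
                self.fields.append(key)
            self.get_data(key)
        return self.field_data[key]

    def __setitem__(self, key, val):
        if key not in self.fields:
            self.fields.append(key)
        self.field_data[key] = val

    def __delitem__(self, key):
        if key in self.fields:
            self.fields.remove(key)
        del self.field_data[key]

    def clear_data(self):
        self.field_data.clear()

Routing every access through a named container type means behavior (validation, unit handling) can later be attached to it without revisiting each call site.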


http://bitbucket.org/yt_analysis/yt/changeset/95d1aba7c3a0/
changeset:   95d1aba7c3a0
branch:      yt
user:        brittonsmith
date:        2011-10-18 20:27:25
summary:     Changed .data dicts to .field_data YTFieldData instances.
affected #:  17 files (-1 bytes)

--- a/scripts/iyt	Tue Oct 18 13:58:34 2011 -0400
+++ b/scripts/iyt	Tue Oct 18 14:27:25 2011 -0400
@@ -150,7 +150,7 @@
             return self[self._key_numbers[key]]
         return UserDict.__getitem__(self, key)
     def __iter__(self):
-        return itertools.chain(self.data.iterkeys(),
+        return itertools.chain(self.field_data.iterkeys(),
                         self._key_numbers.iterkeys())
     def __repr__(self):
         s = "{" + ", \n ".join(
@@ -158,9 +158,9 @@
                     for i in sorted(self._key_numbers)]) + "}"
         return s
     def has_key(self, key):
-        return self.data.has_key(key) or self._key_numbers.has_key(key)
+        return self.field_data.has_key(key) or self._key_numbers.has_key(key)
     def keys(self):
-        return self.data.key(key) + self._key_numbers.key(key)
+        return self.field_data.key(key) + self._key_numbers.key(key)
 
 pfs = ParameterFileDict()
 pcs = []
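
A caution on the scripts/iyt hunk above: ParameterFileDict subclasses UserDict, which keeps its contents in an attribute literally named .data, so the rename to .field_data here looks like a search-and-replace overreach rather than a real container change. Separately, dictionaries have no .key() method, so keys() presumably meant .keys(); under those assumptions the method would read (Python 2 list semantics, as in the rest of the script):

def keys(self):
    # Assumes the storage remains in UserDict's own .data attribute.
    return self.data.keys() + self._key_numbers.keys()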


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 14:27:25 2011 -0400
@@ -1001,7 +1001,7 @@
     for plot in projections:
         # Get name of data field.
         other_fields = {'px':True, 'py':True, 'pdx':True, 'pdy':True, 'weight_field':True}
-        for pfield in plot.data.keys():
+        for pfield in plot.field_data.keys():
             if not(other_fields.has_key(pfield)):
                 field = pfield
                 break
@@ -1056,12 +1056,12 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
-        plot.data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
-        plot.data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
-        plot.data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
-        plot.data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
-        plot.data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
+        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
+        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
+        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
+        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
+        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
                                                     add_x_weight_field, add_y_weight_field, 
                                                     add2_x_weight_field, add2_y_weight_field])
 


--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py	Tue Oct 18 14:27:25 2011 -0400
@@ -27,6 +27,7 @@
 import numpy as na
 
 from yt.funcs import *
+from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.data_objects.static_output import \
@@ -65,7 +66,7 @@
         self.base_grid = base_pf.h.smoothed_covering_grid(level, self.LeftEdge,
                         self.RightEdge, dims=dims)
         self.base_grid.Level = self.base_grid.level
-        self.data = {}
+        self.field_data = YTFieldData()
         #self._calculate_child_masks()
         self.Parent = None
         self.Children = []


--- a/yt/analysis_modules/level_sets/contour_finder.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/analysis_modules/level_sets/contour_finder.py	Tue Oct 18 14:27:25 2011 -0400
@@ -129,7 +129,7 @@
     print "Finished joining in %0.2e seconds" % (t2-t1)
     pbar.finish()
     data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
-    del data_source.data["tempContours"] # Force a reload from the grids
+    del data_source.field_data["tempContours"] # Force a reload from the grids
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
@@ -141,6 +141,6 @@
     mylog.info("Identified %s contours between %0.5e and %0.5e",
                len(contour_ind.keys()),min_val,max_val)
     for grid in chain(grid_set):
-        grid.data.pop("tempContours", None)
-    del data_source.data["tempContours"]
+        grid.field_data.pop("tempContours", None)
+    del data_source.field_data["tempContours"]
     return contour_ind


--- a/yt/analysis_modules/light_cone/light_cone.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Tue Oct 18 14:27:25 2011 -0400
@@ -356,7 +356,7 @@
 
             # Save the last fixed resolution buffer for the plot collection, 
             # but replace the data with the full light cone projection data.
-            frb.data[field] = lightConeProjection
+            frb.field_data[field] = lightConeProjection
 
             # Write image.
             if save_slice_images:
@@ -370,7 +370,7 @@
             if apply_halo_mask:
                 if len(self.halo_mask) > 0:
                     mylog.info("Applying halo mask.")
-                    frb.data[field] *= self.halo_mask
+                    frb.field_data[field] *= self.halo_mask
                 else:
                     mylog.error("No halo mask loaded, call get_halo_mask.")
 


--- a/yt/data_objects/grid_patch.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/data_objects/grid_patch.py	Tue Oct 18 14:27:25 2011 -0400
@@ -30,6 +30,7 @@
 
 from yt.funcs import *
 
+from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.definitions import x_dict, y_dict
 from .field_info_container import \
     NeedsGridType, \
@@ -57,7 +58,7 @@
                  '_parent_id', '_children_ids']
 
     def __init__(self, id, filename=None, hierarchy=None):
-        self.data = {}
+        self.field_data = YTFieldData()
         self.field_parameters = {}
         self.id = id
         if hierarchy: self.hierarchy = weakref.proxy(hierarchy)
@@ -140,36 +141,36 @@
             raise exceptions.KeyError, field
 
     def has_key(self, key):
-        return (key in self.data)
+        return (key in self.field_data)
 
     def __getitem__(self, key):
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.data.has_key(key):
+        if not self.field_data.has_key(key):
             self.get_data(key)
-        return self.data[key]
+        return self.field_data[key]
 
     def __setitem__(self, key, val):
         """
         Sets a field to be some other value.
         """
-        self.data[key] = val
+        self.field_data[key] = val
 
     def __delitem__(self, key):
         """
         Deletes a field
         """
-        del self.data[key]
+        del self.field_data[key]
 
     def keys(self):
-        return self.data.keys()
+        return self.field_data.keys()
 
     def get_data(self, field):
         """
         Returns a field or set of fields for a key or set of keys
         """
-        if not self.data.has_key(field):
+        if not self.field_data.has_key(field):
             if field in self.hierarchy.field_list:
                 conv_factor = 1.0
                 if self.pf.field_info.has_key(field):
@@ -178,7 +179,7 @@
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
                     self[field] = na.array([],dtype='int64')
-                    return self.data[field]
+                    return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
                     self[field] = na.multiply(temp, conv_factor, temp)
@@ -191,7 +192,7 @@
                     else: raise
             else:
                 self._generate_field(field)
-        return self.data[field]
+        return self.field_data[field]
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -205,7 +206,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
@@ -247,7 +248,7 @@
         """
         self._del_child_mask()
         self._del_child_indices()
-        self.data.clear()
+        self.field_data.clear()
         self._setup_dx()
 
     def check_child_masks(self):
@@ -304,11 +305,11 @@
         :meth:`clear_derived_quantities`.
         """
         for key in self.keys():
-            del self.data[key]
-        del self.data
+            del self.field_data[key]
+        del self.field_data
         if hasattr(self,"retVal"):
             del self.retVal
-        self.data = {}
+        self.field_data = YTFieldData()
         self.clear_derived_quantities()
 
     def clear_derived_quantities(self):
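
Condensing the grid_patch.py read path shown above: an on-disk field is pulled from the IO handler, scaled in place by its conversion factor, and cached in field_data. The helper below is only a sketch; read_grid_field is a made-up name and the _convert_function lookup is an assumption based on surrounding yt conventions, not something visible in this hunk.

import numpy as na

def read_grid_field(grid, field):
    """Hypothetical condensation of AMRGridPatch.get_data for an on-disk field."""
    conv_factor = 1.0
    if field in grid.pf.field_info:
        conv_factor = grid.pf.field_info[field]._convert_function(grid)
    temp = grid.hierarchy.io.pop(grid, field)
    # The third argument makes the multiply in place, avoiding a second array.
    grid.field_data[field] = na.multiply(temp, conv_factor, temp)
    return grid.field_data[field]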


--- a/yt/data_objects/particle_io.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/data_objects/particle_io.py	Tue Oct 18 14:27:25 2011 -0400
@@ -91,8 +91,8 @@
             fields_to_read, rtype, args, grid_list, count_list,
             conv_factors)
         for [n, v] in zip(fields_to_read, rvs):
-            self.source.data[n] = v
-        print self.source.data.keys()
+            self.source.field_data[n] = v
+        print self.source.field_data.keys()
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
     periodic = False


--- a/yt/frontends/art/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/art/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -85,7 +85,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def get_global_startindex(self):
         """


--- a/yt/frontends/castro/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/castro/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -117,7 +117,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "CastroGrid_%04i" % (self.id)


--- a/yt/frontends/chombo/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/chombo/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -82,7 +82,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
 


--- a/yt/frontends/gdf/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/gdf/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -58,7 +58,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class GDFHierarchy(AMRHierarchy):
 


--- a/yt/frontends/maestro/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/maestro/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -110,7 +110,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "MaestroGrid_%04i" % (self.id)


--- a/yt/frontends/nyx/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/nyx/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -109,7 +109,7 @@
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "NyxGrid_%04i" % (self.id)


--- a/yt/frontends/orion/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/orion/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -114,7 +114,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "OrionGrid_%04i" % (self.id)


--- a/yt/frontends/ramses/data_structures.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Tue Oct 18 14:27:25 2011 -0400
@@ -80,7 +80,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def get_global_startindex(self):
         """


--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 14:27:25 2011 -0400
@@ -134,7 +134,7 @@
                                         self.pf.domain_right_edge))[self.axis],
                                 fields=self.field)
         # values.
-        self.result = slice.data
+        self.result = slice.field_data
 
     def compare(self, old_result):
         slice  = self.result


--- a/yt/utilities/command_line.py	Tue Oct 18 13:58:34 2011 -0400
+++ b/yt/utilities/command_line.py	Tue Oct 18 14:27:25 2011 -0400
@@ -573,7 +573,7 @@
         else:
             p = pc.add_slice(opts.field, opts.axis)
         from yt.gui.reason.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.data, opts.field)
+        mapper = PannableMapServer(p.field_data, opts.field)
         import yt.utilities.bottle as bottle
         bottle.debug(True)
         if opts.host is not None:
@@ -677,7 +677,7 @@
         pc_dummy = PlotCollection(pf, center=c)
         pr = pc_dummy.add_profile_object(dd, ["Density", "Temperature"],
                             weight="CellMassMsun")
-        ph.modify["line"](pr.data["Density"], pr.data["Temperature"])
+        ph.modify["line"](pr.field_data["Density"], pr.field_data["Temperature"])
         pc.save()
 
     @cmdln.option("-d", "--desc", action="store",
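
Every frontend hunk in this changeset repeats the same _setup_dx idiom, now writing into field_data. Condensed into a standalone function for reference (argument names are assumed; this is a sketch, not the real method):

import numpy as na

def setup_dx(field_data, left_edge, right_edge, active_dimensions, dimensionality):
    dds = (na.asarray(right_edge) - na.asarray(left_edge)) / na.asarray(active_dimensions)
    # Collapse unused axes to unit spacing for 1D/2D datasets.
    if dimensionality < 2: dds[1] = 1.0
    if dimensionality < 3: dds[2] = 1.0
    field_data['dx'], field_data['dy'], field_data['dz'] = dds
    return dds

# e.g. setup_dx(fd, [0., 0., 0.], [1., 1., 1.], [16, 16, 16], 3)
# leaves fd['dx'] == fd['dy'] == fd['dz'] == 0.0625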


http://bitbucket.org/yt_analysis/yt/changeset/71cfbb72e66c/
changeset:   71cfbb72e66c
branch:      yt
user:        brittonsmith
date:        2011-10-18 20:35:16
summary:     Fixed a few issues with moving .data to .field_data.
affected #:  2 files (-1 bytes)

--- a/yt/data_objects/data_containers.py	Tue Oct 18 14:27:25 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 14:35:16 2011 -0400
@@ -36,7 +36,6 @@
 
 from yt.funcs import *
 
-from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.derived_quantities import GridChildMaskWrapper
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \


--- a/yt/data_objects/grid_patch.py	Tue Oct 18 14:27:25 2011 -0400
+++ b/yt/data_objects/grid_patch.py	Tue Oct 18 14:35:16 2011 -0400
@@ -50,7 +50,7 @@
     _con_args = ('id', 'filename')
     OverlappingSiblings = None
 
-    __slots__ = ['data', 'field_parameters', 'id', 'hierarchy', 'pf',
+    __slots__ = ['field_data', 'field_parameters', 'id', 'hierarchy', 'pf',
                  'ActiveDimensions', 'LeftEdge', 'RightEdge', 'Level',
                  'NumberOfParticles', 'Children', 'Parent',
                  'start_index', 'filename', '__weakref__', 'dds',
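
The __slots__ change above is the easy-to-miss half of the rename: on a slotted class, assigning to an attribute that is not listed raises AttributeError, so 'data' has to give way to 'field_data' in the slot list as well. A toy illustration (not yt's class):

class SlottedGrid(object):
    __slots__ = ['field_data']

g = SlottedGrid()
g.field_data = {}   # allowed: listed in __slots__
# g.data = {}       # would raise AttributeError, since 'data' is no longer a slot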


http://bitbucket.org/yt_analysis/yt/changeset/66fda71ea4a6/
changeset:   66fda71ea4a6
branch:      yt
user:        brittonsmith
date:        2011-10-18 20:36:43
summary:     Merged.
affected #:  13 files (-1 bytes)

--- a/tests/halos.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/tests/halos.py	Tue Oct 18 14:36:43 2011 -0400
@@ -1,11 +1,10 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCompositionHashHOP, TestHaloCompositionHashFOF, \
-    TestHaloCompositionHashPHOP 
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
 
-create_test(TestHaloCompositionHashHOP, "halo_composition_test_hash_HOP", threshold=80.0)
+create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
 
-create_test(TestHaloCompositionHashFOF, "halo_composition_test_hash_FOF", threshold=80.0)
+create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
 
-create_test(TestHaloCompositionHashPHOP, "halo_composition_test_hash_PHOP", threshold=80.0)
+create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)
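
The create_test calls above bake keyword values into a named test class. As a minimal stand-in for how such a factory can work (a simplification; yt's actual create_test also registers the new class with the answer-testing machinery):

def create_test(base, name, **kwargs):
    # Build a subclass with the keyword arguments stored as class attributes.
    return type(name, (base,), dict(kwargs))

class TestHaloCount(object):
    threshold = 80.0

HighThreshold = create_test(TestHaloCount, "halo_count_high", threshold=160.0)
assert HighThreshold.threshold == 160.0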


--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 14:36:43 2011 -0400
@@ -1986,7 +1986,8 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._mpi_catarray(my_points[0])
+        root_points = self._par_combine_object(my_points[0],
+                datatype="array", op="cat")
         del my_points
         if mine == 0:
             root_points.shape = (tot_random, 3)


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 14:36:43 2011 -0400
@@ -683,8 +683,11 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._mpi_catarray(self.densest_in_chain)
-        self.densest_in_chain_real_index = self._mpi_catarray(self.densest_in_chain_real_index)
+        self.densest_in_chain = self._par_combine_object(self.densest_in_chain,
+                datatype="array", op="cat")
+        self.densest_in_chain_real_index = self._par_combine_object(
+                self.densest_in_chain_real_index,
+                datatype="array", op="cat")
         yt_counters("global chain MPI stuff.")
         # Sort the chains by density here. This is an attempt to make it such
         # that the merging stuff in a few steps happens in the same order
@@ -837,7 +840,8 @@
         # Now we make a global dict of how many particles each task is
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
-        self.global_padded_count = self._mpi_joindict(self.global_padded_count)
+        self.global_padded_count = self._par_combine_object(
+                self.global_padded_count, datatype = "dict", op = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()
         del self.global_padded_count
@@ -932,7 +936,8 @@
         # but there's so many places in this that need to be globally synched
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
-        global_annulus_count = self._mpi_joindict(global_annulus_count)
+        global_annulus_count = self._par_combine_object(
+                global_annulus_count, datatype = "dict", op = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)
         recv_chainIDs = dict.fromkeys(self.neighbors)


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 14:36:43 2011 -0400
@@ -548,11 +548,16 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._mpi_catarray(parent_IDs_tosend)
-        parent_masses_tosend = self._mpi_catarray(parent_masses_tosend)
-        parent_halos_tosend = self._mpi_catarray(parent_halos_tosend)
-        child_IDs_tosend = self._mpi_catarray(child_IDs_tosend)
-        child_halos_tosend = self._mpi_catarray(child_halos_tosend)
+        parent_IDs_tosend = self._par_combine_object(parent_IDs_tosend,
+                datatype="array", op="cat")
+        parent_masses_tosend = self._par_combine_object(parent_masses_tosend,
+                datatype="array", op="cat")
+        parent_halos_tosend = self._par_combine_object(parent_halos_tosend,
+                datatype="array", op="cat")
+        child_IDs_tosend = self._par_combine_object(child_IDs_tosend,
+                datatype="array", op="cat")
+        child_halos_tosend = self._par_combine_object(child_halos_tosend,
+                datatype="array", op="cat")
 
         # Resort the received particles.
         Psort = parent_IDs_tosend.argsort()


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 14:36:43 2011 -0400
@@ -494,11 +494,13 @@
             updated_halos.append(halo)
         
         # And here is where we bring it all together.
-        updated_halos = self._mpi_catlist(updated_halos)
+        updated_halos = self._par_combine_object(updated_halos,
+                            datatype="list", op="cat")
         updated_halos.sort(key = lambda a:a['id'])
         self.all_halos = updated_halos
 
-        self.filtered_halos = self._mpi_catlist(self.filtered_halos)
+        self.filtered_halos = self._par_combine_object(self.filtered_halos)
+                            datatype="list", op="cat")
         self.filtered_halos.sort(key = lambda a:a['id'])
 
         if filename is not None:


--- a/yt/data_objects/data_containers.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 14:36:43 2011 -0400
@@ -810,7 +810,8 @@
             self[field] = temp_data[field] 
         # We finalize
         if temp_data != {}:
-            temp_data = self._mpi_catdict(temp_data)
+            temp_data = self._par_object_combine(temp_data,
+                    datatype='dict', op='cat')
         # And set, for the next group
         for field in temp_data.keys():
             self[field] = temp_data[field]
@@ -998,12 +999,14 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0:
             points = None
-            t = self._mpi_catarray(None)
+            t = self._par_combine_object(None, datatype="array", op="cat")
         else:
             points = na.concatenate(points)
-            # We have to transpose here so that _mpi_catarray works properly, as
-            # it and the alltoall assume the long axis is the last one.
-            t = self._mpi_catarray(points.transpose())
+            # We have to transpose here so that _par_combine_object works
+            # properly, as it and the alltoall assume the long axis is the last
+            # one.
+            t = self._par_combine_object(points.transpose(),
+                        datatype="array", op="cat")
         self['px'] = t[0,:]
         self['py'] = t[1,:]
         self['pz'] = t[2,:]
@@ -1218,7 +1221,7 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
         else: points = na.concatenate(points)
-        t = self._mpi_catarray(points)
+        t = self._par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
         self['px'] = na.dot(pos, self._x_vec)
         self['py'] = na.dot(pos, self._y_vec)
@@ -2041,7 +2044,7 @@
         data['pdy'] *= 0.5
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._mpi_catdict(data)
+        data = self._par_object_combine(temp_data, datatype='dict', op='cat')
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
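
Two details in the data_containers.py hunks above are worth flagging. First, the concatenation helper treats the last axis as the long one, hence the transpose of the (N, 3) point array before combining and the row indexing afterwards. Second, two call sites are spelled _par_object_combine (one of them passing temp_data where data appears intended), which does not match the _par_combine_object method added later in this changeset, so those lines look like they need a follow-up fix. The transpose convention in isolation, as plain numpy:

import numpy as na

points = na.random.random((5, 3))       # (N, 3): one row per point
t = points.transpose()                  # (3, N): long axis last, as the helper expects
px, py, pz = t[0, :], t[1, :], t[2, :]  # unpack back into coordinate arrays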


--- a/yt/data_objects/derived_quantities.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Tue Oct 18 14:36:43 2011 -0400
@@ -103,13 +103,14 @@
 
     def _finalize_parallel(self):
         # Note that we do some fancy footwork here.
-        # _mpi_catarray and its affiliated alltoall function
+        # _par_combine_object and its affiliated alltoall function
         # assume that the *long* axis is the last one.  However,
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            rv.append(self._mpi_catarray(data).transpose())
+            rv.append(self._par_combine_object(data.transpose(),
+                        datatype="array", op="cat"))
         self.retvals = rv
         
     def _call_func_unlazy(self, args, kwargs):


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Tue Oct 18 14:36:43 2011 -0400
@@ -770,7 +770,8 @@
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
     def merge_trees(self):
-        self.tree_dict = self._mpi_joindict(self.tree_dict)
+        self.tree_dict = self._par_combine_object(self.tree_dict,
+                            datatype = "dict", op = "join")
 
     def rebuild_references(self):
         self.tree = self.tree_dict[0]


--- a/yt/utilities/answer_testing/halo_tests.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Tue Oct 18 14:36:43 2011 -0400
@@ -23,8 +23,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountHOP, "halo_count_test_HOP", threshold=80.0)
-
 # Tests the number of halos returned by the FOF halo finder on a dataset
 class TestHaloCountFOF(YTStaticOutputTest):
     link = 0.2
@@ -44,8 +42,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountFOF, "halo_count_test_FOF", threshold=80.0)
-
 # Tests the number of halos returned by the Parallel HOP halo finder on a 
 # dataset
 class TestHaloCountPHOP(YTStaticOutputTest):
@@ -64,8 +60,6 @@
     def plot(self):
         return []
 
-create_test(TestHaloCountPHOP, "halo_count_test_PHOP", threshold=80.0)
-
 class TestHaloComposition(YTStaticOutputTest):
     threshold=80.0
     
@@ -87,11 +81,9 @@
                 return False
         return True
     
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
-
 # Tests the content of the halos returned by the HOP halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
-# halo.
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashHOP(YTStaticOutputTest):
     threshold=80.0
     
@@ -103,7 +95,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
@@ -118,7 +110,7 @@
 
 # Tests the content of the halos returned by the FOF halo finder on a dataset 
 # by comparing the hash of the arrays of all the particles contained in each
-# halo.
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashFOF(YTStaticOutputTest):
     link = 0.2
     padding = 0.02
@@ -132,7 +124,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
@@ -147,7 +139,7 @@
 
 # Tests the content of the halos returned by the Parallel HOP halo finder on a 
 # dataset by comparing the hash of the arrays of all the particles contained 
-# in each halo.
+# in each halo.  Evidently breaks on parallel runtime.  DO NOT USE.
 class TestHaloCompositionHashPHOP(YTStaticOutputTest):
     threshold=80.0
     
@@ -159,7 +151,7 @@
         IDs = []
         for halo in halos:
             IDs.append(halo["particle_index"])
-        IDs = np.array(np.concatenate((IDs)))
+        IDs = np.concatenate(IDs)
         self.result = IDs
     
     def compare(self, old_result):
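
A small cleanup in the hash-based composition tests above: np.concatenate already returns an ndarray, so the old np.array(np.concatenate((IDs))) wrapper (with its doubled parentheses) was redundant. The new "DO NOT USE" notes presumably reflect that particle IDs come back in a different order under parallel runs, which would change any order-sensitive comparison. Concretely:

import numpy as np

ids_per_halo = [np.array([3, 1, 2]), np.array([7, 5])]
ids = np.concatenate(ids_per_halo)   # same result as np.array(np.concatenate((ids_per_halo)))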


--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Tue Oct 18 14:35:16 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,139 +0,0 @@
-"""
-A simple distributed object mechanism, for storing array-heavy objects.
-Meant to be subclassed.
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from itertools import izip
-
-import numpy as na
-
-from yt.funcs import *
-
-from .parallel_analysis_interface import ParallelAnalysisInterface
-
-class DistributedObjectCollection(ParallelAnalysisInterface):
-    valid = True
-
-    def _get_object_info(self):
-        pass
-
-    def _set_object_info(self):
-        pass
-
-    def join_lists(self):
-        info_dict = self._get_object_info()
-        info_dict = self._mpi_catdict(info_dict)
-        self._set_object_info(info_dict)
-
-    def _collect_objects(self, desired_indices):
-        # We figure out which indices belong to which processor,
-        # then we pack them up, and we send a list to each processor.
-        request_count = []
-        owners = self._object_owners[desired_indices]
-        mylog.debug("Owner list: %s", na.unique1d(owners))
-        # Even if we have a million bricks, this should not take long.
-        s = self._par_size
-        m = self._par_rank
-        requests = dict( ( (i, []) for i in xrange(s) ) )
-        for i, p in izip(desired_indices, owners):
-            requests[p].append(i)
-        for p in sorted(requests):
-            requests[p] = na.array(requests[p], dtype='int64')
-            request_count.append(len(requests[p]))
-        size = len(request_count)
-        mylog.debug("Requesting: %s", request_count)
-        request_count = na.array(request_count, dtype='int64')
-        # Now we distribute our requests to all the processors.
-        # This is two-pass.  One to get the length of the arrays.  The second
-        # pass is to get the actual indices themselves.
-        request_count = self._mpi_joindict({m : request_count})
-        # Now we have our final array of requests, with arrangement
-        # (Nproc,Nproc).  First index corresponds to requesting proc, second to
-        # sending.  So [them,us] = 5 means we owe 5, whereas [us, them] means
-        # we are owed.
-        send_hooks = []
-        dsend_buffers, dsend_hooks = [], []
-        recv_hooks, recv_buffers = [], []
-        drecv_buffers, drecv_hooks = [], []
-        # We post our index-list and data receives from each processor.
-        mylog.debug("Posting data buffer receives")
-        proc_hooks = {}
-        for p, request_from in request_count.items():
-            if p == m: continue
-            size = request_from[m]
-            #if size == 0: continue
-            # We post receives of the grids we *asked* for.
-            # Note that indices into this are not necessarily processor ids.
-            # So we store.  This has to go before the appends or it's an
-            # off-by-one.
-            mylog.debug("Setting up index buffer of size %s for receive from %s",
-                        size, p)
-            proc_hooks[len(drecv_buffers)] = p
-            drecv_buffers.append(self._create_buffer(requests[p]))
-            # does this work without specifying the type? (was double)
-            drecv_hooks.append(self._mpi_nonblocking_recv(drecv_buffers[-1], p, 1))
-            recv_buffers.append(na.zeros(size, dtype='int64'))
-            # Our index list goes on 0, our buffer goes on 1.  We know how big
-            # the index list will be, now.
-            # does this work without specifying the type? (was long)
-            recv_hooks.append(self._mpi_nonblocking_recv(recv_buffers[-1], p, 0))
-        # Send our index lists into hte waiting buffers
-        mylog.debug("Sending index lists")
-        for p, ind_list in requests.items():
-            if p == m: continue
-            if len(ind_list) == 0: continue
-            # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_nonblocking_send(ind_list, p, 0))
-        # Now we post receives for all of the data buffers.
-        mylog.debug("Sending data")
-        for i in self._mpi_Request_Waititer(recv_hooks):
-            # We get back the index, which here is identical to the processor
-            # number doing the send.  At this point, we can post our receives.
-            p = proc_hooks[i]
-            mylog.debug("Processing from %s", p)
-            ind_list = recv_buffers[i]
-            dsend_buffers.append(self._create_buffer(ind_list))
-            self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_nonblocking_send(dsend_buffers[-1], p, 1))
-        mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
-        for i in self._mpi_Request_Waititer(drecv_hooks):
-            mylog.debug("Unpacking from %s", proc_hooks[i])
-            # Now we have to unpack our buffers
-            # Our key into this is actually the request for the processor
-            # number.
-            p = proc_hooks[i]
-            self._unpack_buffer(requests[p], drecv_buffers[i])
-        mylog.debug("Finalizing sends: %s", len(dsend_hooks))
-        for i in self._mpi_Request_Waititer(dsend_hooks):
-            continue
-
-    def _create_buffer(self, ind_list):
-        pass
-
-    def _pack_buffer(self, ind_list):
-        pass
-
-    def _unpack_buffer(self, ind_list, my_buffer):
-        pass
-


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 14:36:43 2011 -0400
@@ -459,61 +459,6 @@
         return None
 
     @parallel_passthrough
-    def _mpi_catdict(self, data):
-        field_keys = data.keys()
-        field_keys.sort()
-        size = data[field_keys[0]].shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        for key in field_keys:
-            dd = data[key]
-            rv = _alltoallv_array(dd, arr_size, offsets, sizes)
-            data[key] = rv
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict(self, data):
-        #self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        #self._barrier()
-        return data
-
-    @parallel_passthrough
-    def _mpi_maxdict(self, data):
-        """
-        For each key in data, find the maximum value across all tasks, and
-        then broadcast it back.
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                temp_data = MPI.COMM_WORLD.recv(source=i, tag=0)
-                for key in temp_data:
-                    try:
-                        old_value = data[key]
-                    except KeyError:
-                        # This guarantees the new value gets added.
-                        old_value = None
-                    if old_value < temp_data[key]:
-                        data[key] = temp_data[key]
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
     def _mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
@@ -582,50 +527,89 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
-    def _mpi_catlist(self, data):
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvlist(data)
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
-    @parallel_passthrough
-    def _mpi_catarray(self, data):
-        if data is None:
-            ncols = -1
-            size = 0
-        else:
-            if len(data) == 0:
+    def _par_combine_object(self, data, op, datatype = None):
+        # op can be chosen from:
+        #   cat
+        #   join
+        # data is selected to be of types:
+        #   na.ndarray
+        #   dict
+        #   data field dict
+        if datatype is not None:
+            pass
+        elif isinstance(data, types.DictType):
+            datatype == "dict"
+        elif isinstance(data, na.ndarray):
+            datatype == "array"
+        elif isinstance(data, types.ListType):
+            datatype == "list"
+        # Now we have our datatype, and we conduct our operation
+        if datatype == "dict" and op == "join":
+            if MPI.COMM_WORLD.rank == 0:
+                for i in range(1,MPI.COMM_WORLD.size):
+                    data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        elif datatype == "dict" and op == "cat":
+            field_keys = data.keys()
+            field_keys.sort()
+            size = data[field_keys[0]].shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            for key in field_keys:
+                dd = data[key]
+                rv = _alltoallv_array(dd, arr_size, offsets, sizes)
+                data[key] = rv
+            return data
+        elif datatype == "array" and op == "cat":
+            if data is None:
                 ncols = -1
                 size = 0
-            elif len(data.shape) == 1:
-                ncols = 1
-                size = data.shape[0]
             else:
-                ncols, size = data.shape
-        ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
-        if size == 0:
-            data = na.zeros((ncols,0), dtype='float64') # This only works for
-        size = data.shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        data = _alltoallv_array(data, arr_size, offsets, sizes)
-        return data
+                if len(data) == 0:
+                    ncols = -1
+                    size = 0
+                elif len(data.shape) == 1:
+                    ncols = 1
+                    size = data.shape[0]
+                else:
+                    ncols, size = data.shape
+            ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
+            if size == 0:
+                data = na.zeros((ncols,0), dtype='float64') # This only works for
+            size = data.shape[-1]
+            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            data = _alltoallv_array(data, arr_size, offsets, sizes)
+            return data
+        elif datatype == "list" and op == "cat":
+            if MPI.COMM_WORLD.rank == 0:
+                data = self.__mpi_recvlist(data)
+            else:
+                MPI.COMM_WORLD.send(data, dest=0, tag=0)
+            mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
+            data = MPI.COMM_WORLD.bcast(data, root=0)
+            return data
+        raise NotImplementedError
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
-        #self._barrier()
         data = MPI.COMM_WORLD.bcast(data, root=0)
         return data
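
The new _par_combine_object above folds _mpi_catdict, _mpi_joindict, _mpi_catlist and _mpi_catarray into a single entry point keyed on (datatype, op). One caveat visible in the hunk: the type-inference branch compares (datatype == "dict") where an assignment was presumably intended, so the inference is a no-op and callers need to pass datatype explicitly, which every converted call site in this changeset does. Stripped of the MPI machinery, the dispatch amounts to the serial sketch below; par_combine_object and the pieces argument are illustrative names, not the real interface.

import numpy as na

def par_combine_object(pieces, op, datatype):
    """Serial stand-in: 'pieces' is a list holding each task's contribution."""
    if datatype == "dict" and op == "join":
        out = {}
        for p in pieces:
            out.update(p)
        return out
    if datatype == "list" and op == "cat":
        out = []
        for p in pieces:
            out.extend(p)
        return out
    if datatype == "array" and op == "cat":
        # Concatenate along the last axis, mirroring the alltoallv convention.
        return na.concatenate([p for p in pieces if p is not None], axis=-1)
    raise NotImplementedError((datatype, op))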
 


--- a/yt/visualization/volume_rendering/api.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/visualization/volume_rendering/api.py	Tue Oct 18 14:36:43 2011 -0400
@@ -35,7 +35,6 @@
 from yt.utilities.amr_utils import PartitionedGrid, VectorPlane, \
     TransferFunctionProxy
 from grid_partitioner import HomogenizedVolume, \
-                             HomogenizedBrickCollection, \
                              export_partitioned_grids, \
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 14:35:16 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 14:36:43 2011 -0400
@@ -29,8 +29,6 @@
 
 from yt.utilities.amr_utils import PartitionedGrid, ProtoPrism, GridFace, \
     grid_points_in_volume, find_grids_in_inclined_box
-from yt.utilities.parallel_tools.distributed_object_collection import \
-    DistributedObjectCollection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_root_only
 
@@ -201,176 +199,6 @@
     def reset_cast(self):
         pass
 
-class HomogenizedBrickCollection(DistributedObjectCollection):
-    def __init__(self, source):
-        # The idea here is that we have two sources -- the global_domain
-        # source, which would be a decomposition of the 3D domain, and a
-        # local_domain source, which is the set of bricks we want at the end.
-        self.source = source
-        self.pf = source.pf
-
-    @classmethod
-    def load_bricks(self, base_filename):
-        pass
-
-    def write_my_bricks(self, base_filename):
-        pass
-
-    def store_bricks(self, base_filename):
-        pass
-    
-    @parallel_root_only
-    def write_hierarchy(self, base_filename):
-        pass
-    
-    def _partition_grid(self, grid, fields, log_field = None):
-        fields = ensure_list(fields)
-        if log_field is None: log_field = [True] * len(fields)
-
-        # This is not super efficient, as it re-fills the regions once for each
-        # field.
-        vcds = []
-        for i,field in enumerate(fields):
-            vcd = grid.get_vertex_centered_data(field).astype('float64')
-            if log_field[i]: vcd = na.log10(vcd)
-            vcds.append(vcd)
-
-        GF = GridFaces(grid.Children + [grid])
-        PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
-
-        pgs = []
-        for P in PP.sweep(0):
-            sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
-            if len(sl) == 0: continue
-            dd = [d[sl[0][0]:sl[0][1]+1,
-                    sl[1][0]:sl[1][1]+1,
-                    sl[2][0]:sl[2][1]+1].copy() for d in vcds]
-            pgs.append(PartitionedGrid(grid.id, len(fields), dd,
-                        P.LeftEdge, P.RightEdge, sl[-1]))
-        return pgs
-
-    def _partition_local_grids(self, fields = "Density", log_field = None):
-        fields = ensure_list(fields)
-        bricks = []
-        # We preload.
-        # UNCOMMENT FOR PARALLELISM
-        #grid_list = list(self._get_grid_objs())
-        grid_list = list(self.source._grids)
-        self._preload(grid_list, fields, self.pf.h.io)
-        pbar = get_pbar("Partitioning ", len(grid_list))
-        # UNCOMMENT FOR PARALLELISM
-        #for i, g in enumerate(self._get_grids()):
-        print "THIS MANY GRIDS!", len(grid_list)
-        for i, g in enumerate(self.source._grids):
-            pbar.update(i)
-            bricks += self._partition_grid(g, fields, log_field)
-        pbar.finish()
-        bricks = na.array(bricks, dtype='object')
-        NB = len(bricks)
-        # Now we set up our (local for now) hierarchy.  Note that to calculate
-        # intersection, we only need to do the left edge & right edge.
-        #
-        # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._par_rank
-        self._object_owners = self.brick_owners
-        for i,b in enumerate(bricks):
-            self.brick_left_edges[i,:] = b.LeftEdge
-            self.brick_right_edges[i,:] = b.RightEdge
-            self.brick_parents[i] = b.parent_grid_id
-            self.brick_dimensions[i,:] = b.my_data[0].shape
-        # Vertex-centered means we subtract one from the shape
-        self.brick_dimensions -= 1
-        self.bricks = na.array(bricks, dtype='object')
-        # UNCOMMENT FOR PARALLELISM
-        #self.join_lists()
-
-    def _get_object_info(self):
-        # We transpose here for the catdict operation
-        info_dict = dict(left_edges = self.brick_left_edges.transpose(),
-                         right_edges = self.brick_right_edges.transpose(),
-                         parents = self.brick_parents,
-                         owners = self.brick_owners,
-                         dimensions = self.brick_dimensions.transpose(),)
-        return info_dict
-
-    def _set_object_info(self, info_dict):
-        self.brick_left_edges = info_dict.pop("left_edges").transpose()
-        self.brick_right_edges = info_dict.pop("right_edges").transpose()
-        self.brick_parents = info_dict.pop("parents")
-        self.brick_dimensions = info_dict.pop("dimensions").transpose()
-        self.brick_owners = info_dict.pop("owners")
-        self._object_owners = self.brick_owners
-        bricks = self.bricks
-        self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
-        # Copy our bricks back in
-        self.bricks[self.brick_owners == self._par_rank] = bricks[:]
-
-    def _create_buffer(self, ind_list):
-        # Note that we have vertex-centered data, so we add one before taking
-        # the prod and the sum
-        total_size = (self.brick_dimensions[ind_list,:] + 1).prod(axis=1).sum()
-        mylog.debug("Creating buffer for %s bricks (%s)",
-                    len(ind_list), total_size)
-        my_buffer = na.zeros(total_size, dtype='float64')
-        return my_buffer
-
-    def _pack_buffer(self, ind_list, my_buffer):
-        si = 0
-        for index in ind_list:
-            d = self.bricks[index].my_data.ravel()
-            my_buffer[si:si+d.size] = d[:]
-            si += d.size
-
-    def _unpack_buffer(self, ind_list, my_buffer):
-        si = 0
-        for index in ind_list:
-            pgi = self.brick_parents[index]
-            LE = self.brick_left_edges[index,:].copy()
-            RE = self.brick_right_edges[index,:].copy()
-            dims = self.brick_dimensions[index,:].copy()
-            size = (dims + 1).prod()
-            data = my_buffer[si:si+size].reshape(dims + 1)
-            self.bricks[index] = PartitionedGrid(
-                    pgi, data, LE, RE, dims)
-            si += size
-
-    def _wipe_objects(self, indices):
-        self.bricks[indices] = None
-
-    def _collect_bricks(self, intersection_source):
-        if not self._distributed: return
-        # This entire routine should instead be set up to do:
-        #   alltoall broadcast of the *number* of requested bricks
-        #   non-blocking receives posted for int arrays
-        #   sizes of data calculated
-        #   concatenated data receives posted
-        #   send all data
-        #   get bricks back
-        # This presupposes that we are using the AMRInclinedBox as a data
-        # source.  If we're not, we ought to be.
-        needed_brick_i = find_grids_in_inclined_box(
-            intersection_source.box_vectors, intersection_source.center,
-            self.brick_left_edges, self.brick_right_edges)
-        needed_brick_i = na.where(needed_brick_i)[0]
-        self._collect_objects(needed_brick_i)
-
-    def _initialize_parallel(self):
-        pass
-
-    def _finalize_parallel(self):
-        pass
-
-    def get_brick(self, brick_id):
-        pass
-
-    @property
-    def _grids(self):
-        return self.source._grids
-
 class GridFaces(object):
     def __init__(self, grids):
         self.faces = [ [], [], [] ]
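
[Editor's note] The homogenized-brick code removed above packs variable-sized, vertex-centered bricks into one flat float64 buffer and reconstructs them by reshaping each slice back to (dims + 1). A standalone NumPy sketch of that pack/unpack round trip (the brick dimensions below are made up for illustration, not taken from the diff):

import numpy as np

# Illustrative brick cell dimensions; vertex-centered data carries one
# extra sample per axis, hence the "+ 1" everywhere below.
dims = np.array([[4, 4, 4], [8, 4, 2]], dtype='int64')
bricks = [np.random.random(tuple(d + 1)) for d in dims]

# Pack: one flat buffer sized by the vertex-centered shapes.
total_size = (dims + 1).prod(axis=1).sum()
buf = np.empty(total_size, dtype='float64')
si = 0
for b in bricks:
    buf[si:si + b.size] = b.ravel()
    si += b.size

# Unpack: walk the buffer and reshape each slice back to dims + 1.
si = 0
restored = []
for d in dims:
    size = (d + 1).prod()
    restored.append(buf[si:si + size].reshape(tuple(d + 1)))
    si += size

assert all(np.array_equal(a, b) for a, b in zip(bricks, restored))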


http://bitbucket.org/yt_analysis/yt/changeset/e48ea6e3d8d2/
changeset:   e48ea6e3d8d2
branch:      yt
user:        MatthewTurk
date:        2011-10-18 20:57:43
summary:     Misplaced parenthesis
affected #:  2 files (-1 bytes)

--- a/tests/runall.py	Tue Oct 18 14:36:43 2011 -0400
+++ b/tests/runall.py	Tue Oct 18 14:57:43 2011 -0400
@@ -110,6 +110,7 @@
         keys = set(registry_entries())
         tests_to_run += [t for t in new_tests if t in keys]
     for test_name in sorted(tests_to_run):
+        print "RUNNING TEST", test_name
         rtr.run_test(test_name)
     if watcher is not None:
         rtr.watcher.report()


--- a/yt/data_objects/derived_quantities.py	Tue Oct 18 14:36:43 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Tue Oct 18 14:57:43 2011 -0400
@@ -109,8 +109,8 @@
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            rv.append(self._par_combine_object(data.transpose(),
-                        datatype="array", op="cat"))
+            rv.append(self._par_combine_object(data,
+                        datatype="array", op="cat").transpose())
         self.retvals = rv
         
     def _call_func_unlazy(self, args, kwargs):
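
[Editor's note] The parenthesis fix above changes which array is transposed: the already-transposed per-processor data is concatenated and the joined result is transposed back, instead of concatenating the untransposed arrays. Assuming the "cat" operation joins arrays along their first axis (an assumption; _par_combine_object itself is not shown here), the equivalence this relies on looks like this in plain NumPy:

import numpy as np

# Two processors each hold a (components x local_values) array.
a = np.arange(6).reshape(2, 3)
b = np.arange(6, 12).reshape(2, 3)

# Concatenating the transposed arrays along axis 0 and transposing the
# combined result joins the originals along the value axis instead.
combined = np.concatenate([a.T, b.T], axis=0).T

assert combined.shape == (2, 6)
assert np.array_equal(combined, np.concatenate([a, b], axis=1))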


http://bitbucket.org/yt_analysis/yt/changeset/48164f9be5e2/
changeset:   48164f9be5e2
branch:      yt
user:        brittonsmith
date:        2011-10-18 20:38:23
summary:     Fixed a test for .field_data.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 14:36:43 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 14:38:23 2011 -0400
@@ -43,9 +43,9 @@
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
         # independently of the parameter file.
-        # The .data attributes strip out everything other than the actual array
+        # The .field_data attributes strip out everything other than the actual array
         # values.
-        self.result = (proj.data, pixelized_proj.data)
+        self.result = (proj.field_data, pixelized_proj.field_data)
 
     def compare(self, old_result):
         proj, pixelized_proj = self.result
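
[Editor's note] The rename above reflects data containers exposing their arrays through .field_data rather than .data, and the comment notes that only the raw values are kept so the stored result is independent of the parameter file. A generic sketch of that store-and-compare pattern (snapshot_fields and compare_snapshots are hypothetical helpers for illustration, not yt API):

import numpy as np

def snapshot_fields(field_dict):
    # Copy just the array values out of a field dictionary so the result
    # can be stored and compared without the originating parameter file.
    return dict((name, np.array(values)) for name, values in field_dict.items())

def compare_snapshots(old, new, rtol=1e-7):
    assert sorted(old.keys()) == sorted(new.keys())
    for name in old:
        np.testing.assert_allclose(old[name], new[name], rtol=rtol)

old = snapshot_fields({"Density": [1.0, 2.0], "Temperature": [10.0, 20.0]})
compare_snapshots(old, snapshot_fields({"Density": [1.0, 2.0], "Temperature": [10.0, 20.0]}))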


http://bitbucket.org/yt_analysis/yt/changeset/a70478832e7a/
changeset:   a70478832e7a
branch:      yt
user:        MatthewTurk
date:        2011-10-18 20:57:57
summary:     Merging
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 14:57:43 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 14:57:57 2011 -0400
@@ -43,9 +43,9 @@
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
         # independently of the parameter file.
-        # The .data attributes strip out everything other than the actual array
+        # The .field_data attributes strip out everything other than the actual array
         # values.
-        self.result = (proj.data, pixelized_proj.data)
+        self.result = (proj.field_data, pixelized_proj.field_data)
 
     def compare(self, old_result):
         proj, pixelized_proj = self.result


http://bitbucket.org/yt_analysis/yt/changeset/1cfcb4e51a62/
changeset:   1cfcb4e51a62
branch:      yt
user:        MatthewTurk
date:        2011-10-18 21:00:17
summary:     Fixing PHOP in serial.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 14:57:57 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 15:00:17 2011 -0400
@@ -458,7 +458,6 @@
             raise RuntimeError("Fatal error. Exiting.")
         return None
 
-    @parallel_passthrough
     def _mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
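
[Editor's note] Dropping the decorator above suggests @parallel_passthrough was short-circuiting this reduction when running in serial. A minimal sketch of what a passthrough decorator of this kind typically looks like (an illustration under that assumption, not yt's actual implementation):

from functools import wraps

def parallel_passthrough(func):
    # When not running distributed, hand the data straight back instead
    # of performing any MPI communication.
    @wraps(func)
    def passthrough(self, data, *args, **kwargs):
        if not getattr(self, "_distributed", False):
            return data
        return func(self, data, *args, **kwargs)
    return passthrough

If a method's serial answer is not simply its input, as seems to be the case for _mpi_maxdict_dict, a passthrough like this returns the wrong thing in serial, which would explain removing it here.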


http://bitbucket.org/yt_analysis/yt/changeset/360893ee3393/
changeset:   360893ee3393
branch:      yt
user:        MatthewTurk
date:        2011-10-18 21:12:43
summary:     Removing "traited explorer"
affected #:  6 files (-1 bytes)

--- a/yt/gui/setup.py	Tue Oct 18 15:00:17 2011 -0400
+++ b/yt/gui/setup.py	Tue Oct 18 15:12:43 2011 -0400
@@ -6,7 +6,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('gui',parent_package,top_path)
     config.add_subpackage('opengl_widgets')
-    config.add_subpackage('traited_explorer')
     config.add_subpackage('reason')
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()


--- a/yt/gui/traited_explorer/plot_editors.py	Tue Oct 18 15:00:17 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,128 +0,0 @@
-"""
-Figure editors for the Traits GUI
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import sys, matplotlib
-# We want matplotlib to use a wxPython backend
-matplotlib.use('QT4Agg')
-from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
-from matplotlib.figure import Figure
-from matplotlib.axes import Axes
-
-from enthought.traits.api import Any, Instance
-from enthought.traits.ui.qt4.editor import Editor
-from enthought.traits.ui.qt4.basic_editor_factory import BasicEditorFactory
-
-from enthought.pyface.action.api import ActionController
-
-from enthought.traits.ui.menu import \
-    Menu, Action, Separator, OKCancelButtons, OKButton
-
-from matplotlib.backend_bases import Event as MPLEvent
-
-class _MPLFigureEditor(Editor):
-    """ Snagged from Gael's tutorial """
-
-    scrollable  = True
-    mpl_control = Instance(FigureCanvas)
-
-    def init(self, parent):
-        self.control = self._create_canvas(parent)
-        self.set_tooltip()
-
-    def update_editor(self):
-        pass
-
-    def _create_canvas(self, parent):
-        """ Create the MPL canvas. """
-        # The panel lets us add additional controls.
-        panel = wx.Panel(parent, -1)
-        sizer = wx.BoxSizer(wx.VERTICAL)
-        panel.SetSizer(sizer)
-        # matplotlib commands to create a canvas
-        self.mpl_control = FigureCanvas(panel, -1, self.value)
-        sizer.Add(self.mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW | wx.SHAPED)
-        self.value.canvas.SetMinSize((10,8))
-        return panel
-
-class MPLFigureEditor(BasicEditorFactory):
-    klass = _MPLFigureEditor
-
-class MPLAction(Action):
-    event = Instance(MPLEvent)
-
-class _MPLVMPlotEditor(_MPLFigureEditor, ActionController):
-
-    def _create_canvas(self, parent):
-        panel = _MPLFigureEditor._create_canvas(self, parent)
-        self.mpl_control.mpl_connect("button_press_event", self.on_click)
-        return panel
-
-    def on_click(self, event):
-        if not event.inaxes: return
-        if event.button == 3:
-            my_menu = Menu(MPLAction(name="Recenter", action="object.recenter",
-                                     event=event),
-                           MPLAction(name="Yo!", action="object.do_something",
-                                     event=event))
-            wxmenu = my_menu.create_menu(self.mpl_control, self)
-            self.mpl_control.PopupMenuXY(wxmenu)
-
-    def perform ( self, action ):
-        """
-        This is largely taken/modified from the TreeEditor _perform method.
-        """
-        object            = self.object
-        method_name       = action.action
-        info              = self.ui.info
-        handler           = self.ui.handler
-        event             = action.event
-
-        if method_name.find( '.' ) >= 0:
-            if method_name.find( '(' ) < 0:
-                method_name += '(event)'
-            try:
-                eval( method_name, globals(),
-                      { 'object':  object,
-                        'editor':  self,
-                        'info':    info,
-                        'event':   event,
-                        'handler': handler } )
-            except:
-                # fixme: Should the exception be logged somewhere?
-                print sys.exc_info()
-                
-            return
-
-        method = getattr( handler, method_name, None )
-        if method is not None:
-            method( info, object )
-            return
-
-        if action.on_perform is not None:
-            action.on_perform( object )
-
-class MPLVMPlotEditor(BasicEditorFactory):
-    klass = _MPLVMPlotEditor
-


--- a/yt/gui/traited_explorer/setup.py	Tue Oct 18 15:00:17 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os, sys, os.path
-
-def configuration(parent_package='',top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('traited_explorer',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
-    return config


--- a/yt/gui/traited_explorer/traited_explorer.py	Tue Oct 18 15:00:17 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,479 +0,0 @@
-"""
-New version of Reason, using a TraitsUI-based approach
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from yt.mods import *
-from yt.utilities.definitions import \
-    x_dict, \
-    y_dict
-#pf = EnzoStaticOutput("/Users/matthewturk/Research/data/galaxy1200.dir/galaxy1200")
-
-from enthought.traits.api import \
-    HasTraits, List, Instance, Str, Float, Any, Code, PythonValue, Int, CArray, \
-    Property, Enum, cached_property, DelegatesTo, Callable, Array, \
-    Button
-from enthought.traits.ui.api import \
-    Group, VGroup, HGroup, Tabbed, View, Item, ShellEditor, InstanceEditor, ListStrEditor, \
-    ListEditor, VSplit, VFlow, HSplit, VFold, ValueEditor, TreeEditor, TreeNode, RangeEditor, \
-    EnumEditor, Handler, Controller, DNDEditor
-from enthought.traits.ui.menu import \
-    Menu, Action, Separator, OKCancelButtons, OKButton
-from enthought.pyface.action.api import \
-    ActionController
-from enthought.tvtk.pyface.scene_editor import SceneEditor
-from enthought.tvtk.pyface.api import \
-    DecoratedScene
-from enthought.tvtk.pyface.scene_model import SceneModel
-
-from plot_editors import Figure, MPLFigureEditor, MPLVMPlotEditor, Axes
-
-from yt.visualization.plot_types import VMPlot, ProjectionPlot, SlicePlot
-
-import traceback
-from tvtk_interface import \
-    HierarchyImporter, YTScene
-
-class PlotCreationHandler(Controller):
-    main_window = Instance(HasTraits)
-    pnode = Instance(HasTraits)
-
-    format = Str
-    plot_type = Any
-    
-    def close(self, info, is_ok):
-        if not is_ok:
-            super(Controller, self).close(info, True)
-            return
-        spt = self.plot_type(plot_spec=self.model, pf=self.pnode.pf,
-                           name=self.format % (self.model.axis))
-        self.pnode.data_objects.append(spt)
-        self.main_window.plot_frame_tabs.append(spt)
-        spt.plot
-
-class VTKSceneCreationHandler(PlotCreationHandler):
-    importer = Instance(HierarchyImporter)
-
-    def close(self, info, is_ok):
-        if is_ok: 
-            yt_scene = YTScene(importer=self.importer,
-                scene=SceneModel())
-            spt = VTKDataObject(name = "VTK: %s" % self.pnode.pf,
-                    scene=yt_scene.scene,
-                    yt_scene=yt_scene)
-            self.pnode.data_objects.append(spt)
-            self.main_window.plot_frame_tabs.append(spt)
-        super(Controller, self).close(info, True)
-        return True
-
-
-class DataObject(HasTraits):
-    name = Str
-
-class VTKDataObject(DataObject):
-    yt_scene = Instance(YTScene)
-    scene = DelegatesTo("yt_scene")
-    add_contours = Button
-    add_isocontour = Button
-    add_x_plane = Button
-    add_y_plane = Button
-    add_z_plane = Button
-    edit_camera = Button
-    edit_operators = Button
-    edit_pipeline = Button
-    center_on_max = Button
-    operators = DelegatesTo("yt_scene")
-    traits_view = View(
-            Item("scene", editor = 
-        SceneEditor(scene_class=DecoratedScene),
-                    resizable=True, show_label=False),
-            HGroup(Item("add_contours", show_label=False),
-                   Item("add_isocontour", show_label=False),
-                   Item("add_x_plane", show_label=False),
-                   Item("add_y_plane", show_label=False),
-                   Item("add_z_plane", show_label=False),
-                   Item("edit_camera", show_label=False),
-                   Item("edit_operators", show_label=False),
-                   Item("edit_pipeline", show_label=False),
-                   Item("center_on_max", show_label=False),
-                ),
-            )
-
-    operators_edit = View(
-        Item("operators", style='custom', show_label=False,
-             editor=ListEditor(editor=InstanceEditor(),
-                               use_notebook=True),
-              name="Edit Operators"),
-        height=500.0, width=500.0, resizable=True)
-    
-    def _edit_camera_fired(self):
-        self.yt_scene.camera_path.edit_traits()
-
-    def _edit_operators_fired(self):
-        self.edit_traits(view='operators_edit')
-
-    def _edit_pipeline_fired(self):
-        from enthought.tvtk.pipeline.browser import PipelineBrowser
-        pb = PipelineBrowser(self.scene)
-        pb.show()
-
-    def _add_contours_fired(self):
-        self.yt_scene.add_contour()
-
-    def _add_isocontour_fired(self):
-        self.yt_scene.add_isocontour()
-
-    def _add_x_plane_fired(self):
-        self.yt_scene.add_x_plane()
-
-    def _add_y_plane_fired(self):
-        self.yt_scene.add_y_plane()
-
-    def _add_z_plane_fired(self):
-        self.yt_scene.add_z_plane()
-
-    def _center_on_max_fired(self):
-        self.yt_scene.do_center_on_max()
-
-class ParameterFile(HasTraits):
-    pf = Instance(EnzoStaticOutput)
-    data_objects = List(Instance(DataObject))
-    name = Str
-
-    def _name_default(self):
-        return str(self.pf)
-
-    def do_slice(self):
-        cons_view = View(
-                Item('axis'), 
-                Item('center'), 
-                Item('field', editor=EnumEditor(name='field_list')),
-                buttons=OKCancelButtons, title="Slicer: %s" % self.pf)
-        ps = SlicePlotSpec(pf=self.pf)
-        hand = PlotCreationHandler(main_window=mw, pnode=self, model=ps,
-                                   plot_type=SlicePlotTab, format="Slice: %s")
-        ps.edit_traits(cons_view, handler=hand)
-
-    def do_proj(self):
-        cons_view = View(
-                Item('axis'), 
-                Item('field', editor=EnumEditor(name='field_list')),
-                Item('weight_field', editor=EnumEditor(name='none_field_list')),
-                buttons=OKCancelButtons, title="Projector: %s" % self.pf)
-        ps = ProjPlotSpec(pf=self.pf)
-        hand = PlotCreationHandler(main_window=mw, pnode=self, model=ps,
-                                   plot_type=ProjPlotTab, format="Proj: %s")
-        ps.edit_traits(cons_view, handler=hand)
-
-    def do_vtk(self):
-        from tvtk_interface import HierarchyImporter, \
-            HierarchyImportHandler
-        importer = HierarchyImporter(pf=self.pf, max_level=self.pf.h.max_level)
-        importer.edit_traits(handler = VTKSceneCreationHandler(
-            main_window=mw, pnode=self, importer = importer))
-
-class ParameterFileCollection(HasTraits):
-    parameter_files = List(Instance(ParameterFile))
-    name = Str
-    collection = Any
-
-    def _parameter_files_default(self):
-        my_list = []
-        for f in self.collection:
-            try:
-                pf = EnzoStaticOutput(f)
-                my_list.append(
-                    ParameterFile(pf=pf, 
-                            data_objects = []))
-            except IOError: pass
-        return my_list
-
-    def _name_default(self):
-        return str(self.collection)
-
-class ParameterFileCollectionList(HasTraits):
-    parameter_file_collections = List(Instance(ParameterFileCollection))
-
-    def _parameter_file_collections_default(self):
-        return [ParameterFileCollection(collection=c)
-                for c in fido.GrabCollections()]
-
-class DataObjectList(HasTraits):
-    data_objects = List(Str)
-
-    traits_view = View(
-              Item('data_objects', show_label=False,
-                   editor=ListStrEditor())
-               )
-
-    def _data_objects_default(self):
-        return ['a','b','c']
-
-class PlotFrameTab(DataObject):
-    figure = Instance(Figure)
-
-class VMPlotSpec(HasTraits):
-    pf = Instance(EnzoStaticOutput)
-    field = Str('Density')
-    field_list = Property(depends_on = 'pf')
-
-    center = Array(shape=(3,), dtype='float64')
-    axis = Enum(0,1,2)
-
-    @cached_property
-    def _get_field_list(self):
-        fl = self.pf.h.field_list
-        df = self.pf.h.derived_field_list
-        fl.sort(); df.sort()
-        return fl + df
-
-    def _center_default(self):
-        return self.pf.h.find_max("Density")[1]
-
-class SlicePlotSpec(VMPlotSpec):
-    pass
-
-class ProjPlotSpec(VMPlotSpec):
-    weight_field = Str("None")
-    none_field_list = Property(depends_on = 'field_list')
-
-    @cached_property
-    def _get_none_field_list(self):
-        return ["None"] + self.field_list
-
-class VMPlotTab(PlotFrameTab):
-    pf = Instance(EnzoStaticOutput)
-    figure = Instance(Figure, args=())
-    field = DelegatesTo('plot_spec')
-    field_list = DelegatesTo('plot_spec')
-    plot = Instance(VMPlot)
-    axes = Instance(Axes)
-    disp_width = Float(1.0)
-    unit = Str('unitary')
-    min_width = Property(Float, depends_on=['pf','unit'])
-    max_width = Property(Float, depends_on=['pf','unit'])
-    unit_list = Property(depends_on = 'pf')
-    smallest_dx = Property(depends_on = 'pf')
-
-    traits_view = View(VGroup(
-            HGroup(Item('figure', editor=MPLVMPlotEditor(),
-                     show_label=False)),
-            HGroup(Item('disp_width',
-                     editor=RangeEditor(format="%0.2e",
-                        low_name='min_width', high_name='max_width',
-                        mode='logslider', enter_set=True),
-                     show_label=False, width=400.0),
-                   Item('unit',
-                      editor=EnumEditor(name='unit_list')),),
-            HGroup(Item('field',
-                      editor=EnumEditor(name='field_list')),
-                )),
-             resizable=True)
-
-    def __init__(self, **traits):
-        super(VMPlotTab, self).__init__(**traits)
-        self.axes = self.figure.add_subplot(111, aspect='equal')
-
-    def _field_changed(self, old, new):
-        self.plot.switch_z(new)
-        self._redraw()
-
-    @cached_property
-    def _get_min_width(self):
-        return 50.0*self.smallest_dx*self.pf[self.unit]
-
-    @cached_property
-    def _get_max_width(self):
-        return self.pf['unitary']*self.pf[self.unit]
-
-    @cached_property
-    def _get_smallest_dx(self):
-        return self.pf.h.get_smallest_dx()
-
-    @cached_property
-    def _get_unit_list(self):
-        return self.pf.units.keys()
-
-    def _unit_changed(self, old, new):
-        self.disp_width = self.disp_width * self.pf[new]/self.pf[old]
-
-    def _disp_width_changed(self, old, new):
-        self.plot.set_width(new, self.unit)
-        self._redraw()
-
-    def _redraw(self):
-        self.figure.canvas.draw()
-
-    def recenter(self, event):
-        xp, yp = event.xdata, event.ydata
-        dx = abs(self.plot.xlim[0] - self.plot.xlim[1])/self.plot.pix[0]
-        dy = abs(self.plot.ylim[0] - self.plot.ylim[1])/self.plot.pix[1]
-        x = (dx * xp) + self.plot.xlim[0]
-        y = (dy * yp) + self.plot.ylim[0]
-        xi = x_dict[self.axis]
-        yi = y_dict[self.axis]
-        cc = self.center[:]
-        cc[xi] = x; cc[yi] = y
-        self.plot.data.center = cc[:]
-        self.plot.data.set_field_parameter('center', cc.copy())
-        self.center = cc
-
-class SlicePlotTab(VMPlotTab):
-    plot_spec = Instance(SlicePlotSpec)
-
-    axis = DelegatesTo('plot_spec')
-    center = DelegatesTo('plot_spec')
-    
-    plot = Instance(SlicePlot)
-
-    def _plot_default(self):
-        coord = self.center[self.axis]
-        sl = self.pf.h.slice(self.axis, coord, center=self.center[:])
-        sp = SlicePlot(sl, self.field, self.figure, self.axes)
-        self.figure.canvas.draw()
-        return sp
-
-    def _center_changed(self, old, new):
-        #traceback.print_stack()
-        if na.all(na.abs(old - new) == 0.0): return
-        print na.abs(old-new)
-        print "Re-slicing", old, new
-        pp = self.center
-        self.plot.data.reslice(pp[self.axis])
-        self.plot._refresh_display_width()
-        self.figure.canvas.draw()
-
-class ProjPlotTab(VMPlotTab):
-    plot_spec = Instance(ProjPlotSpec)
-
-    axis = DelegatesTo('plot_spec')
-    center = DelegatesTo('plot_spec')
-    weight_field = DelegatesTo('plot_spec')
-
-    plot = Instance(ProjectionPlot)
-
-    def _plot_default(self):
-        self.field = self.field[:]
-        self.weight_field = self.weight_field[:]
-        wf = self.weight_field
-        if str(wf) == "None": wf = None
-        proj = self.pf.h.proj(self.axis, self.field, wf,
-                        center=self.center[:])
-        pp = ProjectionPlot(proj, self.field, self.figure, self.axes)
-        self.figure.canvas.draw()
-        return pp
-
-    def _center_changed(self, old, new):
-        self.plot._refresh_display_width()
-
-class SphereWrapper(DataObject):
-    radius = Float
-    unit = Str
-
-class MainWindow(HasTraits):
-    parameter_file_collections = Instance(ParameterFileCollectionList)
-    parameter_files = Instance(ParameterFileCollection)
-    plot_frame_tabs = List(Instance(DataObject))
-    open_parameterfile = Button
-    shell = PythonValue
-
-    def _shell_default(self):
-        return globals()
-    notebook_editor = ListEditor(editor=InstanceEditor(editable=True),
-                                 use_notebook=True)
-
-    traits_view = View(VSplit(
-                    HSplit(VGroup(
-                       Item('parameter_file_collections', 
-                            width=120.0, height=500.0,
-                            show_label=False,
-                            editor = TreeEditor(editable=False,
-                    nodes=[
-                        TreeNode(node_for=[ParameterFileCollectionList],
-                                 children='parameter_file_collections',
-                                 label="=Data Collections"),
-                        TreeNode(node_for=[ParameterFileCollection],
-                                 children='parameter_files',
-                                 label="name",
-                                 view=View()),
-                        TreeNode(node_for=[ParameterFile],
-                                 children='data_objects',
-                                 label="name",
-                                 menu = Menu(Action(name='Slice',
-                                                    action='object.do_slice'),
-                                             Action(name='Project',
-                                                    action='object.do_proj'),
-                                             Action(name='VTK',
-                                                    action='object.do_vtk')),
-                                 view=View()),
-                        TreeNode(node_for=[DataObject],
-                                 children='',
-                                 label="name"),
-                                ], show_icons=False),),
-                        Item('open_parameterfile', show_label=False)),
-                       Item('plot_frame_tabs', style='custom',
-                            editor = notebook_editor,
-                            show_label=False, height=500.0, width=500.0),
-                    ),
-                    HGroup(
-                       #Item('shell', editor=ShellEditor(share=True),
-                            #show_label=False, height=120.0),
-                    ),
-                ),
-               resizable=True, width=800.0, height=660.0,
-               title="reason v2 [prototype]")
-
-    def _open_parameterfile_fired(self):
-        print "OPENING"
-
-    def _parameter_file_collections_default(self):
-        return ParameterFileCollectionList()
-
-class YTScript(HasTraits):
-    code = Code
-    traits_view = View(Item('code', show_label=False),
-                       height=0.8, width=0.8, resizable=True,
-                       buttons=OKCancelButtons)
-
-class ObjectViewer(HasTraits):
-    to_view=Any
-    traits_view = View(
-            Item('to_view', editor=ValueEditor(), show_label=False),
-                     resizable=True, height=0.8, width=0.8)
-
-def view_object(obj):
-    ObjectViewer(to_view=obj).edit_traits()
-
-def run_script():
-    my_script = YTScript()
-    my_script.edit_traits()
-    return my_script
-
-class event_mock(object):
-    inaxes = True
-    button = 3
-
-dol = DataObjectList()
-mw = MainWindow(plot_frame_tabs = [])
-mw.edit_traits()
-#mw.edit_traits()


--- a/yt/gui/traited_explorer/tvtk_interface.py	Tue Oct 18 15:00:17 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,692 +0,0 @@
-"""
-This is the preliminary interface to VTK.  Note that as of VTK 5.2, it still
-requires a patchset prepared here:
-http://yt-project.org/files/vtk_composite_data.zip
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from enthought.tvtk.tools import ivtk
-from enthought.tvtk.api import tvtk 
-from enthought.traits.api import \
-    Float, HasTraits, Instance, Range, Any, Delegate, Tuple, File, Int, Str, \
-    CArray, List, Button, Bool, Property, cached_property
-from enthought.traits.ui.api import View, Item, HGroup, VGroup, TableEditor, \
-    Handler, Controller, RangeEditor, EnumEditor, InstanceEditor
-from enthought.traits.ui.menu import \
-    Menu, Action, Separator, OKCancelButtons, OKButton
-from enthought.traits.ui.table_column import ObjectColumn
-from enthought.tvtk.pyface.api import DecoratedScene
-
-import enthought.pyface.api as pyface
-
-#from yt.reason import *
-import sys
-import numpy as na
-import time, pickle, os, os.path
-from yt.funcs import *
-from yt.analysis_modules.hierarchy_subset.api import \
-        ExtractedHierarchy, ExtractedParameterFile
-
-#from enthought.tvtk.pyface.ui.wx.wxVTKRenderWindowInteractor \
-     #import wxVTKRenderWindowInteractor
-
-from enthought.mayavi.core.lut_manager import LUTManager
-
-#wxVTKRenderWindowInteractor.USE_STEREO = 1
-
-class TVTKMapperWidget(HasTraits):
-    alpha = Float(1.0)
-    post_call = Any
-    lut_manager = Instance(LUTManager)
-
-    def _alpha_changed(self, old, new):
-        self.lut_manager.lut.alpha_range = (new, new)
-        self.post_call()
-
-class MappingPlane(TVTKMapperWidget):
-    plane = Instance(tvtk.Plane)
-    _coord_redit = editor=RangeEditor(format="%0.2e",
-                              low_name='vmin', high_name='vmax',
-                              auto_set=False, enter_set=True)
-    auto_set = Bool(False)
-    traits_view = View(Item('coord', editor=_coord_redit),
-                       Item('auto_set'),
-                       Item('alpha', editor=RangeEditor(
-                              low=0.0, high=1.0,
-                              enter_set=True, auto_set=False)),
-                       Item('lut_manager', show_label=False,
-                            editor=InstanceEditor(), style='custom'))
-    vmin = Float
-    vmax = Float
-
-    def _auto_set_changed(self, old, new):
-        if new is True:
-            self._coord_redit.auto_set = True
-            self._coord_redit.enter_set = False
-        else:
-            self._coord_redit.auto_set = False
-            self._coord_redit.enter_set = True
-
-    def __init__(self, vmin, vmax, vdefault, **traits):
-        HasTraits.__init__(self, **traits)
-        self.vmin = vmin
-        self.vmax = vmax
-        trait = Range(float(vmin), float(vmax), value=vdefault)
-        self.add_trait("coord", trait)
-        self.coord = vdefault
-
-    def _coord_changed(self, old, new):
-        orig = self.plane.origin[:]
-        orig[self.axis] = new
-        self.plane.origin = orig
-        self.post_call()
-
-class MappingMarchingCubes(TVTKMapperWidget):
-    operator = Instance(tvtk.MarchingCubes)
-    mapper = Instance(tvtk.HierarchicalPolyDataMapper)
-    vmin = Float
-    vmax = Float
-    auto_set = Bool(False)
-    _val_redit = RangeEditor(format="%0.2f",
-                             low_name='vmin', high_name='vmax',
-                             auto_set=False, enter_set=True)
-    traits_view = View(Item('value', editor=_val_redit),
-                       Item('auto_set'),
-                       Item('alpha', editor=RangeEditor(
-                            low=0.0, high=1.0,
-                            enter_set=True, auto_set=False,)),
-                       Item('lut_manager', show_label=False,
-                            editor=InstanceEditor(), style='custom'))
-
-    def __init__(self, vmin, vmax, vdefault, **traits):
-        HasTraits.__init__(self, **traits)
-        self.vmin = vmin
-        self.vmax = vmax
-        trait = Range(float(vmin), float(vmax), value=vdefault)
-        self.add_trait("value", trait)
-        self.value = vdefault
-
-    def _auto_set_changed(self, old, new):
-        if new is True:
-            self._val_redit.auto_set = True
-            self._val_redit.enter_set = False
-        else:
-            self._val_redit.auto_set = False
-            self._val_redit.enter_set = True
-
-    def _value_changed(self, old, new):
-        self.operator.set_value(0, new)
-        self.post_call()
-
-class MappingIsoContour(MappingMarchingCubes):
-    operator = Instance(tvtk.ContourFilter)
-
-class CameraPosition(HasTraits):
-    position = CArray(shape=(3,), dtype='float64')
-    focal_point = CArray(shape=(3,), dtype='float64')
-    view_up = CArray(shape=(3,), dtype='float64')
-    clipping_range = CArray(shape=(2,), dtype='float64')
-    distance = Float
-    num_steps = Int(10)
-    orientation_wxyz = CArray(shape=(4,), dtype='float64')
-
-class CameraControl(HasTraits):
-    # Traits
-    positions = List(CameraPosition)
-    yt_scene = Instance('YTScene')
-    center = Delegate('yt_scene')
-    scene = Delegate('yt_scene')
-    camera = Instance(tvtk.OpenGLCamera)
-    reset_position = Instance(CameraPosition)
-    fps = Float(25.0)
-    export_filename = 'frames'
-    periodic = Bool
-
-    # UI elements
-    snapshot = Button()
-    play = Button()
-    export_frames = Button()
-    reset_path = Button()
-    recenter = Button()
-    save_path = Button()
-    load_path = Button()
-    export_path = Button()
-
-    table_def = TableEditor(
-        columns = [ ObjectColumn(name='position'),
-                    ObjectColumn(name='focal_point'),
-                    ObjectColumn(name='view_up'),
-                    ObjectColumn(name='clipping_range'),
-                    ObjectColumn(name='num_steps') ],
-        reorderable=True, deletable=True,
-        sortable=True, sort_model=True,
-        show_toolbar=True,
-        selection_mode='row',
-        selected = 'reset_position'
-                )
-
-    default_view = View(
-                VGroup(
-                  HGroup(
-                    Item('camera', show_label=False),
-                    Item('recenter', show_label=False),
-                    label='Camera'),
-                  HGroup(
-                    Item('snapshot', show_label=False),
-                    Item('play', show_label=False),
-                    Item('export_frames',show_label=False),
-                    Item('reset_path', show_label=False),
-                    Item('save_path', show_label=False),
-                    Item('load_path', show_label=False),
-                    Item('export_path', show_label=False),
-                    Item('export_filename'),
-                    Item('periodic'),
-                    Item('fps'),
-                    label='Playback'),
-                  VGroup(
-                    Item('positions', show_label=False,
-                        editor=table_def),
-                    label='Camera Path'),
-                 ),
-                resizable=True, title="Camera Path Editor",
-                       )
-
-    def _reset_position_changed(self, old, new):
-        if new is None: return
-        cam = self.scene.camera
-        cam.position = new.position
-        cam.focal_point = new.focal_point
-        cam.view_up = new.view_up
-        cam.clipping_range = new.clipping_range
-        self.scene.render()
-
-    def __init__(self, **traits):
-        HasTraits.__init__(self, **traits)
-
-    def take_snapshot(self):
-        cam = self.scene.camera
-        self.positions.append(CameraPosition(
-                position=cam.position,
-                focal_point=cam.focal_point,
-                view_up=cam.view_up,
-                clipping_range=cam.clipping_range,
-                distance=cam.distance,
-                orientation_wxyz=cam.orientation_wxyz))
-
-    def _export_path_fired(self): 
-        dlg = pyface.FileDialog(
-            action='save as',
-            wildcard="*.cpath",
-        )
-        if dlg.open() == pyface.OK:
-            print "Saving:", dlg.path
-            self.export_camera_path(dlg.path)
-
-    def export_camera_path(self, fn):
-        to_dump = dict(positions=[], focal_points=[],
-                       view_ups=[], clipping_ranges=[],
-                       distances=[], orientation_wxyzs=[])
-        def _write(cam):
-            to_dump['positions'].append(cam.position)
-            to_dump['focal_points'].append(cam.focal_point)
-            to_dump['view_ups'].append(cam.view_up)
-            to_dump['clipping_ranges'].append(cam.clipping_range)
-            to_dump['distances'].append(cam.distance)
-            to_dump['orientation_wxyzs'].append(cam.orientation_wxyz)
-        self.step_through(0.0, callback=_write)
-        pickle.dump(to_dump, open(fn, "wb"))
-
-    def _save_path_fired(self): 
-        dlg = pyface.FileDialog(
-            action='save as',
-            wildcard="*.cpath",
-        )
-        if dlg.open() == pyface.OK:
-            print "Saving:", dlg.path
-            self.dump_camera_path(dlg.path)
-
-    def dump_camera_path(self, fn):
-        to_dump = dict(positions=[], focal_points=[],
-                       view_ups=[], clipping_ranges=[],
-                       distances=[], orientation_wxyzs=[],
-                       num_stepss=[])
-        for p in self.positions:
-            to_dump['positions'].append(p.position)
-            to_dump['focal_points'].append(p.focal_point)
-            to_dump['view_ups'].append(p.view_up)
-            to_dump['clipping_ranges'].append(p.clipping_range)
-            to_dump['distances'].append(p.distance)
-            to_dump['num_stepss'].append(p.num_steps) # stupid s
-            to_dump['orientation_wxyzs'].append(p.orientation_wxyz)
-        pickle.dump(to_dump, open(fn, "wb"))
-
-    def _load_path_fired(self):
-        dlg = pyface.FileDialog(
-            action='open',
-            wildcard="*.cpath",
-        )
-        if dlg.open() == pyface.OK:
-            print "Loading:", dlg.path
-            self.load_camera_path(dlg.path)
-
-    def load_camera_path(self, fn):
-        to_use = pickle.load(open(fn, "rb"))
-        self.positions = []
-        for i in range(len(to_use['positions'])):
-            dd = {}
-            for kw in to_use:
-                # Strip the s
-                dd[kw[:-1]] = to_use[kw][i]
-            self.positions.append(
-                CameraPosition(**dd))
-
-    def _recenter_fired(self):
-        self.camera.focal_point = self.center
-        self.scene.render()
-
-    def _snapshot_fired(self):
-        self.take_snapshot()
-
-    def _play_fired(self):
-        self.step_through()
-
-    def _export_frames_fired(self):
-        self.step_through(save_frames=True)
-
-    def _reset_path_fired(self):
-        self.positions = []
-
-    def step_through(self, pause = 1.0, callback=None, save_frames=False):
-        cam = self.scene.camera
-        frame_counter=0
-        if self.periodic:
-            cyclic_pos = self.positions + [self.positions[0]]
-        else:
-            cyclic_pos = self.positions
-        for i in range(len(cyclic_pos)-1):
-            pos1 = cyclic_pos[i]
-            pos2 = cyclic_pos[i+1]
-            r = pos1.num_steps
-            for p in range(pos1.num_steps):
-                po = _interpolate(pos1.position, pos2.position, p, r)
-                fp = _interpolate(pos1.focal_point, pos2.focal_point, p, r)
-                vu = _interpolate(pos1.view_up, pos2.view_up, p, r)
-                cr = _interpolate(pos1.clipping_range, pos2.clipping_range, p, r)
-                _set_cpos(cam, po, fp, vu, cr)
-                self.scene.render()
-                if callback is not None: callback(cam)
-                if save_frames:
-                    self.scene.save("%s_%0.5d.png" % (self.export_filename,frame_counter))
-                else:
-                    time.sleep(pause * 1.0/self.fps)
-                frame_counter += 1
-
-def _interpolate(q1, q2, p, r):
-    return q1 + p*(q2 - q1)/float(r)
-
-def _set_cpos(cam, po, fp, vu, cr):
-    cam.position = po
-    cam.focal_point = fp
-    cam.view_up = vu
-    cam.clipping_range = cr
-
-class HierarchyImporter(HasTraits):
-    pf = Any
-    min_grid_level = Int(0)
-    max_level = Int(1)
-    number_of_levels = Range(0, 13)
-    max_import_levels = Property(depends_on='min_grid_level')
-    field = Str("Density")
-    field_list = List
-    center_on_max = Bool(True)
-    center = CArray(shape = (3,), dtype = 'float64')
-    cache = Bool(True)
-    smoothed = Bool(True)
-    show_grids = Bool(True)
-
-    def _field_list_default(self):
-        fl = self.pf.h.field_list
-        df = self.pf.h.derived_field_list
-        fl.sort(); df.sort()
-        return fl + df
-    
-    default_view = View(Item('min_grid_level',
-                              editor=RangeEditor(low=0,
-                                                 high_name='max_level')),
-                        Item('number_of_levels', 
-                              editor=RangeEditor(low=1,
-                                                 high_name='max_import_levels')),
-                        Item('field', editor=EnumEditor(name='field_list')),
-                        Item('center_on_max'),
-                        Item('center', enabled_when='not object.center_on_max'),
-                        Item('smoothed'),
-                        Item('cache', label='Pre-load data'),
-                        Item('show_grids'),
-                        buttons=OKCancelButtons)
-
-    def _center_default(self):
-        return [0.5,0.5,0.5]
-
-    @cached_property
-    def _get_max_import_levels(self):
-        return min(13, self.pf.h.max_level - self.min_grid_level + 1)
-
-class HierarchyImportHandler(Controller):
-    importer = Instance(HierarchyImporter)
-    
-
-    def close(self, info, is_ok):
-        if is_ok: 
-            yt_scene = YTScene(
-                importer=self.importer)
-        super(Controller, self).close(info, True)
-        return
-
-
-class YTScene(HasTraits):
-
-    # Traits
-    importer = Instance(HierarchyImporter)
-    pf = Delegate("importer")
-    min_grid_level = Delegate("importer")
-    number_of_levels = Delegate("importer")
-    field = Delegate("importer")
-    center = CArray(shape = (3,), dtype = 'float64')
-    center_on_max = Delegate("importer")
-    smoothed = Delegate("importer")
-    cache = Delegate("importer")
-    show_grids = Delegate("importer")
-
-    camera_path = Instance(CameraControl)
-    #window = Instance(ivtk.IVTKWithCrustAndBrowser)
-    #python_shell = Delegate('window')
-    #scene = Delegate('window')
-    scene = Instance(HasTraits)
-    operators = List(HasTraits)
-
-    # State variables
-    _grid_boundaries_actor = None
-
-    # Views
-    def _window_default(self):
-        # Should experiment with passing in a pipeline browser
-        # that has two root objects -- one for TVTKBases, i.e. the render
-        # window, and one that accepts our objects
-        return ivtk.IVTKWithCrustAndBrowser(size=(800,600), stereo=1)
-
-    def _camera_path_default(self):
-        return CameraControl(yt_scene=self, camera=self.scene.camera)
-
-    def __init__(self, **traits):
-        HasTraits.__init__(self, **traits)
-        max_level = min(self.pf.h.max_level,
-                        self.min_grid_level + self.number_of_levels - 1)
-        self.extracted_pf = ExtractedParameterFile(self.pf,
-                             self.min_grid_level, max_level, offset=None)
-        self.extracted_hierarchy = self.extracted_pf.h
-        self._hdata_set = tvtk.HierarchicalBoxDataSet()
-        self._ugs = []
-        self._grids = []
-        self._min_val = 1e60
-        self._max_val = -1e60
-        gid = 0
-        if self.cache:
-            for grid_set in self.extracted_hierarchy.get_levels():
-                for grid in grid_set:
-                    grid[self.field]
-        for l, grid_set in enumerate(self.extracted_hierarchy.get_levels()):
-            gid = self._add_level(grid_set, l, gid)
-        if self.show_grids:
-            self.toggle_grid_boundaries()
-            
-    def _center_default(self):
-        return self.extracted_hierarchy._convert_coords(
-                [0.5, 0.5, 0.5])
-
-    def do_center_on_max(self):
-        self.center = self.extracted_hierarchy._convert_coords(
-            self.pf.h.find_max("Density")[1])
-        self.scene.camera.focal_point = self.center
-
-    def _add_level(self, grid_set, level, gid):
-        for grid in grid_set:
-            self._hdata_set.set_refinement_ratio(level, 2)
-            gid = self._add_grid(grid, gid, level)
-        return gid
-
-    def _add_grid(self, grid, gid, level=0):
-        mylog.debug("Adding grid %s on level %s (%s)",
-                    grid.id, level, grid.Level)
-        if grid in self._grids: return
-        self._grids.append(grid)
-
-        scalars = grid.get_vertex_centered_data(self.field, smoothed=self.smoothed)
-
-        left_index = grid.get_global_startindex()
-        origin = grid.LeftEdge
-        dds = grid.dds
-        right_index = left_index + scalars.shape - 1
-        ug = tvtk.UniformGrid(origin=origin, spacing=dds,
-                              dimensions=grid.ActiveDimensions+1)
-        if self.field not in self.pf.field_info or \
-            self.pf.field_info[self.field].take_log:
-            scalars = na.log10(scalars)
-        ug.point_data.scalars = scalars.transpose().ravel()
-        ug.point_data.scalars.name = self.field
-        if grid.Level != self.min_grid_level + self.number_of_levels - 1:
-            ug.cell_visibility_array = grid.child_mask.transpose().ravel()
-        else:
-            ug.cell_visibility_array = na.ones(
-                    grid.ActiveDimensions, dtype='int').ravel()
-        self._ugs.append((grid,ug))
-        self._hdata_set.set_data_set(level, gid, left_index, right_index, ug)
-
-        self._min_val = min(self._min_val, scalars.min())
-        self._max_val = max(self._max_val, scalars.max())
-
-        gid += 1
-        return gid
-
-    def _add_data_to_ug(self, field):
-        for g, ug in self._ugs:
-            scalars_temp = grid.get_vertex_centered_data(field, smoothed=self.smoothed)
-            ii = ug.point_data.add_array(scalars_temp.transpose().ravel())
-            ug.point_data.get_array(ii).name = field
-
-    def zoom(self, dist, unit='1'):
-        vec = self.scene.camera.focal_point - \
-              self.scene.camera.position
-        self.scene.camera.position += \
-            vec * dist/self._grids[0].pf[unit]
-        self.scene.render()
-
-    def toggle_grid_boundaries(self):
-        if self._grid_boundaries_actor is None:
-            # We don't need to track this stuff right now.
-            ocf = tvtk.OutlineCornerFilter(
-                    executive=tvtk.CompositeDataPipeline(),
-                    corner_factor = 0.5)
-            ocf.input = self._hdata_set
-            ocm = tvtk.HierarchicalPolyDataMapper(
-                input_connection = ocf.output_port)
-            self._grid_boundaries_actor = tvtk.Actor(mapper = ocm)
-            self.scene.add_actor(self._grid_boundaries_actor)
-        else:
-            self._grid_boundaries_actor.visibility = \
-            (not self._grid_boundaries_actor.visibility)
-
-    def _add_sphere(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
-        sphere = tvtk.Sphere(center=origin, radius=0.25)
-        cutter = tvtk.Cutter(executive = tvtk.CompositeDataPipeline(),
-                             cut_function = sphere)
-        cutter.input = self._hdata_set
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        smap = tvtk.HierarchicalPolyDataMapper(
-                        scalar_range=(self._min_val, self._max_val),
-                        lookup_table=lut_manager.lut,
-                        input_connection = cutter.output_port)
-        sactor = tvtk.Actor(mapper=smap)
-        self.scene.add_actors(sactor)
-        return sphere, lut_manager
-
-    def _add_plane(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
-        plane = tvtk.Plane(origin=origin, normal=normal)
-        cutter = tvtk.Cutter(executive = tvtk.CompositeDataPipeline(),
-                             cut_function = plane)
-        cutter.input = self._hdata_set
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        smap = tvtk.HierarchicalPolyDataMapper(
-                        scalar_range=(self._min_val, self._max_val),
-                        lookup_table=lut_manager.lut,
-                        input_connection = cutter.output_port)
-        sactor = tvtk.Actor(mapper=smap)
-        self.scene.add_actors(sactor)
-        return plane, lut_manager
-
-    def add_plane(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
-        self.operators.append(self._add_plane(origin, normal))
-        return self.operators[-1]
-
-    def _add_axis_plane(self, axis):
-        normal = [0,0,0]
-        normal[axis] = 1
-        np, lut_manager = self._add_plane(self.center, normal=normal)
-        LE = self.extracted_hierarchy.min_left_edge
-        RE = self.extracted_hierarchy.max_right_edge
-        self.operators.append(MappingPlane(
-                vmin=LE[axis], vmax=RE[axis],
-                vdefault = self.center[axis],
-                post_call = self.scene.render,
-                plane = np, axis=axis, coord=0.0,
-                lut_manager = lut_manager,
-                scene=self.scene))
-
-    def add_x_plane(self):
-        self._add_axis_plane(0)
-        return self.operators[-1]
-
-    def add_y_plane(self):
-        self._add_axis_plane(1)
-        return self.operators[-1]
-
-    def add_z_plane(self):
-        self._add_axis_plane(2)
-        return self.operators[-1]
-
-    def add_contour(self, val=None):
-        if val is None: 
-            if self._min_val != self._min_val:
-                self._min_val = 1.0
-            val = (self._max_val+self._min_val) * 0.5
-        cubes = tvtk.MarchingCubes(
-                    executive = tvtk.CompositeDataPipeline())
-        cubes.input = self._hdata_set
-        cubes.set_value(0, val)
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        cube_mapper = tvtk.HierarchicalPolyDataMapper(
-                                input_connection = cubes.output_port,
-                                lookup_table=lut_manager.lut)
-        cube_mapper.color_mode = 'map_scalars'
-        cube_mapper.scalar_range = (self._min_val, self._max_val)
-        cube_actor = tvtk.Actor(mapper=cube_mapper)
-        self.scene.add_actors(cube_actor)
-        self.operators.append(MappingMarchingCubes(operator=cubes,
-                    vmin=self._min_val, vmax=self._max_val,
-                    vdefault=val,
-                    mapper = cube_mapper,
-                    post_call = self.scene.render,
-                    lut_manager = lut_manager,
-                    scene=self.scene))
-        return self.operators[-1]
-
-    def add_isocontour(self, val=None):
-        if val is None: val = (self._max_val+self._min_val) * 0.5
-        isocontour = tvtk.ContourFilter(
-                    executive = tvtk.CompositeDataPipeline())
-        isocontour.input = self._hdata_set
-        isocontour.generate_values(1, (val, val))
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        isocontour_normals = tvtk.PolyDataNormals(
-            executive=tvtk.CompositeDataPipeline())
-        isocontour_normals.input_connection = isocontour.output_port
-        iso_mapper = tvtk.HierarchicalPolyDataMapper(
-                                input_connection = isocontour_normals.output_port,
-                                lookup_table=lut_manager.lut)
-        iso_mapper.scalar_range = (self._min_val, self._max_val)
-        iso_actor = tvtk.Actor(mapper=iso_mapper)
-        self.scene.add_actors(iso_actor)
-        self.operators.append(MappingIsoContour(operator=isocontour,
-                    vmin=self._min_val, vmax=self._max_val,
-                    vdefault=val,
-                    mapper = iso_mapper,
-                    post_call = self.scene.render,
-                    lut_manager = lut_manager,
-                    scene=self.scene))
-        return self.operators[-1]
-
-    def display_points(self):
-        dd = self.pf.h.all_data()
-        points = tvtk.Points()
-        good = (dd["creation_time"] > 0.0)
-        points.data = na.array([ dd["particle_position_%s" % ax][good] for ax in 'xyz' ]).transpose()
-        mass = na.log10(dd["ParticleAge"][good])
-        self.conn = tvtk.CellArray()
-        for i in xrange(mass.shape[0]):
-            self.conn.insert_next_cell(1)
-            self.conn.insert_cell_point(i)
-        self.points = points
-        self.pd = tvtk.PolyData(points = self.points, verts = self.conn)
-        self.pd.point_data.scalars = mass
-        lut = tvtk.LookupTable()
-        self.pdm = tvtk.PolyDataMapper(input = self.pd,
-                                       lookup_table = lut)
-        self.pdm.scalar_range = (mass.min(), mass.max())
-        self.pdm.scalar_mode = 'use_point_data'
-        self.point_actor = tvtk.Actor(mapper = self.pdm)
-        self.scene.add_actor(self.point_actor)
-
-def get_all_parents(grid):
-    parents = []
-    if len(grid.Parents) == 0: return grid
-    for parent in grid.Parents: parents.append(get_all_parents(parent))
-    return list(set(parents))
-
-def run_vtk():
-    gui = pyface.GUI()
-    importer = HierarchyImporter()
-    importer.edit_traits(handler = HierarchyImportHandler(
-            importer = importer))
-    #ehds.edit_traits()
-    gui.start_event_loop()
-
-
-if __name__=="__main__":
-    print "This code probably won't work.  But if you want to give it a try,"
-    print "you need:"
-    print
-    print "VTK (CVS)"
-    print "Mayavi2 (from Enthought)"
-    print
-    print "If you have 'em, give it a try!"
-    print
-    run_vtk()


http://bitbucket.org/yt_analysis/yt/changeset/124b8edce9d3/
changeset:   124b8edce9d3
branch:      yt
user:        brittonsmith
date:        2011-10-18 21:06:23
summary:     Fixing some overzealousness of the .field_data change.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:00:17 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:06:23 2011 -0400
@@ -171,7 +171,7 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data.field_data
+        self.result = p.data.data
                     
     def compare(self, old_result):
         self.compare_data_arrays(
@@ -201,5 +201,5 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data.field_data
+        self.result = p.data.data
 


http://bitbucket.org/yt_analysis/yt/changeset/8b0185668830/
changeset:   8b0185668830
branch:      yt
user:        brittonsmith
date:        2011-10-18 21:08:29
summary:     Fixed tests again.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:06:23 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:08:29 2011 -0400
@@ -45,7 +45,7 @@
         # independently of the parameter file.
         # The .field_data attributes strip out everything other than the actual array
         # values.
-        self.result = (proj.field_data, pixelized_proj.field_data)
+        self.result = (proj.field_data, pixelized_proj.data)
 
     def compare(self, old_result):
         proj, pixelized_proj = self.result


http://bitbucket.org/yt_analysis/yt/changeset/bb62fc941b24/
changeset:   bb62fc941b24
branch:      yt
user:        samskillman
date:        2011-10-18 21:12:38
summary:     Fixing a few .data -> .field_data issues.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:08:29 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:12:38 2011 -0400
@@ -171,7 +171,7 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data.data
+        self.result = p.data.field_data
                     
     def compare(self, old_result):
         self.compare_data_arrays(
@@ -201,5 +201,5 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data.data
+        self.result = p.data.field_data
 


http://bitbucket.org/yt_analysis/yt/changeset/5bd682651027/
changeset:   5bd682651027
branch:      yt
user:        MatthewTurk
date:        2011-10-18 21:12:54
summary:     Merging
affected #:  1 file (-1 bytes)

--- a/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:12:43 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Tue Oct 18 15:12:54 2011 -0400
@@ -45,7 +45,7 @@
         # independently of the parameter file.
         # The .field_data attributes strip out everything other than the actual array
         # values.
-        self.result = (proj.field_data, pixelized_proj.field_data)
+        self.result = (proj.field_data, pixelized_proj.data)
 
     def compare(self, old_result):
         proj, pixelized_proj = self.result


http://bitbucket.org/yt_analysis/yt/changeset/fc2d4772ff0d/
changeset:   fc2d4772ff0d
branch:      yt
user:        MatthewTurk
date:        2011-10-19 00:07:32
summary:     Fixing a problem with my oft-repeated typo of combine_object / object_combine.
affected #:  1 file (-1 bytes)

--- a/yt/data_objects/data_containers.py	Tue Oct 18 15:12:54 2011 -0400
+++ b/yt/data_objects/data_containers.py	Tue Oct 18 18:07:32 2011 -0400
@@ -810,7 +810,7 @@
             self[field] = temp_data[field] 
         # We finalize
         if temp_data != {}:
-            temp_data = self._par_object_combine(temp_data,
+            temp_data = self._par_combine_object(temp_data,
                     datatype='dict', op='cat')
         # And set, for the next group
         for field in temp_data.keys():
@@ -2044,7 +2044,7 @@
         data['pdy'] *= 0.5
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._par_object_combine(temp_data, datatype='dict', op='cat')
+        data = self._par_combine_object(temp_data, datatype='dict', op='cat')
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()

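The corrected calls above go through _par_combine_object(..., datatype='dict', op='cat'), which concatenates each field's array across processors.  A serial illustration of what that operation amounts to, assuming the per-processor pieces are plain dicts of arrays (this only mocks the semantics; the MPI implementation appears in later changesets):

    import numpy as na  # the NumPy alias used throughout yt at this time

    def combine_dict_cat(per_rank_pieces):
        """Concatenate each key's array across the per-processor pieces."""
        combined = {}
        for piece in per_rank_pieces:
            for field, arr in piece.items():
                combined.setdefault(field, []).append(arr)
        return dict((field, na.concatenate(chunks))
                    for field, chunks in combined.items())

    # e.g. two processors each contribute part of a field
    rank0 = {"Density": na.array([1.0, 2.0])}
    rank1 = {"Density": na.array([3.0])}
    print(combine_dict_cat([rank0, rank1])["Density"])   # [ 1.  2.  3.]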

http://bitbucket.org/yt_analysis/yt/changeset/5dd5c1e0f11a/
changeset:   5dd5c1e0f11a
branch:      yt
user:        MatthewTurk
date:        2011-10-19 17:18:48
summary:     Adding in ParallelAnalysisInterface.__init__ calls.  Shouldn't we be using
super() by now?
affected #:  14 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 11:18:48 2011 -0400
@@ -1387,6 +1387,7 @@
         *dm_only* is set, only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.num_neighbors = num_neighbors
         self.bounds = bounds
@@ -1606,6 +1607,7 @@
 
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
     def __init__(self, pf, ds, dm_only=True, padding=0.0):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
         self.center = (na.array(ds.right_edge) + na.array(ds.left_edge))/2.0


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 11:18:48 2011 -0400
@@ -45,6 +45,7 @@
     def __init__(self,period, padding, num_neighbors, bounds,
             xpos, ypos, zpos, index, mass, threshold=160.0, rearrange=True,
             premerge=True):
+        ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.rearrange = rearrange
         self.premerge = premerge


--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 11:18:48 2011 -0400
@@ -79,6 +79,7 @@
         :param mass_column (int): The column of halo_file that contains the
         masses of the haloes. Default=4.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.halo_file = halo_file
         self.omega_matter0 = omega_matter0


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 11:18:48 2011 -0400
@@ -156,6 +156,7 @@
         >>> MergerTree(rf, database = '/home/user/sim1-halos.db',
         ... halo_finder_function=parallelHF)
         """
+        ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
         self.with_halos = na.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
@@ -967,6 +968,7 @@
         >>> MergerTreeDotOutput(halos=182842, database='/home/user/sim1-halos.db',
         ... dotfile = 'halo-182842.gv')
         """
+        ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.link_min = link_min
         if halos is None:
@@ -1113,6 +1115,7 @@
         >>> MergerTreeTextOutput(database='/home/user/sim1-halos.db',
         ... outfile='halos-db.txt')
         """
+        ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.outfile = outfile
         result = self._open_database()


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 11:18:48 2011 -0400
@@ -164,6 +164,7 @@
         >>> hp = HP.halo_profiler("DD0242/DD0242")
         
         """
+        ParallelAnalysisInterface.__init__(self)
 
         self.dataset = dataset
         self.output_dir = output_dir
@@ -1080,6 +1081,7 @@
     This is used to mimic a profile object when reading profile data from disk.
     """
     def __init__(self, pf):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._data = {}
 


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 11:18:48 2011 -0400
@@ -98,6 +98,7 @@
         ... length_number=10, length_range=[1./128, .5],
         ... length_type="log")
         """
+        ParallelAnalysisInterface.__init__(self)
         try:
             fKD
         except NameError:


--- a/yt/data_objects/data_containers.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 11:18:48 2011 -0400
@@ -770,6 +770,7 @@
         Prepares the AMR2DData, normal to *axis*.  If *axis* is 4, we are not
         aligned with any axis.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.axis = axis
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
@@ -816,7 +817,6 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator


--- a/yt/data_objects/derived_quantities.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Wed Oct 19 11:18:48 2011 -0400
@@ -66,6 +66,7 @@
                  combine_function, units = "",
                  n_ret = 0, force_unlazy=False):
         # We wrap the function with our object
+        ParallelAnalysisInterface.__init__(self)
         self.__doc__ = function.__doc__
         self.__name__ = name
         self.collection = collection


--- a/yt/data_objects/hierarchy.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/data_objects/hierarchy.py	Wed Oct 19 11:18:48 2011 -0400
@@ -48,6 +48,7 @@
     float_type = 'float64'
 
     def __init__(self, pf, data_style):
+        ParallelAnalysisInterface.__init__(self)
         self.parameter_file = weakref.proxy(pf)
         self.pf = self.parameter_file
 


--- a/yt/data_objects/profiles.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 11:18:48 2011 -0400
@@ -64,6 +64,7 @@
 # We could, but I think we instead want to deal with the root datasource.
 class BinnedProfile(ParallelAnalysisInterface):
     def __init__(self, data_source, lazy_reader):
+        ParallelAnalysisInterface.__init__(self)
         self._data_source = data_source
         self.pf = data_source.pf
         self.field_data = YTFieldData()


--- a/yt/data_objects/static_output.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/data_objects/static_output.py	Wed Oct 19 11:18:48 2011 -0400
@@ -162,7 +162,7 @@
     _instantiated_hierarchy = None
     @property
     def hierarchy(self):
-        if self._instantiated_hierarchy == None:
+        if self._instantiated_hierarchy is None:
             if self._hierarchy_class == None:
                 raise RuntimeError("You should not instantiate StaticOutput.")
             self._instantiated_hierarchy = self._hierarchy_class(


--- a/yt/visualization/streamlines.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/visualization/streamlines.py	Wed Oct 19 11:18:48 2011 -0400
@@ -99,6 +99,7 @@
                  zfield='x-velocity', volume=None,
                  dx=None, length=None, direction=1,
                  get_magnitude=False):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.start_positions = na.array(positions)
         self.N = self.start_positions.shape[0]


--- a/yt/visualization/volume_rendering/camera.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Wed Oct 19 11:18:48 2011 -0400
@@ -180,6 +180,7 @@
         >>> image = cam.snapshot(fn='my_rendering.png')
 
         """
+        ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
         if not iterable(resolution):
             resolution = (resolution, resolution)


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 11:18:48 2011 -0400
@@ -43,6 +43,7 @@
     def __init__(self, fields = "Density", source = None, pf = None,
                  log_fields = None, no_ghost = False):
         # Typically, initialized as hanging off a hierarchy.  But, not always.
+        ParallelAnalysisInterface.__init__(self)
         self.no_ghost = no_ghost
         if pf is not None: self.pf = pf
         if source is None: source = self.pf.h.all_data()

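The summary asks whether super() should replace the explicit ParallelAnalysisInterface.__init__(self) calls being added above.  A minimal sketch of the two patterns, with stand-in class names rather than yt's real hierarchy:

    class ParallelAnalysisInterface(object):
        def __init__(self):
            # per-object communicator state, as introduced in this changeset
            self.comm = None
            self._distributed = False

    # explicit-call style, as the changeset does it
    class HaloFinderExplicit(ParallelAnalysisInterface):
        def __init__(self, pf):
            ParallelAnalysisInterface.__init__(self)
            self.pf = pf

    # cooperative style the summary alludes to; with multiple base classes,
    # super() walks the MRO so each __init__ runs exactly once
    class HaloFinderSuper(ParallelAnalysisInterface):
        def __init__(self, pf):
            super(HaloFinderSuper, self).__init__()
            self.pf = pf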

http://bitbucket.org/yt_analysis/yt/changeset/1a084444e077/
changeset:   1a084444e077
branch:      yt
user:        samskillman
date:        2011-10-19 17:55:32
summary:     Adding an intermediate point in the move to subcommunicators.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Oct 18 18:07:32 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 11:55:32 2011 -0400
@@ -275,12 +275,14 @@
     return func
 
 class ParallelAnalysisInterface(object):
-    """
-    This is an interface specification providing several useful utility
-    functions for analyzing something in parallel.
-    """
+    comm = None
     _grids = None
-    _distributed = parallel_capable
+    _distributed = None
+
+    def __init__(self, size=1):
+        self.comm = CommunicationSystem.pop()
+        self._grids = self.comm._grids
+        self._distributed = self.comm._distributed
 
     def _get_objs(self, attr, *args, **kwargs):
         if self._distributed:
@@ -307,13 +309,43 @@
     def _finalize_parallel(self):
         pass
 
+class CommunicationSystem(object):
+    communicators = []
+    def push(self, size=None, ranks=None):
+        if size = None:
+            size = len(available_ranks)
+        if len(available_ranks) < size:
+            raise RuntimeError
+        if ranks is None:
+            ranks = [available_ranks.pop() for i in range(size)]
+        
+        group = MPI.COMM_WORLD.Group.Incl(ranks)
+        new_comm = MPI.COMM_WORLD.Create(group)
+        self.communicators.append(Communicator(new_comm))
+        return new_comm
+        
+    def pop(self):
+        self.communicators.pop()
+
+class Communicator(object):
+    comm = None
+    def __init__(self, comm=MPI.COMM_WORLD):
+        self.comm = comm
+    """
+    This is an interface specification providing several useful utility
+    functions for analyzing something in parallel.
+    """
+    _grids = None
+    _distributed = (comm.size > 1)
+
     def _partition_hierarchy_2d(self, axis):
         if not self._distributed:
-           return False, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
+           return False, self.hierarchy.grid_collection(self.center, 
+                                                        self.hierarchy.grids)
 
         xax, yax = x_dict[axis], y_dict[axis]
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-        mi = MPI.COMM_WORLD.rank
+        cc = MPI.Compute_dims(self.comm.size, 2)
+        mi = self.comm.rank
         cx, cy = na.unravel_index(mi, cc)
         x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
@@ -352,15 +384,15 @@
             # grid that belongs to this processor.
             grids = self.pf.h.select_grids(0)
             root_grids = [g for g in grids
-                          if g.proc_num == MPI.COMM_WORLD.rank]
+                          if g.proc_num == self.comm.rank]
             if len(root_grids) != 1: raise RuntimeError
             #raise KeyError
             LE = root_grids[0].LeftEdge
             RE = root_grids[0].RightEdge
             return True, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
-        mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+        mi = self.comm.rank % (self.comm.size / rank_ratio)
         cx, cy, cz = na.unravel_index(mi, cc)
         x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -386,8 +418,8 @@
         if not self._distributed:
             return LE, RE, re
         
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
-        mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+        mi = self.comm.rank % (self.comm.size / rank_ratio)
         cx, cy, cz = na.unravel_index(mi, cc)
         x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -421,8 +453,8 @@
                 i += 1
             return [n]
 
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        si = MPI.COMM_WORLD.size
+        cc = MPI.Compute_dims(self.comm.size, 3)
+        si = self.comm.size
         
         factors = factor(si)
         xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
@@ -448,8 +480,8 @@
 
     def _barrier(self):
         if not self._distributed: return
-        mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
-        MPI.COMM_WORLD.Barrier()
+        mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
+        self.comm.Barrier()
 
     def _mpi_exit_test(self, data=False):
         # data==True -> exit. data==False -> no exit
@@ -490,39 +522,39 @@
         bot_keys = na.array(bot_keys, dtype='int64')
         vals = na.array(vals, dtype='float64')
         del data
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
+        if self.comm.rank == 0:
+            for i in range(1,self.comm.size):
+                size = self.comm.recv(source=i, tag=0)
                 mylog.info('Global Hash Table Merge %d of %d size %d' % \
-                    (i,MPI.COMM_WORLD.size, size))
+                    (i,self.comm.size, size))
                 recv_top_keys = na.empty(size, dtype='int64')
                 recv_bot_keys = na.empty(size, dtype='int64')
                 recv_vals = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
-                MPI.COMM_WORLD.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
-                MPI.COMM_WORLD.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
+                self.comm.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
+                self.comm.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
+                self.comm.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
                 top_keys = na.concatenate([top_keys, recv_top_keys])
                 bot_keys = na.concatenate([bot_keys, recv_bot_keys])
                 vals = na.concatenate([vals, recv_vals])
         else:
             size = top_keys.size
-            MPI.COMM_WORLD.send(size, dest=0, tag=0)
-            MPI.COMM_WORLD.Send([top_keys, MPI.LONG], dest=0, tag=0)
-            MPI.COMM_WORLD.Send([bot_keys, MPI.LONG], dest=0, tag=0)
-            MPI.COMM_WORLD.Send([vals, MPI.DOUBLE], dest=0, tag=0)
+            self.comm.send(size, dest=0, tag=0)
+            self.comm.Send([top_keys, MPI.LONG], dest=0, tag=0)
+            self.comm.Send([bot_keys, MPI.LONG], dest=0, tag=0)
+            self.comm.Send([vals, MPI.DOUBLE], dest=0, tag=0)
         # We're going to decompose the dict into arrays, send that, and then
         # reconstruct it. When data is too big the pickling of the dict fails.
-        if MPI.COMM_WORLD.rank == 0:
+        if self.comm.rank == 0:
             size = top_keys.size
         # Broadcast them using array methods
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
+        size = self.comm.bcast(size, root=0)
+        if self.comm.rank != 0:
             top_keys = na.empty(size, dtype='int64')
             bot_keys = na.empty(size, dtype='int64')
             vals = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([top_keys,MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([bot_keys,MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([vals, MPI.DOUBLE], root=0)
+        self.comm.Bcast([top_keys,MPI.LONG], root=0)
+        self.comm.Bcast([bot_keys,MPI.LONG], root=0)
+        self.comm.Bcast([vals, MPI.DOUBLE], root=0)
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
@@ -544,26 +576,26 @@
             datatype == "list"
         # Now we have our datatype, and we conduct our operation
         if datatype == "dict" and op == "join":
-            if MPI.COMM_WORLD.rank == 0:
-                for i in range(1,MPI.COMM_WORLD.size):
-                    data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
+            if self.comm.rank == 0:
+                for i in range(1,self.comm.size):
+                    data.update(self.comm.recv(source=i, tag=0))
             else:
-                MPI.COMM_WORLD.send(data, dest=0, tag=0)
-            data = MPI.COMM_WORLD.bcast(data, root=0)
+                self.comm.send(data, dest=0, tag=0)
+            data = self.comm.bcast(data, root=0)
             return data
         elif datatype == "dict" and op == "cat":
             field_keys = data.keys()
             field_keys.sort()
             size = data[field_keys[0]].shape[-1]
-            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            sizes = na.zeros(self.comm.size, dtype='int64')
             outsize = na.array(size, dtype='int64')
-            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+            self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
                 rv = _alltoallv_array(dd, arr_size, offsets, sizes)
@@ -582,39 +614,39 @@
                     size = data.shape[0]
                 else:
                     ncols, size = data.shape
-            ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
+            ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if size == 0:
                 data = na.zeros((ncols,0), dtype='float64') # This only works for
             size = data.shape[-1]
-            sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+            sizes = na.zeros(self.comm.size, dtype='int64')
             outsize = na.array(size, dtype='int64')
-            MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+            self.comm.Allgather([outsize, 1, MPI.LONG],
                                      [sizes, 1, MPI.LONG] )
             # This nested concatenate is to get the shapes to work out correctly;
             # if we just add [0] to sizes, it will broadcast a summation, not a
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-            arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+            arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = _alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
-            if MPI.COMM_WORLD.rank == 0:
+            if self.comm.rank == 0:
                 data = self.__mpi_recvlist(data)
             else:
-                MPI.COMM_WORLD.send(data, dest=0, tag=0)
-            mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-            data = MPI.COMM_WORLD.bcast(data, root=0)
+                self.comm.send(data, dest=0, tag=0)
+            mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
+            data = self.comm.bcast(data, root=0)
             return data
         raise NotImplementedError
 
     @parallel_passthrough
     def _mpi_bcast_pickled(self, data):
-        data = MPI.COMM_WORLD.bcast(data, root=0)
+        data = self.comm.bcast(data, root=0)
         return data
 
     def _should_i_write(self):
         if not self._distributed: return True
-        return (MPI.COMM_WORLD == 0)
+        return (self.comm == 0)
 
     def _preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
@@ -632,13 +664,13 @@
             if dtype != data.dtype:
                 data = data.astype(dtype)
             temp = data.copy()
-            MPI.COMM_WORLD.Allreduce([temp,get_mpi_type(dtype)], 
+            self.comm.Allreduce([temp,get_mpi_type(dtype)], 
                                      [data,get_mpi_type(dtype)], op)
             return data
         else:
             # We use old-school pickling here on the assumption the arrays are
             # relatively small ( < 1e7 elements )
-            return MPI.COMM_WORLD.allreduce(data, op)
+            return self.comm.allreduce(data, op)
 
     ###
     # Non-blocking stuff.
@@ -648,13 +680,13 @@
         if not self._distributed: return -1
         if dtype is None: dtype = data.dtype
         mpi_type = get_mpi_type(dtype)
-        return MPI.COMM_WORLD.Irecv([data, mpi_type], source, tag)
+        return self.comm.Irecv([data, mpi_type], source, tag)
 
     def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
         if not self._distributed: return -1
         if dtype is None: dtype = data.dtype
         mpi_type = get_mpi_type(dtype)
-        return MPI.COMM_WORLD.Isend([data, mpi_type], dest, tag)
+        return self.comm.Isend([data, mpi_type], dest, tag)
 
     def _mpi_Request_Waitall(self, hooks):
         if not self._distributed: return
@@ -684,27 +716,27 @@
     @property
     def _par_size(self):
         if not self._distributed: return 1
-        return MPI.COMM_WORLD.size
+        return self.comm.size
 
     @property
     def _par_rank(self):
         if not self._distributed: return 0
-        return MPI.COMM_WORLD.rank
+        return self.comm.rank
 
     def _mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
         self._barrier()
         data = None
-        if MPI.COMM_WORLD.rank == 0:
+        if self.comm.rank == 0:
             data = {0:info}
-            for i in range(1, MPI.COMM_WORLD.size):
-                data[i] = MPI.COMM_WORLD.recv(source=i, tag=0)
+            for i in range(1, self.comm.size):
+                data[i] = self.comm.recv(source=i, tag=0)
         else:
-            MPI.COMM_WORLD.send(info, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
+            self.comm.send(info, dest=0, tag=0)
+        mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
+        data = self.comm.bcast(data, root=0)
         self._barrier()
-        return MPI.COMM_WORLD.rank, data
+        return self.comm.rank, data
 
     def _get_dependencies(self, fields):
         deps = []
@@ -715,7 +747,7 @@
 
     def _claim_object(self, obj):
         if not self._distributed: return
-        obj._owner = MPI.COMM_WORLD.rank
+        obj._owner = self.comm.rank
         obj._distributed = True
 
     def _do_not_claim_object(self, obj):
@@ -725,7 +757,7 @@
 
     def _write_on_root(self, fn):
         if not self._distributed: return open(fn, "w")
-        if MPI.COMM_WORLD.rank == 0:
+        if self.comm.rank == 0:
             return open(fn, "w")
         else:
             return cStringIO.StringIO()
@@ -733,39 +765,39 @@
     def _get_filename(self, prefix, rank=None):
         if not self._distributed: return prefix
         if rank == None:
-            return "%s_%04i" % (prefix, MPI.COMM_WORLD.rank)
+            return "%s_%04i" % (prefix, self.comm.rank)
         else:
             return "%s_%04i" % (prefix, rank)
 
     def _is_mine(self, obj):
         if not obj._distributed: return True
-        return (obj._owner == MPI.COMM_WORLD.rank)
+        return (obj._owner == self.comm.rank)
 
     def _send_quadtree(self, target, buf, tgd, args):
         sizebuf = na.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
-        MPI.COMM_WORLD.Send([sizebuf, MPI.LONG], dest=target)
-        MPI.COMM_WORLD.Send([buf[0], MPI.INT], dest=target)
-        MPI.COMM_WORLD.Send([buf[1], MPI.DOUBLE], dest=target)
-        MPI.COMM_WORLD.Send([buf[2], MPI.DOUBLE], dest=target)
+        self.comm.Send([sizebuf, MPI.LONG], dest=target)
+        self.comm.Send([buf[0], MPI.INT], dest=target)
+        self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
+        self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
     def _recv_quadtree(self, target, tgd, args):
         sizebuf = na.zeros(1, 'int64')
-        MPI.COMM_WORLD.Recv(sizebuf, source=target)
+        self.comm.Recv(sizebuf, source=target)
         buf = [na.empty((sizebuf[0],), 'int32'),
                na.empty((sizebuf[0], args[2]),'float64'),
                na.empty((sizebuf[0],),'float64')]
-        MPI.COMM_WORLD.Recv([buf[0], MPI.INT], source=target)
-        MPI.COMM_WORLD.Recv([buf[1], MPI.DOUBLE], source=target)
-        MPI.COMM_WORLD.Recv([buf[2], MPI.DOUBLE], source=target)
+        self.comm.Recv([buf[0], MPI.INT], source=target)
+        self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
+        self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
         return buf
 
     @parallel_passthrough
     def merge_quadtree_buffers(self, qt):
         # This is a modified version of pairwise reduction from Lisandro Dalcin,
         # in the reductions demo of mpi4py
-        size = MPI.COMM_WORLD.size
-        rank = MPI.COMM_WORLD.rank
+        size = self.comm.size
+        rank = self.comm.rank
 
         mask = 1
 
@@ -795,14 +827,14 @@
         if rank == 0:
             buf = qt.tobuffer()
             sizebuf[0] = buf[0].size
-        MPI.COMM_WORLD.Bcast([sizebuf, MPI.LONG], root=0)
+        self.comm.Bcast([sizebuf, MPI.LONG], root=0)
         if rank != 0:
             buf = [na.empty((sizebuf[0],), 'int32'),
                    na.empty((sizebuf[0], args[2]),'float64'),
                    na.empty((sizebuf[0],),'float64')]
-        MPI.COMM_WORLD.Bcast([buf[0], MPI.INT], root=0)
-        MPI.COMM_WORLD.Bcast([buf[1], MPI.DOUBLE], root=0)
-        MPI.COMM_WORLD.Bcast([buf[2], MPI.DOUBLE], root=0)
+        self.comm.Bcast([buf[0], MPI.INT], root=0)
+        self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
+        self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
         self.refined = buf[0]
         if rank != 0:
             qt = QuadTree(tgd, args[2])

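The changeset above routes MPI calls through a Communicator object stored on self.comm instead of hitting MPI.COMM_WORLD directly, so the same analysis code can later be pointed at a subcommunicator.  A minimal mpi4py sketch of the idea, assuming mpi4py is available; this wrapper is a simplification of the Communicator class in the diff:

    from mpi4py import MPI

    class Communicator(object):
        """Thin wrapper so callers use self.comm.rank / self.comm.barrier()
        without caring whether they were handed COMM_WORLD or a subcommunicator."""
        def __init__(self, comm=MPI.COMM_WORLD):
            self.comm = comm
            self._distributed = comm.size > 1

        @property
        def rank(self):
            return self.comm.rank

        @property
        def size(self):
            return self.comm.size

        def barrier(self):
            if self._distributed:
                self.comm.Barrier()

    # every rank wraps COMM_WORLD here; a subcommunicator created with
    # MPI.COMM_WORLD.Create(group) could be passed in exactly the same way
    comm = Communicator()
    print("rank %d of %d" % (comm.rank, comm.size))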

http://bitbucket.org/yt_analysis/yt/changeset/84fb67b6e389/
changeset:   84fb67b6e389
branch:      yt
user:        samskillman
date:        2011-10-19 17:55:54
summary:     Merging.
affected #:  14 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 11:55:54 2011 -0400
@@ -1387,6 +1387,7 @@
         *dm_only* is set, only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.num_neighbors = num_neighbors
         self.bounds = bounds
@@ -1606,6 +1607,7 @@
 
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
     def __init__(self, pf, ds, dm_only=True, padding=0.0):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
         self.center = (na.array(ds.right_edge) + na.array(ds.left_edge))/2.0


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 11:55:54 2011 -0400
@@ -45,6 +45,7 @@
     def __init__(self,period, padding, num_neighbors, bounds,
             xpos, ypos, zpos, index, mass, threshold=160.0, rearrange=True,
             premerge=True):
+        ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.rearrange = rearrange
         self.premerge = premerge


--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 11:55:54 2011 -0400
@@ -79,6 +79,7 @@
         :param mass_column (int): The column of halo_file that contains the
         masses of the haloes. Default=4.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.halo_file = halo_file
         self.omega_matter0 = omega_matter0


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 11:55:54 2011 -0400
@@ -156,6 +156,7 @@
         >>> MergerTree(rf, database = '/home/user/sim1-halos.db',
         ... halo_finder_function=parallelHF)
         """
+        ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
         self.with_halos = na.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
@@ -967,6 +968,7 @@
         >>> MergerTreeDotOutput(halos=182842, database='/home/user/sim1-halos.db',
         ... dotfile = 'halo-182842.gv')
         """
+        ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.link_min = link_min
         if halos is None:
@@ -1113,6 +1115,7 @@
         >>> MergerTreeTextOutput(database='/home/user/sim1-halos.db',
         ... outfile='halos-db.txt')
         """
+        ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.outfile = outfile
         result = self._open_database()


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 11:55:54 2011 -0400
@@ -164,6 +164,7 @@
         >>> hp = HP.halo_profiler("DD0242/DD0242")
         
         """
+        ParallelAnalysisInterface.__init__(self)
 
         self.dataset = dataset
         self.output_dir = output_dir
@@ -1080,6 +1081,7 @@
     This is used to mimic a profile object when reading profile data from disk.
     """
     def __init__(self, pf):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._data = {}
 


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 11:55:54 2011 -0400
@@ -98,6 +98,7 @@
         ... length_number=10, length_range=[1./128, .5],
         ... length_type="log")
         """
+        ParallelAnalysisInterface.__init__(self)
         try:
             fKD
         except NameError:


--- a/yt/data_objects/data_containers.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 11:55:54 2011 -0400
@@ -770,6 +770,7 @@
         Prepares the AMR2DData, normal to *axis*.  If *axis* is 4, we are not
         aligned with any axis.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.axis = axis
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
@@ -816,7 +817,6 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator


--- a/yt/data_objects/derived_quantities.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Wed Oct 19 11:55:54 2011 -0400
@@ -66,6 +66,7 @@
                  combine_function, units = "",
                  n_ret = 0, force_unlazy=False):
         # We wrap the function with our object
+        ParallelAnalysisInterface.__init__(self)
         self.__doc__ = function.__doc__
         self.__name__ = name
         self.collection = collection


--- a/yt/data_objects/hierarchy.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/data_objects/hierarchy.py	Wed Oct 19 11:55:54 2011 -0400
@@ -48,6 +48,7 @@
     float_type = 'float64'
 
     def __init__(self, pf, data_style):
+        ParallelAnalysisInterface.__init__(self)
         self.parameter_file = weakref.proxy(pf)
         self.pf = self.parameter_file
 


--- a/yt/data_objects/profiles.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 11:55:54 2011 -0400
@@ -64,6 +64,7 @@
 # We could, but I think we instead want to deal with the root datasource.
 class BinnedProfile(ParallelAnalysisInterface):
     def __init__(self, data_source, lazy_reader):
+        ParallelAnalysisInterface.__init__(self)
         self._data_source = data_source
         self.pf = data_source.pf
         self.field_data = YTFieldData()


--- a/yt/data_objects/static_output.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/data_objects/static_output.py	Wed Oct 19 11:55:54 2011 -0400
@@ -162,7 +162,7 @@
     _instantiated_hierarchy = None
     @property
     def hierarchy(self):
-        if self._instantiated_hierarchy == None:
+        if self._instantiated_hierarchy is None:
             if self._hierarchy_class == None:
                 raise RuntimeError("You should not instantiate StaticOutput.")
             self._instantiated_hierarchy = self._hierarchy_class(


--- a/yt/visualization/streamlines.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/visualization/streamlines.py	Wed Oct 19 11:55:54 2011 -0400
@@ -99,6 +99,7 @@
                  zfield='x-velocity', volume=None,
                  dx=None, length=None, direction=1,
                  get_magnitude=False):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.start_positions = na.array(positions)
         self.N = self.start_positions.shape[0]


--- a/yt/visualization/volume_rendering/camera.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Wed Oct 19 11:55:54 2011 -0400
@@ -180,6 +180,7 @@
         >>> image = cam.snapshot(fn='my_rendering.png')
 
         """
+        ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
         if not iterable(resolution):
             resolution = (resolution, resolution)


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 11:55:32 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 11:55:54 2011 -0400
@@ -43,6 +43,7 @@
     def __init__(self, fields = "Density", source = None, pf = None,
                  log_fields = None, no_ghost = False):
         # Typically, initialized as hanging off a hierarchy.  But, not always.
+        ParallelAnalysisInterface.__init__(self)
         self.no_ghost = no_ghost
         if pf is not None: self.pf = pf
         if source is None: source = self.pf.h.all_data()


http://bitbucket.org/yt_analysis/yt/changeset/088e46392bff/
changeset:   088e46392bff
branch:      yt
user:        MatthewTurk
date:        2011-10-19 18:09:22
summary:     Adding a communication_system object
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 11:55:54 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:09:22 2011 -0400
@@ -327,6 +327,10 @@
     def pop(self):
         self.communicators.pop()
 
+communication_system = CommunicationSystem()
+if parallel_enabled:
+    communication_system.communicators.append(MPI.COMM_WORLD)
+
 class Communicator(object):
     comm = None
     def __init__(self, comm=MPI.COMM_WORLD):

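communication_system above is a module-level stack of communicators: the one most recently pushed is the one analysis code works against.  A rough, MPI-free sketch of that push/pop bookkeeping (the names follow the diff, but the bodies are simplified assumptions; the real push() builds an MPI group and communicator):

    class CommunicationSystem(object):
        """Keep a stack of communicators; the last one pushed is current."""
        def __init__(self):
            self.communicators = []

        def push(self, comm):
            self.communicators.append(comm)
            return comm

        def pop(self):
            return self.communicators.pop()

        @property
        def current(self):
            return self.communicators[-1]

    communication_system = CommunicationSystem()
    communication_system.push("COMM_WORLD")        # stand-in for the global communicator
    communication_system.push("sub-communicator")  # e.g. for one parallel job
    print(communication_system.current)            # work runs against the top entry
    communication_system.pop()                     # restore the previous communicator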

http://bitbucket.org/yt_analysis/yt/changeset/19af2e9199fe/
changeset:   19af2e9199fe
branch:      yt
user:        samskillman
date:        2011-10-19 18:25:46
summary:     More work on moving to subcommunicators: _send_array and the other free-standing MPI operations now live within the Communicator object.
affected #:  3 files (-1 bytes)

--- a/yt/data_objects/data_containers.py	Wed Oct 19 11:55:54 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 12:25:46 2011 -0400
@@ -1627,7 +1627,7 @@
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
-        tree = self.merge_quadtree_buffers(tree)
+        tree = self.comm.merge_quadtree_buffers(tree)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 11:55:54 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 12:25:46 2011 -0400
@@ -1178,7 +1178,7 @@
             if front.owner == my_rank:
                 if front.owner == parent.owner:
                     mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
-                    arr2 = PT._recv_array(back.owner, tag=back.owner).reshape(
+                    arr2 = self.comm._recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1192,17 +1192,17 @@
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
-                    PT._send_array(self.image.ravel(), back.owner, tag=my_rank)
+                    self.comm._send_array(self.image.ravel(), back.owner, tag=my_rank)
 
                 
             if back.owner == my_rank:
                 if front.owner == parent.owner:
                     mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
-                    PT._send_array(self.image.ravel(), front.owner, tag=my_rank)
+                    self.comm._send_array(self.image.ravel(), front.owner, tag=my_rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
-                    arr2 = PT._recv_array(front.owner, tag=front.owner).reshape(
+                    arr2 = self.comm._recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1225,7 +1225,7 @@
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
         if my_rank != 0:
-            PT._recv_array(my_rank-1, tag=my_rank-1)
+            self.comm._recv_array(my_rank-1, tag=my_rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1238,13 +1238,13 @@
                         pass
         f.close()
         if my_rank != (nprocs-1):
-            PT._send_array([0],my_rank+1, tag=my_rank)
+            self.comm._send_array([0],my_rank+1, tag=my_rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
         if my_rank != 0:
-            PT._recv_array(my_rank-1, tag=my_rank-1)
+            self.comm._recv_array(my_rank-1, tag=my_rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():
@@ -1268,7 +1268,7 @@
         except:
             pass
         if my_rank != (nprocs-1):
-            PT._send_array([0],my_rank+1, tag=my_rank)
+            self.comm._send_array([0],my_rank+1, tag=my_rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 11:55:54 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:25:46 2011 -0400
@@ -115,6 +115,8 @@
     for dt, val in dtype_names.items():
         if dt == dtype: return val
 
+__tocast = 'c'
+
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then
@@ -312,7 +314,7 @@
 class CommunicationSystem(object):
     communicators = []
     def push(self, size=None, ranks=None):
-        if size = None:
+        if size is None:
             size = len(available_ranks)
         if len(available_ranks) < size:
             raise RuntimeError
@@ -329,14 +331,15 @@
 
 class Communicator(object):
     comm = None
+    _grids = None
+    _distributed = None
     def __init__(self, comm=MPI.COMM_WORLD):
         self.comm = comm
+        self._distributed = self.comm.size > 1
     """
     This is an interface specification providing several useful utility
     functions for analyzing something in parallel.
     """
-    _grids = None
-    _distributed = (comm.size > 1)
 
     def _partition_hierarchy_2d(self, axis):
         if not self._distributed:
@@ -598,7 +601,7 @@
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
-                rv = _alltoallv_array(dd, arr_size, offsets, sizes)
+                rv = self._alltoallv_array(dd, arr_size, offsets, sizes)
                 data[key] = rv
             return data
         elif datatype == "array" and op == "cat":
@@ -627,7 +630,7 @@
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
-            data = _alltoallv_array(data, arr_size, offsets, sizes)
+            data = self._alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
             if self.comm.rank == 0:
@@ -841,55 +844,44 @@
             qt.frombuffer(*buf)
         return qt
 
-__tocast = 'c'
 
-def _send_array(arr, dest, tag = 0):
-    if not isinstance(arr, na.ndarray):
-        MPI.COMM_WORLD.send((None,None), dest=dest, tag=tag)
-        MPI.COMM_WORLD.send(arr, dest=dest, tag=tag)
-        return
-    tmp = arr.view(__tocast) # Cast to CHAR
-    # communicate type and shape
-    MPI.COMM_WORLD.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
-    MPI.COMM_WORLD.Send([arr, MPI.CHAR], dest=dest, tag=tag)
-    del tmp
+    def _send_array(self, arr, dest, tag = 0):
+        if not isinstance(arr, na.ndarray):
+            self.comm.send((None,None), dest=dest, tag=tag)
+            self.comm.send(arr, dest=dest, tag=tag)
+            return
+        tmp = arr.view(__tocast) # Cast to CHAR
+        # communicate type and shape
+        self.comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
+        self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
+        del tmp
 
-def _recv_array(source, tag = 0):
-    dt, ne = MPI.COMM_WORLD.recv(source=source, tag=tag)
-    if dt is None and ne is None:
-        return MPI.COMM_WORLD.recv(source=source, tag=tag)
-    arr = na.empty(ne, dtype=dt)
-    tmp = arr.view(__tocast)
-    MPI.COMM_WORLD.Recv([tmp, MPI.CHAR], source=source, tag=tag)
-    return arr
-
-def _bcast_array(arr, root = 0):
-    if MPI.COMM_WORLD.rank == root:
-        tmp = arr.view(__tocast) # Cast to CHAR
-        MPI.COMM_WORLD.bcast((arr.dtype.str, arr.shape), root=root)
-    else:
-        dt, ne = MPI.COMM_WORLD.bcast(None, root=root)
+    def _recv_array(self, source, tag = 0):
+        dt, ne = self.comm.recv(source=source, tag=tag)
+        if dt is None and ne is None:
+            return self.comm.recv(source=source, tag=tag)
         arr = na.empty(ne, dtype=dt)
         tmp = arr.view(__tocast)
-    MPI.COMM_WORLD.Bcast([tmp, MPI.CHAR], root=root)
-    return arr
+        self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
+        return arr
 
-def _alltoallv_array(send, total_size, offsets, sizes):
-    if len(send.shape) > 1:
-        recv = []
-        for i in range(send.shape[0]):
-            recv.append(_alltoallv_array(send[i,:].copy(), total_size, offsets, sizes))
-        recv = na.array(recv)
+    def _alltoallv_array(self, send, total_size, offsets, sizes):
+        if len(send.shape) > 1:
+            recv = []
+            for i in range(send.shape[0]):
+                recv.append(self._alltoallv_array(send[i,:].copy(), 
+                                                  total_size, offsets, sizes))
+            recv = na.array(recv)
+            return recv
+        offset = offsets[self.comm.rank]
+        tmp_send = send.view(__tocast)
+        recv = na.empty(total_size, dtype=send.dtype)
+        recv[offset:offset+send.size] = send[:]
+        dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
+        roff = [off * dtr for off in offsets]
+        rsize = [siz * dtr for siz in sizes]
+        tmp_recv = recv.view(__tocast)
+        self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
+                                  (tmp_recv, (rsize, roff), MPI.CHAR))
         return recv
-    offset = offsets[MPI.COMM_WORLD.rank]
-    tmp_send = send.view(__tocast)
-    recv = na.empty(total_size, dtype=send.dtype)
-    recv[offset:offset+send.size] = send[:]
-    dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
-    roff = [off * dtr for off in offsets]
-    rsize = [siz * dtr for siz in sizes]
-    tmp_recv = recv.view(__tocast)
-    MPI.COMM_WORLD.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
-                              (tmp_recv, (rsize, roff), MPI.CHAR))
-    return recv
     

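With _send_array and _recv_array now living on the Communicator, callers such as the amr_kdtree code above go through self.comm.  A minimal mpi4py sketch of the same dtype-and-shape-first, raw-bytes-second exchange between two ranks (assumes mpi4py and at least two processes; it mirrors, but simplifies, the CHAR-view approach in the diff):

    import numpy as na
    from mpi4py import MPI

    def send_array(comm, arr, dest, tag=0):
        # pickle-send the dtype and shape, then ship the raw buffer as bytes
        comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
        comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)

    def recv_array(comm, source, tag=0):
        dt, shape = comm.recv(source=source, tag=tag)
        arr = na.empty(shape, dtype=dt)
        comm.Recv([arr, MPI.CHAR], source=source, tag=tag)
        return arr

    comm = MPI.COMM_WORLD
    if comm.size >= 2:
        if comm.rank == 0:
            send_array(comm, na.arange(8, dtype='float64'), dest=1)
        elif comm.rank == 1:
            print(recv_array(comm, source=0))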

http://bitbucket.org/yt_analysis/yt/changeset/83bfa783bead/
changeset:   83bfa783bead
branch:      yt
user:        samskillman
date:        2011-10-19 18:25:55
summary:     Merging
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:25:46 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:25:55 2011 -0400
@@ -329,6 +329,10 @@
     def pop(self):
         self.communicators.pop()
 
+communication_system = CommunicationSystem()
+if parallel_enabled:
+    communication_system.communicators.append(MPI.COMM_WORLD)
+
 class Communicator(object):
     comm = None
     _grids = None


http://bitbucket.org/yt_analysis/yt/changeset/4c750d972b93/
changeset:   4c750d972b93
branch:      yt
user:        brittonsmith
date:        2011-10-19 18:33:21
summary:     Added parallel_objects function.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:09:22 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:33:21 2011 -0400
@@ -309,6 +309,18 @@
     def _finalize_parallel(self):
         pass
 
+def parallel_objects(objects, njobs):
+    my_communicator = communication_system.communicators[-1]
+    my_size = my_communicator._par_size
+    my_rank = my_communicator._par_rank
+    all_new_comms = na.arange(my_size)
+    my_new_id = int(my_rank / njobs)
+    communication_system.push_with_ids(all_new_comms[my_new_id])
+
+    for obj in objects[my_new_id::njobs]:
+        yield obj
+    communication_system.communicators.pop()
+
 class CommunicationSystem(object):
     communicators = []
     def push(self, size=None, ranks=None):
@@ -323,7 +335,13 @@
         new_comm = MPI.COMM_WORLD.Create(group)
         self.communicators.append(Communicator(new_comm))
         return new_comm
-        
+
+    def push_with_ids(self, ids):
+        group = self.communicators[-1].comm.Group.Incl(ids)
+        new_comm = self.communicators[-1].comm.Create(group)
+        self.communicators.append(Communicator(new_comm))
+        return new_comm
+
     def pop(self):
         self.communicators.pop()
 

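parallel_objects above splits the ranks into job groups, pushes a subcommunicator for the duration of the loop, and hands each group a strided slice of the object list.  A stripped-down sketch of just the assignment logic, with the communicator push/pop omitted (the indexing mirrors the changeset's my_new_id = int(my_rank / njobs)):

    def parallel_objects_assignment(objects, njobs, my_rank):
        """Yield the objects this rank's job group works on; the real
        function also pushes a subcommunicator before the loop and pops
        it afterwards."""
        my_new_id = my_rank // njobs
        for obj in objects[my_new_id::njobs]:
            yield obj

    # 4 ranks, 2 jobs: ranks 0-1 share the even-indexed outputs, ranks 2-3 the odd ones
    outputs = ["DD0000", "DD0001", "DD0002", "DD0003", "DD0004"]
    for rank in range(4):
        print(rank, list(parallel_objects_assignment(outputs, njobs=2, my_rank=rank)))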

http://bitbucket.org/yt_analysis/yt/changeset/a528a066679b/
changeset:   a528a066679b
branch:      yt
user:        brittonsmith
date:        2011-10-19 18:33:34
summary:     Merged.
affected #:  3 files (-1 bytes)

--- a/yt/data_objects/data_containers.py	Wed Oct 19 12:33:21 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 12:33:34 2011 -0400
@@ -1627,7 +1627,7 @@
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
-        tree = self.merge_quadtree_buffers(tree)
+        tree = self.comm.merge_quadtree_buffers(tree)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 12:33:21 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 12:33:34 2011 -0400
@@ -1178,7 +1178,7 @@
             if front.owner == my_rank:
                 if front.owner == parent.owner:
                     mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
-                    arr2 = PT._recv_array(back.owner, tag=back.owner).reshape(
+                    arr2 = self.comm._recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1192,17 +1192,17 @@
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
-                    PT._send_array(self.image.ravel(), back.owner, tag=my_rank)
+                    self.comm._send_array(self.image.ravel(), back.owner, tag=my_rank)
 
                 
             if back.owner == my_rank:
                 if front.owner == parent.owner:
                     mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
-                    PT._send_array(self.image.ravel(), front.owner, tag=my_rank)
+                    self.comm._send_array(self.image.ravel(), front.owner, tag=my_rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
-                    arr2 = PT._recv_array(front.owner, tag=front.owner).reshape(
+                    arr2 = self.comm._recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1225,7 +1225,7 @@
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
         if my_rank != 0:
-            PT._recv_array(my_rank-1, tag=my_rank-1)
+            self.comm._recv_array(my_rank-1, tag=my_rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1238,13 +1238,13 @@
                         pass
         f.close()
         if my_rank != (nprocs-1):
-            PT._send_array([0],my_rank+1, tag=my_rank)
+            self.comm._send_array([0],my_rank+1, tag=my_rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
         if my_rank != 0:
-            PT._recv_array(my_rank-1, tag=my_rank-1)
+            self.comm._recv_array(my_rank-1, tag=my_rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():
@@ -1268,7 +1268,7 @@
         except:
             pass
         if my_rank != (nprocs-1):
-            PT._send_array([0],my_rank+1, tag=my_rank)
+            self.comm._send_array([0],my_rank+1, tag=my_rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:33:21 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:33:34 2011 -0400
@@ -115,6 +115,8 @@
     for dt, val in dtype_names.items():
         if dt == dtype: return val
 
+__tocast = 'c'
+
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then
@@ -324,7 +326,7 @@
 class CommunicationSystem(object):
     communicators = []
     def push(self, size=None, ranks=None):
-        if size = None:
+        if size is None:
             size = len(available_ranks)
         if len(available_ranks) < size:
             raise RuntimeError
@@ -351,14 +353,15 @@
 
 class Communicator(object):
     comm = None
+    _grids = None
+    _distributed = None
     def __init__(self, comm=MPI.COMM_WORLD):
         self.comm = comm
+        self._distributed = self.comm.size > 1
     """
     This is an interface specification providing several useful utility
     functions for analyzing something in parallel.
     """
-    _grids = None
-    _distributed = (comm.size > 1)
 
     def _partition_hierarchy_2d(self, axis):
         if not self._distributed:
@@ -620,7 +623,7 @@
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
-                rv = _alltoallv_array(dd, arr_size, offsets, sizes)
+                rv = self._alltoallv_array(dd, arr_size, offsets, sizes)
                 data[key] = rv
             return data
         elif datatype == "array" and op == "cat":
@@ -649,7 +652,7 @@
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
-            data = _alltoallv_array(data, arr_size, offsets, sizes)
+            data = self._alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
             if self.comm.rank == 0:
@@ -863,55 +866,44 @@
             qt.frombuffer(*buf)
         return qt
 
-__tocast = 'c'
 
-def _send_array(arr, dest, tag = 0):
-    if not isinstance(arr, na.ndarray):
-        MPI.COMM_WORLD.send((None,None), dest=dest, tag=tag)
-        MPI.COMM_WORLD.send(arr, dest=dest, tag=tag)
-        return
-    tmp = arr.view(__tocast) # Cast to CHAR
-    # communicate type and shape
-    MPI.COMM_WORLD.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
-    MPI.COMM_WORLD.Send([arr, MPI.CHAR], dest=dest, tag=tag)
-    del tmp
+    def _send_array(self, arr, dest, tag = 0):
+        if not isinstance(arr, na.ndarray):
+            self.comm.send((None,None), dest=dest, tag=tag)
+            self.comm.send(arr, dest=dest, tag=tag)
+            return
+        tmp = arr.view(__tocast) # Cast to CHAR
+        # communicate type and shape
+        self.comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
+        self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
+        del tmp
 
-def _recv_array(source, tag = 0):
-    dt, ne = MPI.COMM_WORLD.recv(source=source, tag=tag)
-    if dt is None and ne is None:
-        return MPI.COMM_WORLD.recv(source=source, tag=tag)
-    arr = na.empty(ne, dtype=dt)
-    tmp = arr.view(__tocast)
-    MPI.COMM_WORLD.Recv([tmp, MPI.CHAR], source=source, tag=tag)
-    return arr
-
-def _bcast_array(arr, root = 0):
-    if MPI.COMM_WORLD.rank == root:
-        tmp = arr.view(__tocast) # Cast to CHAR
-        MPI.COMM_WORLD.bcast((arr.dtype.str, arr.shape), root=root)
-    else:
-        dt, ne = MPI.COMM_WORLD.bcast(None, root=root)
+    def _recv_array(self, source, tag = 0):
+        dt, ne = self.comm.recv(source=source, tag=tag)
+        if dt is None and ne is None:
+            return self.comm.recv(source=source, tag=tag)
         arr = na.empty(ne, dtype=dt)
         tmp = arr.view(__tocast)
-    MPI.COMM_WORLD.Bcast([tmp, MPI.CHAR], root=root)
-    return arr
+        self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
+        return arr
 
-def _alltoallv_array(send, total_size, offsets, sizes):
-    if len(send.shape) > 1:
-        recv = []
-        for i in range(send.shape[0]):
-            recv.append(_alltoallv_array(send[i,:].copy(), total_size, offsets, sizes))
-        recv = na.array(recv)
+    def _alltoallv_array(self, send, total_size, offsets, sizes):
+        if len(send.shape) > 1:
+            recv = []
+            for i in range(send.shape[0]):
+                recv.append(self._alltoallv_array(send[i,:].copy(), 
+                                                  total_size, offsets, sizes))
+            recv = na.array(recv)
+            return recv
+        offset = offsets[self.comm.rank]
+        tmp_send = send.view(__tocast)
+        recv = na.empty(total_size, dtype=send.dtype)
+        recv[offset:offset+send.size] = send[:]
+        dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
+        roff = [off * dtr for off in offsets]
+        rsize = [siz * dtr for siz in sizes]
+        tmp_recv = recv.view(__tocast)
+        self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
+                                  (tmp_recv, (rsize, roff), MPI.CHAR))
         return recv
-    offset = offsets[MPI.COMM_WORLD.rank]
-    tmp_send = send.view(__tocast)
-    recv = na.empty(total_size, dtype=send.dtype)
-    recv[offset:offset+send.size] = send[:]
-    dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
-    roff = [off * dtr for off in offsets]
-    rsize = [siz * dtr for siz in sizes]
-    tmp_recv = recv.view(__tocast)
-    MPI.COMM_WORLD.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
-                              (tmp_recv, (rsize, roff), MPI.CHAR))
-    return recv
     


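The changeset above folds the module-level _send_array / _recv_array / _alltoallv_array helpers into the Communicator class, so call sites reach them through self.comm instead of the PT module. A minimal stand-alone sketch of that pattern (assuming mpi4py; the class and method names here are illustrative, not yt's) is below. Note that a module-level constant with two leading underscores is name-mangled when referenced inside a class body, which is why the sketch uses a single underscore.

    import numpy as np
    from mpi4py import MPI

    _TOCAST = 'c'  # view arrays as raw chars for the buffer-based MPI calls

    class ArrayCommunicator(object):
        """Illustrative wrapper: array traffic goes through methods on a
        communicator object instead of module-level functions."""
        def __init__(self, comm=MPI.COMM_WORLD):
            self.comm = comm

        def send_array(self, arr, dest, tag=0):
            if not isinstance(arr, np.ndarray):
                # non-arrays fall back to the pickled send
                self.comm.send((None, None), dest=dest, tag=tag)
                self.comm.send(arr, dest=dest, tag=tag)
                return
            # announce dtype and shape first, then ship the raw bytes
            self.comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
            self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)

        def recv_array(self, source, tag=0):
            dt, shape = self.comm.recv(source=source, tag=tag)
            if dt is None and shape is None:
                return self.comm.recv(source=source, tag=tag)
            arr = np.empty(shape, dtype=dt)
            self.comm.Recv([arr.view(_TOCAST), MPI.CHAR], source=source, tag=tag)
            return arr
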
http://bitbucket.org/yt_analysis/yt/changeset/19926e48039e/
changeset:   19926e48039e
branch:      yt
user:        samskillman
date:        2011-10-19 18:35:07
summary:     Check parallel_capable, not parallel_enabled.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:33:34 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:35:07 2011 -0400
@@ -348,7 +348,7 @@
         self.communicators.pop()
 
 communication_system = CommunicationSystem()
-if parallel_enabled:
+if parallel_capable:
     communication_system.communicators.append(MPI.COMM_WORLD)
 
 class Communicator(object):


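The one-line fix above keys the module-level communicator setup off parallel_capable rather than the old parallel_enabled name. As a rough, hypothetical sketch of what such a capability flag amounts to (yt's real flag is set elsewhere at startup; this only illustrates the idea):

    def detect_parallel_capable():
        # Capable = mpi4py imports and we were launched with more than one
        # MPI task; it says nothing about whether parallelism was requested.
        try:
            from mpi4py import MPI
        except ImportError:
            return False
        return MPI.COMM_WORLD.Get_size() > 1

    parallel_capable = detect_parallel_capable()

    communicators = []
    if parallel_capable:
        from mpi4py import MPI
        communicators.append(MPI.COMM_WORLD)
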
http://bitbucket.org/yt_analysis/yt/changeset/e4e73e14268c/
changeset:   e4e73e14268c
branch:      yt
user:        samskillman
date:        2011-10-19 18:41:08
summary:     Reordering definitions so the communication system and ParallelAnalysisInterface can be instantiated.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:35:07 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:41:08 2011 -0400
@@ -276,41 +276,6 @@
     if parallel_capable: return root_only
     return func
 
-class ParallelAnalysisInterface(object):
-    comm = None
-    _grids = None
-    _distributed = None
-
-    def __init__(self, size=1):
-        self.comm = CommunicationSystem.pop()
-        self._grids = self.comm._grids
-        self._distributed = self.comm._distributed
-
-    def _get_objs(self, attr, *args, **kwargs):
-        if self._distributed:
-            rr = kwargs.pop("round_robin", False)
-            self._initialize_parallel(*args, **kwargs)
-            return ParallelObjectIterator(self, attr=attr,
-                    round_robin=rr)
-        return ObjectIterator(self, attr=attr)
-
-    def _get_grids(self, *args, **kwargs):
-        if self._distributed:
-            self._initialize_parallel(*args, **kwargs)
-            return ParallelObjectIterator(self, attr='_grids')
-        return ObjectIterator(self, attr='_grids')
-
-    def _get_grid_objs(self):
-        if self._distributed:
-            return ParallelObjectIterator(self, True, attr='_grids')
-        return ObjectIterator(self, True, attr='_grids')
-
-    def _initialize_parallel(self):
-        pass
-
-    def _finalize_parallel(self):
-        pass
-
 def parallel_objects(objects, njobs):
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator._par_size
@@ -347,10 +312,6 @@
     def pop(self):
         self.communicators.pop()
 
-communication_system = CommunicationSystem()
-if parallel_capable:
-    communication_system.communicators.append(MPI.COMM_WORLD)
-
 class Communicator(object):
     comm = None
     _grids = None
@@ -906,4 +867,45 @@
         self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
                                   (tmp_recv, (rsize, roff), MPI.CHAR))
         return recv
+
+communication_system = CommunicationSystem()
+if parallel_capable:
+    communication_system.communicators.append(Communicator(MPI.COMM_WORLD))
+
+class ParallelAnalysisInterface(object):
+    comm = None
+    _grids = None
+    _distributed = None
+
+    def __init__(self, size=1):
+        self.comm = communication_system.communicators[-1]
+        self._grids = self.comm._grids
+        self._distributed = self.comm._distributed
+
+    def _get_objs(self, attr, *args, **kwargs):
+        if self._distributed:
+            rr = kwargs.pop("round_robin", False)
+            self._initialize_parallel(*args, **kwargs)
+            return ParallelObjectIterator(self, attr=attr,
+                    round_robin=rr)
+        return ObjectIterator(self, attr=attr)
+
+    def _get_grids(self, *args, **kwargs):
+        if self._distributed:
+            self._initialize_parallel(*args, **kwargs)
+            return ParallelObjectIterator(self, attr='_grids')
+        return ObjectIterator(self, attr='_grids')
+
+    def _get_grid_objs(self):
+        if self._distributed:
+            return ParallelObjectIterator(self, True, attr='_grids')
+        return ObjectIterator(self, True, attr='_grids')
+
+    def _initialize_parallel(self):
+        pass
+
+    def _finalize_parallel(self):
+        pass
+
+
     


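The reshuffle above exists purely for definition order: the communication_system singleton now appends Communicator(MPI.COMM_WORLD), so it must be built after the Communicator class is defined and before any ParallelAnalysisInterface instance asks for it. A compressed sketch of that ordering constraint, with placeholder bodies:

    class CommunicationSystem(object):
        def __init__(self):
            self.communicators = []

    # Building the singleton here and appending Communicator(...) would
    # raise NameError, because Communicator is not defined yet.

    class Communicator(object):
        def __init__(self, comm=None):
            self.comm = comm

    # Instantiate only after every class it needs exists.
    communication_system = CommunicationSystem()
    communication_system.communicators.append(Communicator(None))

    class ParallelAnalysisInterface(object):
        def __init__(self, size=1):
            # consumers always take the top of the communicator stack
            self.comm = communication_system.communicators[-1]
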
http://bitbucket.org/yt_analysis/yt/changeset/0677a9f60cff/
changeset:   0677a9f60cff
branch:      yt
user:        MatthewTurk
date:        2011-10-19 18:43:56
summary:     self._par_rank => self.comm.rank
self._par_size => self.comm.size
affected #:  7 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 12:43:56 2011 -0400
@@ -1857,20 +1857,20 @@
         # analyzing a subvolume.
         ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
         if ytcfg.getboolean("yt","inline") == False and \
-            resize and self._par_size != 1 and subvolume is None:
-            random.seed(self._par_rank)
+            resize and self.comm.size != 1 and subvolume is None:
+            random.seed(self.comm.rank)
             cut_list = self._partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
-            if self._par_rank == 0:
+            if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
             self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
-            my_bounds = self.bucket_bounds[self._par_rank]
+            my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
-        if self._par_size == 1:
+        if self.comm.size == 1:
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
@@ -1964,8 +1964,8 @@
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self._par_size)
-        n_random = int(adjust * float(random_points) / self._par_size)
+        adjust = float(local_parts) / ( float(n_parts) / self.comm.size)
+        n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 12:43:56 2011 -0400
@@ -1161,7 +1161,7 @@
         Set_list = []
         # We only want the holes that are modulo mine.
         keys = na.arange(groupID, dtype='int64')
-        size = self._par_size
+        size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
         mine_groupIDs = set([]) # Records only ones modulo mine.


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 12:43:56 2011 -0400
@@ -169,10 +169,10 @@
         if self.sleep <= 0.:
             self.sleep = 5
         # MPI stuff
-        self.mine = self._par_rank
+        self.mine = self.comm.rank
         if self.mine is None:
             self.mine = 0
-        self.size = self._par_size
+        self.size = self.comm.size
         if self.size is None:
             self.size = 1
         # Get to work.


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 12:43:56 2011 -0400
@@ -108,8 +108,8 @@
         self.constant_theta = theta
         self.constant_phi = phi
         # MPI stuff.
-        self.size = self._par_size
-        self.mine = self._par_rank
+        self.size = self.comm.size
+        self.mine = self.comm.rank
         self.vol_ratio = vol_ratio
         if self.vol_ratio == -1:
             self.vol_ratio = self.size


--- a/yt/frontends/enzo/data_structures.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Wed Oct 19 12:43:56 2011 -0400
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self._par_rank == 0 or self._par_rank == None:
+        if self.comm.rank == 0 or self._par_rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")
@@ -589,7 +589,7 @@
             self.derived_field_list = self.__class__._cached_derived_field_list
 
     def _generate_random_grids(self):
-        my_rank = self._par_rank
+        my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
             starter = na.random.randint(0, 20)


--- a/yt/visualization/streamlines.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/visualization/streamlines.py	Wed Oct 19 12:43:56 2011 -0400
@@ -125,8 +125,8 @@
             self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
-        nprocs = self._par_size
-        my_rank = self._par_rank
+        nprocs = self.comm.size
+        my_rank = self.comm.rank
         self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
 
         pbar = get_pbar("Streamlining", self.N)


--- a/yt/visualization/volume_rendering/camera.py	Wed Oct 19 12:41:08 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Wed Oct 19 12:43:56 2011 -0400
@@ -357,7 +357,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._par_rank is 0 and fn is not None:
+        if self.comm.rank is 0 and fn is not None:
             if clip_ratio is not None:
                 write_bitmap(image, fn, clip_ratio*image.std())
             else:
@@ -624,7 +624,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._par_rank is 0 and fn is not None:
+        if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg


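This sweep rewrites call sites from the mixin properties _par_rank / _par_size to the communicator's own rank / size. A self-contained sketch of the resulting call-site shape is below; FakeComm is a stand-in so the snippet runs without MPI, and when guarding root-only work, comparing the rank with == 0 is the more robust spelling than the 'is 0' kept in the camera.py hunks.

    class FakeComm(object):
        # stand-in exposing just the two attributes the new call sites use
        rank = 0
        size = 1

    class Streamlines(object):
        comm = FakeComm()

        def owned_streamlines(self, start_positions):
            # round-robin ownership by rank, as in streamlines.py above
            nprocs = self.comm.size
            my_rank = self.comm.rank
            return start_positions[my_rank::nprocs]

    print(Streamlines().owned_streamlines(list(range(8))))  # rank 0 of 1 -> all
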
http://bitbucket.org/yt_analysis/yt/changeset/1cc947a7426d/
changeset:   1cc947a7426d
branch:      yt
user:        MatthewTurk
date:        2011-10-19 18:44:58
summary:     Fixing _par_rank and _par_size in one more place
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:43:56 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:44:58 2011 -0400
@@ -278,8 +278,8 @@
 
 def parallel_objects(objects, njobs):
     my_communicator = communication_system.communicators[-1]
-    my_size = my_communicator._par_size
-    my_rank = my_communicator._par_rank
+    my_size = my_communicator.size
+    my_rank = my_communicator.rank
     all_new_comms = na.arange(my_size)
     my_new_id = int(my_rank / njobs)
     communication_system.push_with_ids(all_new_comms[my_new_id])
@@ -700,12 +700,12 @@
     ###
 
     @property
-    def _par_size(self):
+    def size(self):
         if not self._distributed: return 1
         return self.comm.size
 
     @property
-    def _par_rank(self):
+    def rank(self):
         if not self._distributed: return 0
         return self.comm.rank
 


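The property renames above give the Communicator public rank and size attributes with a serial fallback, so the same analysis code runs untouched on one core. A minimal sketch of that idea (not yt's full class):

    class Communicator(object):
        def __init__(self, comm=None):
            self.comm = comm
            self._distributed = comm is not None

        @property
        def size(self):
            if not self._distributed:
                return 1
            return self.comm.size

        @property
        def rank(self):
            if not self._distributed:
                return 0
            return self.comm.rank

    serial = Communicator()          # no MPI communicator attached
    print(serial.rank, serial.size)  # -> 0 1
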
http://bitbucket.org/yt_analysis/yt/changeset/6486bd8c671c/
changeset:   6486bd8c671c
branch:      yt
user:        samskillman
date:        2011-10-19 18:57:07
summary:     Moving _partition_hierarchy_2d calls over to self.comm.
affected #:  2 files (-1 bytes)

--- a/yt/data_objects/data_containers.py	Wed Oct 19 12:44:58 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 12:57:07 2011 -0400
@@ -1841,7 +1841,7 @@
 
     def _initialize_source(self, source = None):
         if source is None:
-            check, source = self._partition_hierarchy_2d(self.axis)
+            check, source = self.comm.partition_hierarchy_2d(self.axis)
             self._check_region = check
             #self._okay_to_serialize = (not check)
         else:


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:44:58 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:57:07 2011 -0400
@@ -324,7 +324,7 @@
     functions for analyzing something in parallel.
     """
 
-    def _partition_hierarchy_2d(self, axis):
+    def partition_hierarchy_2d(self, axis):
         if not self._distributed:
            return False, self.hierarchy.grid_collection(self.center, 
                                                         self.hierarchy.grids)


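Here the 2D domain decomposition becomes a public method on the communicator, called as self.comm.partition_hierarchy_2d(axis). As a toy illustration of what an axis-aligned split by rank involves (this is not yt's algorithm, only the shape of the idea):

    import numpy as np

    def partition_along_axis(rank, size, axis, left_edge, right_edge):
        # give each rank an equal slab of the domain along one axis
        le = np.array(left_edge, dtype='float64')
        re = np.array(right_edge, dtype='float64')
        width = (re[axis] - le[axis]) / size
        my_le, my_re = le.copy(), re.copy()
        my_le[axis] = le[axis] + rank * width
        my_re[axis] = le[axis] + (rank + 1) * width
        return my_le, my_re

    # rank 1 of 4 owns x in [0.25, 0.5) of the unit box
    print(partition_along_axis(1, 4, 0, [0., 0., 0.], [1., 1., 1.]))
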
http://bitbucket.org/yt_analysis/yt/changeset/df0989663171/
changeset:   df0989663171
branch:      yt
user:        samskillman
date:        2011-10-19 19:01:37
summary:     More ._method -> .comm.method renames: barriers and the 3D partition helpers now live on the communicator.
affected #:  7 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:01:37 2011 -0400
@@ -1849,7 +1849,7 @@
         topbounds = na.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         # also get the total mass of particles
         yt_counters("Reading Data")
@@ -1859,7 +1859,7 @@
         if ytcfg.getboolean("yt","inline") == False and \
             resize and self.comm.size != 1 and subvolume is None:
             random.seed(self.comm.rank)
-            cut_list = self._partition_hierarchy_3d_bisection_list()
+            cut_list = self.comm.partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
             if self.comm.rank == 0:
@@ -1945,7 +1945,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
-                self._partition_hierarchy_3d(ds=self._data_source,
+                self.comm.partition_hierarchy_3d(ds=self._data_source,
                 padding=0.)
         self.bounds = (LE, RE)
         (LE_padding, RE_padding) = self.padding
@@ -2108,7 +2108,7 @@
         # a small part is actually going to be used.
         self.padding = 0.0
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+            self.comm.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
         if dm_only:
             select = self._get_dm_indices()
@@ -2124,7 +2124,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         self.padding = padding #* pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds = self._data_source,
+            self.comm.partition_hierarchy_3d(ds = self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary
@@ -2192,7 +2192,7 @@
         self.padding = 0.0 #* pf["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
             n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
@@ -2210,7 +2210,7 @@
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:01:37 2011 -0400
@@ -290,7 +290,7 @@
             hooks.append(self._mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
         # Let's wait here to be absolutely sure that all the receive buffers
         # have been created before any sending happens!
-        self._barrier()
+        self.comm.barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
             hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
@@ -781,7 +781,7 @@
             hooks.append(self._mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
             hooks.append(self._mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure all the receive buffers are set before continuing.
-        self._barrier()
+        self.comm.barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
             hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
@@ -952,7 +952,7 @@
             hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
             hooks.append(self._mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure the recv buffers are set before continuing.
-        self._barrier()
+        self.comm.barrier()
         # Now we send them.
         for neighbor in self.neighbors:
             hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:01:37 2011 -0400
@@ -181,7 +181,7 @@
                 os.unlink(self.database)
             except:
                 pass
-        self._barrier()
+        self.comm.barrier()
         self._open_create_database()
         self._create_halo_table()
         self._run_halo_finder_add_to_db()
@@ -204,7 +204,7 @@
         # Now update the database with all the writes.
         mylog.info("Updating database with parent-child relationships.")
         self._copy_and_update_db()
-        self._barrier()
+        self.comm.barrier()
         mylog.info("Done!")
         
     def _read_halo_lists(self):
@@ -276,7 +276,7 @@
                     line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
                     self.cursor.execute(line, values)
                 self.conn.commit()
-            self._barrier()
+            self.comm.barrier()
             del hp
     
     def _open_create_database(self):
@@ -284,7 +284,7 @@
         # doesn't already exist. Open it first on root, and then on the others.
         if self.mine == 0:
             self.conn = sql.connect(self.database)
-        self._barrier()
+        self.comm.barrier()
         self._ensure_db_sync()
         if self.mine != 0:
             self.conn = sql.connect(self.database)
@@ -295,7 +295,7 @@
         # parallel file system funniness, things will go bad very quickly.
         # Therefore, just to be very, very careful, we will ensure that the
         # md5 hash of the file is identical across all tasks before proceeding.
-        self._barrier()
+        self.comm.barrier()
         for i in range(5):
             try:
                 file = open(self.database)
@@ -339,7 +339,7 @@
                 self.conn.commit()
             except sql.OperationalError:
                 pass
-        self._barrier()
+        self.comm.barrier()
     
     def _find_likely_children(self, parentfile, childfile):
         # For each halo in the parent list, identify likely children in the 
@@ -718,7 +718,7 @@
             temp_cursor.close()
             temp_conn.close()
         self._close_database()
-        self._barrier()
+        self.comm.barrier()
         if self.mine == 0:
             os.rename(temp_name, self.database)
 


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:01:37 2011 -0400
@@ -161,7 +161,7 @@
             ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
                 self.right_edge)
             padded, self.LE, self.RE, self.ds = \
-            self._partition_hierarchy_3d(ds = ds, padding=0.,
+            self.comm.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)
         else:
             self.left_edge = left_edge
@@ -169,10 +169,10 @@
             # We do this twice, first with no 'buffer' to get the unbuffered
             # self.LE/RE, and then second to get a buffered self.ds.
             padded, self.LE, self.RE, temp = \
-                self._partition_region_3d(left_edge, right_edge,
+                self.comm.partition_region_3d(left_edge, right_edge,
                     rank_ratio=self.vol_ratio)
             padded, temp, temp, self.ds = \
-                self._partition_region_3d(left_edge - self.lengths[-1], \
+                self.comm.partition_region_3d(left_edge - self.lengths[-1], \
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge


--- a/yt/data_objects/hierarchy.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/data_objects/hierarchy.py	Wed Oct 19 13:01:37 2011 -0400
@@ -178,7 +178,7 @@
             writeable = os.access(fn, os.W_OK)
         writeable = writeable and not ytcfg.getboolean('yt','onlydeserialize')
         # We now have our conditional stuff
-        self._barrier()
+        self.comm.barrier()
         if not writeable and not exists: return
         if writeable:
             try:


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:01:37 2011 -0400
@@ -1138,7 +1138,7 @@
                     yield node.brick
          
         self.reduce_tree_images(self.tree, front_center)
-        self._barrier()
+        self.comm.barrier()
         
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:01:37 2011 -0400
@@ -348,7 +348,7 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
+    def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
         LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
@@ -494,7 +494,7 @@
             bot_keys = na.array(bot_keys, dtype='int64')
             vals = na.array(vals, dtype='float64')
             return (top_keys, bot_keys, vals)
-        self._barrier()
+        self.comm.barrier()
         size = 0
         top_keys = []
         bot_keys = []
@@ -711,7 +711,7 @@
 
     def _mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
-        self._barrier()
+        self.comm.barrier()
         data = None
         if self.comm.rank == 0:
             data = {0:info}
@@ -721,7 +721,7 @@
             self.comm.send(info, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
         data = self.comm.bcast(data, root=0)
-        self._barrier()
+        self.comm.barrier()
         return self.comm.rank, data
 
     def _get_dependencies(self, fields):


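Most of this changeset swaps self._barrier() for self.comm.barrier(). The merger_tree.py hunks show why the barrier matters: the root task creates the SQLite database, everyone synchronizes, and only then do the other tasks open it. A small sketch of that create-on-root-then-barrier pattern (SerialComm is a stand-in so it runs without MPI):

    import sqlite3 as sql

    class SerialComm(object):
        # stand-in communicator so the sketch runs on one core
        rank = 0
        def barrier(self):
            pass

    def open_shared_database(comm, path):
        if comm.rank == 0:
            sql.connect(path).close()  # root touches the file first
        comm.barrier()                 # wait until it exists everywhere
        return sql.connect(path)       # now every rank can open it

    conn = open_shared_database(SerialComm(), ":memory:")  # real use: a shared path
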
http://bitbucket.org/yt_analysis/yt/changeset/a79e61df0a7b/
changeset:   a79e61df0a7b
branch:      yt
user:        samskillman
date:        2011-10-19 19:02:19
summary:     Put the fixes in PAI (parallel_analysis_interface.py) as well.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:01:37 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:02:19 2011 -0400
@@ -394,7 +394,7 @@
 
         return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
 
-    def _partition_region_3d(self, left_edge, right_edge, padding=0.0,
+    def partition_region_3d(self, left_edge, right_edge, padding=0.0,
             rank_ratio = 1):
         """
         Given a region, it subdivides it into smaller regions for parallel
@@ -421,7 +421,7 @@
 
         return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
-    def _partition_hierarchy_3d_bisection_list(self):
+    def partition_hierarchy_3d_bisection_list(self):
         """
         Returns an array that is used to drive _partition_hierarchy_3d_bisection,
         below.
@@ -464,7 +464,7 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
 
-    def _barrier(self):
+    def barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
         self.comm.Barrier()


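This follow-up applies the same renames inside parallel_analysis_interface.py itself: _partition_region_3d, _partition_hierarchy_3d_bisection_list and _barrier lose their leading underscores. The commit renames them outright; if a transition period were wanted, a class-level alias would keep old call sites working (this shim is hypothetical, not part of the commit):

    class Communicator(object):
        def barrier(self):
            # the real implementation would call self.comm.Barrier()
            return None

        # backwards-compatible alias for callers still using the old name
        _barrier = barrier

    Communicator()._barrier()   # resolves to the renamed barrier()
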
http://bitbucket.org/yt_analysis/yt/changeset/3f5cc85aafc9/
changeset:   3f5cc85aafc9
branch:      yt
user:        samskillman
date:        2011-10-19 19:05:26
summary:     Moving call sites onto self.comm, working through the interface up to _mpi_allreduce.
affected #:  12 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:05:26 2011 -0400
@@ -1412,7 +1412,7 @@
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
-        self._mpi_exit_test(exit)
+        self.comm.mpi_exit_test(exit)
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
             self.particle_fields["particle_position_x"] / self.old_period[0],
@@ -1864,7 +1864,7 @@
             self.bucket_bounds = []
             if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
-            self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
+            self.bucket_bounds = self.comm.mpi_bcast_pickled(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
@@ -1988,7 +1988,7 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._par_combine_object(my_points[0],
+        root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
         del my_points
         if mine == 0:


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:05:26 2011 -0400
@@ -684,9 +684,9 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._par_combine_object(self.densest_in_chain,
+        self.densest_in_chain = self.comm.par_combine_object(self.densest_in_chain,
                 datatype="array", op="cat")
-        self.densest_in_chain_real_index = self._par_combine_object(
+        self.densest_in_chain_real_index = self.comm.par_combine_object(
                 self.densest_in_chain_real_index,
                 datatype="array", op="cat")
         yt_counters("global chain MPI stuff.")
@@ -841,7 +841,7 @@
         # Now we make a global dict of how many particles each task is
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
-        self.global_padded_count = self._par_combine_object(
+        self.global_padded_count = self.comm.par_combine_object(
                 self.global_padded_count, datatype = "dict", op = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()
@@ -937,7 +937,7 @@
         # but there's so many places in this that need to be globally synched
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
-        global_annulus_count = self._par_combine_object(
+        global_annulus_count = self.comm.par_combine_object(
                 global_annulus_count, datatype = "dict", op = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)
@@ -1067,7 +1067,7 @@
         """
         yt_counters("make_global_chain_densest_n")
         (self.top_keys, self.bot_keys, self.vals) = \
-            self._mpi_maxdict_dict(self.chain_densest_n)
+            self.comm.mpi_maxdict_dict(self.chain_densest_n)
         self.__max_memory()
         del self.chain_densest_n
         yt_counters("make_global_chain_densest_n")


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:05:26 2011 -0400
@@ -549,15 +549,15 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._par_combine_object(parent_IDs_tosend,
+        parent_IDs_tosend = self.comm.par_combine_object(parent_IDs_tosend,
                 datatype="array", op="cat")
-        parent_masses_tosend = self._par_combine_object(parent_masses_tosend,
+        parent_masses_tosend = self.comm.par_combine_object(parent_masses_tosend,
                 datatype="array", op="cat")
-        parent_halos_tosend = self._par_combine_object(parent_halos_tosend,
+        parent_halos_tosend = self.comm.par_combine_object(parent_halos_tosend,
                 datatype="array", op="cat")
-        child_IDs_tosend = self._par_combine_object(child_IDs_tosend,
+        child_IDs_tosend = self.comm.par_combine_object(child_IDs_tosend,
                 datatype="array", op="cat")
-        child_halos_tosend = self._par_combine_object(child_halos_tosend,
+        child_halos_tosend = self.comm.par_combine_object(child_halos_tosend,
                 datatype="array", op="cat")
 
         # Resort the received particles.


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 13:05:26 2011 -0400
@@ -495,12 +495,12 @@
             updated_halos.append(halo)
         
         # And here is where we bring it all together.
-        updated_halos = self._par_combine_object(updated_halos,
+        updated_halos = self.comm.par_combine_object(updated_halos,
                             datatype="list", op="cat")
         updated_halos.sort(key = lambda a:a['id'])
         self.all_halos = updated_halos
 
-        self.filtered_halos = self._par_combine_object(self.filtered_halos)
+        self.filtered_halos = self.comm.par_combine_object(self.filtered_halos)
                             datatype="list", op="cat")
         self.filtered_halos.sort(key = lambda a:a['id'])
 


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:05:26 2011 -0400
@@ -403,7 +403,7 @@
             status = 0
         # Broadcast the status from root - we stop only if root thinks we should
         # stop.
-        status = self._mpi_bcast_pickled(status)
+        status = self.comm.mpi_bcast_pickled(status)
         if status == 0: return True
         if self.comm_cycle_count < status:
             return True


--- a/yt/data_objects/data_containers.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 13:05:26 2011 -0400
@@ -811,7 +811,7 @@
             self[field] = temp_data[field] 
         # We finalize
         if temp_data != {}:
-            temp_data = self._par_combine_object(temp_data,
+            temp_data = self.comm.par_combine_object(temp_data,
                     datatype='dict', op='cat')
         # And set, for the next group
         for field in temp_data.keys():
@@ -999,13 +999,13 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0:
             points = None
-            t = self._par_combine_object(None, datatype="array", op="cat")
+            t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
             points = na.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
-            t = self._par_combine_object(points.transpose(),
+            t = self.comm.par_combine_object(points.transpose(),
                         datatype="array", op="cat")
         self['px'] = t[0,:]
         self['py'] = t[1,:]
@@ -1221,7 +1221,7 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
         else: points = na.concatenate(points)
-        t = self._par_combine_object(points, datatype="array", op="cat")
+        t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
         self['px'] = na.dot(pos, self._x_vec)
         self['py'] = na.dot(pos, self._y_vec)
@@ -1614,13 +1614,13 @@
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
                     self._get_dependencies(fields))
-            self._preload([g for g in self._get_grid_objs()],
+            self.comm.preload([g for g in self._get_grid_objs()],
                           self._get_dependencies(fields), self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload([g for g in self._get_grid_objs()
+                self.comm.preload([g for g in self._get_grid_objs()
                                  if g.Level == level],
                               self._get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
@@ -2003,11 +2003,11 @@
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
                     len(self.source._grids), self._get_dependencies(fields))
-            self._preload(self.source._grids,
+            self.comm.preload(self.source._grids,
                           self._get_dependencies(fields), self.hierarchy.io)
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload(self.source.select_grids(level),
+                self.comm.preload(self.source.select_grids(level),
                               self._get_dependencies(fields), self.hierarchy.io)
             self.__calculate_overlap(level)
             my_coords, my_pdx, my_pdy, my_fields = \
@@ -2044,7 +2044,7 @@
         data['pdy'] *= 0.5
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._par_combine_object(temp_data, datatype='dict', op='cat')
+        data = self.comm.par_combine_object(temp_data, datatype='dict', op='cat')
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()


--- a/yt/data_objects/derived_quantities.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Wed Oct 19 13:05:26 2011 -0400
@@ -86,7 +86,7 @@
             e.NumberOfParticles = 1
             self.func(e, *args, **kwargs)
             mylog.debug("Preloading %s", e.requested)
-            self._preload([g for g in self._get_grid_objs()], e.requested,
+            self.comm.preload([g for g in self._get_grid_objs()], e.requested,
                           self._data_source.pf.h.io)
         if lazy_reader and not self.force_unlazy:
             return self._call_func_lazy(args, kwargs)
@@ -110,7 +110,7 @@
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            rv.append(self._par_combine_object(data,
+            rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
         


--- a/yt/data_objects/profiles.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 13:05:26 2011 -0400
@@ -81,7 +81,7 @@
 
     def _initialize_parallel(self, fields):
         g_objs = [g for g in self._get_grid_objs()]
-        self._preload(g_objs, self._get_dependencies(fields),
+        self.comm.preload(g_objs, self._get_dependencies(fields),
                       self._data_source.hierarchy.io)
 
     def _lazy_add_fields(self, fields, weight, accumulation):


--- a/yt/frontends/enzo/data_structures.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Wed Oct 19 13:05:26 2011 -0400
@@ -401,7 +401,7 @@
                     field_list = field_list.union(gf)
         else:
             field_list = None
-        field_list = self._mpi_bcast_pickled(field_list)
+        field_list = self.comm.mpi_bcast_pickled(field_list)
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:05:26 2011 -0400
@@ -770,7 +770,7 @@
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
     def merge_trees(self):
-        self.tree_dict = self._par_combine_object(self.tree_dict,
+        self.tree_dict = self.comm.par_combine_object(self.tree_dict,
                             datatype = "dict", op = "join")
 
     def rebuild_references(self):


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:05:26 2011 -0400
@@ -469,14 +469,14 @@
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
         self.comm.Barrier()
 
-    def _mpi_exit_test(self, data=False):
+    def mpi_exit_test(self, data=False):
         # data==True -> exit. data==False -> no exit
         mine, statuses = self._mpi_info_dict(data)
         if True in statuses.values():
             raise RuntimeError("Fatal error. Exiting.")
         return None
 
-    def _mpi_maxdict_dict(self, data):
+    def mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
         specificaly for a part of chainHOP.
@@ -544,7 +544,7 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
-    def _par_combine_object(self, data, op, datatype = None):
+    def par_combine_object(self, data, op, datatype = None):
         # op can be chosen from:
         #   cat
         #   join
@@ -626,15 +626,11 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def _mpi_bcast_pickled(self, data):
+    def mpi_bcast_pickled(self, data):
         data = self.comm.bcast(data, root=0)
         return data
 
-    def _should_i_write(self):
-        if not self._distributed: return True
-        return (self.comm == 0)
-
-    def _preload(self, grids, fields, io_handler):
+    def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
         # if so, we load *everything* that we need.  Use with some care.
         mylog.debug("Preloading %s from %s grids", fields, len(grids))


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 13:02:19 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 13:05:26 2011 -0400
@@ -100,7 +100,7 @@
                                       " not yet supported")
         if self.bricks is not None and source is None: return
         bricks = []
-        self._preload(self.source._grids, self.fields, self.pf.h.io)
+        self.comm.preload(self.source._grids, self.fields, self.pf.h.io)
         pbar = get_pbar("Partitioning ", len(self.source._grids))
         for i, g in enumerate(self.source._grids):
             pbar.update(i)


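The renamed par_combine_object and mpi_bcast_pickled keep their @parallel_passthrough decorator: in a serial run the data is handed back untouched. A compact sketch of that decorator plus an array "cat" combine using mpi4py's pickle-based allgather (signatures here are simplified, not yt's exact ones):

    import numpy as np

    def parallel_passthrough(func):
        # in serial, skip all MPI work and return the data unchanged
        def passage(self, data, *args, **kwargs):
            if not self._distributed:
                return data
            return func(self, data, *args, **kwargs)
        return passage

    class Communicator(object):
        def __init__(self, comm=None):
            self.comm = comm
            self._distributed = comm is not None

        @parallel_passthrough
        def par_combine_object(self, data, op="cat", datatype="array"):
            if datatype == "array" and op == "cat":
                pieces = self.comm.allgather(data)   # list of per-rank arrays
                return np.concatenate(pieces)
            raise NotImplementedError

    serial = Communicator()
    print(serial.par_combine_object(np.arange(3)))   # serial: returned as-is
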
http://bitbucket.org/yt_analysis/yt/changeset/fe1304bb98db/
changeset:   fe1304bb98db
branch:      yt
user:        samskillman
date:        2011-10-19 19:10:10
summary:     Moving call sites onto self.comm, working through the interface up to mpi_info_dict.
affected #:  7 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:10:10 2011 -0400
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allreduce(self._max_dens[self.id][0], op='max')
+        max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allreduce(value, op='sum')
+        value = self.comm.mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allreduce(my_mass, op='sum')
-        global_com = self._mpi_allreduce(my_com, op='sum')
+        global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
+        global_com = self.comm.mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allreduce(float(my_mass), op='sum')
+        global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allreduce(bv, op='sum')
+        global_bv = self.comm.mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allreduce(ss, op='sum')
+        global_ss = self.comm.mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allreduce(my_max, op='max')
+        return self.comm.mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allreduce(my_size, op='sum')
+        global_size = self.comm.mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allreduce(dist_min, op='min')
-        dist_max = self._mpi_allreduce(dist_max, op='max')
+        dist_min = self.comm.mpi_allreduce(dist_min, op='min')
+        dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_allreduce(self.mass_bins, op='sum')
+        self.mass_bins = self.comm.mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1480,7 +1480,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_allreduce(self.bulk_vel, op='sum')
+        self.bulk_vel = self.comm.mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1502,7 +1502,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_allreduce(rms_vel_temp, op='sum')
+        rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1936,7 +1936,7 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        total_mass = self._mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+        total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
                                          op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
@@ -1959,7 +1959,7 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allreduce(xp.size, op='sum')
+        n_parts = self.comm.mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
@@ -2113,9 +2113,9 @@
         if dm_only:
             select = self._get_dm_indices()
             total_mass = \
-                self._mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
         else:
-            total_mass = self._mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
+            total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2195,7 +2195,7 @@
             self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
+            n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:10:10 2011 -0400
@@ -285,19 +285,19 @@
         yt_counters("MPI stuff.")
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
         # Let's wait here to be absolutely sure that all the receive buffers
         # have been created before any sending happens!
         self.comm.barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_nonblocking_send(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_nonblocking_send(send_mass[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
@@ -778,16 +778,16 @@
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure all the receive buffers are set before continuing.
         self.comm.barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         self.__max_memory()
         so_far = 0
         for opp_neighbor in self.neighbors:
@@ -883,7 +883,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_allreduce(chainID_translate_map_local, op='min')
+            self.comm.mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -949,16 +949,16 @@
         # Set up the receving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure the recv buffers are set before continuing.
         self.comm.barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))
-            hooks.append(self._mpi_nonblocking_send(chainIDs, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         self.__max_memory()
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
@@ -1208,7 +1208,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_allreduce(lookup, op='min')
+        lookup = self.comm.mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1336,7 +1336,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_allreduce(max_dens_point, op='sum')
+        self.max_dens_point = self.comm.mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1391,9 +1391,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_allreduce(size, op='sum')
-        CoM_M = self._mpi_allreduce(CoM_M, op='sum')
-        self.Tot_M = self._mpi_allreduce(Tot_M, op='sum')
+        self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
+        CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1411,7 +1411,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_allreduce(max_radius, op='max')
+        self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
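
The hunks above all follow the same nonblocking exchange discipline: post every
receive, barrier so no send can arrive before its buffer exists, send, and only
touch the data after waiting on all requests. A minimal standalone sketch of
that pattern in plain mpi4py (not yt's communicator wrapper; the all-to-all
neighbor set and the buffer size are made up for illustration):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.rank, comm.size
    neighbors = [r for r in range(size) if r != rank]  # hypothetical neighbor set

    send_buf = np.full(16, float(rank))
    recv_bufs = dict((n, np.empty(16, dtype='float64')) for n in neighbors)

    hooks = []
    # Post every receive first so each buffer exists before any send can land.
    for n in neighbors:
        hooks.append(comm.Irecv([recv_bufs[n], MPI.DOUBLE], source=n, tag=0))
    # Make sure all tasks have their receive buffers in place.
    comm.Barrier()
    # Now send, and only use the received data once everything has completed.
    for n in neighbors:
        hooks.append(comm.Isend([send_buf, MPI.DOUBLE], dest=n, tag=0))
    MPI.Request.Waitall(hooks)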


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:10:10 2011 -0400
@@ -274,8 +274,8 @@
                 self._setup_recv_arrays()
                 self._send_arrays()
                 t0 = time.time()
-                self._mpi_Request_Waitall(self.send_hooks)
-                self._mpi_Request_Waitall(self.recv_hooks)
+                self.comm.mpi_Request_Waitall(self.send_hooks)
+                self.comm.mpi_Request_Waitall(self.recv_hooks)
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
@@ -364,7 +364,7 @@
         for task in xrange(self.size):
             if task == self.mine: continue
             self.recv_done[task] = na.zeros(1, dtype='int64')
-            self.done_hooks.append(self._mpi_nonblocking_recv(self.recv_done[task], \
+            self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
     def _send_done_to_root(self):
@@ -377,7 +377,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_nonblocking_send(self.send_done, \
+            self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -391,7 +391,7 @@
         """
         if self.mine == 0:
             # If other tasks aren't finished, this will return False.
-            status = self._mpi_Request_Testall(self.done_hooks)
+            status = self.comm.mpi_Request_Testall(self.done_hooks)
             # Convolve this with with root's status.
             status = status * (self.generated_points == self.total_values)
             if status == 1:
@@ -419,22 +419,22 @@
         self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         self.recv_gen_array = na.zeros(self.size, dtype='int64')
-        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_points, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
-        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_fields_vals, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
             (self.mine-1)%self.size, tag=20))
-        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_gen_array, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_gen_array, \
             (self.mine-1)%self.size, tag=40))
 
     def _send_arrays(self):
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_nonblocking_send(self.points,\
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_nonblocking_send(self.fields_vals,\
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_nonblocking_send(self.gen_array, \
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):
@@ -442,8 +442,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allreduce(fset.too_low, op='sum')
-            fset.too_high = self._mpi_allreduce(fset.too_high, op='sum')
+            fset.too_low = self.comm.mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self.comm.mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -453,7 +453,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_allreduce(fset.length_bin_hits[length], op='sum')
+                    self.comm.mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.
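
The two_point_functions hunks above implement a ring: each task receives from
(mine-1) % size and sends to (mine+1) % size with matching tags, then waits on
both sets of hooks before using the data. A rough mpi4py-only sketch of one
ring step (the array shape and tag value are arbitrary, not yt's actual ones):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.rank, comm.size

    send_points = np.random.random((8, 3))
    recv_points = np.empty_like(send_points)

    hooks = [
        comm.Irecv([recv_points, MPI.DOUBLE], source=(rank - 1) % size, tag=10),
        comm.Isend([send_points, MPI.DOUBLE], dest=(rank + 1) % size, tag=10),
    ]
    # Only touch recv_points after both requests have completed.
    MPI.Request.Waitall(hooks)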


--- a/yt/data_objects/data_containers.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 13:10:10 2011 -0400
@@ -1441,7 +1441,7 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allreduce(\
+            self[field] = self.comm.mpi_allreduce(\
                 self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
@@ -2230,7 +2230,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_allreduce(self[field], op='sum')
+            self[field] = self.comm.mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


--- a/yt/data_objects/profiles.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 13:10:10 2011 -0400
@@ -121,10 +121,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allreduce(self.__data[key], op='sum')
+            self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allreduce(self.__weight_data[key], op='sum')
-        self.__used = self._mpi_allreduce(self.__used, op='sum')
+            self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:10:10 2011 -0400
@@ -638,7 +638,7 @@
         io_handler.preload(grids, fields)
 
     @parallel_passthrough
-    def _mpi_allreduce(self, data, dtype=None, op='sum'):
+    def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
             if dtype is None:
@@ -658,28 +658,28 @@
     # Non-blocking stuff.
     ###
 
-    def _mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
+    def mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
         if not self._distributed: return -1
         if dtype is None: dtype = data.dtype
         mpi_type = get_mpi_type(dtype)
         return self.comm.Irecv([data, mpi_type], source, tag)
 
-    def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+    def mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
         if not self._distributed: return -1
         if dtype is None: dtype = data.dtype
         mpi_type = get_mpi_type(dtype)
         return self.comm.Isend([data, mpi_type], dest, tag)
 
-    def _mpi_Request_Waitall(self, hooks):
+    def mpi_Request_Waitall(self, hooks):
         if not self._distributed: return
         MPI.Request.Waitall(hooks)
 
-    def _mpi_Request_Waititer(self, hooks):
+    def mpi_Request_Waititer(self, hooks):
         for i in xrange(len(hooks)):
             req = MPI.Request.Waitany(hooks)
             yield req
 
-    def _mpi_Request_Testall(self, hooks):
+    def mpi_Request_Testall(self, hooks):
         """
         This returns False if any of the request hooks are un-finished,
         and True if they are all finished.
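
The common thread in this changeset is that analysis objects stop calling the
private _mpi_* mixin methods and instead go through a communicator attribute
whose methods are now public. A toy sketch of that delegation (the class names
and serial fallback are invented for illustration, not yt's real classes):

    import numpy as np

    class FakeCommunicator(object):
        """Stand-in communicator; yt's real one wraps MPI when available."""
        def mpi_allreduce(self, data, op='sum'):
            # Serial fallback: with a single task the reduction is the data itself.
            return data

    class ProfileLike(object):
        def __init__(self, comm):
            self.comm = comm          # injected communicator, as in the diffs
            self.data = np.ones(8)

        def finalize_parallel(self):
            # was: self._mpi_allreduce(...); now routed through self.comm
            self.data = self.comm.mpi_allreduce(self.data, op='sum')

    p = ProfileLike(FakeCommunicator())
    p.finalize_parallel()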


--- a/yt/visualization/streamlines.py	Wed Oct 19 13:05:26 2011 -0400
+++ b/yt/visualization/streamlines.py	Wed Oct 19 13:10:10 2011 -0400
@@ -145,8 +145,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self._mpi_allreduce(self.magnitudes, op='sum')
+        self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):


http://bitbucket.org/yt_analysis/yt/changeset/d89e2079ffdb/
changeset:   d89e2079ffdb
branch:      yt
user:        samskillman
date:        2011-10-19 19:13:14
summary:     Converting private parallel helpers to public comm methods, up through write_on_root.
affected #:  8 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:13:14 2011 -0400
@@ -1548,7 +1548,7 @@
                     bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                     rms_vel=self.rms_vel[index])
                 # I don't own this halo
-                self._do_not_claim_object(self._groups[index])
+                self.comm.do_not_claim_object(self._groups[index])
                 self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
                     self.max_dens_point[index][2], self.max_dens_point[index][3]]
                 index += 1
@@ -1561,7 +1561,7 @@
                 bulk_vel=self.bulk_vel[i], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[i])
             # This halo may be owned by many, including this task
-            self._claim_object(self._groups[index])
+            self.comm.claim_object(self._groups[index])
             self._max_dens[index] = [self.max_dens_point[i][0], self.max_dens_point[i][1], \
                 self.max_dens_point[i][2], self.max_dens_point[i][3]]
             cp += counts[i+1]
@@ -1574,7 +1574,7 @@
                 group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[index])
-            self._do_not_claim_object(self._groups[index])
+            self.comm.do_not_claim_object(self._groups[index])
             self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
                 self.max_dens_point[index][2], self.max_dens_point[index][3]]
             index += 1
@@ -1627,7 +1627,7 @@
                 max_dens[hi] = [max_dens_temp] + list(self._max_dens[halo.id])[1:4]
                 groups.append(self._halo_class(self, hi))
                 groups[-1].indices = halo.indices
-                self._claim_object(groups[-1])
+                self.comm.claim_object(groups[-1])
                 hi += 1
         del self._groups, self._max_dens # explicit >> implicit
         self._groups = groups
@@ -1640,7 +1640,7 @@
         # about processors and ownership and so forth.
         # _mpi_info_dict returns a dict of {proc: whatever} where whatever is
         # what is fed in on each proc.
-        mine, halo_info = self._mpi_info_dict(len(self))
+        mine, halo_info = self.comm.mpi_info_dict(len(self))
         nhalos = sum(halo_info.values())
         # Figure out our offset
         my_first_id = sum([v for k,v in halo_info.items() if k < mine])
@@ -1703,7 +1703,7 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        f = self._write_on_root(filename)
+        f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f)
 
     def write_particle_lists_txt(self, prefix):
@@ -1722,7 +1722,7 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        f = self._write_on_root("%s.txt" % prefix)
+        f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
     @parallel_blocking_call
@@ -1980,7 +1980,7 @@
         self._data_source.clear_data()
         del uni
         # Collect them on the root task.
-        mine, sizes = self._mpi_info_dict(n_random)
+        mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
             root_points = na.empty((tot_random, 3), dtype='float64')
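
The mpi_info_dict calls above all feed the same offset computation: every task
reports a local count, and each task's first global ID is the sum of the counts
from lower-ranked tasks. A plain-mpi4py approximation (allgather stands in for
yt's mpi_info_dict, and the per-task count here is fabricated):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    local_count = 10 + comm.rank                              # hypothetical per-task halo count
    halo_info = dict(enumerate(comm.allgather(local_count)))  # {rank: count}

    nhalos = sum(halo_info.values())
    my_first_id = sum(v for k, v in halo_info.items() if k < comm.rank)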


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:13:14 2011 -0400
@@ -75,7 +75,7 @@
         tasks are our geometric neighbors.
         """
         self.neighbors = set([])
-        self.mine, global_bounds = self._mpi_info_dict(self.bounds)
+        self.mine, global_bounds = self.comm.mpi_info_dict(self.bounds)
         my_LE, my_RE = self.bounds
         # Put the vertices into a big list, each row is
         # array[x,y,z, taskID]
@@ -199,7 +199,7 @@
         # lists us as their neighbor, we add them as our neighbor. This is 
         # probably not needed because the stuff above should be symmetric,
         # but it isn't a big issue.
-        self.mine, global_neighbors = self._mpi_info_dict(self.neighbors)
+        self.mine, global_neighbors = self.comm.mpi_info_dict(self.neighbors)
         for taskID in global_neighbors:
             if taskID == self.mine: continue
             if self.mine in global_neighbors[taskID]:
@@ -216,7 +216,7 @@
         """
         if round == 'first':
             max_pad = na.max(self.padding)
-            self.mine, self.global_padding = self._mpi_info_dict(max_pad)
+            self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
@@ -235,7 +235,7 @@
         temp_LE = LE - LE_padding
         temp_RE = RE + RE_padding
         expanded_bounds = (temp_LE, temp_RE)
-        self.mine, global_exp_bounds = self._mpi_info_dict(expanded_bounds)
+        self.mine, global_exp_bounds = self.comm.mpi_info_dict(expanded_bounds)
         send_real_indices = {}
         send_points = {}
         send_mass = {}
@@ -266,7 +266,7 @@
         del points, shift_points, mass, real_indices
         yt_counters("Picking padding data to send.")
         # Communicate the sizes to send.
-        self.mine, global_send_count = self._mpi_info_dict(send_size)
+        self.mine, global_send_count = self.comm.mpi_info_dict(send_size)
         del send_size
         # Initialize the arrays to receive data.
         yt_counters("Initalizing recv arrays.")
@@ -665,7 +665,7 @@
         """
         yt_counters("globally_assign_chainIDs")
         # First find out the number of chains on each processor.
-        self.mine, chain_info = self._mpi_info_dict(chain_count)
+        self.mine, chain_info = self.comm.mpi_info_dict(chain_count)
         self.nchains = sum(chain_info.values())
         # Figure out our offset.
         self.my_first_id = sum([v for k,v in chain_info.iteritems() if k < self.mine])
@@ -1483,7 +1483,7 @@
         self.density = self.density[:self.real_size]
         # We'll make this a global object, which can be used to write a text
         # file giving the names of hdf5 files the particles for each halo.
-        self.mine, self.I_own = self._mpi_info_dict(self.I_own)
+        self.mine, self.I_own = self.comm.mpi_info_dict(self.I_own)
         self.halo_taskmap = defaultdict(set)
         for taskID in self.I_own:
             for groupID in self.I_own[taskID]:


--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 13:13:14 2011 -0400
@@ -148,7 +148,7 @@
         # First the fit file.
         if fit:
             fitname = prefix + '-fit.dat'
-            fp = self._write_on_root(fitname)
+            fp = self.comm.write_on_root(fitname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
@@ -164,7 +164,7 @@
             fp.close()
         if self.mode == 'haloes' and haloes:
             haloname = prefix + '-haloes.dat'
-            fp = self._write_on_root(haloname)
+            fp = self.comm.write_on_root(haloname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:13:14 2011 -0400
@@ -306,7 +306,7 @@
                 file = open(self.database)
             hash = md5.md5(file.read()).hexdigest()
             file.close()
-            ignore, hashes = self._mpi_info_dict(hash)
+            ignore, hashes = self.comm.mpi_info_dict(hash)
             hashes = set(hashes.values())
             if len(hashes) == 1:
                 break


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:13:14 2011 -0400
@@ -622,7 +622,7 @@
         >>> tpf.write_out_means()
         """
         for fset in self._fsets:
-            fp = self._write_on_root(fn % fset.function.__name__)
+            fp = self.comm.write_on_root(fn % fset.function.__name__)
             fset._avg_bin_hits()
             line = "# length".ljust(sep)
             line += "count".ljust(sep)
@@ -690,7 +690,7 @@
         for fset in self._fsets:
             # Only operate on correlation functions.
             if fset.corr_norm == None: continue
-            fp = self._write_on_root("%s_correlation.txt" % fset.function.__name__)
+            fp = self.comm.write_on_root("%s_correlation.txt" % fset.function.__name__)
             line = "# length".ljust(sep)
             line += "\\xi".ljust(sep)
             fp.write(line + "\n")


--- a/yt/data_objects/data_containers.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 13:13:14 2011 -0400
@@ -1613,16 +1613,16 @@
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
-                    self._get_dependencies(fields))
+                    self.comm.get_dependencies(fields))
             self.comm.preload([g for g in self._get_grid_objs()],
-                          self._get_dependencies(fields), self.hierarchy.io)
+                          self.comm.get_dependencies(fields), self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
                 self.comm.preload([g for g in self._get_grid_objs()
                                  if g.Level == level],
-                              self._get_dependencies(fields), self.hierarchy.io)
+                              self.comm.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
@@ -2002,13 +2002,13 @@
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
-                    len(self.source._grids), self._get_dependencies(fields))
+                    len(self.source._grids), self.comm.get_dependencies(fields))
             self.comm.preload(self.source._grids,
-                          self._get_dependencies(fields), self.hierarchy.io)
+                          self.comm.get_dependencies(fields), self.hierarchy.io)
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
                 self.comm.preload(self.source.select_grids(level),
-                              self._get_dependencies(fields), self.hierarchy.io)
+                              self.comm.get_dependencies(fields), self.hierarchy.io)
             self.__calculate_overlap(level)
             my_coords, my_pdx, my_pdy, my_fields = \
                 self.__project_level(level, fields)


--- a/yt/data_objects/profiles.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 13:13:14 2011 -0400
@@ -81,7 +81,7 @@
 
     def _initialize_parallel(self, fields):
         g_objs = [g for g in self._get_grid_objs()]
-        self.comm.preload(g_objs, self._get_dependencies(fields),
+        self.comm.preload(g_objs, self.comm.get_dependencies(fields),
                       self._data_source.hierarchy.io)
 
     def _lazy_add_fields(self, fields, weight, accumulation):


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:10:10 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:13:14 2011 -0400
@@ -471,7 +471,7 @@
 
     def mpi_exit_test(self, data=False):
         # data==True -> exit. data==False -> no exit
-        mine, statuses = self._mpi_info_dict(data)
+        mine, statuses = self.comm.mpi_info_dict(data)
         if True in statuses.values():
             raise RuntimeError("Fatal error. Exiting.")
         return None
@@ -705,7 +705,7 @@
         if not self._distributed: return 0
         return self.comm.rank
 
-    def _mpi_info_dict(self, info):
+    def mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
         self.comm.barrier()
         data = None
@@ -720,24 +720,24 @@
         self.comm.barrier()
         return self.comm.rank, data
 
-    def _get_dependencies(self, fields):
+    def get_dependencies(self, fields):
         deps = []
         fi = self.pf.field_info
         for field in fields:
             deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
         return list(set(deps))
 
-    def _claim_object(self, obj):
+    def claim_object(self, obj):
         if not self._distributed: return
         obj._owner = self.comm.rank
         obj._distributed = True
 
-    def _do_not_claim_object(self, obj):
+    def do_not_claim_object(self, obj):
         if not self._distributed: return
         obj._owner = -1
         obj._distributed = True
 
-    def _write_on_root(self, fn):
+    def write_on_root(self, fn):
         if not self._distributed: return open(fn, "w")
         if self.comm.rank == 0:
             return open(fn, "w")
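
The write_on_root idiom above lets every task call .write() unconditionally:
the root gets a real file handle and everyone else gets a throwaway buffer. A
small sketch, assuming mpi4py and with io.StringIO standing in for the cStringIO
sink the real method returns:

    import io
    from mpi4py import MPI

    def write_on_root(comm, fn):
        # Root opens the real file; other ranks write into a discarded buffer.
        if comm.rank == 0:
            return open(fn, "w")
        return io.StringIO()

    comm = MPI.COMM_WORLD
    f = write_on_root(comm, "HopAnalysis.out")
    f.write("# halo catalog header\n")
    f.close()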


http://bitbucket.org/yt_analysis/yt/changeset/0493a0e200b3/
changeset:   0493a0e200b3
branch:      yt
user:        samskillman
date:        2011-10-19 19:16:51
summary:     Getting rid of explicit module-level lookups of the parallel setup (rank and size).
affected #:  1 file (-1 bytes)

--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:13:14 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:16:51 2011 -0400
@@ -35,8 +35,6 @@
 from yt.config import ytcfg
 from time import time
 import h5py
-my_rank = ytcfg.getint("yt", "__parallel_rank")
-nprocs = ytcfg.getint("yt", "__parallel_size")
 
 def corner_bounds(split_dim, split, current_left = None, current_right = None):
     r"""
@@ -293,7 +291,7 @@
         self.pf = pf
         self.sdx = self.pf.h.get_smallest_dx()
         self._id_offset = pf.h.grids[0]._id_offset
-        if nprocs > len(pf.h.grids):
+        if self.comm.size > len(pf.h.grids):
             mylog.info('Parallel rendering requires that the number of \n \
             grids in the dataset is greater or equal to the number of \n \
             processors.  Reduce number of processors.')
@@ -379,7 +377,7 @@
 
         # If the full amr kD-tree is requested, merge the results from
         # the parallel build.
-        if merge_trees and nprocs > 1:
+        if merge_trees and self.comm.size > 1:
             self.join_parallel_trees()            
             self.my_l_corner = self.domain_left_edge
             self.my_r_corner = self.domain_right_edge
@@ -752,11 +750,11 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(nprocs))
-        for i in range(2**nprocs):
+        par_tree_depth = long(na.log2(self.comm.size))
+        for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
-                # There are nprocs nodes that meet this criteria
-                if (i+1-nprocs) != my_rank:
+                # There are self.comm.size nodes that meet this criteria
+                if (i+1-self.comm.size) != self.comm.rank:
                     self.tree_dict.pop(i)
                     continue
         for node in self.tree_dict.itervalues():
@@ -989,9 +987,9 @@
         current_node.grids = grids
         current_node.l_corner = l_corner
         current_node.r_corner = r_corner
-        # current_node.owner = my_rank
+        # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(nprocs))
+        par_tree_depth = int(na.log2(self.comm.size))
         anprocs = 2**par_tree_depth
         while current_node is not None:
             # If we don't have any grids, that means we are revisiting
@@ -1004,7 +1002,7 @@
             # This is where all the domain decomposition occurs.  
             if ((current_node.id + 1)>>par_tree_depth) == 1:
                 # There are anprocs nodes that meet this criteria
-                if (current_node.id+1-anprocs) == my_rank:
+                if (current_node.id+1-anprocs) == self.comm.rank:
                     # I own this shared node
                     self.my_l_corner = current_node.l_corner
                     self.my_r_corner = current_node.r_corner
@@ -1143,12 +1141,12 @@
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(nprocs))
+        rounds = int(na.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+my_rank)
+        path = na.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1162,7 +1160,7 @@
             except:
                 rounds = i-1
         for thisround in range(rounds,0,-1):
-            #print my_rank, 'my node', my_node_id
+            #print self.comm.rank, 'my node', my_node_id
             parent = my_node.parent
             #print parent['split_ax'], parent['split_pos']
             if viewpoint[parent.split_ax] <= parent.split_pos:
@@ -1175,9 +1173,9 @@
             # mylog.debug('front owner %i back owner %i parent owner %i'%( front.owner, back.owner, parent.owner))
                 
             # Send the images around
-            if front.owner == my_rank:
+            if front.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
+                    mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm._recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
@@ -1191,17 +1189,17 @@
                         self.image[:,:,i  ] = self.image[:,:,i  ] + ta * arr2[:,:,i  ]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
-                    self.comm._send_array(self.image.ravel(), back.owner, tag=my_rank)
+                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
+                    self.comm._send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
 
                 
-            if back.owner == my_rank:
+            if back.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
-                    self.comm._send_array(self.image.ravel(), front.owner, tag=my_rank)
+                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank, front.owner))
+                    self.comm._send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
+                    mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm._recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
@@ -1216,7 +1214,7 @@
                         # image[:,:,i+3] = arr2[:,:,i+3] + ta * image[:,:,i+3]
             # Set parent owner to back owner
             # my_node = (my_node-1)>>1
-            if my_rank == my_node.parent.owner: 
+            if self.comm.rank == my_node.parent.owner: 
                 my_node = my_node.parent
             else:
                 break
@@ -1224,8 +1222,8 @@
     def store_kd_bricks(self, fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
-        if my_rank != 0:
-            self.comm._recv_array(my_rank-1, tag=my_rank-1)
+        if self.comm.rank != 0:
+            self.comm._recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1237,14 +1235,14 @@
                     except:
                         pass
         f.close()
-        if my_rank != (nprocs-1):
-            self.comm._send_array([0],my_rank+1, tag=my_rank)
+        if self.comm.rank != (self.comm.size-1):
+            self.comm._send_array([0],self.comm.rank+1, tag=self.comm.rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
-        if my_rank != 0:
-            self.comm._recv_array(my_rank-1, tag=my_rank-1)
+        if self.comm.rank != 0:
+            self.comm._recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():
@@ -1267,8 +1265,8 @@
             f.close()
         except:
             pass
-        if my_rank != (nprocs-1):
-            self.comm._send_array([0],my_rank+1, tag=my_rank)
+        if self.comm.rank != (self.comm.size-1):
+            self.comm._send_array([0],self.comm.rank+1, tag=self.comm.rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()
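
With the module-level my_rank/nprocs globals gone, the decomposition parameters
come straight from the communicator. A sketch of the depth/ownership arithmetic
the kD-tree code uses (illustrative only; assumes a power-of-two task count):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    par_tree_depth = int(np.log2(comm.size))
    anprocs = 2 ** par_tree_depth            # shared nodes at the parallel cut

    owned_nodes = []
    for node_id in range(2 * anprocs - 1):   # a full binary tree of that depth
        if ((node_id + 1) >> par_tree_depth) == 1:
            # Exactly anprocs nodes satisfy this; each maps to one task.
            if (node_id + 1 - anprocs) == comm.rank:
                owned_nodes.append(node_id)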


http://bitbucket.org/yt_analysis/yt/changeset/0cfad3623a7c/
changeset:   0cfad3623a7c
branch:      yt
user:        MatthewTurk
date:        2011-10-19 19:33:46
summary:     Replacing __parallel_rank and __parallel_size with the appropriate
__[global|topcomm]_parallel_[rank|size] config options.
affected #:  8 files (-1 bytes)

--- a/yt/analysis_modules/light_cone/halo_mask.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/analysis_modules/light_cone/halo_mask.py	Wed Oct 19 13:33:46 2011 -0400
@@ -45,14 +45,14 @@
         light_cone_mask.append(_make_slice_mask(slice, halo_list, pixels))
 
     # Write out cube of masks from each slice.
-    if cube_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+    if cube_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         mylog.info("Saving halo mask cube to %s." % cube_file)
         output = h5py.File(cube_file, 'a')
         output.create_dataset('haloMaskCube', data=na.array(light_cone_mask))
         output.close()
 
     # Write out final mask.
-    if mask_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+    if mask_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         # Final mask is simply the product of the mask from each slice.
         mylog.info("Saving halo mask to %s." % mask_file)
         finalMask = na.ones(shape=(pixels, pixels))
@@ -76,7 +76,7 @@
         haloMap.extend(_make_slice_halo_map(slice, halo_list))
 
     # Write out file.
-    if ytcfg.getint("yt", "__parallel_rank") == 0:
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         mylog.info("Saving halo map to %s." % map_file)
         f = open(map_file, 'w')
         f.write("#z       x         y        M [Msun]  R [Mpc]   R [image]\n")


--- a/yt/analysis_modules/light_cone/light_cone.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Wed Oct 19 13:33:46 2011 -0400
@@ -108,7 +108,7 @@
         self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
                           self.image_resolution_in_arcseconds)
 
-        if ytcfg.getint("yt", "__parallel_rank") == 0:
+        if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
             # Create output directory.
             if (os.path.exists(self.output_dir)):
                 if not(os.path.isdir(self.output_dir)):
@@ -243,7 +243,7 @@
         else:
             halo_mask_cube = light_cone_halo_mask(self, mask_file=mask_file, **kwargs)
             # Collapse cube into final mask.
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
+            if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
                 self.halo_mask = na.ones(shape=(self.pixels, self.pixels), dtype=bool)
                 for mask in halo_mask_cube:
                     self.halo_mask *= mask
@@ -302,7 +302,7 @@
             output['object'].parameters.update(self.set_parameters)
             frb = _light_cone_projection(output, field, self.pixels, 
                                          weight_field=weight_field, node=node)
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
+            if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
                 if save_slice_images:
                     write_image(na.log10(frb[field]), "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
@@ -342,7 +342,7 @@
             if (q < len(self.light_cone_solution) - 1):
                 del output['object']
 
-        if ytcfg.getint("yt", "__parallel_rank") == 0:
+        if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
             # Add up slices to make light cone projection.
             if (weight_field is None):
                 lightConeProjection = sum(self.projection_stack)


--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Wed Oct 19 13:33:46 2011 -0400
@@ -88,7 +88,7 @@
                                            field_cuts=these_field_cuts, node_name=node_name)
 
     # If parallel: all the processes have the whole projection object, but we only need to do the tiling, shifting, and cutting once.
-    if ytcfg.getint("yt", "__parallel_rank") == 0:
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
 
         # 2. The Tile Problem
         # Tile projection to specified width.


--- a/yt/config.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/config.py	Wed Oct 19 13:33:46 2011 -0400
@@ -38,8 +38,10 @@
     inline = 'False',
     __withinreason = 'False',
     __parallel = 'False',
-    __parallel_rank = '0',
-    __parallel_size = '1',
+    __global_parallel_rank = '0',
+    __global_parallel_size = '1',
+    __topcomm_parallel_rank = '0',
+    __topcomm_parallel_size = '1',
     storeparameterfiles = 'True',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',


--- a/yt/funcs.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/funcs.py	Wed Oct 19 13:33:46 2011 -0400
@@ -187,12 +187,13 @@
        def some_root_only_function(...):
 
     """
+    from yt.config import ytcfg
     @wraps(func)
-    def donothing(*args, **kwargs):
-        return
-    from yt.config import ytcfg
-    if ytcfg.getint("yt","__parallel_rank") > 0: return donothing
-    return func
+    def check_parallel_rank(*args, **kwargs):
+        if ytcfg.getint("yt","__topcomm_parallel_rank") > 0:
+            return 
+        return func(*args, **kwargs)
+    return check_parallel_rank
 
 def deprecate(func):
     """
@@ -313,7 +314,7 @@
     from yt.config import ytcfg
     if ytcfg.getboolean("yt","suppressStreamLogging"):
         return DummyProgressBar()
-    elif ytcfg.getboolean("yt", "__parallel"):
+    elif ytcfg.getboolean("yt", "__topcomm_parallel"):
         return ParallelProgressBar(title, maxval)
     elif "SAGE_ROOT" in os.environ:
         try:
@@ -341,9 +342,13 @@
     handed back.
     """
     from yt.config import ytcfg
+    if kwargs.pop("global_rootonly", False):
+        cfg_option = "__global_parallel_rank"
+    else:
+        cfg_option = "__topcomm_parallel_rank"
     if not ytcfg.getboolean("yt","__parallel"):
         return func(*args,**kwargs)
-    if ytcfg.getint("yt","__parallel_rank") > 0: return
+    if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
 #
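
The funcs.py hunk above changes rootonly from deciding at decoration time to
checking the rank on every call, so functions defined before the parallel setup
is configured still behave correctly. A standalone sketch of that call-time
check (the rank lookup is injected here instead of read from ytcfg, purely for
illustration):

    from functools import wraps

    def rootonly(func, get_rank=lambda: 0):
        @wraps(func)
        def check_parallel_rank(*args, **kwargs):
            # Decide at call time, so a rank set after import is honored.
            if get_rank() > 0:
                return None
            return func(*args, **kwargs)
        return check_parallel_rank

    @rootonly
    def report(msg):
        return msg

    report("only the root task gets a return value")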


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:33:46 2011 -0400
@@ -51,8 +51,8 @@
     if parallel_capable:
         mylog.info("Parallel computation enabled: %s / %s",
                    MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__parallel_size"] = str(MPI.COMM_WORLD.size)
+        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
         ytcfg["yt","__parallel"] = "True"
         if exe_name == "embed_enzo" or \
             ("_parallel" in dir(sys) and sys._parallel == True):
@@ -131,7 +131,7 @@
         if hasattr(gs[0], 'proc_num'):
             # This one sort of knows about MPI, but not quite
             self._objs = [g for g in gs if g.proc_num ==
-                          ytcfg.getint('yt','__parallel_rank')]
+                          ytcfg.getint('yt','__topcomm_parallel_rank')]
             self._use_all = True
         else:
             self._objs = gs
@@ -866,7 +866,8 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    communication_system.communicators.append(Communicator(MPI.COMM_WORLD))
+    ranks = na.arange(MPI.COMM_WORLD.size)
+    communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
     comm = None


--- a/yt/utilities/performance_counters.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/utilities/performance_counters.py	Wed Oct 19 13:33:46 2011 -0400
@@ -125,8 +125,8 @@
     def write_out(self, filename_prefix):
         if ytcfg.getboolean("yt","__parallel"):
             pfn = "%s_%03i_%03i" % (filename_prefix,
-                     ytcfg.getint("yt", "__parallel_rank"),
-                    ytcfg.getint("yt", "__parallel_size"))
+                     ytcfg.getint("yt", "__global_parallel_rank"),
+                    ytcfg.getint("yt", "__global_parallel_size"))
         else:
             pfn = "%s" % (filename_prefix)
         for n, p in sorted(self.profilers.items()):


--- a/yt/utilities/rpdb.py	Wed Oct 19 13:16:51 2011 -0400
+++ b/yt/utilities/rpdb.py	Wed Oct 19 13:33:46 2011 -0400
@@ -53,8 +53,8 @@
 
 def rpdb_excepthook(exc_type, exc, tb):
     traceback.print_exception(exc_type, exc, tb)
-    task = ytcfg.getint("yt", "__parallel_rank")
-    size = ytcfg.getint("yt", "__parallel_size")
+    task = ytcfg.getint("yt", "__global_parallel_rank")
+    size = ytcfg.getint("yt", "__global_parallel_size")
     print "Starting RPDB server on task %s ; connect with 'yt rpdb %s'" \
             % (task,task)
     handler = pdb_handler(tb)
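
After this changeset, code that needs the parallel layout reads one of the two
new option pairs rather than the old __parallel_rank/__parallel_size. A minimal
usage sketch (assuming a yt checkout on the path; the root-only work is left as
a placeholder):

    from yt.config import ytcfg

    global_rank = ytcfg.getint("yt", "__global_parallel_rank")
    topcomm_rank = ytcfg.getint("yt", "__topcomm_parallel_rank")

    if topcomm_rank == 0:
        # Work that only the root of the current top-level communicator
        # should do, e.g. writing a halo mask file.
        pass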


http://bitbucket.org/yt_analysis/yt/changeset/87a0bf46fec5/
changeset:   87a0bf46fec5
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:02:06
summary:     Changed alltoallv_array.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 12:57:07 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:02:06 2011 -0400
@@ -584,7 +584,7 @@
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
-                rv = self._alltoallv_array(dd, arr_size, offsets, sizes)
+                rv = self.comm.alltoallv_array(dd, arr_size, offsets, sizes)
                 data[key] = rv
             return data
         elif datatype == "array" and op == "cat":
@@ -613,7 +613,7 @@
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
-            data = self._alltoallv_array(data, arr_size, offsets, sizes)
+            data = self.comm.alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
             if self.comm.rank == 0:
@@ -848,11 +848,11 @@
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
 
-    def _alltoallv_array(self, send, total_size, offsets, sizes):
+    def alltoallv_array(self, send, total_size, offsets, sizes):
         if len(send.shape) > 1:
             recv = []
             for i in range(send.shape[0]):
-                recv.append(self._alltoallv_array(send[i,:].copy(), 
+                recv.append(self.comm.alltoallv_array(send[i,:].copy(), 
                                                   total_size, offsets, sizes))
             recv = na.array(recv)
             return recv


http://bitbucket.org/yt_analysis/yt/changeset/5f9ab9fc701e/
changeset:   5f9ab9fc701e
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:04:34
summary:     Fixed recv_array.
affected #:  2 files (-1 bytes)

--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:02:06 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:04:34 2011 -0400
@@ -1178,7 +1178,7 @@
             if front.owner == my_rank:
                 if front.owner == parent.owner:
                     mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
-                    arr2 = self.comm._recv_array(back.owner, tag=back.owner).reshape(
+                    arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1202,7 +1202,7 @@
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
-                    arr2 = self.comm._recv_array(front.owner, tag=front.owner).reshape(
+                    arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1225,7 +1225,7 @@
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
         if my_rank != 0:
-            self.comm._recv_array(my_rank-1, tag=my_rank-1)
+            self.comm.recv_array(my_rank-1, tag=my_rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1244,7 +1244,7 @@
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
         if my_rank != 0:
-            self.comm._recv_array(my_rank-1, tag=my_rank-1)
+            self.comm.recv_array(my_rank-1, tag=my_rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:02:06 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:04:34 2011 -0400
@@ -839,7 +839,7 @@
         self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
         del tmp
 
-    def _recv_array(self, source, tag = 0):
+    def recv_array(self, source, tag = 0):
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)


http://bitbucket.org/yt_analysis/yt/changeset/ba89462c0e2e/
changeset:   ba89462c0e2e
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:09:08
summary:     Fixed send_array.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:04:34 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:09:08 2011 -0400
@@ -1192,13 +1192,13 @@
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
-                    self.comm._send_array(self.image.ravel(), back.owner, tag=my_rank)
+                    self.comm.send_array(self.image.ravel(), back.owner, tag=my_rank)
 
                 
             if back.owner == my_rank:
                 if front.owner == parent.owner:
                     mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
-                    self.comm._send_array(self.image.ravel(), front.owner, tag=my_rank)
+                    self.comm.send_array(self.image.ravel(), front.owner, tag=my_rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
@@ -1238,7 +1238,7 @@
                         pass
         f.close()
         if my_rank != (nprocs-1):
-            self.comm._send_array([0],my_rank+1, tag=my_rank)
+            self.comm.send_array([0],my_rank+1, tag=my_rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
@@ -1268,7 +1268,7 @@
         except:
             pass
         if my_rank != (nprocs-1):
-            self.comm._send_array([0],my_rank+1, tag=my_rank)
+            self.comm.send_array([0],my_rank+1, tag=my_rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()
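
store_kd_bricks/load_kd_bricks above serialize access to one HDF5 file by
passing a token down the ranks: wait for rank-1, do the I/O, hand off to
rank+1. A bare-bones mpi4py version of that hand-off (the file I/O itself is
left as a placeholder):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.rank, comm.size

    if rank != 0:
        # Wait for the go-ahead token from the previous task.
        comm.recv(source=rank - 1, tag=rank - 1)

    # ... exclusive access to the shared bricks file would happen here ...

    if rank != size - 1:
        # Pass the token on so the next task may open the file.
        comm.send(0, dest=rank + 1, tag=rank)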


http://bitbucket.org/yt_analysis/yt/changeset/8b9eef9049f2/
changeset:   8b9eef9049f2
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:09:43
summary:     Fixed one more send_array.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:09:08 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:09:43 2011 -0400
@@ -828,7 +828,7 @@
         return qt
 
 
-    def _send_array(self, arr, dest, tag = 0):
+    def send_array(self, arr, dest, tag = 0):
         if not isinstance(arr, na.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)


http://bitbucket.org/yt_analysis/yt/changeset/a0eabbd82a9b/
changeset:   a0eabbd82a9b
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:14:08
summary:     Fixed a few more communicator functions.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:09:43 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:14:08 2011 -0400
@@ -584,7 +584,7 @@
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
-                rv = self.comm.alltoallv_array(dd, arr_size, offsets, sizes)
+                rv = self.alltoallv_array(dd, arr_size, offsets, sizes)
                 data[key] = rv
             return data
         elif datatype == "array" and op == "cat":
@@ -613,7 +613,7 @@
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
-            data = self.comm.alltoallv_array(data, arr_size, offsets, sizes)
+            data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
             if self.comm.rank == 0:
@@ -759,7 +759,7 @@
         if not obj._distributed: return True
         return (obj._owner == self.comm.rank)
 
-    def _send_quadtree(self, target, buf, tgd, args):
+    def send_quadtree(self, target, buf, tgd, args):
         sizebuf = na.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
@@ -767,7 +767,7 @@
         self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
-    def _recv_quadtree(self, target, tgd, args):
+    def recv_quadtree(self, target, tgd, args):
         sizebuf = na.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
         buf = [na.empty((sizebuf[0],), 'int32'),
@@ -796,18 +796,18 @@
                 target = (rank & ~mask) % size
                 #print "SENDING FROM %02i to %02i" % (rank, target)
                 buf = qt.tobuffer()
-                self._send_quadtree(target, buf, tgd, args)
-                #qt = self._recv_quadtree(target, tgd, args)
+                self.send_quadtree(target, buf, tgd, args)
+                #qt = self.recv_quadtree(target, tgd, args)
             else:
                 target = (rank | mask)
                 if target < size:
                     #print "RECEIVING FROM %02i on %02i" % (target, rank)
-                    buf = self._recv_quadtree(target, tgd, args)
+                    buf = self.recv_quadtree(target, tgd, args)
                     qto = QuadTree(tgd, args[2])
                     qto.frombuffer(*buf)
                     merge_quadtrees(qt, qto)
                     del qto
-                    #self._send_quadtree(target, qt, tgd, args)
+                    #self.send_quadtree(target, qt, tgd, args)
             mask <<= 1
 
         if rank == 0:
@@ -852,8 +852,8 @@
         if len(send.shape) > 1:
             recv = []
             for i in range(send.shape[0]):
-                recv.append(self.comm.alltoallv_array(send[i,:].copy(), 
-                                                  total_size, offsets, sizes))
+                recv.append(self.alltoallv_array(send[i,:].copy(), 
+                                                 total_size, offsets, sizes))
             recv = na.array(recv)
             return recv
         offset = offsets[self.comm.rank]
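
The quadtree merge above walks a binary tree over the ranks: on each round a
task either sends its partial result to a partner with the masked bit cleared
or receives and merges, until rank 0 holds everything. A plain mpi4py reduction
with the same mask walk (array sums stand in for merge_quadtrees):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.rank, comm.size
    data = np.ones(4) * rank

    mask = 1
    while mask < size:
        if rank & mask:
            # This round I am a sender; after handing off I drop out.
            comm.Send([data, MPI.DOUBLE], dest=(rank & ~mask) % size)
            break
        target = rank | mask
        if target < size:
            buf = np.empty_like(data)
            comm.Recv([buf, MPI.DOUBLE], source=target)
            data += buf                      # the "merge" step
        mask <<= 1

    # After the loop, rank 0 holds the fully reduced result.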


http://bitbucket.org/yt_analysis/yt/changeset/97383cf4c886/
changeset:   97383cf4c886
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:15:29
summary:     Fixed is_mine.
affected #:  2 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:14:08 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:15:29 2011 -0400
@@ -1748,7 +1748,7 @@
         fn = "%s.h5" % self._get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
-            if not self._is_mine(halo): continue
+            if not self.comm.is_mine(halo): continue
             halo.write_particle_list(f)
 
     def dump(self, basename="HopAnalysis"):


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:14:08 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:15:29 2011 -0400
@@ -755,7 +755,7 @@
         else:
             return "%s_%04i" % (prefix, rank)
 
-    def _is_mine(self, obj):
+    def is_mine(self, obj):
         if not obj._distributed: return True
         return (obj._owner == self.comm.rank)
 

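Making is_mine() public only tells half the story; it pairs with the claim_object()/do_not_claim_object() calls converted in the merge changeset further down. A toy sketch of the implied ownership protocol follows: is_mine() mirrors the two lines above, while the bodies of the two claim methods are assumptions for illustration only.

    class ToyComm(object):
        def __init__(self, rank):
            self.rank = rank

        def claim_object(self, obj):          # assumed: record myself as owner
            obj._distributed = True
            obj._owner = self.rank

        def do_not_claim_object(self, obj):   # assumed: some other rank owns it
            obj._distributed = True
            obj._owner = -1

        def is_mine(self, obj):               # mirrors Communicator.is_mine()
            if not obj._distributed:
                return True
            return obj._owner == self.rank

    class Halo(object):
        _distributed = False
        _owner = None

    comm = ToyComm(rank=0)
    mine, theirs = Halo(), Halo()
    comm.claim_object(mine)
    comm.do_not_claim_object(theirs)
    assert comm.is_mine(mine) and not comm.is_mine(theirs)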

http://bitbucket.org/yt_analysis/yt/changeset/1b659ae2020e/
changeset:   1b659ae2020e
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:17:13
summary:     Fixed get_filename.
affected #:  2 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:15:29 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:17:13 2011 -0400
@@ -1240,11 +1240,11 @@
             if group.tasks is not None:
                 fn = ""
                 for task in group.tasks:
-                    fn += "%s.h5 " % self._get_filename(prefix, rank=task)
+                    fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task)
             elif self._distributed:
-                fn = "%s.h5" % self._get_filename(prefix, rank=group._owner)
+                fn = "%s.h5" % self.comm.get_filename(prefix, rank=group._owner)
             else:
-                fn = "%s.h5" % self._get_filename(prefix)
+                fn = "%s.h5" % self.comm.get_filename(prefix)
             gn = "Halo%08i" % (group.id)
             f.write("%s %s\n" % (gn, fn))
             f.flush()
@@ -1745,7 +1745,7 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        fn = "%s.h5" % self._get_filename(prefix)
+        fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
             if not self.comm.is_mine(halo): continue


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:15:29 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:17:13 2011 -0400
@@ -748,7 +748,7 @@
         else:
             return cStringIO.StringIO()
 
-    def _get_filename(self, prefix, rank=None):
+    def get_filename(self, prefix, rank=None):
         if not self._distributed: return prefix
         if rank == None:
             return "%s_%04i" % (prefix, self.comm.rank)
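
The net effect is that per-rank output names are built through the communicator: a serial run keeps the bare prefix, a parallel run appends a zero-padded rank (either the caller-supplied one or the communicator's own). A standalone illustration of the naming scheme, no MPI required; the prefix and rank values are made up.

    def get_filename(prefix, my_rank, distributed=True, rank=None):
        # same logic as Communicator.get_filename() above
        if not distributed:
            return prefix
        return "%s_%04i" % (prefix, my_rank if rank is None else rank)

    print(get_filename("halo-parts", my_rank=7))                     # halo-parts_0007
    print(get_filename("halo-parts", my_rank=7, rank=12))            # halo-parts_0012
    print(get_filename("halo-parts", my_rank=7, distributed=False))  # halo-parts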


http://bitbucket.org/yt_analysis/yt/changeset/98b35f4c6afb/
changeset:   98b35f4c6afb
branch:      yt
user:        brittonsmith
date:        2011-10-19 19:23:17
summary:     Merged.
affected #:  15 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:23:17 2011 -0400
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allreduce(self._max_dens[self.id][0], op='max')
+        max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allreduce(value, op='sum')
+        value = self.comm.mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allreduce(my_mass, op='sum')
-        global_com = self._mpi_allreduce(my_com, op='sum')
+        global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
+        global_com = self.comm.mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allreduce(float(my_mass), op='sum')
+        global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allreduce(bv, op='sum')
+        global_bv = self.comm.mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allreduce(ss, op='sum')
+        global_ss = self.comm.mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allreduce(my_max, op='max')
+        return self.comm.mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allreduce(my_size, op='sum')
+        global_size = self.comm.mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allreduce(dist_min, op='min')
-        dist_max = self._mpi_allreduce(dist_max, op='max')
+        dist_min = self.comm.mpi_allreduce(dist_min, op='min')
+        dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_allreduce(self.mass_bins, op='sum')
+        self.mass_bins = self.comm.mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1412,7 +1412,7 @@
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
-        self._mpi_exit_test(exit)
+        self.comm.mpi_exit_test(exit)
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
             self.particle_fields["particle_position_x"] / self.old_period[0],
@@ -1480,7 +1480,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_allreduce(self.bulk_vel, op='sum')
+        self.bulk_vel = self.comm.mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1502,7 +1502,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_allreduce(rms_vel_temp, op='sum')
+        rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1548,7 +1548,7 @@
                     bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                     rms_vel=self.rms_vel[index])
                 # I don't own this halo
-                self._do_not_claim_object(self._groups[index])
+                self.comm.do_not_claim_object(self._groups[index])
                 self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
                     self.max_dens_point[index][2], self.max_dens_point[index][3]]
                 index += 1
@@ -1561,7 +1561,7 @@
                 bulk_vel=self.bulk_vel[i], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[i])
             # This halo may be owned by many, including this task
-            self._claim_object(self._groups[index])
+            self.comm.claim_object(self._groups[index])
             self._max_dens[index] = [self.max_dens_point[i][0], self.max_dens_point[i][1], \
                 self.max_dens_point[i][2], self.max_dens_point[i][3]]
             cp += counts[i+1]
@@ -1574,7 +1574,7 @@
                 group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[index])
-            self._do_not_claim_object(self._groups[index])
+            self.comm.do_not_claim_object(self._groups[index])
             self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
                 self.max_dens_point[index][2], self.max_dens_point[index][3]]
             index += 1
@@ -1627,7 +1627,7 @@
                 max_dens[hi] = [max_dens_temp] + list(self._max_dens[halo.id])[1:4]
                 groups.append(self._halo_class(self, hi))
                 groups[-1].indices = halo.indices
-                self._claim_object(groups[-1])
+                self.comm.claim_object(groups[-1])
                 hi += 1
         del self._groups, self._max_dens # explicit >> implicit
         self._groups = groups
@@ -1640,7 +1640,7 @@
         # about processors and ownership and so forth.
         # _mpi_info_dict returns a dict of {proc: whatever} where whatever is
         # what is fed in on each proc.
-        mine, halo_info = self._mpi_info_dict(len(self))
+        mine, halo_info = self.comm.mpi_info_dict(len(self))
         nhalos = sum(halo_info.values())
         # Figure out our offset
         my_first_id = sum([v for k,v in halo_info.items() if k < mine])
@@ -1703,7 +1703,7 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        f = self._write_on_root(filename)
+        f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f)
 
     def write_particle_lists_txt(self, prefix):
@@ -1722,7 +1722,7 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        f = self._write_on_root("%s.txt" % prefix)
+        f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
     @parallel_blocking_call
@@ -1849,7 +1849,7 @@
         topbounds = na.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         # also get the total mass of particles
         yt_counters("Reading Data")
@@ -1859,12 +1859,12 @@
         if ytcfg.getboolean("yt","inline") == False and \
             resize and self.comm.size != 1 and subvolume is None:
             random.seed(self.comm.rank)
-            cut_list = self._partition_hierarchy_3d_bisection_list()
+            cut_list = self.comm.partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
             if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
-            self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
+            self.bucket_bounds = self.comm.mpi_bcast_pickled(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
@@ -1936,7 +1936,7 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        total_mass = self._mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+        total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
                                          op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
@@ -1945,7 +1945,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
-                self._partition_hierarchy_3d(ds=self._data_source,
+                self.comm.partition_hierarchy_3d(ds=self._data_source,
                 padding=0.)
         self.bounds = (LE, RE)
         (LE_padding, RE_padding) = self.padding
@@ -1959,7 +1959,7 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allreduce(xp.size, op='sum')
+        n_parts = self.comm.mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
@@ -1980,7 +1980,7 @@
         self._data_source.clear_data()
         del uni
         # Collect them on the root task.
-        mine, sizes = self._mpi_info_dict(n_random)
+        mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
             root_points = na.empty((tot_random, 3), dtype='float64')
@@ -1988,7 +1988,7 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._par_combine_object(my_points[0],
+        root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
         del my_points
         if mine == 0:
@@ -2108,14 +2108,14 @@
         # a small part is actually going to be used.
         self.padding = 0.0
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+            self.comm.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
         if dm_only:
             select = self._get_dm_indices()
             total_mass = \
-                self._mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
         else:
-            total_mass = self._mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
+            total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2124,7 +2124,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         self.padding = padding #* pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds = self._data_source,
+            self.comm.partition_hierarchy_3d(ds = self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary
@@ -2192,10 +2192,10 @@
         self.padding = 0.0 #* pf["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
+            n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]
@@ -2210,7 +2210,7 @@
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.comm.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 13:23:17 2011 -0400
@@ -75,7 +75,7 @@
         tasks are our geometric neighbors.
         """
         self.neighbors = set([])
-        self.mine, global_bounds = self._mpi_info_dict(self.bounds)
+        self.mine, global_bounds = self.comm.mpi_info_dict(self.bounds)
         my_LE, my_RE = self.bounds
         # Put the vertices into a big list, each row is
         # array[x,y,z, taskID]
@@ -199,7 +199,7 @@
         # lists us as their neighbor, we add them as our neighbor. This is 
         # probably not needed because the stuff above should be symmetric,
         # but it isn't a big issue.
-        self.mine, global_neighbors = self._mpi_info_dict(self.neighbors)
+        self.mine, global_neighbors = self.comm.mpi_info_dict(self.neighbors)
         for taskID in global_neighbors:
             if taskID == self.mine: continue
             if self.mine in global_neighbors[taskID]:
@@ -216,7 +216,7 @@
         """
         if round == 'first':
             max_pad = na.max(self.padding)
-            self.mine, self.global_padding = self._mpi_info_dict(max_pad)
+            self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
@@ -235,7 +235,7 @@
         temp_LE = LE - LE_padding
         temp_RE = RE + RE_padding
         expanded_bounds = (temp_LE, temp_RE)
-        self.mine, global_exp_bounds = self._mpi_info_dict(expanded_bounds)
+        self.mine, global_exp_bounds = self.comm.mpi_info_dict(expanded_bounds)
         send_real_indices = {}
         send_points = {}
         send_mass = {}
@@ -266,7 +266,7 @@
         del points, shift_points, mass, real_indices
         yt_counters("Picking padding data to send.")
         # Communicate the sizes to send.
-        self.mine, global_send_count = self._mpi_info_dict(send_size)
+        self.mine, global_send_count = self.comm.mpi_info_dict(send_size)
         del send_size
         # Initialize the arrays to receive data.
         yt_counters("Initalizing recv arrays.")
@@ -285,19 +285,19 @@
         yt_counters("MPI stuff.")
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
         # Let's wait here to be absolutely sure that all the receive buffers
         # have been created before any sending happens!
-        self._barrier()
+        self.comm.barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_nonblocking_send(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_nonblocking_send(send_mass[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
@@ -665,7 +665,7 @@
         """
         yt_counters("globally_assign_chainIDs")
         # First find out the number of chains on each processor.
-        self.mine, chain_info = self._mpi_info_dict(chain_count)
+        self.mine, chain_info = self.comm.mpi_info_dict(chain_count)
         self.nchains = sum(chain_info.values())
         # Figure out our offset.
         self.my_first_id = sum([v for k,v in chain_info.iteritems() if k < self.mine])
@@ -684,9 +684,9 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._par_combine_object(self.densest_in_chain,
+        self.densest_in_chain = self.comm.par_combine_object(self.densest_in_chain,
                 datatype="array", op="cat")
-        self.densest_in_chain_real_index = self._par_combine_object(
+        self.densest_in_chain_real_index = self.comm.par_combine_object(
                 self.densest_in_chain_real_index,
                 datatype="array", op="cat")
         yt_counters("global chain MPI stuff.")
@@ -778,16 +778,16 @@
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure all the receive buffers are set before continuing.
-        self._barrier()
+        self.comm.barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_send(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         self.__max_memory()
         so_far = 0
         for opp_neighbor in self.neighbors:
@@ -841,7 +841,7 @@
         # Now we make a global dict of how many particles each task is
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
-        self.global_padded_count = self._par_combine_object(
+        self.global_padded_count = self.comm.par_combine_object(
                 self.global_padded_count, datatype = "dict", op = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()
@@ -883,7 +883,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_allreduce(chainID_translate_map_local, op='min')
+            self.comm.mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -937,7 +937,7 @@
         # but there's so many places in this that need to be globally synched
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
-        global_annulus_count = self._par_combine_object(
+        global_annulus_count = self.comm.par_combine_object(
                 global_annulus_count, datatype = "dict", op = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)
@@ -949,16 +949,16 @@
         # Set up the receving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure the recv buffers are set before continuing.
-        self._barrier()
+        self.comm.barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_nonblocking_send(real_indices, neighbor))
-            hooks.append(self._mpi_nonblocking_send(chainIDs, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         self.__max_memory()
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
@@ -1067,7 +1067,7 @@
         """
         yt_counters("make_global_chain_densest_n")
         (self.top_keys, self.bot_keys, self.vals) = \
-            self._mpi_maxdict_dict(self.chain_densest_n)
+            self.comm.mpi_maxdict_dict(self.chain_densest_n)
         self.__max_memory()
         del self.chain_densest_n
         yt_counters("make_global_chain_densest_n")
@@ -1208,7 +1208,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_allreduce(lookup, op='min')
+        lookup = self.comm.mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1336,7 +1336,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_allreduce(max_dens_point, op='sum')
+        self.max_dens_point = self.comm.mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1391,9 +1391,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_allreduce(size, op='sum')
-        CoM_M = self._mpi_allreduce(CoM_M, op='sum')
-        self.Tot_M = self._mpi_allreduce(Tot_M, op='sum')
+        self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
+        CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1411,7 +1411,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_allreduce(max_radius, op='max')
+        self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
@@ -1483,7 +1483,7 @@
         self.density = self.density[:self.real_size]
         # We'll make this a global object, which can be used to write a text
         # file giving the names of hdf5 files the particles for each halo.
-        self.mine, self.I_own = self._mpi_info_dict(self.I_own)
+        self.mine, self.I_own = self.comm.mpi_info_dict(self.I_own)
         self.halo_taskmap = defaultdict(set)
         for taskID in self.I_own:
             for groupID in self.I_own[taskID]:
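
Each of the padded-data hunks above follows the same choreography: post all nonblocking receives, hit a barrier so every rank is guaranteed to have its receive buffers in place, post the nonblocking sends, and only touch the data after the Waitall. A stripped-down sketch of that pattern with mpi4py and NumPy buffers; here the neighbor set is simply every other rank, whereas parallel HOP uses its geometric neighbors.

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.rank, comm.size
    neighbors = [r for r in range(size) if r != rank]   # stand-in neighbor set

    send_buf = np.arange(4, dtype='int64') + 10 * rank
    recv_bufs = dict((n, np.empty(4, dtype='int64')) for n in neighbors)

    hooks = []
    for opp_neighbor in neighbors:                       # receives first
        hooks.append(comm.Irecv(recv_bufs[opp_neighbor], source=opp_neighbor))
    comm.Barrier()            # every rank has now posted its receive buffers
    for neighbor in neighbors:                           # then the sends
        hooks.append(comm.Isend(send_buf, dest=neighbor))
    MPI.Request.Waitall(hooks)                           # use the data only now

    for opp_neighbor in neighbors:
        assert recv_bufs[opp_neighbor][0] == 10 * opp_neighbor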


--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Wed Oct 19 13:23:17 2011 -0400
@@ -148,7 +148,7 @@
         # First the fit file.
         if fit:
             fitname = prefix + '-fit.dat'
-            fp = self._write_on_root(fitname)
+            fp = self.comm.write_on_root(fitname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
@@ -164,7 +164,7 @@
             fp.close()
         if self.mode == 'haloes' and haloes:
             haloname = prefix + '-haloes.dat'
-            fp = self._write_on_root(haloname)
+            fp = self.comm.write_on_root(haloname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
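
write_on_root (its non-root cStringIO fallback is visible in the get_filename hunk a couple of changesets up) lets every task run the same fp.write() calls while only root produces a real file. A minimal sketch of that idea, assuming an mpi4py communicator; the output filename is invented.

    from mpi4py import MPI
    try:
        from cStringIO import StringIO     # Python 2, as in the code above
    except ImportError:
        from io import StringIO

    def write_on_root(comm, filename):
        # root gets a real file handle; everyone else gets a throwaway
        # in-memory sink, so callers can write unconditionally on every rank
        if comm.rank == 0:
            return open(filename, "w")
        return StringIO()

    comm = MPI.COMM_WORLD
    fp = write_on_root(comm, "HaloMassFcn-fit.dat")
    fp.write("#Columns:\n#1. log10 of mass (Msolar, NOT Msolar/h)\n")
    fp.close()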


--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Wed Oct 19 13:23:17 2011 -0400
@@ -181,7 +181,7 @@
                 os.unlink(self.database)
             except:
                 pass
-        self._barrier()
+        self.comm.barrier()
         self._open_create_database()
         self._create_halo_table()
         self._run_halo_finder_add_to_db()
@@ -204,7 +204,7 @@
         # Now update the database with all the writes.
         mylog.info("Updating database with parent-child relationships.")
         self._copy_and_update_db()
-        self._barrier()
+        self.comm.barrier()
         mylog.info("Done!")
         
     def _read_halo_lists(self):
@@ -276,7 +276,7 @@
                     line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
                     self.cursor.execute(line, values)
                 self.conn.commit()
-            self._barrier()
+            self.comm.barrier()
             del hp
     
     def _open_create_database(self):
@@ -284,7 +284,7 @@
         # doesn't already exist. Open it first on root, and then on the others.
         if self.mine == 0:
             self.conn = sql.connect(self.database)
-        self._barrier()
+        self.comm.barrier()
         self._ensure_db_sync()
         if self.mine != 0:
             self.conn = sql.connect(self.database)
@@ -295,7 +295,7 @@
         # parallel file system funniness, things will go bad very quickly.
         # Therefore, just to be very, very careful, we will ensure that the
         # md5 hash of the file is identical across all tasks before proceeding.
-        self._barrier()
+        self.comm.barrier()
         for i in range(5):
             try:
                 file = open(self.database)
@@ -306,7 +306,7 @@
                 file = open(self.database)
             hash = md5.md5(file.read()).hexdigest()
             file.close()
-            ignore, hashes = self._mpi_info_dict(hash)
+            ignore, hashes = self.comm.mpi_info_dict(hash)
             hashes = set(hashes.values())
             if len(hashes) == 1:
                 break
@@ -339,7 +339,7 @@
                 self.conn.commit()
             except sql.OperationalError:
                 pass
-        self._barrier()
+        self.comm.barrier()
     
     def _find_likely_children(self, parentfile, childfile):
         # For each halo in the parent list, identify likely children in the 
@@ -549,15 +549,15 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._par_combine_object(parent_IDs_tosend,
+        parent_IDs_tosend = self.comm.par_combine_object(parent_IDs_tosend,
                 datatype="array", op="cat")
-        parent_masses_tosend = self._par_combine_object(parent_masses_tosend,
+        parent_masses_tosend = self.comm.par_combine_object(parent_masses_tosend,
                 datatype="array", op="cat")
-        parent_halos_tosend = self._par_combine_object(parent_halos_tosend,
+        parent_halos_tosend = self.comm.par_combine_object(parent_halos_tosend,
                 datatype="array", op="cat")
-        child_IDs_tosend = self._par_combine_object(child_IDs_tosend,
+        child_IDs_tosend = self.comm.par_combine_object(child_IDs_tosend,
                 datatype="array", op="cat")
-        child_halos_tosend = self._par_combine_object(child_halos_tosend,
+        child_halos_tosend = self.comm.par_combine_object(child_halos_tosend,
                 datatype="array", op="cat")
 
         # Resort the received particles.
@@ -718,7 +718,7 @@
             temp_cursor.close()
             temp_conn.close()
         self._close_database()
-        self._barrier()
+        self.comm.barrier()
         if self.mine == 0:
             os.rename(temp_name, self.database)
 
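_ensure_db_sync above is a tidy use of mpi_info_dict (which, per the comment in the halo_objects.py hunk above, returns a dict of {proc: value}): every task hashes its local copy of the database and the loop retries until all copies agree. A sketch of the same check built directly on mpi4py's allgather, with the retry/sleep logic omitted and an invented filename.

    import hashlib
    from mpi4py import MPI

    def all_ranks_agree(comm, filename):
        # hash the local copy of the file ...
        with open(filename, "rb") as f:
            local_hash = hashlib.md5(f.read()).hexdigest()
        # ... gather every rank's hash and check they collapse to one value
        hashes = comm.allgather(local_hash)
        return len(set(hashes)) == 1

    comm = MPI.COMM_WORLD
    comm.Barrier()                            # cf. self.comm.barrier() above
    if not all_ranks_agree(comm, "halos.db"):
        raise RuntimeError("database copies differ across tasks")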


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Oct 19 13:23:17 2011 -0400
@@ -495,12 +495,12 @@
             updated_halos.append(halo)
         
         # And here is where we bring it all together.
-        updated_halos = self._par_combine_object(updated_halos,
+        updated_halos = self.comm.par_combine_object(updated_halos,
                             datatype="list", op="cat")
         updated_halos.sort(key = lambda a:a['id'])
         self.all_halos = updated_halos
 
-        self.filtered_halos = self._par_combine_object(self.filtered_halos)
+        self.filtered_halos = self.comm.par_combine_object(self.filtered_halos)
                             datatype="list", op="cat")
         self.filtered_halos.sort(key = lambda a:a['id'])
 

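Both calls above concatenate per-task Python lists of halo records. At these call sites, par_combine_object(..., datatype="list", op="cat") behaves like an allgather of each rank's list followed by a flatten; a sketch of that semantics with mpi4py, using placeholder halo dicts.

    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # each task contributes the halos it updated locally ...
    local_halos = [{"id": 10 * comm.rank + i} for i in range(2)]

    # ... and every task ends up with the full, flattened, sorted list
    chunks = comm.allgather(local_halos)
    combined = [halo for chunk in chunks for halo in chunk]
    combined.sort(key=lambda a: a["id"])

    if comm.rank == 0:
        print("total halos: %d" % len(combined))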

--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:23:17 2011 -0400
@@ -161,7 +161,7 @@
             ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
                 self.right_edge)
             padded, self.LE, self.RE, self.ds = \
-            self._partition_hierarchy_3d(ds = ds, padding=0.,
+            self.comm.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)
         else:
             self.left_edge = left_edge
@@ -169,10 +169,10 @@
             # We do this twice, first with no 'buffer' to get the unbuffered
             # self.LE/RE, and then second to get a buffered self.ds.
             padded, self.LE, self.RE, temp = \
-                self._partition_region_3d(left_edge, right_edge,
+                self.comm.partition_region_3d(left_edge, right_edge,
                     rank_ratio=self.vol_ratio)
             padded, temp, temp, self.ds = \
-                self._partition_region_3d(left_edge - self.lengths[-1], \
+                self.comm.partition_region_3d(left_edge - self.lengths[-1], \
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
@@ -274,8 +274,8 @@
                 self._setup_recv_arrays()
                 self._send_arrays()
                 t0 = time.time()
-                self._mpi_Request_Waitall(self.send_hooks)
-                self._mpi_Request_Waitall(self.recv_hooks)
+                self.comm.mpi_Request_Waitall(self.send_hooks)
+                self.comm.mpi_Request_Waitall(self.recv_hooks)
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
@@ -364,7 +364,7 @@
         for task in xrange(self.size):
             if task == self.mine: continue
             self.recv_done[task] = na.zeros(1, dtype='int64')
-            self.done_hooks.append(self._mpi_nonblocking_recv(self.recv_done[task], \
+            self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
     def _send_done_to_root(self):
@@ -377,7 +377,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_nonblocking_send(self.send_done, \
+            self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -391,7 +391,7 @@
         """
         if self.mine == 0:
             # If other tasks aren't finished, this will return False.
-            status = self._mpi_Request_Testall(self.done_hooks)
+            status = self.comm.mpi_Request_Testall(self.done_hooks)
             # Convolve this with with root's status.
             status = status * (self.generated_points == self.total_values)
             if status == 1:
@@ -403,7 +403,7 @@
             status = 0
         # Broadcast the status from root - we stop only if root thinks we should
         # stop.
-        status = self._mpi_bcast_pickled(status)
+        status = self.comm.mpi_bcast_pickled(status)
         if status == 0: return True
         if self.comm_cycle_count < status:
             return True
@@ -419,22 +419,22 @@
         self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         self.recv_gen_array = na.zeros(self.size, dtype='int64')
-        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_points, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
-        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_fields_vals, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
             (self.mine-1)%self.size, tag=20))
-        self.recv_hooks.append(self._mpi_nonblocking_recv(self.recv_gen_array, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_gen_array, \
             (self.mine-1)%self.size, tag=40))
 
     def _send_arrays(self):
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_nonblocking_send(self.points,\
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_nonblocking_send(self.fields_vals,\
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_nonblocking_send(self.gen_array, \
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):
@@ -442,8 +442,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allreduce(fset.too_low, op='sum')
-            fset.too_high = self._mpi_allreduce(fset.too_high, op='sum')
+            fset.too_low = self.comm.mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self.comm.mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -453,7 +453,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_allreduce(fset.length_bin_hits[length], op='sum')
+                    self.comm.mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.
@@ -622,7 +622,7 @@
         >>> tpf.write_out_means()
         """
         for fset in self._fsets:
-            fp = self._write_on_root(fn % fset.function.__name__)
+            fp = self.comm.write_on_root(fn % fset.function.__name__)
             fset._avg_bin_hits()
             line = "# length".ljust(sep)
             line += "count".ljust(sep)
@@ -690,7 +690,7 @@
         for fset in self._fsets:
             # Only operate on correlation functions.
             if fset.corr_norm == None: continue
-            fp = self._write_on_root("%s_correlation.txt" % fset.function.__name__)
+            fp = self.comm.write_on_root("%s_correlation.txt" % fset.function.__name__)
             line = "# length".ljust(sep)
             line += "\\xi".ljust(sep)
             fp.write(line + "\n")
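
_setup_recv_arrays/_send_arrays above form a ring: each task posts receives from its left-hand neighbor and nonblocking sends of its current point buffers to its right-hand neighbor, with tags 10/20/40 distinguishing the three arrays. A compact sketch of one ring cycle for a single array.

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.rank, comm.size

    points = np.full(5, float(rank))            # this task's current block
    recv_points = np.empty(5, dtype='float64')

    left, right = (rank - 1) % size, (rank + 1) % size
    hooks = [comm.Irecv(recv_points, source=left, tag=10),   # from the left ...
             comm.Isend(points, dest=right, tag=10)]         # ... to the right
    MPI.Request.Waitall(hooks)

    assert recv_points[0] == float(left)        # we now hold our neighbor's block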


--- a/yt/data_objects/data_containers.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 13:23:17 2011 -0400
@@ -811,7 +811,7 @@
             self[field] = temp_data[field] 
         # We finalize
         if temp_data != {}:
-            temp_data = self._par_combine_object(temp_data,
+            temp_data = self.comm.par_combine_object(temp_data,
                     datatype='dict', op='cat')
         # And set, for the next group
         for field in temp_data.keys():
@@ -999,13 +999,13 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0:
             points = None
-            t = self._par_combine_object(None, datatype="array", op="cat")
+            t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
             points = na.concatenate(points)
             # We have to transpose here so that _par_combine_object works
             # properly, as it and the alltoall assume the long axis is the last
             # one.
-            t = self._par_combine_object(points.transpose(),
+            t = self.comm.par_combine_object(points.transpose(),
                         datatype="array", op="cat")
         self['px'] = t[0,:]
         self['py'] = t[1,:]
@@ -1221,7 +1221,7 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
         else: points = na.concatenate(points)
-        t = self._par_combine_object(points, datatype="array", op="cat")
+        t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
         self['px'] = na.dot(pos, self._x_vec)
         self['py'] = na.dot(pos, self._y_vec)
@@ -1441,7 +1441,7 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allreduce(\
+            self[field] = self.comm.mpi_allreduce(\
                 self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
@@ -1613,16 +1613,16 @@
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
-                    self._get_dependencies(fields))
-            self._preload([g for g in self._get_grid_objs()],
-                          self._get_dependencies(fields), self.hierarchy.io)
+                    self.comm.get_dependencies(fields))
+            self.comm.preload([g for g in self._get_grid_objs()],
+                          self.comm.get_dependencies(fields), self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload([g for g in self._get_grid_objs()
+                self.comm.preload([g for g in self._get_grid_objs()
                                  if g.Level == level],
-                              self._get_dependencies(fields), self.hierarchy.io)
+                              self.comm.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
@@ -2002,13 +2002,13 @@
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
-                    len(self.source._grids), self._get_dependencies(fields))
-            self._preload(self.source._grids,
-                          self._get_dependencies(fields), self.hierarchy.io)
+                    len(self.source._grids), self.comm.get_dependencies(fields))
+            self.comm.preload(self.source._grids,
+                          self.comm.get_dependencies(fields), self.hierarchy.io)
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload(self.source.select_grids(level),
-                              self._get_dependencies(fields), self.hierarchy.io)
+                self.comm.preload(self.source.select_grids(level),
+                              self.comm.get_dependencies(fields), self.hierarchy.io)
             self.__calculate_overlap(level)
             my_coords, my_pdx, my_pdy, my_fields = \
                 self.__project_level(level, fields)
@@ -2044,7 +2044,7 @@
         data['pdy'] *= 0.5
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._par_combine_object(temp_data, datatype='dict', op='cat')
+        data = self.comm.par_combine_object(temp_data, datatype='dict', op='cat')
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
@@ -2230,7 +2230,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_allreduce(self[field], op='sum')
+            self[field] = self.comm.mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 


--- a/yt/data_objects/derived_quantities.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Wed Oct 19 13:23:17 2011 -0400
@@ -86,7 +86,7 @@
             e.NumberOfParticles = 1
             self.func(e, *args, **kwargs)
             mylog.debug("Preloading %s", e.requested)
-            self._preload([g for g in self._get_grid_objs()], e.requested,
+            self.comm.preload([g for g in self._get_grid_objs()], e.requested,
                           self._data_source.pf.h.io)
         if lazy_reader and not self.force_unlazy:
             return self._call_func_lazy(args, kwargs)
@@ -110,7 +110,7 @@
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            rv.append(self._par_combine_object(data,
+            rv.append(self.comm.par_combine_object(data,
                         datatype="array", op="cat").transpose())
         self.retvals = rv
         


--- a/yt/data_objects/hierarchy.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/data_objects/hierarchy.py	Wed Oct 19 13:23:17 2011 -0400
@@ -178,7 +178,7 @@
             writeable = os.access(fn, os.W_OK)
         writeable = writeable and not ytcfg.getboolean('yt','onlydeserialize')
         # We now have our conditional stuff
-        self._barrier()
+        self.comm.barrier()
         if not writeable and not exists: return
         if writeable:
             try:


--- a/yt/data_objects/profiles.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 13:23:17 2011 -0400
@@ -81,7 +81,7 @@
 
     def _initialize_parallel(self, fields):
         g_objs = [g for g in self._get_grid_objs()]
-        self._preload(g_objs, self._get_dependencies(fields),
+        self.comm.preload(g_objs, self.comm.get_dependencies(fields),
                       self._data_source.hierarchy.io)
 
     def _lazy_add_fields(self, fields, weight, accumulation):
@@ -121,10 +121,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allreduce(self.__data[key], op='sum')
+            self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allreduce(self.__weight_data[key], op='sum')
-        self.__used = self._mpi_allreduce(self.__used, op='sum')
+            self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:
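
_finalize_parallel above is the standard reduction for binned profiles: each task bins only its own grids, then the per-bin data, weights, and used flags are summed across tasks. A sketch of the reduction and a subsequent weighted average with mpi4py's buffer-level Allreduce; the bin contents are random placeholders.

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    nbins = 8

    # this task's partial sums for one field and its weights
    local_data = np.random.random(nbins)
    local_weight = np.random.random(nbins)

    data = np.zeros(nbins)
    weight = np.zeros(nbins)
    comm.Allreduce(local_data, data, op=MPI.SUM)     # cf. mpi_allreduce(..., op='sum')
    comm.Allreduce(local_weight, weight, op=MPI.SUM)

    # weighted average per bin, guarding against empty bins
    profile = np.where(weight > 0, data / np.maximum(weight, 1e-300), 0.0)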


--- a/yt/frontends/enzo/data_structures.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Wed Oct 19 13:23:17 2011 -0400
@@ -401,7 +401,7 @@
                     field_list = field_list.union(gf)
         else:
             field_list = None
-        field_list = self._mpi_bcast_pickled(field_list)
+        field_list = self.comm.mpi_bcast_pickled(field_list)
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 

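mpi_bcast_pickled here is just a pickle-based broadcast: one task assembles the union of detected fields and the resulting object is shared with every other task. A sketch using mpi4py's lowercase bcast, which pickles automatically; the field names and the choice of rank 0 as the detecting task are illustrative.

    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    if comm.rank == 0:
        # pretend this rank unioned these from a sample of grids
        field_list = set(["Density", "Temperature", "x-velocity"])
    else:
        field_list = None

    field_list = comm.bcast(field_list, root=0)   # cf. mpi_bcast_pickled()
    assert "Density" in field_list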

--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:23:17 2011 -0400
@@ -35,8 +35,6 @@
 from yt.config import ytcfg
 from time import time
 import h5py
-my_rank = ytcfg.getint("yt", "__parallel_rank")
-nprocs = ytcfg.getint("yt", "__parallel_size")
 
 def corner_bounds(split_dim, split, current_left = None, current_right = None):
     r"""
@@ -293,7 +291,7 @@
         self.pf = pf
         self.sdx = self.pf.h.get_smallest_dx()
         self._id_offset = pf.h.grids[0]._id_offset
-        if nprocs > len(pf.h.grids):
+        if self.comm.size > len(pf.h.grids):
             mylog.info('Parallel rendering requires that the number of \n \
             grids in the dataset is greater or equal to the number of \n \
             processors.  Reduce number of processors.')
@@ -379,7 +377,7 @@
 
         # If the full amr kD-tree is requested, merge the results from
         # the parallel build.
-        if merge_trees and nprocs > 1:
+        if merge_trees and self.comm.size > 1:
             self.join_parallel_trees()            
             self.my_l_corner = self.domain_left_edge
             self.my_r_corner = self.domain_right_edge
@@ -752,11 +750,11 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(nprocs))
-        for i in range(2**nprocs):
+        par_tree_depth = long(na.log2(self.comm.size))
+        for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
-                # There are nprocs nodes that meet this criteria
-                if (i+1-nprocs) != my_rank:
+                # There are self.comm.size nodes that meet this criteria
+                if (i+1-self.comm.size) != self.comm.rank:
                     self.tree_dict.pop(i)
                     continue
         for node in self.tree_dict.itervalues():
@@ -770,7 +768,7 @@
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
     def merge_trees(self):
-        self.tree_dict = self._par_combine_object(self.tree_dict,
+        self.tree_dict = self.comm.par_combine_object(self.tree_dict,
                             datatype = "dict", op = "join")
 
     def rebuild_references(self):
@@ -989,9 +987,9 @@
         current_node.grids = grids
         current_node.l_corner = l_corner
         current_node.r_corner = r_corner
-        # current_node.owner = my_rank
+        # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(nprocs))
+        par_tree_depth = int(na.log2(self.comm.size))
         anprocs = 2**par_tree_depth
         while current_node is not None:
             # If we don't have any grids, that means we are revisiting
@@ -1004,7 +1002,7 @@
             # This is where all the domain decomposition occurs.  
             if ((current_node.id + 1)>>par_tree_depth) == 1:
                 # There are anprocs nodes that meet this criteria
-                if (current_node.id+1-anprocs) == my_rank:
+                if (current_node.id+1-anprocs) == self.comm.rank:
                     # I own this shared node
                     self.my_l_corner = current_node.l_corner
                     self.my_r_corner = current_node.r_corner
@@ -1138,17 +1136,17 @@
                     yield node.brick
          
         self.reduce_tree_images(self.tree, front_center)
-        self._barrier()
+        self.comm.barrier()
         
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(nprocs))
+        rounds = int(na.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+my_rank)
+        path = na.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1162,7 +1160,7 @@
             except:
                 rounds = i-1
         for thisround in range(rounds,0,-1):
-            #print my_rank, 'my node', my_node_id
+            #print self.comm.rank, 'my node', my_node_id
             parent = my_node.parent
             #print parent['split_ax'], parent['split_pos']
             if viewpoint[parent.split_ax] <= parent.split_pos:
@@ -1175,9 +1173,9 @@
             # mylog.debug('front owner %i back owner %i parent owner %i'%( front.owner, back.owner, parent.owner))
                 
             # Send the images around
-            if front.owner == my_rank:
+            if front.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
+                    mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
                     arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
@@ -1191,17 +1189,16 @@
                         self.image[:,:,i  ] = self.image[:,:,i  ] + ta * arr2[:,:,i  ]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
-                    self.comm.send_array(self.image.ravel(), back.owner, tag=my_rank)
-
+                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
+                    self.comm.send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
                 
-            if back.owner == my_rank:
+            if back.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
-                    self.comm.send_array(self.image.ravel(), front.owner, tag=my_rank)
+                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank, front.owner))
+                    self.comm.send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
+                    mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
                     arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
@@ -1216,7 +1213,7 @@
                         # image[:,:,i+3] = arr2[:,:,i+3] + ta * image[:,:,i+3]
             # Set parent owner to back owner
             # my_node = (my_node-1)>>1
-            if my_rank == my_node.parent.owner: 
+            if self.comm.rank == my_node.parent.owner: 
                 my_node = my_node.parent
             else:
                 break
@@ -1224,8 +1221,8 @@
     def store_kd_bricks(self, fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
-        if my_rank != 0:
-            self.comm.recv_array(my_rank-1, tag=my_rank-1)
+        if self.comm.rank != 0:
+            self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1237,14 +1234,14 @@
                     except:
                         pass
         f.close()
-        if my_rank != (nprocs-1):
-            self.comm.send_array([0],my_rank+1, tag=my_rank)
+        if self.comm.rank != (nprocs-1):
+            self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
-        if my_rank != 0:
-            self.comm.recv_array(my_rank-1, tag=my_rank-1)
+        if self.comm.rank != 0:
+            self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():
@@ -1267,8 +1264,8 @@
             f.close()
         except:
             pass
-        if my_rank != (nprocs-1):
-            self.comm.send_array([0],my_rank+1, tag=my_rank)
+        if self.comm.rank != (nprocs-1):
+            self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()
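
For reference, the per-channel loop in reduce_tree_images blends the image received from a neighboring owner into the local buffer using the light transmitted through the nearer image.  A minimal numpy-only sketch of that front-over-back blend follows; the single alpha channel in slot 3 is an assumption made for illustration (the real buffers carry alpha per color channel):

    import numpy as na

    def composite_over(front, back):
        # Blend a "back" RGBA image underneath a "front" one, mirroring the
        # alpha-weighted accumulation in reduce_tree_images.  Channel layout
        # (RGB in 0:3, alpha in 3) is assumed here, not taken from the code.
        out = front.copy()
        ta = 1.0 - front[..., 3]          # transmission through the front image
        out[..., :3] += ta[..., None] * back[..., :3]
        out[..., 3] += ta * back[..., 3]
        return out

    # front = na.zeros((16, 16, 4)); back = na.ones((16, 16, 4))
    # composite_over(front, back) --> back shows through where front is empty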


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:23:17 2011 -0400
@@ -348,7 +348,7 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
+    def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
         LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
@@ -394,7 +394,7 @@
 
         return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
 
-    def _partition_region_3d(self, left_edge, right_edge, padding=0.0,
+    def partition_region_3d(self, left_edge, right_edge, padding=0.0,
             rank_ratio = 1):
         """
         Given a region, it subdivides it into smaller regions for parallel
@@ -421,7 +421,7 @@
 
         return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
-    def _partition_hierarchy_3d_bisection_list(self):
+    def partition_hierarchy_3d_bisection_list(self):
         """
         Returns an array that is used to drive _partition_hierarchy_3d_bisection,
         below.
@@ -464,19 +464,19 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
 
-    def _barrier(self):
+    def barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
         self.comm.Barrier()
 
-    def _mpi_exit_test(self, data=False):
+    def mpi_exit_test(self, data=False):
         # data==True -> exit. data==False -> no exit
-        mine, statuses = self._mpi_info_dict(data)
+        mine, statuses = self.comm.mpi_info_dict(data)
         if True in statuses.values():
             raise RuntimeError("Fatal error. Exiting.")
         return None
 
-    def _mpi_maxdict_dict(self, data):
+    def mpi_maxdict_dict(self, data):
         """
         Similar to above, but finds maximums for dicts of dicts. This is
         specifically for a part of chainHOP.
@@ -494,7 +494,7 @@
             bot_keys = na.array(bot_keys, dtype='int64')
             vals = na.array(vals, dtype='float64')
             return (top_keys, bot_keys, vals)
-        self._barrier()
+        self.comm.barrier()
         size = 0
         top_keys = []
         bot_keys = []
@@ -544,7 +544,7 @@
         return (top_keys, bot_keys, vals)
 
     @parallel_passthrough
-    def _par_combine_object(self, data, op, datatype = None):
+    def par_combine_object(self, data, op, datatype = None):
         # op can be chosen from:
         #   cat
         #   join
@@ -626,15 +626,11 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def _mpi_bcast_pickled(self, data):
+    def mpi_bcast_pickled(self, data):
         data = self.comm.bcast(data, root=0)
         return data
 
-    def _should_i_write(self):
-        if not self._distributed: return True
-        return (self.comm == 0)
-
-    def _preload(self, grids, fields, io_handler):
+    def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
         # if so, we load *everything* that we need.  Use with some care.
         mylog.debug("Preloading %s from %s grids", fields, len(grids))
@@ -642,7 +638,7 @@
         io_handler.preload(grids, fields)
 
     @parallel_passthrough
-    def _mpi_allreduce(self, data, dtype=None, op='sum'):
+    def mpi_allreduce(self, data, dtype=None, op='sum'):
         op = op_names[op]
         if isinstance(data, na.ndarray) and data.dtype != na.bool:
             if dtype is None:
@@ -662,28 +658,28 @@
     # Non-blocking stuff.
     ###
 
-    def _mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
+    def mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
         if not self._distributed: return -1
         if dtype is None: dtype = data.dtype
         mpi_type = get_mpi_type(dtype)
         return self.comm.Irecv([data, mpi_type], source, tag)
 
-    def _mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+    def mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
         if not self._distributed: return -1
         if dtype is None: dtype = data.dtype
         mpi_type = get_mpi_type(dtype)
         return self.comm.Isend([data, mpi_type], dest, tag)
 
-    def _mpi_Request_Waitall(self, hooks):
+    def mpi_Request_Waitall(self, hooks):
         if not self._distributed: return
         MPI.Request.Waitall(hooks)
 
-    def _mpi_Request_Waititer(self, hooks):
+    def mpi_Request_Waititer(self, hooks):
         for i in xrange(len(hooks)):
             req = MPI.Request.Waitany(hooks)
             yield req
 
-    def _mpi_Request_Testall(self, hooks):
+    def mpi_Request_Testall(self, hooks):
         """
         This returns False if any of the request hooks are un-finished,
         and True if they are all finished.
@@ -709,9 +705,9 @@
         if not self._distributed: return 0
         return self.comm.rank
 
-    def _mpi_info_dict(self, info):
+    def mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
-        self._barrier()
+        self.comm.barrier()
         data = None
         if self.comm.rank == 0:
             data = {0:info}
@@ -721,27 +717,27 @@
             self.comm.send(info, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
         data = self.comm.bcast(data, root=0)
-        self._barrier()
+        self.comm.barrier()
         return self.comm.rank, data
 
-    def _get_dependencies(self, fields):
+    def get_dependencies(self, fields):
         deps = []
         fi = self.pf.field_info
         for field in fields:
             deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
         return list(set(deps))
 
-    def _claim_object(self, obj):
+    def claim_object(self, obj):
         if not self._distributed: return
         obj._owner = self.comm.rank
         obj._distributed = True
 
-    def _do_not_claim_object(self, obj):
+    def do_not_claim_object(self, obj):
         if not self._distributed: return
         obj._owner = -1
         obj._distributed = True
 
-    def _write_on_root(self, fn):
+    def write_on_root(self, fn):
         if not self._distributed: return open(fn, "w")
         if self.comm.rank == 0:
             return open(fn, "w")
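
The net effect of this hunk is that the MPI helpers lose their leading underscore and become public methods of the communicator, so call sites change from self._mpi_allreduce(...) / self._barrier() to self.comm.mpi_allreduce(...) / self.comm.barrier().  A hedged sketch of the calling convention after the rename (ExampleTask and its partial array are invented, and it assumes ParallelAnalysisInterface.__init__ has attached a live communicator):

    import numpy as na
    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ParallelAnalysisInterface

    class ExampleTask(ParallelAnalysisInterface):
        def finalize(self, partial):
            # Each rank contributes a partial result; the reduction and the
            # barrier now live on self.comm rather than on the mixin itself.
            total = self.comm.mpi_allreduce(na.asarray(partial), op='sum')
            self.comm.barrier()
            return total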


--- a/yt/visualization/streamlines.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/visualization/streamlines.py	Wed Oct 19 13:23:17 2011 -0400
@@ -145,8 +145,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allreduce(self.streamlines, op='sum')
-        self.magnitudes = self._mpi_allreduce(self.magnitudes, op='sum')
+        self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 13:17:13 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Wed Oct 19 13:23:17 2011 -0400
@@ -100,7 +100,7 @@
                                       " not yet supported")
         if self.bricks is not None and source is None: return
         bricks = []
-        self._preload(self.source._grids, self.fields, self.pf.h.io)
+        self.comm.preload(self.source._grids, self.fields, self.pf.h.io)
         pbar = get_pbar("Partitioning ", len(self.source._grids))
         for i, g in enumerate(self.source._grids):
             pbar.update(i)


http://bitbucket.org/yt_analysis/yt/changeset/247b3ab4f33d/
changeset:   247b3ab4f33d
branch:      yt
user:        MatthewTurk
date:        2011-10-19 19:34:04
summary:     Merging.
affected #:  3 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:33:46 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:34:04 2011 -0400
@@ -1240,11 +1240,11 @@
             if group.tasks is not None:
                 fn = ""
                 for task in group.tasks:
-                    fn += "%s.h5 " % self._get_filename(prefix, rank=task)
+                    fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task)
             elif self._distributed:
-                fn = "%s.h5" % self._get_filename(prefix, rank=group._owner)
+                fn = "%s.h5" % self.comm.get_filename(prefix, rank=group._owner)
             else:
-                fn = "%s.h5" % self._get_filename(prefix)
+                fn = "%s.h5" % self.comm.get_filename(prefix)
             gn = "Halo%08i" % (group.id)
             f.write("%s %s\n" % (gn, fn))
             f.flush()
@@ -1745,10 +1745,10 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        fn = "%s.h5" % self._get_filename(prefix)
+        fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
-            if not self._is_mine(halo): continue
+            if not self.comm.is_mine(halo): continue
             halo.write_particle_list(f)
 
     def dump(self, basename="HopAnalysis"):
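
The helper referenced here, self.comm.get_filename (its definition appears later in this merge), appends a zero-padded rank to the prefix when running distributed, so each processor's halo particle list lands in its own HDF5 file.  A small sketch of that naming scheme; per_rank_filename and the prefix are hypothetical names:

    def per_rank_filename(prefix, rank, distributed=True):
        # Mirrors the convention get_filename implements: serial runs keep
        # the bare prefix, parallel runs append a zero-padded rank.
        if not distributed:
            return prefix
        return "%s_%04i" % (prefix, rank)

    # per_rank_filename("halo-parts", 3) --> 'halo-parts_0003'
    # write_particle_lists then writes "halo-parts_0003.h5" on rank 3, and
    # comm.is_mine(halo) keeps each halo's particles on a single writer.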


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:33:46 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:34:04 2011 -0400
@@ -1176,7 +1176,7 @@
             if front.owner == self.comm.rank:
                 if front.owner == parent.owner:
                     mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
-                    arr2 = self.comm._recv_array(back.owner, tag=back.owner).reshape(
+                    arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1190,17 +1190,16 @@
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
-                    self.comm._send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
-
+                    self.comm.send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
                 
             if back.owner == self.comm.rank:
                 if front.owner == parent.owner:
                     mylog.debug('%04i sending my image to %04i'%(self.comm.rank, front.owner))
-                    self.comm._send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
+                    self.comm.send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
                     mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
-                    arr2 = self.comm._recv_array(front.owner, tag=front.owner).reshape(
+                    arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1223,7 +1222,7 @@
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
         if self.comm.rank != 0:
-            self.comm._recv_array(self.comm.rank-1, tag=self.comm.rank-1)
+            self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1235,14 +1234,14 @@
                     except:
                         pass
         f.close()
-        if self.comm.rank != (self.comm.size-1):
-            self.comm._send_array([0],self.comm.rank+1, tag=self.comm.rank)
+        if self.comm.rank != (nprocs-1):
+            self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
         if self.comm.rank != 0:
-            self.comm._recv_array(self.comm.rank-1, tag=self.comm.rank-1)
+            self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():
@@ -1265,8 +1264,8 @@
             f.close()
         except:
             pass
-        if self.comm.rank != (self.comm.size-1):
-            self.comm._send_array([0],self.comm.rank+1, tag=self.comm.rank)
+        if self.comm.rank != (nprocs-1):
+            self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()
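
store_kd_bricks and load_kd_bricks serialize access to the shared HDF5 file by daisy-chaining the ranks: each rank blocks on a token from rank-1, performs its I/O, then releases rank+1.  A hedged, generic sketch of that pattern; do_io and the token payload are placeholders, and comm is assumed to expose rank, size, send_array and recv_array as the Communicator class does:

    def ring_serialized_io(comm, do_io):
        # Ranks take turns touching the file: wait for the previous rank's
        # token, do the work, then pass a token to the next rank.
        if comm.rank != 0:
            comm.recv_array(comm.rank - 1, tag=comm.rank - 1)
        do_io()
        if comm.rank != comm.size - 1:
            comm.send_array([0], comm.rank + 1, tag=comm.rank)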


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:33:46 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:34:04 2011 -0400
@@ -584,7 +584,7 @@
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             for key in field_keys:
                 dd = data[key]
-                rv = self._alltoallv_array(dd, arr_size, offsets, sizes)
+                rv = self.alltoallv_array(dd, arr_size, offsets, sizes)
                 data[key] = rv
             return data
         elif datatype == "array" and op == "cat":
@@ -613,7 +613,7 @@
             # concatenation.
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
-            data = self._alltoallv_array(data, arr_size, offsets, sizes)
+            data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
             if self.comm.rank == 0:
@@ -744,18 +744,18 @@
         else:
             return cStringIO.StringIO()
 
-    def _get_filename(self, prefix, rank=None):
+    def get_filename(self, prefix, rank=None):
         if not self._distributed: return prefix
         if rank == None:
             return "%s_%04i" % (prefix, self.comm.rank)
         else:
             return "%s_%04i" % (prefix, rank)
 
-    def _is_mine(self, obj):
+    def is_mine(self, obj):
         if not obj._distributed: return True
         return (obj._owner == self.comm.rank)
 
-    def _send_quadtree(self, target, buf, tgd, args):
+    def send_quadtree(self, target, buf, tgd, args):
         sizebuf = na.zeros(1, 'int64')
         sizebuf[0] = buf[0].size
         self.comm.Send([sizebuf, MPI.LONG], dest=target)
@@ -763,7 +763,7 @@
         self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
         
-    def _recv_quadtree(self, target, tgd, args):
+    def recv_quadtree(self, target, tgd, args):
         sizebuf = na.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
         buf = [na.empty((sizebuf[0],), 'int32'),
@@ -792,18 +792,18 @@
                 target = (rank & ~mask) % size
                 #print "SENDING FROM %02i to %02i" % (rank, target)
                 buf = qt.tobuffer()
-                self._send_quadtree(target, buf, tgd, args)
-                #qt = self._recv_quadtree(target, tgd, args)
+                self.send_quadtree(target, buf, tgd, args)
+                #qt = self.recv_quadtree(target, tgd, args)
             else:
                 target = (rank | mask)
                 if target < size:
                     #print "RECEIVING FROM %02i on %02i" % (target, rank)
-                    buf = self._recv_quadtree(target, tgd, args)
+                    buf = self.recv_quadtree(target, tgd, args)
                     qto = QuadTree(tgd, args[2])
                     qto.frombuffer(*buf)
                     merge_quadtrees(qt, qto)
                     del qto
-                    #self._send_quadtree(target, qt, tgd, args)
+                    #self.send_quadtree(target, qt, tgd, args)
             mask <<= 1
 
         if rank == 0:
@@ -824,7 +824,7 @@
         return qt
 
 
-    def _send_array(self, arr, dest, tag = 0):
+    def send_array(self, arr, dest, tag = 0):
         if not isinstance(arr, na.ndarray):
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
@@ -835,7 +835,7 @@
         self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
         del tmp
 
-    def _recv_array(self, source, tag = 0):
+    def recv_array(self, source, tag = 0):
         dt, ne = self.comm.recv(source=source, tag=tag)
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
@@ -844,12 +844,12 @@
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
 
-    def _alltoallv_array(self, send, total_size, offsets, sizes):
+    def alltoallv_array(self, send, total_size, offsets, sizes):
         if len(send.shape) > 1:
             recv = []
             for i in range(send.shape[0]):
-                recv.append(self._alltoallv_array(send[i,:].copy(), 
-                                                  total_size, offsets, sizes))
+                recv.append(self.alltoallv_array(send[i,:].copy(), 
+                                                 total_size, offsets, sizes))
             recv = na.array(recv)
             return recv
         offset = offsets[self.comm.rank]
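
send_array and recv_array ship an ndarray in two steps: the dtype string and shape go over the pickled slow path first, then the raw bytes follow as MPI.CHAR via a view cast with __tocast = 'c'.  A hedged standalone sketch of the same handshake using mpi4py directly (outside the Communicator class, error handling omitted):

    import numpy as na
    from mpi4py import MPI

    def send_ndarray(comm, arr, dest, tag=0):
        # Metadata first (pickled tuple), raw payload second as CHAR bytes.
        comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
        comm.Send([arr.view('c'), MPI.CHAR], dest=dest, tag=tag)

    def recv_ndarray(comm, source, tag=0):
        dt, ne = comm.recv(source=source, tag=tag)
        arr = na.empty(ne, dtype=dt)
        comm.Recv([arr.view('c'), MPI.CHAR], source=source, tag=tag)
        return arr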


http://bitbucket.org/yt_analysis/yt/changeset/3af8a909fb7d/
changeset:   3af8a909fb7d
branch:      yt
user:        samskillman
date:        2011-10-19 19:36:04
summary:     Moving get_dependencies to PAI.
affected #:  4 files (-1 bytes)

--- a/yt/data_objects/data_containers.py	Wed Oct 19 13:23:17 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 13:36:04 2011 -0400
@@ -1613,16 +1613,16 @@
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
-                    self.comm.get_dependencies(fields))
+                    self.get_dependencies(fields))
             self.comm.preload([g for g in self._get_grid_objs()],
-                          self.comm.get_dependencies(fields), self.hierarchy.io)
+                          self.get_dependencies(fields), self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
                 self.comm.preload([g for g in self._get_grid_objs()
                                  if g.Level == level],
-                              self.comm.get_dependencies(fields), self.hierarchy.io)
+                              self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
             mylog.debug("End of projecting level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
@@ -2002,13 +2002,13 @@
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
-                    len(self.source._grids), self.comm.get_dependencies(fields))
+                    len(self.source._grids), self.get_dependencies(fields))
             self.comm.preload(self.source._grids,
-                          self.comm.get_dependencies(fields), self.hierarchy.io)
+                          self.get_dependencies(fields), self.hierarchy.io)
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
                 self.comm.preload(self.source.select_grids(level),
-                              self.comm.get_dependencies(fields), self.hierarchy.io)
+                              self.get_dependencies(fields), self.hierarchy.io)
             self.__calculate_overlap(level)
             my_coords, my_pdx, my_pdy, my_fields = \
                 self.__project_level(level, fields)


--- a/yt/data_objects/profiles.py	Wed Oct 19 13:23:17 2011 -0400
+++ b/yt/data_objects/profiles.py	Wed Oct 19 13:36:04 2011 -0400
@@ -81,7 +81,7 @@
 
     def _initialize_parallel(self, fields):
         g_objs = [g for g in self._get_grid_objs()]
-        self.comm.preload(g_objs, self.comm.get_dependencies(fields),
+        self.comm.preload(g_objs, self.get_dependencies(fields),
                       self._data_source.hierarchy.io)
 
     def _lazy_add_fields(self, fields, weight, accumulation):


--- a/yt/frontends/enzo/data_structures.py	Wed Oct 19 13:23:17 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Wed Oct 19 13:36:04 2011 -0400
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self.comm.rank == 0 or self._par_rank == None:
+        if self.comm.rank == 0 or self.comm.rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:23:17 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:36:04 2011 -0400
@@ -115,8 +115,6 @@
     for dt, val in dtype_names.items():
         if dt == dtype: return val
 
-__tocast = 'c'
-
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then
@@ -316,6 +314,8 @@
     comm = None
     _grids = None
     _distributed = None
+    __tocast = 'c'
+
     def __init__(self, comm=MPI.COMM_WORLD):
         self.comm = comm
         self._distributed = self.comm.size > 1
@@ -720,13 +720,6 @@
         self.comm.barrier()
         return self.comm.rank, data
 
-    def get_dependencies(self, fields):
-        deps = []
-        fi = self.pf.field_info
-        for field in fields:
-            deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
-        return list(set(deps))
-
     def claim_object(self, obj):
         if not self._distributed: return
         obj._owner = self.comm.rank
@@ -829,7 +822,7 @@
             self.comm.send((None,None), dest=dest, tag=tag)
             self.comm.send(arr, dest=dest, tag=tag)
             return
-        tmp = arr.view(__tocast) # Cast to CHAR
+        tmp = arr.view(self.__tocast) # Cast to CHAR
         # communicate type and shape
         self.comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
         self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
@@ -840,7 +833,7 @@
         if dt is None and ne is None:
             return self.comm.recv(source=source, tag=tag)
         arr = na.empty(ne, dtype=dt)
-        tmp = arr.view(__tocast)
+        tmp = arr.view(self.__tocast)
         self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
         return arr
 
@@ -853,13 +846,13 @@
             recv = na.array(recv)
             return recv
         offset = offsets[self.comm.rank]
-        tmp_send = send.view(__tocast)
+        tmp_send = send.view(self.__tocast)
         recv = na.empty(total_size, dtype=send.dtype)
         recv[offset:offset+send.size] = send[:]
         dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
         roff = [off * dtr for off in offsets]
         rsize = [siz * dtr for siz in sizes]
-        tmp_recv = recv.view(__tocast)
+        tmp_recv = recv.view(self.__tocast)
         self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
                                   (tmp_recv, (rsize, roff), MPI.CHAR))
         return recv
@@ -897,6 +890,13 @@
             return ParallelObjectIterator(self, True, attr='_grids')
         return ObjectIterator(self, True, attr='_grids')
 
+    def get_dependencies(self, fields):
+        deps = []
+        fi = self.pf.field_info
+        for field in fields:
+            deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
+        return list(set(deps))
+
     def _initialize_parallel(self):
         pass
 


http://bitbucket.org/yt_analysis/yt/changeset/89135540f799/
changeset:   89135540f799
branch:      yt
user:        samskillman
date:        2011-10-19 19:36:12
summary:     Merging.
affected #:  8 files (-1 bytes)

--- a/yt/analysis_modules/light_cone/halo_mask.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/analysis_modules/light_cone/halo_mask.py	Wed Oct 19 13:36:12 2011 -0400
@@ -45,14 +45,14 @@
         light_cone_mask.append(_make_slice_mask(slice, halo_list, pixels))
 
     # Write out cube of masks from each slice.
-    if cube_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+    if cube_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         mylog.info("Saving halo mask cube to %s." % cube_file)
         output = h5py.File(cube_file, 'a')
         output.create_dataset('haloMaskCube', data=na.array(light_cone_mask))
         output.close()
 
     # Write out final mask.
-    if mask_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+    if mask_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         # Final mask is simply the product of the mask from each slice.
         mylog.info("Saving halo mask to %s." % mask_file)
         finalMask = na.ones(shape=(pixels, pixels))
@@ -76,7 +76,7 @@
         haloMap.extend(_make_slice_halo_map(slice, halo_list))
 
     # Write out file.
-    if ytcfg.getint("yt", "__parallel_rank") == 0:
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         mylog.info("Saving halo map to %s." % map_file)
         f = open(map_file, 'w')
         f.write("#z       x         y        M [Msun]  R [Mpc]   R [image]\n")


--- a/yt/analysis_modules/light_cone/light_cone.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Wed Oct 19 13:36:12 2011 -0400
@@ -108,7 +108,7 @@
         self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
                           self.image_resolution_in_arcseconds)
 
-        if ytcfg.getint("yt", "__parallel_rank") == 0:
+        if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
             # Create output directory.
             if (os.path.exists(self.output_dir)):
                 if not(os.path.isdir(self.output_dir)):
@@ -243,7 +243,7 @@
         else:
             halo_mask_cube = light_cone_halo_mask(self, mask_file=mask_file, **kwargs)
             # Collapse cube into final mask.
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
+            if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
                 self.halo_mask = na.ones(shape=(self.pixels, self.pixels), dtype=bool)
                 for mask in halo_mask_cube:
                     self.halo_mask *= mask
@@ -302,7 +302,7 @@
             output['object'].parameters.update(self.set_parameters)
             frb = _light_cone_projection(output, field, self.pixels, 
                                          weight_field=weight_field, node=node)
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
+            if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
                 if save_slice_images:
                     write_image(na.log10(frb[field]), "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
@@ -342,7 +342,7 @@
             if (q < len(self.light_cone_solution) - 1):
                 del output['object']
 
-        if ytcfg.getint("yt", "__parallel_rank") == 0:
+        if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
             # Add up slices to make light cone projection.
             if (weight_field is None):
                 lightConeProjection = sum(self.projection_stack)


--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Wed Oct 19 13:36:12 2011 -0400
@@ -88,7 +88,7 @@
                                            field_cuts=these_field_cuts, node_name=node_name)
 
     # If parallel: all the processes have the whole projection object, but we only need to do the tiling, shifting, and cutting once.
-    if ytcfg.getint("yt", "__parallel_rank") == 0:
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
 
         # 2. The Tile Problem
         # Tile projection to specified width.


--- a/yt/config.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/config.py	Wed Oct 19 13:36:12 2011 -0400
@@ -38,8 +38,10 @@
     inline = 'False',
     __withinreason = 'False',
     __parallel = 'False',
-    __parallel_rank = '0',
-    __parallel_size = '1',
+    __global_parallel_rank = '0',
+    __global_parallel_size = '1',
+    __topcomm_parallel_rank = '0',
+    __topcomm_parallel_size = '1',
     storeparameterfiles = 'True',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',
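
The single __parallel_rank/__parallel_size pair is split in two: the global pair always describes MPI.COMM_WORLD (used below for per-process profile files and rpdb), while the topcomm pair tracks the current top communicator, which may be a subgroup.  A short sketch of reading both; the printed message is illustrative:

    from yt.config import ytcfg

    # A rank can be root of its subcommunicator (topcomm rank 0) without
    # being global rank 0.
    global_rank = ytcfg.getint("yt", "__global_parallel_rank")
    topcomm_rank = ytcfg.getint("yt", "__topcomm_parallel_rank")
    if topcomm_rank == 0:
        print "subgroup root (global rank %i) does the writing" % global_rank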


--- a/yt/funcs.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/funcs.py	Wed Oct 19 13:36:12 2011 -0400
@@ -187,12 +187,13 @@
        def some_root_only_function(...):
 
     """
+    from yt.config import ytcfg
     @wraps(func)
-    def donothing(*args, **kwargs):
-        return
-    from yt.config import ytcfg
-    if ytcfg.getint("yt","__parallel_rank") > 0: return donothing
-    return func
+    def check_parallel_rank(*args, **kwargs):
+        if ytcfg.getint("yt","__topcomm_parallel_rank") > 0:
+            return 
+        return func(*args, **kwargs)
+    return check_parallel_rank
 
 def deprecate(func):
     """
@@ -313,7 +314,7 @@
     from yt.config import ytcfg
     if ytcfg.getboolean("yt","suppressStreamLogging"):
         return DummyProgressBar()
-    elif ytcfg.getboolean("yt", "__parallel"):
+    elif ytcfg.getboolean("yt", "__topcomm_parallel"):
         return ParallelProgressBar(title, maxval)
     elif "SAGE_ROOT" in os.environ:
         try:
@@ -341,9 +342,13 @@
     handed back.
     """
     from yt.config import ytcfg
+    if kwargs.pop("global_rootonly", False):
+        cfg_option = "__global_parallel_rank"
+    else:
+        cfg_option = "__topcomm_parallel_rank"
     if not ytcfg.getboolean("yt","__parallel"):
         return func(*args,**kwargs)
-    if ytcfg.getint("yt","__parallel_rank") > 0: return
+    if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
 #
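
The rewritten rootonly decorator defers the rank check to call time instead of deciding once at decoration time, and it now consults the topcomm rank, so functions decorated at import still short-circuit correctly once a subcommunicator is in place.  A hedged usage sketch; write_summary is a hypothetical function:

    from yt.funcs import rootonly

    @rootonly
    def write_summary(path):
        # Runs only where __topcomm_parallel_rank == 0; on every other rank
        # the wrapper returns None without touching the filesystem.
        open(path, "w").write("done\n")

    write_summary("summary.txt")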


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:36:12 2011 -0400
@@ -51,8 +51,8 @@
     if parallel_capable:
         mylog.info("Parallel computation enabled: %s / %s",
                    MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__parallel_size"] = str(MPI.COMM_WORLD.size)
+        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
         ytcfg["yt","__parallel"] = "True"
         if exe_name == "embed_enzo" or \
             ("_parallel" in dir(sys) and sys._parallel == True):
@@ -129,7 +129,7 @@
         if hasattr(gs[0], 'proc_num'):
             # This one sort of knows about MPI, but not quite
             self._objs = [g for g in gs if g.proc_num ==
-                          ytcfg.getint('yt','__parallel_rank')]
+                          ytcfg.getint('yt','__topcomm_parallel_rank')]
             self._use_all = True
         else:
             self._objs = gs
@@ -859,7 +859,8 @@
 
 communication_system = CommunicationSystem()
 if parallel_capable:
-    communication_system.communicators.append(Communicator(MPI.COMM_WORLD))
+    ranks = na.arange(MPI.COMM_WORLD.size)
+    communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
     comm = None


--- a/yt/utilities/performance_counters.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/utilities/performance_counters.py	Wed Oct 19 13:36:12 2011 -0400
@@ -125,8 +125,8 @@
     def write_out(self, filename_prefix):
         if ytcfg.getboolean("yt","__parallel"):
             pfn = "%s_%03i_%03i" % (filename_prefix,
-                     ytcfg.getint("yt", "__parallel_rank"),
-                    ytcfg.getint("yt", "__parallel_size"))
+                     ytcfg.getint("yt", "__global_parallel_rank"),
+                    ytcfg.getint("yt", "__global_parallel_size"))
         else:
             pfn = "%s" % (filename_prefix)
         for n, p in sorted(self.profilers.items()):


--- a/yt/utilities/rpdb.py	Wed Oct 19 13:36:04 2011 -0400
+++ b/yt/utilities/rpdb.py	Wed Oct 19 13:36:12 2011 -0400
@@ -53,8 +53,8 @@
 
 def rpdb_excepthook(exc_type, exc, tb):
     traceback.print_exception(exc_type, exc, tb)
-    task = ytcfg.getint("yt", "__parallel_rank")
-    size = ytcfg.getint("yt", "__parallel_size")
+    task = ytcfg.getint("yt", "__global_parallel_rank")
+    size = ytcfg.getint("yt", "__global_parallel_size")
     print "Starting RPDB server on task %s ; connect with 'yt rpdb %s'" \
             % (task,task)
     handler = pdb_handler(tb)


http://bitbucket.org/yt_analysis/yt/changeset/5ec520c264b9/
changeset:   5ec520c264b9
branch:      yt
user:        samskillman
date:        2011-10-19 20:16:21
summary:     More fixes in moving to subcommunicators.
affected #:  6 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 13:36:12 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 14:16:21 2011 -0400
@@ -1849,7 +1849,7 @@
         topbounds = na.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
-            self.comm.partition_hierarchy_3d(ds=self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         # also get the total mass of particles
         yt_counters("Reading Data")
@@ -1859,7 +1859,7 @@
         if ytcfg.getboolean("yt","inline") == False and \
             resize and self.comm.size != 1 and subvolume is None:
             random.seed(self.comm.rank)
-            cut_list = self.comm.partition_hierarchy_3d_bisection_list()
+            cut_list = self.partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
             if self.comm.rank == 0:
@@ -1945,7 +1945,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
-                self.comm.partition_hierarchy_3d(ds=self._data_source,
+                self.partition_hierarchy_3d(ds=self._data_source,
                 padding=0.)
         self.bounds = (LE, RE)
         (LE_padding, RE_padding) = self.padding
@@ -2108,7 +2108,7 @@
         # a small part is actually going to be used.
         self.padding = 0.0
         padded, LE, RE, self._data_source = \
-            self.comm.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+            self.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
         if dm_only:
             select = self._get_dm_indices()
@@ -2124,7 +2124,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         self.padding = padding #* pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
-            self.comm.partition_hierarchy_3d(ds = self._data_source,
+            self.partition_hierarchy_3d(ds = self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary
@@ -2192,7 +2192,7 @@
         self.padding = 0.0 #* pf["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
-            self.comm.partition_hierarchy_3d(ds=self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
             n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
@@ -2210,7 +2210,7 @@
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         padded, LE, RE, self._data_source = \
-            self.comm.partition_hierarchy_3d(ds=self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 13:36:12 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Wed Oct 19 14:16:21 2011 -0400
@@ -161,7 +161,7 @@
             ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
                 self.right_edge)
             padded, self.LE, self.RE, self.ds = \
-            self.comm.partition_hierarchy_3d(ds = ds, padding=0.,
+            self.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)
         else:
             self.left_edge = left_edge
@@ -169,10 +169,10 @@
             # We do this twice, first with no 'buffer' to get the unbuffered
             # self.LE/RE, and then second to get a buffered self.ds.
             padded, self.LE, self.RE, temp = \
-                self.comm.partition_region_3d(left_edge, right_edge,
+                self.partition_region_3d(left_edge, right_edge,
                     rank_ratio=self.vol_ratio)
             padded, temp, temp, self.ds = \
-                self.comm.partition_region_3d(left_edge - self.lengths[-1], \
+                self.partition_region_3d(left_edge - self.lengths[-1], \
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge


--- a/yt/data_objects/data_containers.py	Wed Oct 19 13:36:12 2011 -0400
+++ b/yt/data_objects/data_containers.py	Wed Oct 19 14:16:21 2011 -0400
@@ -1841,7 +1841,7 @@
 
     def _initialize_source(self, source = None):
         if source is None:
-            check, source = self.comm.partition_hierarchy_2d(self.axis)
+            check, source = self.partition_hierarchy_2d(self.axis)
             self._check_region = check
             #self._okay_to_serialize = (not check)
         else:


--- a/yt/funcs.py	Wed Oct 19 13:36:12 2011 -0400
+++ b/yt/funcs.py	Wed Oct 19 14:16:21 2011 -0400
@@ -314,7 +314,7 @@
     from yt.config import ytcfg
     if ytcfg.getboolean("yt","suppressStreamLogging"):
         return DummyProgressBar()
-    elif ytcfg.getboolean("yt", "__topcomm_parallel"):
+    elif ytcfg.getboolean("yt", "__parallel"):
         return ParallelProgressBar(title, maxval)
     elif "SAGE_ROOT" in os.environ:
         try:


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 13:36:12 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Wed Oct 19 14:16:21 2011 -0400
@@ -30,7 +30,8 @@
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.utilities.amr_utils import PartitionedGrid, kdtree_get_choices
 from yt.utilities.performance_counters import yt_counters, time_function
-import yt.utilities.parallel_tools.parallel_analysis_interface as PT
+from yt.utilities.parallel_tools.parallel_analysis_interface \
+    import ParallelAnalysisInterface 
 from copy import deepcopy
 from yt.config import ytcfg
 from time import time
@@ -286,6 +287,7 @@
         'split_pos': 0.5}
 
         """
+        ParallelAnalysisInterface.__init__(self)
         self.current_split_dim = 0
 
         self.pf = pf


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 13:36:12 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:16:21 2011 -0400
@@ -288,6 +288,10 @@
 
 class CommunicationSystem(object):
     communicators = []
+
+    def __init__(self):
+        self.communicators.append(Communicator(MPI.COMM_WORLD))
+
     def push(self, size=None, ranks=None):
         if size is None:
             size = len(available_ranks)
@@ -302,7 +306,7 @@
         return new_comm
 
     def push_with_ids(self, ids):
-        group = self.communicators[-1].comm.Group.Incl(ids)
+        group = self.communicators[-1].comm.Get_group().Incl(ids)
         new_comm = self.communicators[-1].comm.Create(group)
         self.communicators.append(Communicator(new_comm))
         return new_comm
@@ -316,7 +320,7 @@
     _distributed = None
     __tocast = 'c'
 
-    def __init__(self, comm=MPI.COMM_WORLD):
+    def __init__(self, comm=None):
         self.comm = comm
         self._distributed = self.comm.size > 1
     """
@@ -324,145 +328,6 @@
     functions for analyzing something in parallel.
     """
 
-    def partition_hierarchy_2d(self, axis):
-        if not self._distributed:
-           return False, self.hierarchy.grid_collection(self.center, 
-                                                        self.hierarchy.grids)
-
-        xax, yax = x_dict[axis], y_dict[axis]
-        cc = MPI.Compute_dims(self.comm.size, 2)
-        mi = self.comm.rank
-        cx, cy = na.unravel_index(mi, cc)
-        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
-
-        DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        LE = na.ones(3, dtype='float64') * DLE
-        RE = na.ones(3, dtype='float64') * DRE
-        LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
-        RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
-        LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
-        RE[yax] = y[1] * (DRE[yax]-DLE[yax]) + DLE[yax]
-        mylog.debug("Dimensions: %s %s", LE, RE)
-
-        reg = self.hierarchy.region_strict(self.center, LE, RE)
-        return True, reg
-
-    def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
-        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
-        # We need to establish if we're looking at a subvolume, in which case
-        # we *do* want to pad things.
-        if (LE == self.pf.domain_left_edge).all() and \
-                (RE == self.pf.domain_right_edge).all():
-            subvol = False
-        else:
-            subvol = True
-        if not self._distributed and not subvol:
-            return False, LE, RE, ds
-        if not self._distributed and subvol:
-            return True, LE, RE, \
-            self.hierarchy.periodic_region_strict(self.center,
-                LE-padding, RE+padding)
-        elif ytcfg.getboolean("yt", "inline"):
-            # At this point, we want to identify the root grid tile to which
-            # this processor is assigned.
-            # The only way I really know how to do this is to get the level-0
-            # grid that belongs to this processor.
-            grids = self.pf.h.select_grids(0)
-            root_grids = [g for g in grids
-                          if g.proc_num == self.comm.rank]
-            if len(root_grids) != 1: raise RuntimeError
-            #raise KeyError
-            LE = root_grids[0].LeftEdge
-            RE = root_grids[0].RightEdge
-            return True, LE, RE, self.hierarchy.region(self.center, LE, RE)
-
-        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
-        mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
-
-        if padding > 0:
-            return True, \
-                LE, RE, self.hierarchy.periodic_region_strict(self.center,
-                LE-padding, RE+padding)
-
-        return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
-
-    def partition_region_3d(self, left_edge, right_edge, padding=0.0,
-            rank_ratio = 1):
-        """
-        Given a region, it subdivides it into smaller regions for parallel
-        analysis.
-        """
-        LE, RE = left_edge[:], right_edge[:]
-        if not self._distributed:
-            return LE, RE, re
-        
-        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
-        mi = self.comm.rank % (self.comm.size / rank_ratio)
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
-        LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        RE = na.array([x[1], y[1], z[1]], dtype='float64')
-
-        if padding > 0:
-            return True, \
-                LE, RE, self.hierarchy.periodic_region(self.center, LE-padding,
-                    RE+padding)
-
-        return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
-
-    def partition_hierarchy_3d_bisection_list(self):
-        """
-        Returns an array that is used to drive _partition_hierarchy_3d_bisection,
-        below.
-        """
-
-        def factor(n):
-            if n == 1: return [1]
-            i = 2
-            limit = n**0.5
-            while i <= limit:
-                if n % i == 0:
-                    ret = factor(n/i)
-                    ret.append(i)
-                    return ret
-                i += 1
-            return [n]
-
-        cc = MPI.Compute_dims(self.comm.size, 3)
-        si = self.comm.size
-        
-        factors = factor(si)
-        xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
-        
-        # Each entry of cuts is a two element list, that is:
-        # [cut dim, number of cuts]
-        cuts = []
-        # The higher cuts are in the beginning.
-        # We're going to do our best to make the cuts cyclic, i.e. x, then y,
-        # then z, etc...
-        lastdim = 0
-        for f in factors:
-            nextdim = (lastdim + 1) % 3
-            while True:
-                if f in xyzfactors[nextdim]:
-                    cuts.append([nextdim, f])
-                    topop = xyzfactors[nextdim].index(f)
-                    temp = xyzfactors[nextdim].pop(topop)
-                    lastdim = nextdim
-                    break
-                nextdim = (nextdim + 1) % 3
-        return cuts
 
     def barrier(self):
         if not self._distributed: return
@@ -471,7 +336,7 @@
 
     def mpi_exit_test(self, data=False):
         # data==True -> exit. data==False -> no exit
-        mine, statuses = self.comm.mpi_info_dict(data)
+        mine, statuses = self.mpi_info_dict(data)
         if True in statuses.values():
             raise RuntimeError("Fatal error. Exiting.")
         return None
@@ -905,4 +770,143 @@
         pass
 
 
+    def partition_hierarchy_2d(self, axis):
+        if not self._distributed:
+           return False, self.hierarchy.grid_collection(self.center, 
+                                                        self.hierarchy.grids)
+
+        xax, yax = x_dict[axis], y_dict[axis]
+        cc = MPI.Compute_dims(self.comm.size, 2)
+        mi = self.comm.rank
+        cx, cy = na.unravel_index(mi, cc)
+        x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
+        y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
+
+        DLE, DRE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
+        LE = na.ones(3, dtype='float64') * DLE
+        RE = na.ones(3, dtype='float64') * DRE
+        LE[xax] = x[0] * (DRE[xax]-DLE[xax]) + DLE[xax]
+        RE[xax] = x[1] * (DRE[xax]-DLE[xax]) + DLE[xax]
+        LE[yax] = y[0] * (DRE[yax]-DLE[yax]) + DLE[yax]
+        RE[yax] = y[1] * (DRE[yax]-DLE[yax]) + DLE[yax]
+        mylog.debug("Dimensions: %s %s", LE, RE)
+
+        reg = self.hierarchy.region_strict(self.center, LE, RE)
+        return True, reg
+
+    def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
+        LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
+        # We need to establish if we're looking at a subvolume, in which case
+        # we *do* want to pad things.
+        if (LE == self.pf.domain_left_edge).all() and \
+                (RE == self.pf.domain_right_edge).all():
+            subvol = False
+        else:
+            subvol = True
+        if not self._distributed and not subvol:
+            return False, LE, RE, ds
+        if not self._distributed and subvol:
+            return True, LE, RE, \
+            self.hierarchy.periodic_region_strict(self.center,
+                LE-padding, RE+padding)
+        elif ytcfg.getboolean("yt", "inline"):
+            # At this point, we want to identify the root grid tile to which
+            # this processor is assigned.
+            # The only way I really know how to do this is to get the level-0
+            # grid that belongs to this processor.
+            grids = self.pf.h.select_grids(0)
+            root_grids = [g for g in grids
+                          if g.proc_num == self.comm.rank]
+            if len(root_grids) != 1: raise RuntimeError
+            #raise KeyError
+            LE = root_grids[0].LeftEdge
+            RE = root_grids[0].RightEdge
+            return True, LE, RE, self.hierarchy.region(self.center, LE, RE)
+
+        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+        mi = self.comm.rank % (self.comm.size / rank_ratio)
+        cx, cy, cz = na.unravel_index(mi, cc)
+        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+
+        LE = na.array([x[0], y[0], z[0]], dtype='float64')
+        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+
+        if padding > 0:
+            return True, \
+                LE, RE, self.hierarchy.periodic_region_strict(self.center,
+                LE-padding, RE+padding)
+
+        return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
+
+    def partition_region_3d(self, left_edge, right_edge, padding=0.0,
+            rank_ratio = 1):
+        """
+        Given a region, it subdivides it into smaller regions for parallel
+        analysis.
+        """
+        LE, RE = left_edge[:], right_edge[:]
+        if not self._distributed:
+            return LE, RE, re
+        
+        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+        mi = self.comm.rank % (self.comm.size / rank_ratio)
+        cx, cy, cz = na.unravel_index(mi, cc)
+        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+
+        LE = na.array([x[0], y[0], z[0]], dtype='float64')
+        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+
+        if padding > 0:
+            return True, \
+                LE, RE, self.hierarchy.periodic_region(self.center, LE-padding,
+                    RE+padding)
+
+        return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
+
+    def partition_hierarchy_3d_bisection_list(self):
+        """
+        Returns an array that is used to drive _partition_hierarchy_3d_bisection,
+        below.
+        """
+
+        def factor(n):
+            if n == 1: return [1]
+            i = 2
+            limit = n**0.5
+            while i <= limit:
+                if n % i == 0:
+                    ret = factor(n/i)
+                    ret.append(i)
+                    return ret
+                i += 1
+            return [n]
+
+        cc = MPI.Compute_dims(self.comm.size, 3)
+        si = self.comm.size
+        
+        factors = factor(si)
+        xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
+        
+        # Each entry of cuts is a two element list, that is:
+        # [cut dim, number of cuts]
+        cuts = []
+        # The higher cuts are in the beginning.
+        # We're going to do our best to make the cuts cyclic, i.e. x, then y,
+        # then z, etc...
+        lastdim = 0
+        for f in factors:
+            nextdim = (lastdim + 1) % 3
+            while True:
+                if f in xyzfactors[nextdim]:
+                    cuts.append([nextdim, f])
+                    topop = xyzfactors[nextdim].index(f)
+                    temp = xyzfactors[nextdim].pop(topop)
+                    lastdim = nextdim
+                    break
+                nextdim = (nextdim + 1) % 3
+        return cuts
     

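The partition helpers above carve the domain into one brick per rank: MPI.Compute_dims factors the rank count into a 3-D processor grid, na.unravel_index maps a rank onto a (cx, cy, cz) slot, and na.mgrid with an imaginary step builds the cut planes bracketing that slot. A minimal, MPI-free sketch of the same mapping, with the processor-grid shape hard-coded to what MPI.Compute_dims(8, 3) would return and a purely illustrative rank:

import numpy as np

cc = [2, 2, 2]                       # what MPI.Compute_dims(8, 3) would return
rank = 5                             # illustrative rank id
LE, RE = np.zeros(3), np.ones(3)     # domain edges in code units

# Map the flat rank id onto a (cx, cy, cz) slot in the processor grid.
cx, cy, cz = np.unravel_index(rank, cc)

# mgrid with an imaginary step builds cc[i]+1 evenly spaced cut planes;
# each rank keeps the pair of planes bracketing its slot along each axis.
x = np.mgrid[LE[0]:RE[0]:(cc[0] + 1) * 1j][cx:cx + 2]
y = np.mgrid[LE[1]:RE[1]:(cc[1] + 1) * 1j][cy:cy + 2]
z = np.mgrid[LE[2]:RE[2]:(cc[2] + 1) * 1j][cz:cz + 2]

sub_LE = np.array([x[0], y[0], z[0]], dtype='float64')
sub_RE = np.array([x[1], y[1], z[1]], dtype='float64')
print(sub_LE, sub_RE)                # -> [0.5 0.  0.5] [1.  0.5 1. ]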

http://bitbucket.org/yt_analysis/yt/changeset/dc29a5b596a9/
changeset:   dc29a5b596a9
branch:      yt
user:        MatthewTurk
date:        2011-10-19 20:42:18
summary:     A couple of changes necessary to get the parallel_objects routine to
work, which it now seems to do.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:16:21 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:42:18 2011 -0400
@@ -278,9 +278,12 @@
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
     my_rank = my_communicator.rank
-    all_new_comms = na.arange(my_size)
-    my_new_id = int(my_rank / njobs)
-    communication_system.push_with_ids(all_new_comms[my_new_id])
+    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    for i,comm_set in enumerate(all_new_comms):
+        if my_rank in comm_set:
+            my_new_id = i
+            break
+    communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
 
     for obj in objects[my_new_id::njobs]:
         yield obj

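For reference, this is the rank-to-job mapping the patch introduces, sketched without MPI: na.array_split carves the ranks into njobs contiguous groups, and each group then walks a strided subset of the work items. All values here are illustrative.

import numpy as np

size, njobs = 8, 3
objects = ["obj%d" % i for i in range(7)]        # stand-ins for the real work items

# Split the ranks into njobs contiguous groups, exactly as the patch does.
all_new_comms = np.array_split(np.arange(size), njobs)

for my_rank in range(size):
    for i, comm_set in enumerate(all_new_comms):
        if my_rank in comm_set:
            my_new_id = i
            break
    # Each group iterates over a strided slice of the objects.
    print(my_rank, "-> group", my_new_id, objects[my_new_id::njobs])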

http://bitbucket.org/yt_analysis/yt/changeset/3d9f42358205/
changeset:   3d9f42358205
branch:      yt
user:        MatthewTurk
date:        2011-10-19 21:36:27
summary:     Setting the topcomm values.  Also raise NotImplementedError for a push() op.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:42:18 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:36:27 2011 -0400
@@ -296,6 +296,7 @@
         self.communicators.append(Communicator(MPI.COMM_WORLD))
 
     def push(self, size=None, ranks=None):
+        raise NotImplementedError
         if size is None:
             size = len(available_ranks)
         if len(available_ranks) < size:
@@ -311,6 +312,9 @@
     def push_with_ids(self, ids):
         group = self.communicators[-1].comm.Get_group().Incl(ids)
         new_comm = self.communicators[-1].comm.Create(group)
+        from yt.config import ytcfg
+        ytcfg["yt","__topcomm_parallel_size"] = new_comm.size
+        ytcfg["yt","__topcomm_parallel_rank"] = new_comm.rank
         self.communicators.append(Communicator(new_comm))
         return new_comm
 

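A hedged, stand-alone mpi4py sketch (not yt code) of the push_with_ids pattern above: an explicit rank list becomes a group, the group becomes a new communicator, and only ranks in the list get a usable communicator back. Run it under mpirun with a few ranks to see the renumbering.

from mpi4py import MPI

comm = MPI.COMM_WORLD
ids = list(range(max(comm.size // 2, 1)))   # illustrative: the first half of the ranks

group = comm.Get_group().Incl(ids)          # subset of ranks by explicit id list
new_comm = comm.Create(group)               # collective call over comm

if comm.rank in ids:
    # Ranks outside `ids` receive MPI.COMM_NULL rather than a usable communicator.
    print("world rank", comm.rank, "-> new rank", new_comm.rank, "of", new_comm.size)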

http://bitbucket.org/yt_analysis/yt/changeset/405309e547a5/
changeset:   405309e547a5
branch:      yt
user:        samskillman
date:        2011-10-19 20:35:40
summary:     A fix for when running in serial.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:16:21 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:35:40 2011 -0400
@@ -290,8 +290,10 @@
     communicators = []
 
     def __init__(self):
-        self.communicators.append(Communicator(MPI.COMM_WORLD))
-
+        if parallel_capable:
+            self.communicators.append(Communicator(MPI.COMM_WORLD))
+        else:
+            self.communicators.append(Communicator(None))
     def push(self, size=None, ranks=None):
         if size is None:
             size = len(available_ranks)
@@ -322,13 +324,12 @@
 
     def __init__(self, comm=None):
         self.comm = comm
-        self._distributed = self.comm.size > 1
+        self._distributed = comm is not None and self.comm.size > 1
     """
     This is an interface specification providing several useful utility
     functions for analyzing something in parallel.
     """
 
-
     def barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)


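The serial fix is essentially a null-object pattern: a Communicator wrapped around None reports itself as non-distributed, so collective calls fall through harmlessly. A minimal sketch of the idea, using an assumed class name (MiniCommunicator) rather than yt's actual class:

class MiniCommunicator(object):
    """Wrap an optional MPI communicator; degrade to no-ops in serial."""
    def __init__(self, comm=None):
        self.comm = comm
        # Mirrors the patched check: distributed only with a real, >1-rank comm.
        self._distributed = comm is not None and comm.size > 1

    def barrier(self):
        if not self._distributed:
            return
        self.comm.Barrier()

serial = MiniCommunicator(None)   # what CommunicationSystem now builds in serial
serial.barrier()                  # safely does nothing
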
http://bitbucket.org/yt_analysis/yt/changeset/a71d19ad1162/
changeset:   a71d19ad1162
branch:      yt
user:        MatthewTurk
date:        2011-10-19 20:45:01
summary:     Merging
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:42:18 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:45:01 2011 -0400
@@ -293,8 +293,10 @@
     communicators = []
 
     def __init__(self):
-        self.communicators.append(Communicator(MPI.COMM_WORLD))
-
+        if parallel_capable:
+            self.communicators.append(Communicator(MPI.COMM_WORLD))
+        else:
+            self.communicators.append(Communicator(None))
     def push(self, size=None, ranks=None):
         if size is None:
             size = len(available_ranks)
@@ -325,13 +327,12 @@
 
     def __init__(self, comm=None):
         self.comm = comm
-        self._distributed = self.comm.size > 1
+        self._distributed = comm is not None and self.comm.size > 1
     """
     This is an interface specification providing several useful utility
     functions for analyzing something in parallel.
     """
 
-
     def barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)


http://bitbucket.org/yt_analysis/yt/changeset/978fe33ebd2b/
changeset:   978fe33ebd2b
branch:      yt
user:        MatthewTurk
date:        2011-10-19 21:37:55
summary:     Merging
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:45:01 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:37:55 2011 -0400
@@ -298,6 +298,7 @@
         else:
             self.communicators.append(Communicator(None))
     def push(self, size=None, ranks=None):
+        raise NotImplementedError
         if size is None:
             size = len(available_ranks)
         if len(available_ranks) < size:
@@ -313,6 +314,9 @@
     def push_with_ids(self, ids):
         group = self.communicators[-1].comm.Get_group().Incl(ids)
         new_comm = self.communicators[-1].comm.Create(group)
+        from yt.config import ytcfg
+        ytcfg["yt","__topcomm_parallel_size"] = new_comm.size
+        ytcfg["yt","__topcomm_parallel_rank"] = new_comm.rank
         self.communicators.append(Communicator(new_comm))
         return new_comm
 


http://bitbucket.org/yt_analysis/yt/changeset/d3a33d25d8ff/
changeset:   d3a33d25d8ff
branch:      yt
user:        MatthewTurk
date:        2011-10-19 21:40:20
summary:     Make the ytcfg values into strings
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:37:55 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:40:20 2011 -0400
@@ -315,8 +315,8 @@
         group = self.communicators[-1].comm.Get_group().Incl(ids)
         new_comm = self.communicators[-1].comm.Create(group)
         from yt.config import ytcfg
-        ytcfg["yt","__topcomm_parallel_size"] = new_comm.size
-        ytcfg["yt","__topcomm_parallel_rank"] = new_comm.rank
+        ytcfg["yt","__topcomm_parallel_size"] = str(new_comm.size)
+        ytcfg["yt","__topcomm_parallel_rank"] = str(new_comm.rank)
         self.communicators.append(Communicator(new_comm))
         return new_comm
 

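One plausible reason for the str() cast, offered as an assumption rather than something stated in the diff: ytcfg appears to be backed by the standard-library ConfigParser, which expects string option values. A tiny illustration with a plain ConfigParser, not ytcfg itself:

try:                                    # Python 3
    from configparser import ConfigParser
except ImportError:                     # Python 2, matching the era of this commit
    from ConfigParser import ConfigParser

cfg = ConfigParser()
cfg.add_section("yt")
# Non-string values can raise a TypeError on set() (Python 3) or break value
# interpolation later (Python 2), so the integers are stringified first.
cfg.set("yt", "__topcomm_parallel_size", str(8))
cfg.set("yt", "__topcomm_parallel_rank", str(0))
print(cfg.get("yt", "__topcomm_parallel_size"))   # '8'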

http://bitbucket.org/yt_analysis/yt/changeset/cfe11b9d798d/
changeset:   cfe11b9d798d
branch:      yt
user:        brittonsmith
date:        2011-10-19 21:21:35
summary:     Moved mpi_maxdict_dict to be a function within parallel hop.
affected #:  2 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 14:45:01 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 15:21:35 2011 -0400
@@ -1067,11 +1067,78 @@
         """
         yt_counters("make_global_chain_densest_n")
         (self.top_keys, self.bot_keys, self.vals) = \
-            self.comm.mpi_maxdict_dict(self.chain_densest_n)
+            self.linearize_chain_dict(self.chain_densest_n)
         self.__max_memory()
         del self.chain_densest_n
         yt_counters("make_global_chain_densest_n")
-    
+
+    def linearize_chain_dict(self, data):
+        """
+        Similar to above, but finds maximums for dicts of dicts. This is
+        specifically for a part of chainHOP.
+        """
+        if not self._distributed:
+            top_keys = []
+            bot_keys = []
+            vals = []
+            for top_key in data:
+                for bot_key in data[top_key]:
+                    top_keys.append(top_key)
+                    bot_keys.append(bot_key)
+                    vals.append(data[top_key][bot_key])
+            top_keys = na.array(top_keys, dtype='int64')
+            bot_keys = na.array(bot_keys, dtype='int64')
+            vals = na.array(vals, dtype='float64')
+            return (top_keys, bot_keys, vals)
+        self.comm.barrier()
+        size = 0
+        top_keys = []
+        bot_keys = []
+        vals = []
+        for top_key in data:
+            for bot_key in data[top_key]:
+                top_keys.append(top_key)
+                bot_keys.append(bot_key)
+                vals.append(data[top_key][bot_key])
+        top_keys = na.array(top_keys, dtype='int64')
+        bot_keys = na.array(bot_keys, dtype='int64')
+        vals = na.array(vals, dtype='float64')
+        del data
+        if self.comm.rank == 0:
+            for i in range(1,self.comm.size):
+                size = self.comm.recv(source=i, tag=0)
+                mylog.info('Global Hash Table Merge %d of %d size %d' % \
+                    (i,self.comm.size, size))
+                recv_top_keys = na.empty(size, dtype='int64')
+                recv_bot_keys = na.empty(size, dtype='int64')
+                recv_vals = na.empty(size, dtype='float64')
+                self.comm.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
+                self.comm.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
+                self.comm.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
+                top_keys = na.concatenate([top_keys, recv_top_keys])
+                bot_keys = na.concatenate([bot_keys, recv_bot_keys])
+                vals = na.concatenate([vals, recv_vals])
+        else:
+            size = top_keys.size
+            self.comm.send(size, dest=0, tag=0)
+            self.comm.Send([top_keys, MPI.LONG], dest=0, tag=0)
+            self.comm.Send([bot_keys, MPI.LONG], dest=0, tag=0)
+            self.comm.Send([vals, MPI.DOUBLE], dest=0, tag=0)
+        # We're going to decompose the dict into arrays, send that, and then
+        # reconstruct it. When data is too big the pickling of the dict fails.
+        if self.comm.rank == 0:
+            size = top_keys.size
+        # Broadcast them using array methods
+        size = self.comm.bcast(size, root=0)
+        if self.comm.rank != 0:
+            top_keys = na.empty(size, dtype='int64')
+            bot_keys = na.empty(size, dtype='int64')
+            vals = na.empty(size, dtype='float64')
+        self.comm.Bcast([top_keys,MPI.LONG], root=0)
+        self.comm.Bcast([bot_keys,MPI.LONG], root=0)
+        self.comm.Bcast([vals, MPI.DOUBLE], root=0)
+        return (top_keys, bot_keys, vals)
+
     def _build_groups(self):
         """
         With the collection of possible chain links, build groups.


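The block added above follows a common mpi4py idiom: gather variable-length arrays to rank 0 (sizes first, then the raw buffers), concatenate there, and broadcast the merged arrays back to everyone. A hedged stand-alone sketch of that idiom with made-up data (run it under mpirun; it is not yt code):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
local = np.arange(comm.rank + 1, dtype='int64')       # a different length on each rank

if comm.rank == 0:
    merged = local
    for src in range(1, comm.size):
        n = comm.recv(source=src, tag=0)              # pickled size, lowercase recv
        buf = np.empty(n, dtype='int64')
        comm.Recv([buf, MPI.LONG], source=src, tag=0) # raw buffer, uppercase Recv
        merged = np.concatenate([merged, buf])
else:
    comm.send(local.size, dest=0, tag=0)
    comm.Send([local, MPI.LONG], dest=0, tag=0)
    merged = None

# Broadcast the merged size, allocate on the other ranks, then fill the buffer.
n = comm.bcast(merged.size if comm.rank == 0 else None, root=0)
if comm.rank != 0:
    merged = np.empty(n, dtype='int64')
comm.Bcast([merged, MPI.LONG], root=0)
print(comm.rank, merged)
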
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 14:45:01 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:21:35 2011 -0400
@@ -345,73 +345,6 @@
             raise RuntimeError("Fatal error. Exiting.")
         return None
 
-    def mpi_maxdict_dict(self, data):
-        """
-        Similar to above, but finds maximums for dicts of dicts. This is
-        specificaly for a part of chainHOP.
-        """
-        if not self._distributed:
-            top_keys = []
-            bot_keys = []
-            vals = []
-            for top_key in data:
-                for bot_key in data[top_key]:
-                    top_keys.append(top_key)
-                    bot_keys.append(bot_key)
-                    vals.append(data[top_key][bot_key])
-            top_keys = na.array(top_keys, dtype='int64')
-            bot_keys = na.array(bot_keys, dtype='int64')
-            vals = na.array(vals, dtype='float64')
-            return (top_keys, bot_keys, vals)
-        self.comm.barrier()
-        size = 0
-        top_keys = []
-        bot_keys = []
-        vals = []
-        for top_key in data:
-            for bot_key in data[top_key]:
-                top_keys.append(top_key)
-                bot_keys.append(bot_key)
-                vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
-        del data
-        if self.comm.rank == 0:
-            for i in range(1,self.comm.size):
-                size = self.comm.recv(source=i, tag=0)
-                mylog.info('Global Hash Table Merge %d of %d size %d' % \
-                    (i,self.comm.size, size))
-                recv_top_keys = na.empty(size, dtype='int64')
-                recv_bot_keys = na.empty(size, dtype='int64')
-                recv_vals = na.empty(size, dtype='float64')
-                self.comm.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
-                self.comm.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
-                self.comm.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
-                top_keys = na.concatenate([top_keys, recv_top_keys])
-                bot_keys = na.concatenate([bot_keys, recv_bot_keys])
-                vals = na.concatenate([vals, recv_vals])
-        else:
-            size = top_keys.size
-            self.comm.send(size, dest=0, tag=0)
-            self.comm.Send([top_keys, MPI.LONG], dest=0, tag=0)
-            self.comm.Send([bot_keys, MPI.LONG], dest=0, tag=0)
-            self.comm.Send([vals, MPI.DOUBLE], dest=0, tag=0)
-        # We're going to decompose the dict into arrays, send that, and then
-        # reconstruct it. When data is too big the pickling of the dict fails.
-        if self.comm.rank == 0:
-            size = top_keys.size
-        # Broadcast them using array methods
-        size = self.comm.bcast(size, root=0)
-        if self.comm.rank != 0:
-            top_keys = na.empty(size, dtype='int64')
-            bot_keys = na.empty(size, dtype='int64')
-            vals = na.empty(size, dtype='float64')
-        self.comm.Bcast([top_keys,MPI.LONG], root=0)
-        self.comm.Bcast([bot_keys,MPI.LONG], root=0)
-        self.comm.Bcast([vals, MPI.DOUBLE], root=0)
-        return (top_keys, bot_keys, vals)
-
     @parallel_passthrough
     def par_combine_object(self, data, op, datatype = None):
         # op can be chosen from:


http://bitbucket.org/yt_analysis/yt/changeset/06aa5e5efddc/
changeset:   06aa5e5efddc
branch:      yt
user:        brittonsmith
date:        2011-10-19 21:47:02
summary:     Moved mpi_maxdict_dict from parallel_analysis_interface to
parallel_hop_interface and renamed it with a more descriptive name,
linearize_chain_dict.  Replaced the hand-rolled array concatenation with a
call to par_combine_object for each array.
affected #:  1 file (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 15:21:35 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 15:47:02 2011 -0400
@@ -1077,21 +1077,6 @@
         Similar to above, but finds maximums for dicts of dicts. This is
         specifically for a part of chainHOP.
         """
-        if not self._distributed:
-            top_keys = []
-            bot_keys = []
-            vals = []
-            for top_key in data:
-                for bot_key in data[top_key]:
-                    top_keys.append(top_key)
-                    bot_keys.append(bot_key)
-                    vals.append(data[top_key][bot_key])
-            top_keys = na.array(top_keys, dtype='int64')
-            bot_keys = na.array(bot_keys, dtype='int64')
-            vals = na.array(vals, dtype='float64')
-            return (top_keys, bot_keys, vals)
-        self.comm.barrier()
-        size = 0
         top_keys = []
         bot_keys = []
         vals = []
@@ -1103,40 +1088,13 @@
         top_keys = na.array(top_keys, dtype='int64')
         bot_keys = na.array(bot_keys, dtype='int64')
         vals = na.array(vals, dtype='float64')
-        del data
-        if self.comm.rank == 0:
-            for i in range(1,self.comm.size):
-                size = self.comm.recv(source=i, tag=0)
-                mylog.info('Global Hash Table Merge %d of %d size %d' % \
-                    (i,self.comm.size, size))
-                recv_top_keys = na.empty(size, dtype='int64')
-                recv_bot_keys = na.empty(size, dtype='int64')
-                recv_vals = na.empty(size, dtype='float64')
-                self.comm.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
-                self.comm.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
-                self.comm.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
-                top_keys = na.concatenate([top_keys, recv_top_keys])
-                bot_keys = na.concatenate([bot_keys, recv_bot_keys])
-                vals = na.concatenate([vals, recv_vals])
-        else:
-            size = top_keys.size
-            self.comm.send(size, dest=0, tag=0)
-            self.comm.Send([top_keys, MPI.LONG], dest=0, tag=0)
-            self.comm.Send([bot_keys, MPI.LONG], dest=0, tag=0)
-            self.comm.Send([vals, MPI.DOUBLE], dest=0, tag=0)
-        # We're going to decompose the dict into arrays, send that, and then
-        # reconstruct it. When data is too big the pickling of the dict fails.
-        if self.comm.rank == 0:
-            size = top_keys.size
-        # Broadcast them using array methods
-        size = self.comm.bcast(size, root=0)
-        if self.comm.rank != 0:
-            top_keys = na.empty(size, dtype='int64')
-            bot_keys = na.empty(size, dtype='int64')
-            vals = na.empty(size, dtype='float64')
-        self.comm.Bcast([top_keys,MPI.LONG], root=0)
-        self.comm.Bcast([bot_keys,MPI.LONG], root=0)
-        self.comm.Bcast([vals, MPI.DOUBLE], root=0)
+
+        data.clear()
+
+        top_keys = self.comm.par_combine_object(top_keys, datatype='array', op='cat')
+        bot_keys = self.comm.par_combine_object(bot_keys, datatype='array', op='cat')
+        vals     = self.comm.par_combine_object(vals, datatype='array', op='cat')
+
         return (top_keys, bot_keys, vals)
 
     def _build_groups(self):


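The refactor keeps only the serial core of the old routine: flatten the dict-of-dicts into three parallel arrays, then let par_combine_object(..., datatype='array', op='cat') concatenate them across ranks. A small sketch of just the flattening step, with illustrative input:

import numpy as np

# e.g. chain_densest_n[top_chain][bottom_chain] = boundary density
data = {0: {1: 3.5, 2: 1.25}, 4: {2: 0.75}}

top_keys, bot_keys, vals = [], [], []
for top_key in data:
    for bot_key in data[top_key]:
        top_keys.append(top_key)
        bot_keys.append(bot_key)
        vals.append(data[top_key][bot_key])

top_keys = np.array(top_keys, dtype='int64')
bot_keys = np.array(bot_keys, dtype='int64')
vals = np.array(vals, dtype='float64')
# In parallel, each of the three arrays is then passed through
# par_combine_object(arr, datatype='array', op='cat').
print(top_keys, bot_keys, vals)
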
http://bitbucket.org/yt_analysis/yt/changeset/e5fe9e7ed24e/
changeset:   e5fe9e7ed24e
branch:      yt
user:        brittonsmith
date:        2011-10-19 21:47:11
summary:     Merged.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:47:02 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 15:47:11 2011 -0400
@@ -298,6 +298,7 @@
         else:
             self.communicators.append(Communicator(None))
     def push(self, size=None, ranks=None):
+        raise NotImplementedError
         if size is None:
             size = len(available_ranks)
         if len(available_ranks) < size:
@@ -313,6 +314,9 @@
     def push_with_ids(self, ids):
         group = self.communicators[-1].comm.Get_group().Incl(ids)
         new_comm = self.communicators[-1].comm.Create(group)
+        from yt.config import ytcfg
+        ytcfg["yt","__topcomm_parallel_size"] = str(new_comm.size)
+        ytcfg["yt","__topcomm_parallel_rank"] = str(new_comm.rank)
         self.communicators.append(Communicator(new_comm))
         return new_comm
 


http://bitbucket.org/yt_analysis/yt/changeset/25f4c9c4417d/
changeset:   25f4c9c4417d
branch:      yt
user:        brittonsmith
date:        2011-10-19 22:21:34
summary:     Merged.
affected #:  14 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 15:47:11 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Wed Oct 19 16:21:34 2011 -0400
@@ -1028,19 +1028,14 @@
         else: ii = slice(None)
         self.particle_fields = {}
         for field in self._fields:
-            if ytcfg.getboolean("yt","inline") == False:
-                tot_part = self._data_source[field].size
-                if field == "particle_index":
-                    self.particle_fields[field] = self._data_source[field][ii].astype('int64')
-                else:
-                    self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+            tot_part = self._data_source[field].size
+            if field == "particle_index":
+                self.particle_fields[field] = self._data_source[field][ii].astype('int64')
             else:
-                tot_part = self._data_source[field].size
-                if field == "particle_index":
-                    self.particle_fields[field] = self._data_source[field][ii].astype('int64')
-                else:
-                    self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+                self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+            del self._data_source[field]
         self._base_indices = na.arange(tot_part)[ii]
+        gc.collect()
 
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
@@ -1412,14 +1407,20 @@
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
+
         self.comm.mpi_exit_test(exit)
+        # Try to do this in a memory conservative way.
+        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+            self.particle_fields['ParticleMassMsun'])
+        na.divide(self.particle_fields["particle_position_x"],
+            self.old_period[0], self.particle_fields["particle_position_x"])
+        na.divide(self.particle_fields["particle_position_y"],
+            self.old_period[1], self.particle_fields["particle_position_y"])
+        na.divide(self.particle_fields["particle_position_z"],
+            self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
-            self.particle_fields["particle_position_x"] / self.old_period[0],
-            self.particle_fields["particle_position_y"] / self.old_period[1],
-            self.particle_fields["particle_position_z"] / self.old_period[2],
-            self.particle_fields["particle_index"],
-            self.particle_fields["ParticleMassMsun"]/self.total_mass,
+            self.particle_fields,
             self.threshold, rearrange=self.rearrange, premerge=self.premerge)
         self.densities, self.tags = obj.density, obj.chainID
         # I'm going to go ahead and delete self.densities because it's not
@@ -1446,15 +1447,12 @@
         yt_counters("Precomp bulk vel.")
         self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
-        pm = self.particle_fields["ParticleMassMsun"]
-        if ytcfg.getboolean("yt","inline") == False:
-            xv = self._data_source["particle_velocity_x"][self._base_indices]
-            yv = self._data_source["particle_velocity_y"][self._base_indices]
-            zv = self._data_source["particle_velocity_z"][self._base_indices]
-        else:
-            xv = self._data_source["particle_velocity_x"][self._base_indices]
-            yv = self._data_source["particle_velocity_y"][self._base_indices]
-            zv = self._data_source["particle_velocity_z"][self._base_indices]
+        pm = obj.mass
+        # Fix this back to un-normalized units.
+        na.multiply(pm, self.total_mass, pm)
+        xv = self._data_source["particle_velocity_x"][self._base_indices]
+        yv = self._data_source["particle_velocity_y"][self._base_indices]
+        zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
@@ -1514,6 +1512,7 @@
         self.taskID = obj.mine
         self.halo_taskmap = obj.halo_taskmap # A defaultdict.
         del obj
+        gc.collect()
         yt_counters("Precomp bulk vel.")
 
     def _parse_output(self):
@@ -1780,7 +1779,8 @@
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
     def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
-        fancy_padding=True, safety=1.5, premerge=True, sample=0.03):
+        fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
+        total_mass=None, num_particles=None):
         r"""Parallel HOP halo finder.
         
         Halos are built by:
@@ -1829,6 +1829,23 @@
         sample : float
             The fraction of the full dataset on which load-balancing is
             performed. Default = 0.03.
+        total_mass : float
+            If HOP is run on the same dataset multiple times, the total mass
+            of particles in Msun units in the full volume can be supplied here
+            to save time.
+            This must correspond to the particles being operated on, meaning
+            if stars are included in the halo finding, they must be included
+            in this mass as well, and vice versa.
+            If halo finding is run on a subvolume, this still corresponds to
+            the mass in the entire volume.
+            Default = None, which means the total mass is automatically
+            calculated.
+        num_particles : integer
+            The total number of particles in the volume, in the same fashion
+            as `total_mass` is calculated. Specifying this turns off
+            fancy_padding.
+            Default = None, which means the number of particles is
+            automatically calculated.
         
         Examples
         --------
@@ -1874,9 +1891,7 @@
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
-        if ytcfg.getboolean("yt","inline") == False:
-            data = self._data_source["particle_position_x"]
-        else:
+        if num_particles is None:
             data = self._data_source["particle_position_x"]
         try:
             l = self._data_source.right_edge - self._data_source.left_edge
@@ -1885,14 +1900,16 @@
         vol = l[0] * l[1] * l[2]
         full_vol = vol
         # We will use symmetric padding when a subvolume is being used.
-        if not fancy_padding or subvolume is not None:
-            avg_spacing = (float(vol) / data.size)**(1./3.)
+        if not fancy_padding or subvolume is not None or num_particles is not None:
+            if num_particles is None:
+                num_particles = data.size
+            avg_spacing = (float(vol) / num_particles)**(1./3.)
             # padding is a function of inter-particle spacing, this is an
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
             self.padding = (na.ones(3,dtype='float64')*padding, na.ones(3,dtype='float64')*padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
-                (str(self.padding), avg_spacing, vol, data.size))
+                (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
             LE_padding, RE_padding = na.empty(3,dtype='float64'), na.empty(3,dtype='float64')
@@ -1936,8 +1953,9 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
-                                         op='sum')
+        if total_mass is None:
+            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+                                                 op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
         # If we're using a subvolume, we now re-divide.
@@ -2057,7 +2075,7 @@
 
 class HOPHaloFinder(GenericHaloFinder, HOPHaloList):
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
-            padding=0.02):
+            padding=0.02, total_mass=None):
         r"""HOP halo finder.
         
         Halos are built by:
@@ -2091,6 +2109,17 @@
             with duplicated particles for halo finding to work. This number
             must be no smaller than the radius of the largest halo in the box
             in code units. Default = 0.02.
+        total_mass : float
+            If HOP is run on the same dataset multiple times, the total mass
+            of particles in Msun units in the full volume can be supplied here
+            to save time.
+            This must correspond to the particles being operated on, meaning
+            if stars are included in the halo finding, they must be included
+            in this mass as well, and vice versa.
+            If halo finding is run on a subvolume, this still corresponds to
+            the mass in the entire volume.
+            Default = None, which means the total mass is automatically
+            calculated.
         
         Examples
         --------
@@ -2110,12 +2139,13 @@
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
-        if dm_only:
-            select = self._get_dm_indices()
-            total_mass = \
-                self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
-        else:
-            total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
+        if total_mass is None:
+            if dm_only:
+                select = self._get_dm_indices()
+                total_mass = \
+                    self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+            else:
+                total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with

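In the num_particles path above, the padding depends only on the mean inter-particle spacing, so passing the particle count in up front avoids reading particle_position_x just to measure it. A worked example with illustrative numbers (the particle count and num_neighbors value are assumptions; safety=1.5 is the constructor default shown above):

num_particles = 64 ** 3          # illustrative count for the full box
vol = 1.0                        # volume of this task's region, code units
num_neighbors = 65               # hypothetical; not shown in this diff
safety = 1.5                     # default safety factor from the constructor

# Mean inter-particle spacing, padded by roughly the radius expected to
# enclose num_neighbors particles, scaled by the safety factor.
avg_spacing = (float(vol) / num_particles) ** (1.0 / 3.0)
padding = num_neighbors ** (1.0 / 3.0) * safety * avg_spacing
print(avg_spacing, padding)      # ~0.0156 and ~0.094 for these numbers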

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 15:47:11 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 16:21:34 2011 -0400
@@ -26,6 +26,7 @@
 from collections import defaultdict
 import itertools, sys
 import numpy as na
+import gc
 
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -43,7 +44,7 @@
 
 class ParallelHOPHaloFinder(ParallelAnalysisInterface):
     def __init__(self,period, padding, num_neighbors, bounds,
-            xpos, ypos, zpos, index, mass, threshold=160.0, rearrange=True,
+            particle_fields, threshold=160.0, rearrange=True,
             premerge=True):
         ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
@@ -55,12 +56,12 @@
         self.padding = padding
         self.num_neighbors = num_neighbors
         self.bounds = bounds
-        self.xpos = xpos
-        self.ypos = ypos
-        self.zpos = zpos
+        self.xpos = particle_fields.pop("particle_position_x")
+        self.ypos = particle_fields.pop("particle_position_y")
+        self.zpos = particle_fields.pop("particle_position_z")
         self.real_size = len(self.xpos)
-        self.index = na.array(index, dtype='int64')
-        self.mass = mass
+        self.index = particle_fields.pop("particle_index")
+        self.mass = particle_fields.pop("ParticleMassMsun")
         self.padded_particles = []
         self.nMerge = 4
         yt_counters("chainHOP")
@@ -242,7 +243,7 @@
         send_size = {}
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
-        send_count = len(na.where(self.is_inside_annulus == True)[0])
+        send_count = self.is_inside_annulus.sum()
         points = na.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
@@ -262,7 +263,7 @@
             send_real_indices[neighbor] = real_indices[is_inside].copy()
             send_points[neighbor] = shift_points[is_inside].copy()
             send_mass[neighbor] = mass[is_inside].copy()
-            send_size[neighbor] = len(na.where(is_inside == True)[0])
+            send_size[neighbor] = is_inside.sum()
         del points, shift_points, mass, real_indices
         yt_counters("Picking padding data to send.")
         # Communicate the sizes to send.
@@ -343,13 +344,22 @@
         yt_counters("init kd tree")
         # Yes, we really do need to initialize this many arrays.
         # They're deleted in _parallelHOP.
-        fKD.dens = na.asfortranarray(na.zeros(self.size, dtype='float64'))
+        fKD.dens = na.zeros(self.size, dtype='float64', order='F')
         fKD.mass = na.concatenate((self.mass, self.mass_pad))
-        fKD.pos = na.asfortranarray(na.empty((3, self.size), dtype='float64'))
+        del self.mass
+        fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
         # This actually copies the data into the fortran space.
-        fKD.pos[0, :] = na.concatenate((self.xpos, self.xpos_pad))
-        fKD.pos[1, :] = na.concatenate((self.ypos, self.ypos_pad))
-        fKD.pos[2, :] = na.concatenate((self.zpos, self.zpos_pad))
+        self.psize = self.xpos.size
+        fKD.pos[0, :self.psize] = self.xpos
+        fKD.pos[1, :self.psize] = self.ypos
+        fKD.pos[2, :self.psize] = self.zpos
+        del self.xpos, self.ypos, self.zpos
+        gc.collect()
+        fKD.pos[0, self.psize:] = self.xpos_pad
+        fKD.pos[1, self.psize:] = self.ypos_pad
+        fKD.pos[2, self.psize:] = self.zpos_pad
+        del self.xpos_pad, self.ypos_pad, self.zpos_pad
+        gc.collect()
         fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
         fKD.nn = self.num_neighbors
         # Plus 2 because we're looking for that neighbor, but only keeping 
@@ -1488,7 +1498,15 @@
         self._communicate_annulus_chainIDs()
         mylog.info('Connecting %d chains into groups...' % self.nchains)
         self._connect_chains()
+        self.mass = fKD.mass[:self.psize]
+        self.mass_pad = fKD.mass[self.psize:]
         del fKD.dens, fKD.mass, fKD.dens
+        self.xpos = fKD.pos[0, :self.psize]
+        self.ypos = fKD.pos[1, :self.psize]
+        self.zpos = fKD.pos[2, :self.psize]
+        self.xpos_pad = fKD.pos[0, self.psize:]
+        self.ypos_pad = fKD.pos[1, self.psize:]
+        self.zpos_pad = fKD.pos[2, self.psize:]
         del fKD.pos, fKD.chunk_tags
         free_tree(0) # Frees the kdtree object.
         del self.densestNN
@@ -1514,7 +1532,7 @@
             for groupID in self.I_own[taskID]:
                 self.halo_taskmap[groupID].add(taskID)
         del self.I_own
-        del self.mass, self.xpos, self.ypos, self.zpos
+        del self.xpos, self.ypos, self.zpos
 
     def __add_to_array(self, arr, key, value, type):
         """

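The kd-tree setup above now allocates fKD.pos directly in Fortran (column-major) order and fills it piecewise, instead of building a C-ordered array and copying it with asfortranarray; combined with the del/gc.collect() calls, that avoids holding two full copies of the positions at once. A small numpy sketch of the same pattern with made-up sizes:

import numpy as np

n_local, n_pad = 5, 3                         # illustrative particle counts
xpos = np.arange(n_local, dtype='float64')
ypos = xpos + 10.0
zpos = xpos + 20.0
xpos_pad = np.arange(n_pad, dtype='float64') + 100.0   # padded (ghost) particles

# Allocate the (3, N) buffer column-major up front, then fill it in slices,
# so each source array can be freed as soon as it has been copied in.
pos = np.empty((3, n_local + n_pad), dtype='float64', order='F')
pos[0, :n_local] = xpos
pos[1, :n_local] = ypos
pos[2, :n_local] = zpos
pos[0, n_local:] = xpos_pad                   # the y/z padded rows fill the same way
print(pos.flags['F_CONTIGUOUS'])              # True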

--- a/yt/frontends/nyx/data_structures.py	Wed Oct 19 15:47:11 2011 -0400
+++ b/yt/frontends/nyx/data_structures.py	Wed Oct 19 16:21:34 2011 -0400
@@ -76,7 +76,6 @@
 
     def _prepare_grid(self):
         """ Copies all the appropriate attributes from the hierarchy. """
-        # This is definitely the slowest part of generating the hierarchy
         h = self.hierarchy  # alias
         h.grid_levels[self.id, 0] = self.Level
         h.grid_left_edge[self.id,:] = self.LeftEdge[:]
@@ -94,11 +93,12 @@
         if len(pIDs) > 0:
             self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
         else:
+            # must be root grid
             self.Parent = None
 
     def _setup_dx(self):
         # So first we figure out what the index is. We don't assume that
-        # dx=dy=dz, at least here. We probably do elsewhere.
+        # dx=dy=dz here.
         id = self.id - self._id_offset
         if self.Parent is not None:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -132,7 +132,6 @@
         self.read_particle_header()
         self.__cache_endianness(self.levels[-1].grids[-1])
 
-        # @todo: should be first line
         AMRHierarchy.__init__(self, pf, self.data_style)
         self._setup_data_io()
         self._setup_field_list()
@@ -142,27 +141,27 @@
         """ Read the global header file for an Nyx plotfile output. """
         counter = 0
         header_file = open(header_path, 'r')
-        self.__global_header_lines = header_file.readlines()
+        self._global_header_lines = header_file.readlines()
 
         # parse the file
-        self.nyx_version = self.__global_header_lines[0].rstrip()
-        self.n_fields = int(self.__global_header_lines[1])
+        self.nyx_pf_version = self._global_header_lines[0].rstrip()
+        self.n_fields = int(self._global_header_lines[1])
 
         # why the 2?
         counter = self.n_fields + 2
         self.field_list = []
-        for i, line in enumerate(self.__global_header_lines[2:counter]):
+        for i, line in enumerate(self._global_header_lines[2:counter]):
             self.field_list.append(line.rstrip())
 
         # figure out dimensions and make sure it's 3D
-        self.dimension = int(self.__global_header_lines[counter])
+        self.dimension = int(self._global_header_lines[counter])
         if self.dimension != 3:
             raise RuntimeError("Current data is %iD. yt only supports Nyx data in 3D" % self.dimension)
 
         counter += 1
-        self.Time = float(self.__global_header_lines[counter])
+        self.Time = float(self._global_header_lines[counter])
         counter += 1
-        self.finest_grid_level = int(self.__global_header_lines[counter])
+        self.finest_grid_level = int(self._global_header_lines[counter])
         self.n_levels = self.finest_grid_level + 1
         counter += 1
 
@@ -171,28 +170,28 @@
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int, self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split() #na.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
-        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
+        self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
         counter += 1
-        self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
+        self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
 
         self.dx = na.zeros((self.n_levels, 3))
-        for i, line in enumerate(self.__global_header_lines[counter:counter + self.n_levels]):
+        for i, line in enumerate(self._global_header_lines[counter:counter + self.n_levels]):
             self.dx[i] = na.array(map(float, line.split()))
         counter += self.n_levels
-        self.geometry = int(self.__global_header_lines[counter])
+        self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
             raise RuntimeError("yt only supports cartesian coordinates.")
         counter += 1
 
         # @todo: this is just to debug. eventually it should go away.
-        linebreak = int(self.__global_header_lines[counter])
+        linebreak = int(self._global_header_lines[counter])
         if linebreak != 0:
             raise RuntimeError("INTERNAL ERROR! This should be a zero.")
         counter += 1
@@ -209,11 +208,11 @@
         data_files_finder = re.compile(data_files_pattern)
 
         for level in range(0, self.n_levels):
-            tmp = self.__global_header_lines[counter].split()
+            tmp = self._global_header_lines[counter].split()
             # should this be grid_time or level_time??
             lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
             counter += 1
-            nsteps = int(self.__global_header_lines[counter])
+            nsteps = int(self._global_header_lines[counter])
             counter += 1
             self.levels.append(NyxLevel(lev, ngrids))
 
@@ -227,10 +226,10 @@
             key_off = 0
             files = {}
             offsets = {}
-            while nfiles + tmp_offset < len(self.__global_header_lines) \
-                  and data_files_finder.match(self.__global_header_lines[nfiles + tmp_offset]):
-                filen = os.path.join(self.parameter_file.path, \
-                                     self.__global_header_lines[nfiles + tmp_offset].strip())
+            while nfiles + tmp_offset < len(self._global_header_lines) \
+                  and data_files_finder.match(self._global_header_lines[nfiles + tmp_offset]):
+                filen = os.path.join(self.parameter_file.path,
+                                     self._global_header_lines[nfiles + tmp_offset].strip())
                 # open each "_H" header file, and get the number of
                 # components within it
                 level_header_file = open(filen + '_H', 'r').read()
@@ -262,11 +261,11 @@
             for grid in range(0, ngrids):
                 gfn = fn[grid]  # filename of file containing this grid
                 gfo = off[grid] # offset within that file
-                xlo, xhi = map(float, self.__global_header_lines[counter].split())
+                xlo, xhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                ylo, yhi = map(float, self.__global_header_lines[counter].split())
+                ylo, yhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
-                zlo, zhi = map(float, self.__global_header_lines[counter].split())
+                zlo, zhi = map(float, self._global_header_lines[counter].split())
                 counter += 1
                 lo = na.array([xlo, ylo, zlo])
                 hi = na.array([xhi, yhi, zhi])
@@ -307,6 +306,7 @@
                                  for i in line.split()),
                                 dtype='int64',
                                 count=3*self.num_grids).reshape((self.num_grids, 3))
+        # we need grid_info in `populate_grid_objects`, so save it to self
         self.pgrid_info = grid_info
 
     def __cache_endianness(self, test_grid):
@@ -356,17 +356,17 @@
             g.NumberOfParticles = pg[1]
             g._particle_offset = pg[2]
 
-        self.grid_particle_count[:,0] = self.pgrid_info[:,1]
-        del self.pgrid_info  # if this is all pgrid_info is used for...
+        self.grid_particle_count[:, 0] = self.pgrid_info[:, 1]
+        del self.pgrid_info
 
         gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids, 1))
         grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]]
                                    for level in self.levels], axis=0)
 
-        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids, 1))
-        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids, 1))
-        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids, 1))
+        self.grid_dxs = grid_dcs[:, 0].reshape((self.num_grids, 1))
+        self.grid_dys = grid_dcs[:, 1].reshape((self.num_grids, 1))
+        self.grid_dzs = grid_dcs[:, 2].reshape((self.num_grids, 1))
 
         left_edges = []
         right_edges = []
@@ -381,7 +381,7 @@
         self.grid_dimensions = na.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]  # why the same thing twice?
-        self.gridTree = [ [] for i in range(self.num_grids)]  # meh
+        self.gridTree = [ [] for i in range(self.num_grids)]
 
         mylog.debug("Done creating grid objects")
 
@@ -389,7 +389,7 @@
         self.__setup_grid_tree()
 
         for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0:
+            if (i % 1e4) == 0:
                 mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
 
             grid._prepare_grid()
@@ -469,7 +469,7 @@
         pass
 
     def _setup_unknown_fields(self):
-        # Doesn't seem useful
+        # not sure what the case for this is.
         for field in self.field_list:
             if field in self.parameter_file.field_info: continue
             mylog.info("Adding %s to list of fields", field)
@@ -588,7 +588,6 @@
         Parses the parameter file and establishes the various dictionaries.
 
         """
-        # More boxlib madness...
         self._parse_header_file()
 
         if os.path.isfile(self.fparameter_file_path):
@@ -638,27 +637,24 @@
         self.domain_dimensions = self.parameters["TopGridDimensions"]
         self.refine_by = self.parameters.get("RefineBy", 2)  # 2 is silent default? Makes sense I suppose.
 
-        if self.parameters.has_key("ComovingCoordinates") \
-           and self.parameters["ComovingCoordinates"]:
-            self.cosmological_simulation = 1
-            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
-            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
-            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
+        # Nyx is always cosmological.
+        self.cosmological_simulation = 1
+        self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
+        self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
+        self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
 
-            # So broken. We will fix this in the new Nyx output format
-            a_file = open(os.path.join(self.path, "comoving_a"))
-            line = a_file.readline().strip()
-            a_file.close()
-            self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
-            self.cosmological_scale_factor = float(line)
+        # Read in the `comoving_a` file and parse the value. We should fix this
+        # in the new Nyx output format...
+        a_file = open(os.path.join(self.path, "comoving_a"))
+        a_string = a_file.readline().strip()
+        a_file.close()
 
-            # alias
-            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+        # Set the scale factor and redshift
+        self.cosmological_scale_factor = float(a_string)
+        self.parameters["CosmologyCurrentRedshift"] = 1 / float(a_string) - 1
 
-        else:
-            # @todo: automatic defaults
-            self.current_redshift = self.omega_lambda = self.omega_matter = \
-                self.hubble_constant = self.cosmological_simulation = 0.0
+        # alias
+        self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
 
     def _parse_header_file(self):
         """
@@ -668,13 +664,12 @@
         Currently, only Time is read here.
 
         """
-        # @todo: header filename option? probably not.
         header_file = open(os.path.join(self.path, "Header"))
         lines = header_file.readlines()  # hopefully this is small
         header_file.close()
 
         n_fields = int(lines[1])  # this could change
-        self.current_time = float(lines[3 + n_fields])  # very fragile
+        self.current_time = float(lines[3 + n_fields])  # fragile
 
     def _parse_fparameter_file(self):
         """
@@ -751,7 +746,6 @@
         self.time_units["days"] = seconds / (3600 * 24.0)
         self.time_units["years"] = seconds / (3600 * 24.0 * 365)
 
-
         # not the most useful right now, but someday
         for key in nyx_particle_field_names:
             self.conversion_factors[key] = 1.0

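The cosmology block above reads the scale factor from the plotfile's one-line comoving_a file and converts it to a redshift via z = 1/a - 1. A minimal sketch of just that conversion (the helper name and path handling are illustrative, not yt API):

import os

def read_nyx_scale_factor(output_path):
    """Return (scale factor, redshift) from a Nyx plotfile's comoving_a file."""
    with open(os.path.join(output_path, "comoving_a")) as a_file:
        a = float(a_file.readline().strip())
    return a, 1.0 / a - 1.0

# A comoving_a file containing "0.25" corresponds to redshift 3.0.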

--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx	Wed Oct 19 15:47:11 2011 -0400
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx	Wed Oct 19 16:21:34 2011 -0400
@@ -737,15 +737,33 @@
         dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
         cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
                         + ci[1] * (self.dims[2] + 1) + ci[2]
+        # The initial and final values can be linearly interpolated between; so
+        # we just have to calculate our initial and final values.
+        cdef np.float64_t slopes[6]
         for i in range(3):
-            cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
-            # this gets us dp as the current first sample position
-            pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
-            dp[i] = pos[i] - cell_left[i]
+            dp[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+            dp[i] -= ci[i] * self.dds[i] + self.left_edge[i]
             dp[i] *= self.idds[i]
             ds[i] = v_dir[i] * self.idds[i] * dt
-            local_dds[i] = v_dir[i] * dt
+        for i in range(self.n_fields):
+            slopes[i] = offset_interpolate(self.dims, dp,
+                            self.data[i] + offset)
+        for i in range(3):
+            dp[i] += ds[i] * tf.ns
+        cdef np.float64_t temp
+        for i in range(self.n_fields):
+            temp = slopes[i]
+            slopes[i] -= offset_interpolate(self.dims, dp,
+                             self.data[i] + offset)
+            slopes[i] *= -1.0/tf.ns
+            self.dvs[i] = temp
         if self.star_list != NULL:
+            for i in range(3):
+                cell_left[i] = ci[i] * self.dds[i] + self.left_edge[i]
+                # this gets us dp as the current first sample position
+                pos[i] = (enter_t + 0.5 * dt) * v_dir[i] + v_pos[i]
+                dp[i] -= tf.ns * ds[i]
+                local_dds[i] = v_dir[i] * dt
             ballq = kdtree_utils.kd_nearest_range3(
                 self.star_list, cell_left[0] + self.dds[0]*0.5,
                                 cell_left[1] + self.dds[1]*0.5,
@@ -753,15 +771,16 @@
                                 self.star_er + 0.9*self.dds[0])
                                             # ~0.866 + a bit
         for dti in range(tf.ns): 
-            for i in range(self.n_fields):
-                self.dvs[i] = offset_interpolate(self.dims, dp, self.data[i] + offset)
             #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
             #    continue
-            if self.star_list != NULL: self.add_stars(ballq, dt, pos, rgba)
+            if self.star_list != NULL:
+                self.add_stars(ballq, dt, pos, rgba)
+                for i in range(3):
+                    dp[i] += ds[i]
+                    pos[i] += local_dds[i]
             tf.eval_transfer(dt, self.dvs, rgba, grad)
-            for i in range(3):
-                dp[i] += ds[i]
-                pos[i] += local_dds[i]
+            for i in range(self.n_fields):
+                self.dvs[i] += slopes[i]
         if ballq != NULL: kdtree_utils.kd_res_free(ballq)
 
     @cython.boundscheck(False)

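The VolumeIntegrator change trades one trilinear interpolation per sub-sample for two per cell crossing: the field is interpolated at the first sample position and again one full segment later, and the intermediate samples are advanced by a constant per-step slope. A simplified pure-Python sketch of that accumulation (field_at stands in for offset_interpolate, and the profile is made up):

ns = 4                        # sub-samples per cell crossing, as in tf.ns

def field_at(t):
    # Stand-in for offset_interpolate(); any smooth profile along the ray works.
    return 1.0 + 0.5 * t

v_first = field_at(0.0)       # value at the first sample position
v_last = field_at(float(ns))  # value one full segment past it
slope = (v_last - v_first) / ns

dv = v_first
samples = []
for _ in range(ns):
    samples.append(dv)        # what eval_transfer() now consumes each step
    dv += slope
print(samples)                # [1.0, 1.5, 2.0, 2.5]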

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/yt/utilities/peewee.py	Wed Oct 19 16:21:34 2011 -0400
@@ -0,0 +1,1425 @@
+#     (\
+#     (  \  /(o)\     caw!
+#     (   \/  ()/ /)
+#      (   `;.))'".) 
+#       `(/////.-'
+#    =====))=))===() 
+#      ///'       
+#     //
+#    '
+from datetime import datetime
+import logging
+import os
+import re
+import time
+
+try:
+    import sqlite3
+except ImportError:
+    sqlite3 = None
+
+try:
+    import psycopg2
+except ImportError:
+    psycopg2 = None
+
+try:
+    import MySQLdb as mysql
+except ImportError:
+    mysql = None
+
+if sqlite3 is None and psycopg2 is None and mysql is None:
+    raise ImproperlyConfigured('Either sqlite3, psycopg2 or MySQLdb must be installed')
+
+
+DATABASE_NAME = os.environ.get('PEEWEE_DATABASE', 'peewee.db')
+logger = logging.getLogger('peewee.logger')
+
+
+class BaseAdapter(object):
+    """
+    The various subclasses of `BaseAdapter` provide a bridge between the high-
+    level `Database` abstraction and the underlying python libraries like
+    psycopg2.  It also provides a way to unify the pythonic field types with
+    the underlying column types used by the database engine.
+    
+    The `BaseAdapter` provides two types of mappings:    
+    - mapping between filter operations and their database equivalents
+    - mapping between basic field types and their database column types
+    
+    The `BaseAdapter` also is the mechanism used by the `Database` class to:
+    - handle connections with the database
+    - extract information from the database cursor
+    """
+    operations = {'eq': '= %s'}
+    interpolation = '%s'
+    
+    def get_field_types(self):
+        field_types = {
+            'integer': 'INTEGER',
+            'float': 'REAL',
+            'decimal': 'NUMERIC',
+            'string': 'VARCHAR',
+            'text': 'TEXT',
+            'datetime': 'DATETIME',
+            'primary_key': 'INTEGER',
+            'foreign_key': 'INTEGER',
+            'boolean': 'SMALLINT',
+        }
+        field_types.update(self.get_field_overrides())
+        return field_types
+    
+    def get_field_overrides(self):
+        return {}
+    
+    def connect(self, database, **kwargs):
+        raise NotImplementedError
+    
+    def close(self, conn):
+        conn.close()
+    
+    def lookup_cast(self, lookup, value):
+        if lookup in ('contains', 'icontains'):
+            return '%%%s%%' % value
+        elif lookup in ('startswith', 'istartswith'):
+            return '%s%%' % value
+        return value
+    
+    def last_insert_id(self, cursor, model):
+        return cursor.lastrowid
+    
+    def rows_affected(self, cursor):
+        return cursor.rowcount
+
+
+class SqliteAdapter(BaseAdapter):
+    # note the sqlite library uses a non-standard interpolation string
+    operations = {
+        'lt': '< ?',
+        'lte': '<= ?',
+        'gt': '> ?',
+        'gte': '>= ?',
+        'eq': '= ?',
+        'ne': '!= ?', # watch yourself with this one
+        'in': 'IN (%s)', # special-case to list q-marks
+        'is': 'IS ?',
+        'icontains': "LIKE ? ESCAPE '\\'", # surround param with %'s
+        'contains': "GLOB ?", # surround param with *'s
+        'istartswith': "LIKE ? ESCAPE '\\'",
+        'startswith': "GLOB ?",
+    }
+    interpolation = '?'
+    
+    def connect(self, database, **kwargs):
+        return sqlite3.connect(database, **kwargs)
+    
+    def lookup_cast(self, lookup, value):
+        if lookup == 'contains':
+            return '*%s*' % value
+        elif lookup == 'icontains':
+            return '%%%s%%' % value
+        elif lookup == 'startswith':
+            return '%s*' % value
+        elif lookup == 'istartswith':
+            return '%s%%' % value
+        return value
+
+
+class PostgresqlAdapter(BaseAdapter):
+    operations = {
+        'lt': '< %s',
+        'lte': '<= %s',
+        'gt': '> %s',
+        'gte': '>= %s',
+        'eq': '= %s',
+        'ne': '!= %s', # watch yourself with this one
+        'in': 'IN (%s)', # special-case to list q-marks
+        'is': 'IS %s',
+        'icontains': 'ILIKE %s', # surround param with %'s
+        'contains': 'LIKE %s', # surround param with *'s
+        'istartswith': 'ILIKE %s',
+        'startswith': 'LIKE %s',
+    }
+        
+    def connect(self, database, **kwargs):
+        return psycopg2.connect(database=database, **kwargs)
+    
+    def get_field_overrides(self):
+        return {
+            'primary_key': 'SERIAL',
+            'datetime': 'TIMESTAMP'
+        }
+    
+    def last_insert_id(self, cursor, model):
+        cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
+            model._meta.db_table, model._meta.pk_name))
+        return cursor.fetchone()[0]
+    
+
+class MySQLAdapter(BaseAdapter):
+    operations = {
+        'lt': '< %s',
+        'lte': '<= %s',
+        'gt': '> %s',
+        'gte': '>= %s',
+        'eq': '= %s',
+        'ne': '!= %s', # watch yourself with this one
+        'in': 'IN (%s)', # special-case to list q-marks
+        'is': 'IS %s',
+        'icontains': 'LIKE %s', # surround param with %'s
+        'contains': 'LIKE BINARY %s', # surround param with *'s
+        'istartswith': 'LIKE %s',
+        'startswith': 'LIKE BINARY %s',
+    }
+
+    def connect(self, database, **kwargs):
+        return mysql.connect(db=database, **kwargs)
+
+    def get_field_overrides(self):
+        return {
+            'primary_key': 'integer AUTO_INCREMENT',
+            'boolean': 'bool',
+            'float': 'double precision',
+            'text': 'longtext',
+        }
+
+
+class Database(object):
+    """
+    A high-level api for working with the supported database engines.  `Database`
+    provides a wrapper around some of the functions performed by the `Adapter`,
+    and in addition provides support for:
+    - execution of SQL queries
+    - creating and dropping tables and indexes
+    """
+    def __init__(self, adapter, database, **connect_kwargs):
+        self.adapter = adapter
+        self.database = database
+        self.connect_kwargs = connect_kwargs
+    
+    def connect(self):
+        self.conn = self.adapter.connect(self.database, **self.connect_kwargs)
+    
+    def close(self):
+        self.adapter.close(self.conn)
+    
+    def execute(self, sql, params=None, commit=False):
+        cursor = self.conn.cursor()
+        res = cursor.execute(sql, params or ())
+        if commit:
+            self.conn.commit()
+        logger.debug((sql, params))
+        return cursor
+    
+    def last_insert_id(self, cursor, model):
+        return self.adapter.last_insert_id(cursor, model)
+    
+    def rows_affected(self, cursor):
+        return self.adapter.rows_affected(cursor)
+    
+    def column_for_field(self, db_field):
+        try:
+            return self.adapter.get_field_types()[db_field]
+        except KeyError:
+            raise AttributeError('Unknown field type: "%s", valid types are: %s' % (
+                db_field, ', '.join(self.adapter.get_field_types().keys())
+            ))
+    
+    def create_table(self, model_class):
+        framing = "CREATE TABLE %s (%s);"
+        columns = []
+
+        for field in model_class._meta.fields.values():
+            columns.append(field.to_sql())
+
+        query = framing % (model_class._meta.db_table, ', '.join(columns))
+        
+        self.execute(query, commit=True)
+    
+    def create_index(self, model_class, field, unique=False):
+        framing = 'CREATE %(unique)s INDEX %(model)s_%(field)s ON %(model)s(%(field)s);'
+        
+        if field not in model_class._meta.fields:
+            raise AttributeError(
+                'Field %s not on model %s' % (field, model_class)
+            )
+        
+        unique_expr = ternary(unique, 'UNIQUE', '')
+        
+        query = framing % {
+            'unique': unique_expr,
+            'model': model_class._meta.db_table,
+            'field': field
+        }
+        
+        self.execute(query, commit=True)
+    
+    def drop_table(self, model_class, fail_silently=False):
+        framing = fail_silently and 'DROP TABLE IF EXISTS %s;' or 'DROP TABLE %s;'
+        self.execute(framing % model_class._meta.db_table, commit=True)
+    
+    def get_indexes_for_table(self, table):
+        raise NotImplementedError
+
+
+class SqliteDatabase(Database):
+    def __init__(self, database, **connect_kwargs):
+        super(SqliteDatabase, self).__init__(SqliteAdapter(), database, **connect_kwargs)
+    
+    def get_indexes_for_table(self, table):
+        res = self.execute('PRAGMA index_list(%s);' % table)
+        rows = sorted([(r[1], r[2] == 1) for r in res.fetchall()])
+        return rows
+
+
+class PostgresqlDatabase(Database):
+    def __init__(self, database, **connect_kwargs):
+        super(PostgresqlDatabase, self).__init__(PostgresqlAdapter(), database, **connect_kwargs)
+    
+    def get_indexes_for_table(self, table):
+        res = self.execute("""
+            SELECT c2.relname, i.indisprimary, i.indisunique
+            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
+            WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
+            ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))
+        return sorted([(r[0], r[1]) for r in res.fetchall()])
+
+class MySQLDatabase(Database):
+    def __init__(self, database, **connect_kwargs):
+        super(MySQLDatabase, self).__init__(MySQLAdapter(), database, **connect_kwargs)
+    
+    def get_indexes_for_table(self, table):
+        res = self.execute('SHOW INDEXES IN %s;' % table)
+        rows = sorted([(r[2], r[1] == 0) for r in res.fetchall()])
+        return rows
+
+
+class QueryResultWrapper(object):
+    """
+    Provides an iterator over the results of a raw Query, additionally doing
+    two things:
+    - converting rows from the database into model instances
+    - ensuring that multiple iterations do not result in multiple queries
+    """
+    def __init__(self, model, cursor):
+        self.model = model
+        self.cursor = cursor
+        self._result_cache = []
+        self._populated = False
+    
+    def model_from_rowset(self, model_class, row_dict):
+        instance = model_class()
+        for attr, value in row_dict.iteritems():
+            if attr in instance._meta.fields:
+                field = instance._meta.fields[attr]
+                setattr(instance, attr, field.python_value(value))
+            else:
+                setattr(instance, attr, value)
+        return instance    
+    
+    def _row_to_dict(self, row, result_cursor):
+        return dict((result_cursor.description[i][0], value)
+            for i, value in enumerate(row))
+    
+    def __iter__(self):
+        if not self._populated:
+            return self
+        else:
+            return iter(self._result_cache)
+    
+    def next(self):
+        row = self.cursor.fetchone()
+        if row:
+            row_dict = self._row_to_dict(row, self.cursor)
+            instance = self.model_from_rowset(self.model, row_dict)
+            self._result_cache.append(instance)
+            return instance
+        else:
+            self._populated = True
+            raise StopIteration
+
+
+# create
+class DoesNotExist(Exception):
+    pass
+
+
+# semantic wrappers for ordering the results of a `SelectQuery`
+def asc(f):
+    return (f, 'ASC')
+
+def desc(f):
+    return (f, 'DESC')
+
+# wrappers for performing aggregation in a `SelectQuery`
+def Count(f, alias='count'):
+    return ('COUNT', f, alias)
+
+def Max(f, alias='max'):
+    return ('MAX', f, alias)
+
+def Min(f, alias='min'):
+    return ('MIN', f, alias)
+
+# decorator for query methods that modify query state; the wrapped method
+# operates on (and returns) a clone rather than mutating the original query
+def returns_clone(func):
+    def inner(self, *args, **kwargs):
+        clone = self.clone()
+        res = func(clone, *args, **kwargs)
+        return clone
+    return inner
+
+# helpers
+ternary = lambda cond, t, f: (cond and [t] or [f])[0]
+
+
+class Node(object):
+    def __init__(self, connector='AND'):
+        self.connector = connector
+        self.children = []
+        self.negated = False
+    
+    def connect(self, rhs, connector):
+        if isinstance(rhs, Q):
+            if connector == self.connector:
+                self.children.append(rhs)
+                return self
+            else:
+                p = Node(connector)
+                p.children = [self, rhs]
+                return p
+        elif isinstance(rhs, Node):
+            p = Node(connector)
+            p.children = [self, rhs]
+            return p
+    
+    def __or__(self, rhs):
+        return self.connect(rhs, 'OR')
+
+    def __and__(self, rhs):
+        return self.connect(rhs, 'AND')
+    
+    def __invert__(self):
+        self.negated = not self.negated
+        return self
+    
+    def __unicode__(self):
+        query = []
+        nodes = []
+        for child in self.children:
+            if isinstance(child, Q):
+                query.append(unicode(child))
+            elif isinstance(child, Node):
+                nodes.append('(%s)' % unicode(child))
+        query.extend(nodes)
+        connector = ' %s ' % self.connector
+        query = connector.join(query)
+        if self.negated:
+            query = 'NOT %s' % query
+        return query
+    
+
+class Q(object):
+    def __init__(self, **kwargs):
+        self.query = kwargs
+        self.parent = None
+        self.negated = False
+    
+    def connect(self, connector):
+        if self.parent is None:
+            self.parent = Node(connector)
+            self.parent.children.append(self)
+    
+    def __or__(self, rhs):
+        self.connect('OR')
+        return self.parent | rhs
+    
+    def __and__(self, rhs):
+        self.connect('AND')
+        return self.parent & rhs
+    
+    def __invert__(self):
+        self.negated = not self.negated
+        return self
+    
+    def __unicode__(self):
+        bits = ['%s = %s' % (k, v) for k, v in self.query.items()]
+        if len(self.query.items()) > 1:
+            connector = ' AND '
+            expr = '(%s)' % connector.join(bits)
+        else:
+            expr = bits[0]
+        if self.negated:
+            expr = 'NOT %s' % expr
+        return expr
+
+
+def parseq(*args, **kwargs):
+    node = Node()
+    
+    for piece in args:
+        if isinstance(piece, (Q, Node)):
+            node.children.append(piece)
+        else:
+            raise TypeError('Unknown object: %s', piece)
+
+    if kwargs:
+        node.children.append(Q(**kwargs))
+
+    return node
+
+
+class EmptyResultException(Exception):
+    pass
+
+
+class BaseQuery(object):
+    query_separator = '__'
+    requires_commit = True
+    force_alias = False
+    
+    def __init__(self, model):
+        self.model = model
+        self.query_context = model
+        self.database = self.model._meta.database
+        self.operations = self.database.adapter.operations
+        self.interpolation = self.database.adapter.interpolation
+        
+        self._dirty = True
+        self._where = {}
+        self._joins = []
+    
+    def clone(self):
+        raise NotImplementedError
+    
+    def lookup_cast(self, lookup, value):
+        return self.database.adapter.lookup_cast(lookup, value)
+    
+    def parse_query_args(self, model, **query):
+        parsed = {}
+        for lhs, rhs in query.iteritems():
+            if self.query_separator in lhs:
+                lhs, op = lhs.rsplit(self.query_separator, 1)
+            else:
+                op = 'eq'
+            
+            try:
+                field = model._meta.get_field_by_name(lhs)
+            except AttributeError:
+                field = model._meta.get_related_field_by_name(lhs)
+                if field is None:
+                    raise
+                if isinstance(rhs, Model):
+                    rhs = rhs.get_pk()
+            
+            if op == 'in':
+                if isinstance(rhs, SelectQuery):
+                    lookup_value = rhs
+                    operation = 'IN (%s)'
+                else:
+                    if not rhs:
+                        raise EmptyResultException
+                    lookup_value = [field.db_value(o) for o in rhs]
+                    operation = self.operations[op] % \
+                        (','.join([self.interpolation for v in lookup_value]))
+            elif op == 'is':
+                if rhs is not None:
+                    raise ValueError('__is lookups only accept None')
+                operation = 'IS NULL'
+                lookup_value = []
+            else:
+                lookup_value = field.db_value(rhs)
+                operation = self.operations[op]
+            
+            parsed[field.name] = (operation, self.lookup_cast(op, lookup_value))
+        
+        return parsed
+    
+    @returns_clone
+    def where(self, *args, **kwargs):
+        self._where.setdefault(self.query_context, [])
+        self._where[self.query_context].append(parseq(*args, **kwargs))
+
+    @returns_clone
+    def join(self, model, join_type=None, on=None):
+        if self.query_context._meta.rel_exists(model):
+            self._joins.append((model, join_type, on))
+            self.query_context = model
+        else:
+            raise AttributeError('No foreign key found between %s and %s' % \
+                (self.query_context.__name__, model.__name__))
+
+    @returns_clone
+    def switch(self, model):
+        if model == self.model:
+            self.query_context = model
+            return
+
+        for klass, join_type, on in self._joins:
+            if model == klass:
+                self.query_context = model
+                return
+        raise AttributeError('You must JOIN on %s' % model.__name__)
+    
+    def use_aliases(self):
+        return len(self._joins) > 0 or self.force_alias
+
+    def combine_field(self, alias, field_name):
+        if alias:
+            return '%s.%s' % (alias, field_name)
+        return field_name
+    
+    def compile_where(self):
+        alias_count = 0
+        alias_map = {}
+
+        alias_required = self.use_aliases()
+
+        joins = list(self._joins)
+        if self._where or len(joins):
+            joins.insert(0, (self.model, None, None))
+        
+        where_with_alias = []
+        where_data = []
+        computed_joins = []
+
+        for i, (model, join_type, on) in enumerate(joins):
+            if alias_required:
+                alias_count += 1
+                alias_map[model] = 't%d' % alias_count
+            else:
+                alias_map[model] = ''
+            
+            if i > 0:
+                from_model = joins[i-1][0]
+                field = from_model._meta.get_related_field_for_model(model, on)
+                if field:
+                    left_field = field.name
+                    right_field = model._meta.pk_name
+                else:
+                    field = from_model._meta.get_reverse_related_field_for_model(model, on)
+                    left_field = from_model._meta.pk_name
+                    right_field = field.name
+                
+                if join_type is None:
+                    if field.null and model not in self._where:
+                        join_type = 'LEFT OUTER'
+                    else:
+                        join_type = 'INNER'
+                
+                computed_joins.append(
+                    '%s JOIN %s AS %s ON %s = %s' % (
+                        join_type,
+                        model._meta.db_table,
+                        alias_map[model],
+                        self.combine_field(alias_map[from_model], left_field),
+                        self.combine_field(alias_map[model], right_field),
+                    )
+                )
+        
+        for (model, join_type, on) in joins:
+            if model in self._where:
+                for node in self._where[model]:
+                    query, data = self.parse_node(node, model, alias_map)
+                    where_with_alias.append(query)
+                    where_data.extend(data)
+        
+        return computed_joins, where_with_alias, where_data, alias_map
+    
+    def convert_where_to_params(self, where_data):
+        flattened = []
+        for clause in where_data:
+            if isinstance(clause, (tuple, list)):
+                flattened.extend(clause)
+            else:
+                flattened.append(clause)
+        return flattened
+    
+    def parse_node(self, node, model, alias_map):
+        query = []
+        query_data = []
+        nodes = []
+        for child in node.children:
+            if isinstance(child, Q):
+                parsed, data = self.parse_q(child, model, alias_map)
+                query.append(parsed)
+                query_data.extend(data)
+            elif isinstance(child, Node):
+                parsed, data = self.parse_node(child, model, alias_map)
+                query.append('(%s)' % parsed)
+                query_data.extend(data)
+        query.extend(nodes)
+        connector = ' %s ' % node.connector
+        query = connector.join(query)
+        if node.negated:
+            query = 'NOT (%s)' % query
+        return query, query_data
+    
+    def parse_q(self, q, model, alias_map):
+        query = []
+        query_data = []
+        parsed = self.parse_query_args(model, **q.query)
+        for (name, lookup) in parsed.iteritems():
+            operation, value = lookup
+            if isinstance(value, SelectQuery):
+                sql, value = self.convert_subquery(value)
+                operation = operation % sql
+
+            query_data.append(value)
+            
+            combined = self.combine_field(alias_map[model], name)
+            query.append('%s %s' % (combined, operation))
+        
+        if len(query) > 1:
+            query = '(%s)' % (' AND '.join(query))
+        else:
+            query = query[0]
+        
+        if q.negated:
+            query = 'NOT %s' % query
+        
+        return query, query_data
+
+    def convert_subquery(self, subquery):
+        subquery.query, orig_query = subquery.model._meta.pk_name, subquery.query
+        subquery.force_alias, orig_alias = True, subquery.force_alias
+        sql, data = subquery.sql()
+        subquery.query = orig_query
+        subquery.force_alias = orig_alias
+        return sql, data
+    
+    def raw_execute(self):
+        query, params = self.sql()
+        return self.database.execute(query, params, self.requires_commit)
+
+
+class RawQuery(BaseQuery):
+    def __init__(self, model, query, *params):
+        self._sql = query
+        self._params = list(params)
+        super(RawQuery, self).__init__(model)
+    
+    def sql(self):
+        return self._sql, self._params
+    
+    def execute(self):
+        return QueryResultWrapper(self.model, self.raw_execute())
+    
+    def join(self):
+        raise AttributeError('Raw queries do not support joining programmatically')
+    
+    def where(self):
+        raise AttributeError('Raw queries do not support querying programmatically')
+    
+    def switch(self):
+        raise AttributeError('Raw queries do not support switching contexts')
+    
+    def __iter__(self):
+        return self.execute()
+
+
+class SelectQuery(BaseQuery):
+    requires_commit = False
+    
+    def __init__(self, model, query=None):
+        self.query = query or '*'
+        self._group_by = []
+        self._having = []
+        self._order_by = []
+        self._pagination = None # return all by default
+        self._distinct = False
+        self._qr = None
+        super(SelectQuery, self).__init__(model)
+    
+    def clone(self):
+        query = SelectQuery(self.model, self.query)
+        query.query_context = self.query_context
+        query._group_by = list(self._group_by)
+        query._having = list(self._having)
+        query._order_by = list(self._order_by)
+        query._pagination = self._pagination and tuple(self._pagination) or None
+        query._distinct = self._distinct
+        query._qr = self._qr
+        query._where = dict(self._where)
+        query._joins = list(self._joins)
+        return query
+    
+    @returns_clone
+    def paginate(self, page_num, paginate_by=20):
+        self._pagination = (page_num, paginate_by)
+    
+    def count(self):
+        tmp_pagination = self._pagination
+        self._pagination = None
+        
+        tmp_query = self.query
+        
+        if self.use_aliases():
+            self.query = 'COUNT(t1.%s)' % (self.model._meta.pk_name)
+        else:
+            self.query = 'COUNT(%s)' % (self.model._meta.pk_name)
+        
+        res = self.database.execute(*self.sql())
+        
+        self.query = tmp_query
+        self._pagination = tmp_pagination
+        
+        return res.fetchone()[0]
+    
+    @returns_clone
+    def group_by(self, clause):
+        model = self.query_context
+        
+        if isinstance(clause, basestring):
+            fields = (clause,)
+        elif isinstance(clause, (list, tuple)):
+            fields = clause
+        elif issubclass(clause, Model):
+            model = clause
+            fields = clause._meta.get_field_names()
+        
+        self._group_by.append((model, fields))
+    
+    @returns_clone
+    def having(self, clause):
+        self._having.append(clause)
+    
+    @returns_clone
+    def distinct(self):
+        self._distinct = True
+    
+    @returns_clone
+    def order_by(self, field_or_string):
+        if isinstance(field_or_string, tuple):
+            field_or_string, ordering = field_or_string
+        else:
+            ordering = 'ASC'
+        
+        self._order_by.append(
+            (self.query_context, field_or_string, ordering)
+        )
+
+    def parse_select_query(self, alias_map):
+        if isinstance(self.query, basestring):
+            if self.query in ('*', self.model._meta.pk_name) and self.use_aliases():
+                return '%s.%s' % (alias_map[self.model], self.query)
+            return self.query
+        elif isinstance(self.query, dict):
+            qparts = []
+            aggregates = []
+            for model, cols in self.query.iteritems():
+                alias = alias_map.get(model, '')
+                for col in cols:
+                    if isinstance(col, tuple):
+                        func, col, col_alias = col
+                        aggregates.append('%s(%s) AS %s' % \
+                            (func, self.combine_field(alias, col), col_alias)
+                        )
+                    else:
+                        qparts.append(self.combine_field(alias, col))
+            return ', '.join(qparts + aggregates)
+        else:
+            raise TypeError('Unknown type encountered parsing select query')
+    
+    def sql(self):
+        joins, where, where_data, alias_map = self.compile_where()
+        
+        table = self.model._meta.db_table
+
+        params = []
+        group_by = []
+        
+        if self.use_aliases():
+            table = '%s AS %s' % (table, alias_map[self.model])
+            for model, clause in self._group_by:
+                alias = alias_map[model]
+                for field in clause:
+                    group_by.append(self.combine_field(alias, field))
+        else:
+            group_by = [c[1] for c in self._group_by]
+
+        parsed_query = self.parse_select_query(alias_map)
+        
+        if self._distinct:
+            sel = 'SELECT DISTINCT'
+        else:
+            sel = 'SELECT'
+        
+        select = '%s %s FROM %s' % (sel, parsed_query, table)
+        joins = '\n'.join(joins)
+        where = ' AND '.join(where)
+        group_by = ', '.join(group_by)
+        having = ' AND '.join(self._having)
+        
+        order_by = []
+        for piece in self._order_by:
+            model, field, ordering = piece
+            if self.use_aliases() and field in model._meta.fields:
+                field = '%s.%s' % (alias_map[model], field)
+            order_by.append('%s %s' % (field, ordering))
+        
+        pieces = [select]
+        
+        if joins:
+            pieces.append(joins)
+        if where:
+            pieces.append('WHERE %s' % where)
+            params.extend(self.convert_where_to_params(where_data))
+        
+        if group_by:
+            pieces.append('GROUP BY %s' % group_by)
+        if having:
+            pieces.append('HAVING %s' % having)
+        if order_by:
+            pieces.append('ORDER BY %s' % ', '.join(order_by))
+        if self._pagination:
+            page, paginate_by = self._pagination
+            if page > 0:
+                page -= 1
+            pieces.append('LIMIT %d OFFSET %d' % (paginate_by, page * paginate_by))
+        
+        return ' '.join(pieces), params
+    
+    def execute(self):
+        if self._dirty or not self._qr:
+            try:
+                self._qr = QueryResultWrapper(self.model, self.raw_execute())
+                self._dirty = False
+                return self._qr
+            except EmptyResultException:
+                return iter([])
+        else:
+            # call the __iter__ method directly
+            return iter(self._qr)
+    
+    def __iter__(self):
+        return self.execute()
+
+
+class UpdateQuery(BaseQuery):
+    def __init__(self, model, **kwargs):
+        self.update_query = kwargs
+        super(UpdateQuery, self).__init__(model)
+    
+    def clone(self):
+        query = UpdateQuery(self.model, **self.update_query)
+        query._where = dict(self._where)
+        query._joins = list(self._joins)
+        return query
+    
+    def parse_update(self):
+        sets = {}
+        for k, v in self.update_query.iteritems():
+            try:
+                field = self.model._meta.get_field_by_name(k)
+            except AttributeError:
+                field = self.model._meta.get_related_field_by_name(k)
+                if field is None:
+                    raise
+            
+            sets[field.name] = field.db_value(v)
+        
+        return sets
+    
+    def sql(self):
+        joins, where, where_data, alias_map = self.compile_where()
+        set_statement = self.parse_update()
+
+        params = []
+        update_params = []
+
+        for k, v in set_statement.iteritems():
+            params.append(v)
+            update_params.append('%s=%s' % (k, self.interpolation))
+        
+        update = 'UPDATE %s SET %s' % (
+            self.model._meta.db_table, ', '.join(update_params))
+        where = ' AND '.join(where)
+        
+        pieces = [update]
+        
+        if where:
+            pieces.append('WHERE %s' % where)
+            params.extend(self.convert_where_to_params(where_data))
+        
+        return ' '.join(pieces), params
+    
+    def join(self, *args, **kwargs):
+        raise AttributeError('Update queries do not support JOINs in sqlite')
+    
+    def execute(self):
+        result = self.raw_execute()
+        return self.database.rows_affected(result)
+
+
+class DeleteQuery(BaseQuery):
+    def clone(self):
+        query = DeleteQuery(self.model)
+        query._where = dict(self._where)
+        query._joins = list(self._joins)
+        return query
+    
+    def sql(self):
+        joins, where, where_data, alias_map = self.compile_where()
+
+        params = []
+        
+        delete = 'DELETE FROM %s' % (self.model._meta.db_table)
+        where = ' AND '.join(where)
+        
+        pieces = [delete]
+        
+        if where:
+            pieces.append('WHERE %s' % where)
+            params.extend(self.convert_where_to_params(where_data))
+        
+        return ' '.join(pieces), params
+    
+    def join(self, *args, **kwargs):
+        raise AttributeError('Delete queries do not support JOINs in sqlite')
+    
+    def execute(self):
+        result = self.raw_execute()
+        return self.database.rows_affected(result)
+
+
+class InsertQuery(BaseQuery):
+    def __init__(self, model, **kwargs):
+        self.insert_query = kwargs
+        super(InsertQuery, self).__init__(model)
+    
+    def parse_insert(self):
+        cols = []
+        vals = []
+        for k, v in self.insert_query.iteritems():
+            field = self.model._meta.get_field_by_name(k)
+            cols.append(k)
+            vals.append(field.db_value(v))
+        
+        return cols, vals
+    
+    def sql(self):
+        cols, vals = self.parse_insert()
+        
+        insert = 'INSERT INTO %s (%s) VALUES (%s)' % (
+            self.model._meta.db_table,
+            ','.join(cols),
+            ','.join(self.interpolation for v in vals)
+        )
+        
+        return insert, vals
+    
+    def where(self, *args, **kwargs):
+        raise AttributeError('Insert queries do not support WHERE clauses')
+    
+    def join(self, *args, **kwargs):
+        raise AttributeError('Insert queries do not support JOINs')
+    
+    def execute(self):
+        result = self.raw_execute()
+        return self.database.last_insert_id(result, self.model)
+
+
+class Field(object):
+    db_field = ''
+    default = None
+    field_template = "%(column_type)s%(nullable)s"
+
+    def get_attributes(self):
+        return {}
+    
+    def __init__(self, null=False, db_index=False, *args, **kwargs):
+        self.null = null
+        self.db_index = db_index
+        self.attributes = self.get_attributes()
+        self.default = kwargs.get('default', None)
+        
+        kwargs['nullable'] = ternary(self.null, '', ' NOT NULL')
+        self.attributes.update(kwargs)
+    
+    def add_to_class(self, klass, name):
+        self.name = name
+        self.model = klass
+        setattr(klass, name, None)
+    
+    def render_field_template(self):
+        col_type = self.model._meta.database.column_for_field(self.db_field)
+        self.attributes['column_type'] = col_type
+        return self.field_template % self.attributes
+    
+    def to_sql(self):
+        rendered = self.render_field_template()
+        return '%s %s' % (self.name, rendered)
+    
+    def null_wrapper(self, value, default=None):
+        if (self.null and value is None) or default is None:
+            return value
+        return value or default
+    
+    def db_value(self, value):
+        return value
+    
+    def python_value(self, value):
+        return value
+    
+    def lookup_value(self, lookup_type, value):
+        return self.db_value(value)
+
+
+class CharField(Field):
+    db_field = 'string'
+    field_template = '%(column_type)s(%(max_length)d)%(nullable)s'
+    
+    def get_attributes(self):
+        return {'max_length': 255}
+    
+    def db_value(self, value):
+        if self.null and value is None:
+            return value
+        value = value or ''
+        return value[:self.attributes['max_length']]
+    
+    def lookup_value(self, lookup_type, value):
+        if lookup_type == 'contains':
+            return '*%s*' % self.db_value(value)
+        elif lookup_type == 'icontains':
+            return '%%%s%%' % self.db_value(value)
+        else:
+            return self.db_value(value)
+    
+
+class TextField(Field):
+    db_field = 'text'
+    
+    def db_value(self, value):
+        return self.null_wrapper(value, '')
+    
+    def lookup_value(self, lookup_type, value):
+        if lookup_type == 'contains':
+            return '*%s*' % self.db_value(value)
+        elif lookup_type == 'icontains':
+            return '%%%s%%' % self.db_value(value)
+        else:
+            return self.db_value(value)
+
+
+class DateTimeField(Field):
+    db_field = 'datetime'
+    
+    def python_value(self, value):
+        if isinstance(value, basestring):
+            value = value.rsplit('.', 1)[0]
+            return datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6])
+        return value
+
+
+class IntegerField(Field):
+    db_field = 'integer'
+    
+    def db_value(self, value):
+        return self.null_wrapper(value, 0)
+    
+    def python_value(self, value):
+        if value is not None:
+            return int(value)
+
+
+class BooleanField(IntegerField):
+    db_field = 'boolean'
+    
+    def db_value(self, value):
+        if value:
+            return 1
+        return 0
+    
+    def python_value(self, value):
+        return bool(value)
+
+
+class FloatField(Field):
+    db_field = 'float'
+    
+    def db_value(self, value):
+        return self.null_wrapper(value, 0.0)
+    
+    def python_value(self, value):
+        if value is not None:
+            return float(value)
+
+
+class PrimaryKeyField(IntegerField):
+    db_field = 'primary_key'
+    field_template = "%(column_type)s NOT NULL PRIMARY KEY"
+
+
+class ForeignRelatedObject(object):    
+    def __init__(self, to, name):
+        self.field_name = name
+        self.to = to
+        self.cache_name = '_cache_%s' % name
+    
+    def __get__(self, instance, instance_type=None):
+        if not getattr(instance, self.cache_name, None):
+            id = getattr(instance, self.field_name, 0)
+            qr = self.to.select().where(**{self.to._meta.pk_name: id}).execute()
+            setattr(instance, self.cache_name, qr.next())
+        return getattr(instance, self.cache_name)
+    
+    def __set__(self, instance, obj):
+        assert isinstance(obj, self.to), "Cannot assign %s, invalid type" % obj
+        setattr(instance, self.field_name, obj.get_pk())
+        setattr(instance, self.cache_name, obj)
+
+
+class ReverseForeignRelatedObject(object):
+    def __init__(self, related_model, name):
+        self.field_name = name
+        self.related_model = related_model
+    
+    def __get__(self, instance, instance_type=None):
+        query = {self.field_name: instance.get_pk()}
+        qr = self.related_model.select().where(**query)
+        return qr
+
+
+class ForeignKeyField(IntegerField):
+    db_field = 'foreign_key'
+    field_template = '%(column_type)s%(nullable)s REFERENCES %(to_table)s (%(to_pk)s)'
+    
+    def __init__(self, to, null=False, related_name=None, *args, **kwargs):
+        self.to = to
+        self.related_name = related_name
+        kwargs.update({
+            'to_table': to._meta.db_table,
+            'to_pk': to._meta.pk_name
+        })
+        super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
+    
+    def add_to_class(self, klass, name):
+        self.descriptor = name
+        self.name = name + '_id'
+        self.model = klass
+        
+        if self.related_name is None:
+            self.related_name = klass._meta.db_table + '_set'
+        
+        klass._meta.rel_fields[name] = self.name
+        setattr(klass, self.descriptor, ForeignRelatedObject(self.to, self.name))
+        setattr(klass, self.name, None)
+        
+        reverse_rel = ReverseForeignRelatedObject(klass, self.name)
+        setattr(self.to, self.related_name, reverse_rel)
+    
+    def lookup_value(self, lookup_type, value):
+        if isinstance(value, Model):
+            return value.get_pk()
+        return value or None
+    
+    def db_value(self, value):
+        if isinstance(value, Model):
+            return value.get_pk()
+        return value
+
+
+# define a default database object in the module scope
+database = SqliteDatabase(DATABASE_NAME)
+
+
+class BaseModelOptions(object):
+    def __init__(self, model_class, options=None):
+        # configurable options
+        options = options or {'database': database}
+        for k, v in options.items():
+            setattr(self, k, v)
+        
+        self.rel_fields = {}
+        self.fields = {}
+        self.model_class = model_class
+    
+    def get_field_names(self):
+        fields = [self.pk_name]
+        fields.extend([f for f in sorted(self.fields.keys()) if f != self.pk_name])
+        return fields
+    
+    def get_field_by_name(self, name):
+        if name in self.fields:
+            return self.fields[name]
+        raise AttributeError('Field named %s not found' % name)
+    
+    def get_related_field_by_name(self, name):
+        if name in self.rel_fields:
+            return self.fields[self.rel_fields[name]]
+    
+    def get_related_field_for_model(self, model, name=None):
+        for field in self.fields.values():
+            if isinstance(field, ForeignKeyField) and field.to == model:
+                if name is None or name == field.name or name == field.descriptor:
+                    return field
+    
+    def get_reverse_related_field_for_model(self, model, name=None):
+        for field in model._meta.fields.values():
+            if isinstance(field, ForeignKeyField) and field.to == self.model_class:
+                if name is None or name == field.name or name == field.descriptor:
+                    return field
+    
+    def rel_exists(self, model):
+        return self.get_related_field_for_model(model) or \
+               self.get_reverse_related_field_for_model(model)
+
+
+class BaseModel(type):
+    inheritable_options = ['database']
+    
+    def __new__(cls, name, bases, attrs):
+        cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
+
+        attr_dict = {}
+        meta = attrs.pop('Meta', None)
+        if meta:
+            attr_dict = meta.__dict__
+        
+        for b in bases:
+            base_meta = getattr(b, '_meta', None)
+            if not base_meta:
+                continue
+            
+            for (k, v) in base_meta.__dict__.items():
+                if k in cls.inheritable_options and k not in attr_dict:
+                    attr_dict[k] = v
+        
+        _meta = BaseModelOptions(cls, attr_dict)
+        
+        if not hasattr(_meta, 'db_table'):
+            _meta.db_table = re.sub('[^a-z]+', '_', cls.__name__.lower())
+
+        setattr(cls, '_meta', _meta)
+        
+        _meta.pk_name = None
+
+        for name, attr in cls.__dict__.items():
+            if isinstance(attr, Field):
+                attr.add_to_class(cls, name)
+                _meta.fields[attr.name] = attr
+                if isinstance(attr, PrimaryKeyField):
+                    _meta.pk_name = attr.name
+        
+        if _meta.pk_name is None:
+            _meta.pk_name = 'id'
+            pk = PrimaryKeyField()
+            pk.add_to_class(cls, _meta.pk_name)
+            _meta.fields[_meta.pk_name] = pk
+
+        _meta.model_name = cls.__name__
+                
+        if hasattr(cls, '__unicode__'):
+            setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
+                _meta.model_name, self.__unicode__()))
+
+        exception_class = type('%sDoesNotExist' % _meta.model_name, (DoesNotExist,), {})
+        cls.DoesNotExist = exception_class
+        
+        return cls
+
+
+class Model(object):
+    __metaclass__ = BaseModel
+    
+    def __init__(self, *args, **kwargs):
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+    
+    def __eq__(self, other):
+        return other.__class__ == self.__class__ and \
+               self.get_pk() and \
+               other.get_pk() == self.get_pk()
+    
+    def get_field_dict(self):
+        def get_field_val(field):
+            field_value = getattr(self, field.name)
+            if not self.get_pk() and field_value is None and field.default is not None:
+                if callable(field.default):
+                    field_value = field.default()
+                else:
+                    field_value = field.default
+                setattr(self, field.name, field_value)
+            return (field.name, field_value)
+        
+        pairs = map(get_field_val, self._meta.fields.values())
+        return dict(pairs)
+    
+    @classmethod
+    def create_table(cls):
+        cls._meta.database.create_table(cls)
+        
+        for field_name, field_obj in cls._meta.fields.items():
+            if isinstance(field_obj, PrimaryKeyField):
+                cls._meta.database.create_index(cls, field_obj.name, True)
+            elif isinstance(field_obj, ForeignKeyField):
+                cls._meta.database.create_index(cls, field_obj.name)
+            elif field_obj.db_index:
+                cls._meta.database.create_index(cls, field_obj.name)
+    
+    @classmethod
+    def drop_table(cls, fail_silently=False):
+        cls._meta.database.drop_table(cls, fail_silently)
+    
+    @classmethod
+    def select(cls, query=None):
+        return SelectQuery(cls, query)
+    
+    @classmethod
+    def update(cls, **query):
+        return UpdateQuery(cls, **query)
+    
+    @classmethod
+    def insert(cls, **query):
+        return InsertQuery(cls, **query)
+    
+    @classmethod
+    def delete(cls, **query):
+        return DeleteQuery(cls, **query)
+    
+    @classmethod
+    def raw(cls, sql, *params):
+        return RawQuery(cls, sql, *params)
+
+    @classmethod
+    def create(cls, **query):
+        inst = cls(**query)
+        inst.save()
+        return inst
+
+    @classmethod
+    def get_or_create(cls, **query):
+        try:
+            inst = cls.get(**query)
+        except cls.DoesNotExist:
+            inst = cls.create(**query)
+        return inst
+    
+    @classmethod            
+    def get(cls, *args, **kwargs):
+        query = cls.select().where(*args, **kwargs).paginate(1, 1)
+        try:
+            return query.execute().next()
+        except StopIteration:
+            raise cls.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
+                query.sql()
+            ))
+    
+    def get_pk(self):
+        return getattr(self, self._meta.pk_name, None)
+    
+    def save(self):
+        field_dict = self.get_field_dict()
+        field_dict.pop(self._meta.pk_name)
+        if self.get_pk():
+            update = self.update(
+                **field_dict
+            ).where(**{self._meta.pk_name: self.get_pk()})
+            update.execute()
+        else:
+            insert = self.insert(**field_dict)
+            new_pk = insert.execute()
+            setattr(self, self._meta.pk_name, new_pk)

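For orientation, here is a minimal usage sketch of the ORM added above.  The
model names, field names, and database filename are hypothetical; the
classmethods (create_table, create, select, get), the double-underscore lookup
syntax, and the Q composition come from the code in this changeset, and the
module is assumed to be importable as peewee.

from peewee import (SqliteDatabase, Model, CharField, FloatField,
                    ForeignKeyField, Q)

test_db = SqliteDatabase('answer_tests.db')   # hypothetical filename

class Run(Model):
    name = CharField()
    class Meta:
        database = test_db

class Result(Model):
    run = ForeignKeyField(Run)
    value = FloatField()
    class Meta:
        database = test_db

test_db.connect()
Run.create_table()
Result.create_table()

gold = Run.create(name='gold_standard')
Result.create(run=gold, value=1.0)

# "field__operation" lookups map onto the adapter's operations table, and
# join() switches the query context to the joined model.
for res in Result.select().where(value__gte=0.5).join(Run).where(name='gold_standard'):
    print res.value

# Q objects compose filters with | and &.
for res in Result.select().where(Q(value__gt=0.9) | Q(value__lt=0.1)):
    print res.value

print Run.get(name='gold_standard').id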

http://bitbucket.org/yt_analysis/yt/changeset/a85249ff1243/
changeset:   a85249ff1243
branch:      yt
user:        MatthewTurk
date:        2011-10-19 22:27:24
summary:     Removing a spurious print statement
affected #:  1 file (-1 bytes)

--- a/yt/data_objects/particle_io.py	Wed Oct 19 16:21:34 2011 -0400
+++ b/yt/data_objects/particle_io.py	Wed Oct 19 16:27:24 2011 -0400
@@ -92,7 +92,6 @@
             conv_factors)
         for [n, v] in zip(fields_to_read, rvs):
             self.source.field_data[n] = v
-        print self.source.field_data.keys()
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
     periodic = False


http://bitbucket.org/yt_analysis/yt/changeset/e68aae597bae/
changeset:   e68aae597bae
branch:      yt
user:        brittonsmith
date:        2011-10-19 23:06:04
summary:     Removed some unnecessary deletes.
affected #:  1 file (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 16:21:34 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 17:06:04 2011 -0400
@@ -658,10 +658,8 @@
             if self.chainID[i] != -1:
                 self.chainID[i] = map[self.chainID[i]]
         del map
-        self.densest_in_chain = dic_new.copy()
-        del dic_new
-        self.densest_in_chain_real_index = dicri_new.copy()
-        del dicri_new
+        self.densest_in_chain = dic_new
+        self.densest_in_chain_real_index = dicri_new
         self.__max_memory()
         yt_counters("preconnect pregrouping.")
         mylog.info("Preconnected %d chains." % removed)


http://bitbucket.org/yt_analysis/yt/changeset/da51532daa91/
changeset:   da51532daa91
branch:      yt
user:        brittonsmith
date:        2011-10-19 23:06:25
summary:     Merged.
affected #:  1 file (-1 bytes)

--- a/yt/data_objects/particle_io.py	Wed Oct 19 17:06:04 2011 -0400
+++ b/yt/data_objects/particle_io.py	Wed Oct 19 17:06:25 2011 -0400
@@ -92,7 +92,6 @@
             conv_factors)
         for [n, v] in zip(fields_to_read, rvs):
             self.source.field_data[n] = v
-        print self.source.field_data.keys()
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
     periodic = False


http://bitbucket.org/yt_analysis/yt/changeset/30419f296790/
changeset:   30419f296790
branch:      yt
user:        samskillman
date:        2011-10-19 23:46:12
summary:     Adding a ProcessorPool that can create workgroups.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 16:21:34 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 17:46:12 2011 -0400
@@ -274,6 +274,33 @@
     if parallel_capable: return root_only
     return func
 
+class ProcessorPool(object):
+    comm = None
+    size = None
+    ranks = None
+    availabel_ranks = None
+    tasks = None
+    workgroups = {}
+    def __init__(self):
+        self.comm = communication_system.communicators[-1]
+        self.size = self.comm.size
+        self.ranks = range(self.size)
+        self.available_ranks = range(self.size)
+
+    def add_workgroup(self, size=None, ranks=None):
+        if size is None:
+            size = len(self.available_ranks)
+        if len(self.available_ranks) < size:
+            print 'Not enough resources available'
+            raise RuntimeError
+        if ranks is None:
+            ranks = [self.available_ranks.pop(0) for i in range(size)]
+        group = self.comm.comm.Get_group().Incl(ranks)
+        new_comm = self.comm.comm.Create(group)
+        if self.comm.rank in ranks:
+            communication_system.communicators.append(Communicator(new_comm))
+        self.workgroups[len(ranks)] = ranks
+
 def parallel_objects(objects, njobs):
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
@@ -673,7 +700,7 @@
     _grids = None
     _distributed = None
 
-    def __init__(self, size=1):
+    def __init__(self):
         self.comm = communication_system.communicators[-1]
         self._grids = self.comm._grids
         self._distributed = self.comm._distributed


http://bitbucket.org/yt_analysis/yt/changeset/089c2379bdea/
changeset:   089c2379bdea
branch:      yt
user:        samskillman
date:        2011-10-19 23:47:49
summary:     Merging
affected #:  2 files (-1 bytes)

--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 17:46:12 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Wed Oct 19 17:47:49 2011 -0400
@@ -658,10 +658,8 @@
             if self.chainID[i] != -1:
                 self.chainID[i] = map[self.chainID[i]]
         del map
-        self.densest_in_chain = dic_new.copy()
-        del dic_new
-        self.densest_in_chain_real_index = dicri_new.copy()
-        del dicri_new
+        self.densest_in_chain = dic_new
+        self.densest_in_chain_real_index = dicri_new
         self.__max_memory()
         yt_counters("preconnect pregrouping.")
         mylog.info("Preconnected %d chains." % removed)


--- a/yt/data_objects/particle_io.py	Wed Oct 19 17:46:12 2011 -0400
+++ b/yt/data_objects/particle_io.py	Wed Oct 19 17:47:49 2011 -0400
@@ -92,7 +92,6 @@
             conv_factors)
         for [n, v] in zip(fields_to_read, rvs):
             self.source.field_data[n] = v
-        print self.source.field_data.keys()
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
     periodic = False


http://bitbucket.org/yt_analysis/yt/changeset/b0ddfd8d37e4/
changeset:   b0ddfd8d37e4
branch:      yt
user:        MatthewTurk
date:        2011-10-19 23:50:24
summary:     Removing some barriers
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 17:47:49 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 17:50:24 2011 -0400
@@ -540,7 +540,6 @@
 
     def mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
-        self.comm.barrier()
         data = None
         if self.comm.rank == 0:
             data = {0:info}
@@ -550,7 +549,6 @@
             self.comm.send(info, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
         data = self.comm.bcast(data, root=0)
-        self.comm.barrier()
         return self.comm.rank, data
 
     def claim_object(self, obj):


http://bitbucket.org/yt_analysis/yt/changeset/310f2f3680fd/
changeset:   310f2f3680fd
branch:      yt
user:        brittonsmith
date:        2011-10-20 01:02:31
summary:     Made EnzoSimulation a subclass of TimeSeriesData.
affected #:  1 file (-1 bytes)

--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py	Wed Oct 19 17:06:25 2011 -0400
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py	Wed Oct 19 19:02:31 2011 -0400
@@ -31,6 +31,9 @@
 
 dt_Tolerance = 1e-3
 
+from yt.data_objects.time_series import \
+    TimeSeriesData
+
 from yt.utilities.cosmology import \
     Cosmology, \
     EnzoCosmology
@@ -38,13 +41,15 @@
 from yt.convenience import \
     load
 
-class EnzoSimulation(object):
+class EnzoSimulation(TimeSeriesData):
     r"""Super class for performing the same operation over all data dumps in 
     a simulation from one redshift to another.
     """
-    def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, initial_redshift=None, final_redshift=None,
-                 links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False,
-                 get_data_by_force=False):
+    def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, 
+                 initial_redshift=None, final_redshift=None,
+                 links=False, enzo_parameters=None, 
+                 get_time_outputs=True, get_redshift_outputs=True, 
+                 get_available_data=False, get_data_by_force=False):
         r"""Initialize an Enzo Simulation object.
         
         initial_time : float
@@ -122,6 +127,11 @@
         # Get all the appropriate datasets.
         self._get_all_outputs(brute_force=get_data_by_force)
 
+        # Instantiate a TimeSeriesData object.
+        time_series_outputs = [load(output['filename']) \
+                                   for output in self.allOutputs]
+        TimeSeriesData.__init__(self, outputs=time_series_outputs)
+
     def _calculate_redshift_dump_times(self):
         "Calculates time from redshift of redshift dumps."
 


http://bitbucket.org/yt_analysis/yt/changeset/f8cdfc82114d/
changeset:   f8cdfc82114d
branch:      yt
user:        brittonsmith
date:        2011-10-20 01:03:23
summary:     Merged.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 19:02:31 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 19:03:23 2011 -0400
@@ -274,6 +274,33 @@
     if parallel_capable: return root_only
     return func
 
+class ProcessorPool(object):
+    comm = None
+    size = None
+    ranks = None
+    availabel_ranks = None
+    tasks = None
+    workgroups = {}
+    def __init__(self):
+        self.comm = communication_system.communicators[-1]
+        self.size = self.comm.size
+        self.ranks = range(self.size)
+        self.available_ranks = range(self.size)
+
+    def add_workgroup(self, size=None, ranks=None):
+        if size is None:
+            size = len(self.available_ranks)
+        if len(self.available_ranks) < size:
+            print 'Not enough resources available'
+            raise RuntimeError
+        if ranks is None:
+            ranks = [self.available_ranks.pop(0) for i in range(size)]
+        group = self.comm.comm.Get_group().Incl(ranks)
+        new_comm = self.comm.comm.Create(group)
+        if self.comm.rank in ranks:
+            communication_system.communicators.append(Communicator(new_comm))
+        self.workgroups[len(ranks)] = ranks
+
 def parallel_objects(objects, njobs):
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
@@ -513,7 +540,6 @@
 
     def mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
-        self.comm.barrier()
         data = None
         if self.comm.rank == 0:
             data = {0:info}
@@ -523,7 +549,6 @@
             self.comm.send(info, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
         data = self.comm.bcast(data, root=0)
-        self.comm.barrier()
         return self.comm.rank, data
 
     def claim_object(self, obj):
@@ -673,7 +698,7 @@
     _grids = None
     _distributed = None
 
-    def __init__(self, size=1):
+    def __init__(self):
         self.comm = communication_system.communicators[-1]
         self._grids = self.comm._grids
         self._distributed = self.comm._distributed


http://bitbucket.org/yt_analysis/yt/changeset/5f5a1be1f805/
changeset:   5f5a1be1f805
branch:      yt
user:        samskillman
date:        2011-10-20 04:35:16
summary:     Initial pass at workgroups is working.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 17:47:49 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:35:16 2011 -0400
@@ -274,19 +274,24 @@
     if parallel_capable: return root_only
     return func
 
+class Workgroup(object):
+    def __init__(self, size, ranks):
+        self.size = size
+        self.ranks = ranks
+
 class ProcessorPool(object):
     comm = None
     size = None
     ranks = None
-    availabel_ranks = None
+    available_ranks = None
     tasks = None
-    workgroups = {}
+    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
-
+    
     def add_workgroup(self, size=None, ranks=None):
         if size is None:
             size = len(self.available_ranks)
@@ -295,11 +300,24 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
+        
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
             communication_system.communicators.append(Communicator(new_comm))
-        self.workgroups[len(ranks)] = ranks
+        self.workgroups.append(Workgroup(len(ranks), ranks))
+    
+    def free_workgroup(self, workgroup):
+        for i in workgroup.ranks:
+            if self.comm.rank == i:
+                communication_system.communicators.pop()
+            self.available_ranks.append(i) 
+        del workgroup
+        self.available_ranks.sort()
+
+    def free_all(self):
+        for wg in self.workgroups:
+            self.free_workgroup(wg)
 
 def parallel_objects(objects, njobs):
     my_communicator = communication_system.communicators[-1]
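
A hypothetical sketch of how these workgroups might be used; it assumes a yt
session that is already running in parallel under MPI (the pool pulls its
communicator from the global communication_system), and the sizes shown are
illustrative only.

from yt.utilities.parallel_tools.parallel_analysis_interface import \
    ProcessorPool

pool = ProcessorPool()
pool.add_workgroup(size=2)      # claims the first two available ranks
pool.add_workgroup()            # claims whatever ranks remain
for wg in pool.workgroups:
    print wg.size, wg.ranks
pool.free_all()                 # hands all ranks back to the pool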


http://bitbucket.org/yt_analysis/yt/changeset/8f53667608b1/
changeset:   8f53667608b1
branch:      yt
user:        samskillman
date:        2011-10-20 04:35:25
summary:     Merging
affected #:  2 files (-1 bytes)

--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py	Wed Oct 19 22:35:16 2011 -0400
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py	Wed Oct 19 22:35:25 2011 -0400
@@ -31,6 +31,9 @@
 
 dt_Tolerance = 1e-3
 
+from yt.data_objects.time_series import \
+    TimeSeriesData
+
 from yt.utilities.cosmology import \
     Cosmology, \
     EnzoCosmology
@@ -38,13 +41,15 @@
 from yt.convenience import \
     load
 
-class EnzoSimulation(object):
+class EnzoSimulation(TimeSeriesData):
     r"""Super class for performing the same operation over all data dumps in 
     a simulation from one redshift to another.
     """
-    def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, initial_redshift=None, final_redshift=None,
-                 links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False,
-                 get_data_by_force=False):
+    def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, 
+                 initial_redshift=None, final_redshift=None,
+                 links=False, enzo_parameters=None, 
+                 get_time_outputs=True, get_redshift_outputs=True, 
+                 get_available_data=False, get_data_by_force=False):
         r"""Initialize an Enzo Simulation object.
         
         initial_time : float
@@ -122,6 +127,11 @@
         # Get all the appropriate datasets.
         self._get_all_outputs(brute_force=get_data_by_force)
 
+        # Instantiate a TimeSeriesData object.
+        time_series_outputs = [load(output['filename']) \
+                                   for output in self.allOutputs]
+        TimeSeriesData.__init__(self, outputs=time_series_outputs)
+
     def _calculate_redshift_dump_times(self):
         "Calculates time from redshift of redshift dumps."
 


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:35:16 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:35:25 2011 -0400
@@ -558,7 +558,6 @@
 
     def mpi_info_dict(self, info):
         if not self._distributed: return 0, {0:info}
-        self.comm.barrier()
         data = None
         if self.comm.rank == 0:
             data = {0:info}
@@ -568,7 +567,6 @@
             self.comm.send(info, dest=0, tag=0)
         mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
         data = self.comm.bcast(data, root=0)
-        self.comm.barrier()
         return self.comm.rank, data
 
     def claim_object(self, obj):


http://bitbucket.org/yt_analysis/yt/changeset/1dd7e124d5cf/
changeset:   1dd7e124d5cf
branch:      yt
user:        MatthewTurk
date:        2011-10-20 04:56:11
summary:     Now if you supply a "storage" argument to parallel_objects (should be a dict!)
it will include a storage object in which you can set a result and then get a
collated result.  For instance:

storage = {}
for sto, obj in parallel_objects(range(10), njobs=2, storage=storage):
    sto.result = obj**2.0
for i, j in sorted(storage.items()): i, j
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:35:25 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:56:11 2011 -0400
@@ -319,7 +319,13 @@
         for wg in self.workgroups:
             self.free_workgroup(wg)
 
-def parallel_objects(objects, njobs):
+class ResultsStorage(object):
+    slots = ['result', 'result_id']
+    result = None
+    result_id = None
+
+def parallel_objects(objects, njobs, storage = None):
+    if not parallel_capable: raise RuntimeError
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
     my_rank = my_communicator.rank
@@ -329,10 +335,23 @@
             my_new_id = i
             break
     communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+    obj_ids = na.arange(len(objects))
 
-    for obj in objects[my_new_id::njobs]:
-        yield obj
+    to_share = {}
+    for result_id, obj in zip(obj_ids, objects)[my_new_id::njobs]:
+        if storage is not None:
+            rstore = ResultsStorage()
+            rstore.result_id = result_id
+            yield rstore, obj
+            to_share[rstore.result_id] = rstore.result
+        else:
+            yield obj
     communication_system.communicators.pop()
+    if storage is not None:
+        # Now we have to broadcast it
+        new_storage = my_communicator.par_combine_object(
+                to_share, datatype = 'dict', op = 'join')
+        storage.update(new_storage)
 
 class CommunicationSystem(object):
     communicators = []
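
A self-contained version of the example in the commit message, assuming an
MPI-enabled run (the new guard raises RuntimeError when parallel_capable is
False); after the loop the joined dictionary is identical on every task:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        parallel_objects

    storage = {}
    for sto, obj in parallel_objects(range(10), njobs=2, storage=storage):
        sto.result = obj**2.0       # stored under the object's index
    # par_combine_object joins the per-job dicts, so every task now sees
    # {0: 0.0, 1: 1.0, ..., 9: 81.0}
    for result_id, result in sorted(storage.items()):
        print result_id, result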


http://bitbucket.org/yt_analysis/yt/changeset/c60ffc752898/
changeset:   c60ffc752898
branch:      yt
user:        samskillman
date:        2011-10-20 04:45:31
summary:     Adding the communicator onto the workgroup.
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:35:25 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:45:31 2011 -0400
@@ -275,9 +275,10 @@
     return func
 
 class Workgroup(object):
-    def __init__(self, size, ranks):
+    def __init__(self, size, ranks, comm):
         self.size = size
         self.ranks = ranks
+        self.comm = comm
 
 class ProcessorPool(object):
     comm = None
@@ -305,7 +306,7 @@
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
             communication_system.communicators.append(Communicator(new_comm))
-        self.workgroups.append(Workgroup(len(ranks), ranks))
+        self.workgroups.append(Workgroup(len(ranks), ranks, new_comm))
     
     def free_workgroup(self, workgroup):
         for i in workgroup.ranks:
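
With the communicator attached, a task can talk to its own workgroup only.
A hedged sketch: wg.comm is the raw communicator returned by Create, which is
MPI.COMM_NULL on tasks outside the group, so membership must be checked first:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    pool = ProcessorPool()
    pool.add_workgroup(size=2)
    pool.add_workgroup()
    for wg in pool.workgroups:
        if pool.comm.rank in wg.ranks:
            # root=0 is the first rank listed in wg.ranks inside the new comm
            note = wg.comm.bcast({"leader": wg.ranks[0]}, root=0)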


http://bitbucket.org/yt_analysis/yt/changeset/b66d3401ddf0/
changeset:   b66d3401ddf0
branch:      yt
user:        samskillman
date:        2011-10-20 06:02:15
summary:     Merging
affected #:  1 file (-1 bytes)

--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Wed Oct 19 22:45:31 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Oct 20 00:02:15 2011 -0400
@@ -320,7 +320,13 @@
         for wg in self.workgroups:
             self.free_workgroup(wg)
 
-def parallel_objects(objects, njobs):
+class ResultsStorage(object):
+    slots = ['result', 'result_id']
+    result = None
+    result_id = None
+
+def parallel_objects(objects, njobs, storage = None):
+    if not parallel_capable: raise RuntimeError
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
     my_rank = my_communicator.rank
@@ -330,10 +336,23 @@
             my_new_id = i
             break
     communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+    obj_ids = na.arange(len(objects))
 
-    for obj in objects[my_new_id::njobs]:
-        yield obj
+    to_share = {}
+    for result_id, obj in zip(obj_ids, objects)[my_new_id::njobs]:
+        if storage is not None:
+            rstore = ResultsStorage()
+            rstore.result_id = result_id
+            yield rstore, obj
+            to_share[rstore.result_id] = rstore.result
+        else:
+            yield obj
     communication_system.communicators.pop()
+    if storage is not None:
+        # Now we have to broadcast it
+        new_storage = my_communicator.par_combine_object(
+                to_share, datatype = 'dict', op = 'join')
+        storage.update(new_storage)
 
 class CommunicationSystem(object):
     communicators = []


http://bitbucket.org/yt_analysis/yt/changeset/93525dfdb0d1/
changeset:   93525dfdb0d1
branch:      yt
user:        brittonsmith
date:        2011-10-20 14:52:24
summary:     Merged.
affected #:  63 files (-1 bytes)

--- a/scripts/iyt	Thu Oct 20 08:17:55 2011 -0400
+++ b/scripts/iyt	Thu Oct 20 08:52:24 2011 -0400
@@ -150,7 +150,7 @@
             return self[self._key_numbers[key]]
         return UserDict.__getitem__(self, key)
     def __iter__(self):
-        return itertools.chain(self.data.iterkeys(),
+        return itertools.chain(self.field_data.iterkeys(),
                         self._key_numbers.iterkeys())
     def __repr__(self):
         s = "{" + ", \n ".join(
@@ -158,9 +158,9 @@
                     for i in sorted(self._key_numbers)]) + "}"
         return s
     def has_key(self, key):
-        return self.data.has_key(key) or self._key_numbers.has_key(key)
+        return self.field_data.has_key(key) or self._key_numbers.has_key(key)
     def keys(self):
-        return self.data.key(key) + self._key_numbers.key(key)
+        return self.field_data.key(key) + self._key_numbers.key(key)
 
 pfs = ParameterFileDict()
 pcs = []


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/halos.py	Thu Oct 20 08:52:24 2011 -0400
@@ -0,0 +1,10 @@
+from yt.utilities.answer_testing.output_tests import \
+    SingleOutputTest, create_test
+from yt.utilities.answer_testing.halo_tests import \
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
+
+create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
+
+create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
+
+create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)


--- a/tests/object_field_values.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/tests/object_field_values.py	Thu Oct 20 08:52:24 2011 -0400
@@ -3,7 +3,7 @@
 
 from yt.utilities.answer_testing.output_tests import \
     YTStaticOutputTest, RegressionTestException, create_test
-from yt.funcs import ensure_list
+from yt.funcs import ensure_list, iterable
 from fields_to_test import field_list, particle_field_list
 
 class FieldHashesDontMatch(RegressionTestException):
@@ -16,26 +16,50 @@
     return func
 
 @register_object
-def centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center, width/0.25)
+def centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
 
 @register_object
-def off_centered_sphere(self):
-    center = 0.5*(self.pf.domain_right_edge + self.pf.domain_left_edge)
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(center - 0.25 * width, width/0.25)
+def off_centered_sphere(tobj):
+    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
 
 @register_object
-def corner_sphere(self):
-    width = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()
-    self.data_object = self.pf.h.sphere(self.pf.domain_left_edge, width/0.25)
+def corner_sphere(tobj):
+    width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
 
 @register_object
+def disk(self):
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
+    normal = na.array([1.]*3)
+    self.data_object = self.pf.h.disk(center, normal, radius, height)
+    
+@register_object
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 
+_new_known_objects = {}
+for field in ["Density"]:#field_list:
+    for object_name in known_objects:
+        def _rfunc(oname, fname):
+            def func(tobj):
+                known_objects[oname](tobj)
+                tobj.orig_data_object = tobj.data_object
+                avg_value = tobj.orig_data_object.quantities[
+                        "WeightedAverageQuantity"](fname, "Density")
+                tobj.data_object = tobj.orig_data_object.cut_region(
+                        ["grid['%s'] > %s" % (fname, avg_value)])
+            return func
+        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
+                _rfunc(object_name, field)
+known_objects.update(_new_known_objects)
+
 class YTFieldValuesTest(YTStaticOutputTest):
     def run(self):
         vals = self.data_object[self.field].copy()
@@ -51,6 +75,69 @@
 
 for object_name in known_objects:
     for field in field_list + particle_field_list:
+        if "cut_region" in object_name and field in particle_field_list:
+            continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
+    
+class YTDerivedQuantityTest(YTStaticOutputTest):
+    def setup(self):
+        YTStaticOutputTest.setup(self)
+        known_objects[self.object_name](self)
 
+    def compare(self, old_result):
+        if hasattr(self.result, 'tostring'):
+            self.compare_array_delta(self.result, old_result, 1e-7)
+            return
+        elif iterable(self.result):
+            a1 = na.array(self.result)
+            a2 = na.array(old_result)
+            self.compare_array_delta(a1, a2, 1e-7)
+        else:
+            if self.result != old_result: raise FieldHashesDontMatch
+
+    def run(self):
+        # This only works if it takes no arguments
+        self.result = self.data_object.quantities[self.dq_name]()
+
+dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
+            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
+
+# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
+# MinLocation
+
+for object_name in known_objects:
+    for dq in dq_names:
+        # Some special exceptions
+        if "cut_region" in object_name and (
+            "SpinParameter" in dq or
+            "TotalMass" in dq):
+            continue
+        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
+                    dq_name = dq, object_name = object_name)
+
+class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities[self.dq_name](
+            self.field_name)
+
+for object_name in known_objects:
+    for field in field_list:
+        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
+            create_test(YTDerivedQuantityTestField,
+                        "%s_%s" % (object_name, field),
+                        field_name = field, dq_name = dq,
+                        object_name = object_name)
+
+class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+    def run(self):
+        self.result = self.data_object.quantities["WeightedAverageQuantity"](
+            self.field_name, weight="CellMassMsun")
+
+for object_name in known_objects:
+    for field in field_list:
+        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
+                    "%s_%s" % (object_name, field),
+                    field_name = field, 
+                    object_name = object_name)
+


--- a/tests/projections.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/tests/projections.py	Thu Oct 20 08:52:24 2011 -0400
@@ -1,16 +1,35 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestGasDistribution
+    TestProjection, TestOffAxisProjection, TestSlice, \
+    TestRay, TestGasDistribution, Test2DGasDistribution
+
 from fields_to_test import field_list
 
+for field in field_list:
+    create_test(TestRay, "%s" % field, field = field)
+
 for axis in range(3):
     for field in field_list:
-        create_test(TestProjection, "projection_test_%s_%s" % (axis, field),
+        create_test(TestSlice, "%s_%s" % (axis, field),
                     field = field, axis = axis)
-        create_test(TestProjection, "projection_test_%s_%s_Density" % (axis, field),
+
+for axis in range(3):
+    for field in field_list:
+        create_test(TestProjection, "%s_%s" % (axis, field),
+                    field = field, axis = axis)
+        create_test(TestProjection, "%s_%s_Density" % (axis, field),
                     field = field, axis = axis, weight_field = "Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "profile_density_test_%s" % field,
+    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
+                field = field, axis = axis)
+    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
+                field = field, axis = axis, weight_field = "Density")
+
+for field in field_list:
+    create_test(TestGasDistribution, "density_%s" % field,
                 field_x = "Density", field_y = field)
+    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                field_x = "Density", field_y = "x-velocity", field_z = field, 
+                weight = "CellMassMsun")


--- a/tests/runall.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/tests/runall.py	Thu Oct 20 08:52:24 2011 -0400
@@ -1,3 +1,4 @@
+import matplotlib; matplotlib.use('Agg')
 from yt.config import ytcfg
 ytcfg["yt","loglevel"] = "50"
 ytcfg["yt","serialize"] = "False"
@@ -6,7 +7,6 @@
     RegressionTestRunner, clear_registry, create_test, \
     TestFieldStatistics, TestAllProjections, registry_entries, \
     Xunit
-
 from yt.utilities.command_line import get_yt_version
 
 from yt.mods import *
@@ -49,6 +49,7 @@
     return mapping
 
 if __name__ == "__main__":
+    clear_registry()
     mapping = find_and_initialize_tests()
     test_storage_directory = ytcfg.get("yt","test_storage_dir")
     try:
@@ -75,7 +76,15 @@
                       help = "The name we'll call this set of tests")
     opts, args = parser.parse_args()
     if opts.list_tests:
-        print "\n    ".join(sorted(itertools.chain(*mapping.values())))
+        tests_to_run = []
+        for m, vals in mapping.items():
+            new_tests = fnmatch.filter(vals, opts.test_pattern)
+            if len(new_tests) == 0: continue
+            load_tests(m, cwd)
+            keys = set(registry_entries())
+            tests_to_run += [t for t in new_tests if t in keys]
+        tests = list(set(tests_to_run))
+        print "\n    ".join(tests)
         sys.exit(0)
     pf = load(opts.parameter_file)
     if pf is None:
@@ -97,9 +106,11 @@
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
         if len(new_tests) == 0: continue
-        tests_to_run += new_tests
         load_tests(m, cwd)
+        keys = set(registry_entries())
+        tests_to_run += [t for t in new_tests if t in keys]
     for test_name in sorted(tests_to_run):
+        print "RUNNING TEST", test_name
         rtr.run_test(test_name)
     if watcher is not None:
         rtr.watcher.report()


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/volume_rendering.py	Thu Oct 20 08:52:24 2011 -0400
@@ -0,0 +1,36 @@
+from yt.mods import *
+import numpy as na
+
+from yt.utilities.answer_testing.output_tests import \
+    YTStaticOutputTest, RegressionTestException
+from yt.funcs import ensure_list
+
+class VolumeRenderingInconsistent(RegressionTestException):
+    pass
+
+class VolumeRenderingConsistency(YTStaticOutputTest):
+    name = "volume_rendering_consistency"
+    def run(self):
+        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
+        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        N = 512
+        n_contours=5
+        cmap = 'algae'
+        field = 'Density'
+        mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
+        mi, ma = na.log10(mi), na.log10(ma)
+        contour_width=(ma-mi)/100.
+        L = na.array([1.]*3)
+        tf = ColorTransferFunction((mi-2, ma+2))
+        tf.add_layers(n_contours,w=contour_width,
+                      col_bounds = (mi*1.001,ma*0.999), 
+                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
+        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        image = cam.snapshot()
+        # image = cam.snapshot('test_rendering_%s.png'%field)
+        self.result = image
+
+    def compare(self, old_result):
+        if not na.all(self.result==old_result):
+            raise VolumeRenderingInconsistent()
+


--- a/yt/analysis_modules/halo_finding/halo_objects.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_finding/halo_objects.py	Thu Oct 20 08:52:24 2011 -0400
@@ -426,7 +426,7 @@
         """
         if self.max_dens_point is not None:
             return self.max_dens_point[0]
-        max = self._mpi_allmax(self._max_dens[self.id][0])
+        max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
         return max
 
     def maximum_density_location(self):
@@ -450,7 +450,7 @@
         else:
             value = na.array([0,0,0])
         # This works, and isn't appropriate but for now will be fine...
-        value = self._mpi_allsum(value)
+        value = self.comm.mpi_allreduce(value, op='sum')
         return value
 
     def center_of_mass(self):
@@ -479,8 +479,8 @@
         else:
             my_mass = 0.
             my_com = na.array([0.,0.,0.])
-        global_mass = self._mpi_allsum(my_mass)
-        global_com = self._mpi_allsum(my_com)
+        global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
+        global_com = self.comm.mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
@@ -499,7 +499,7 @@
             my_mass = self["ParticleMassMsun"].sum()
         else:
             my_mass = 0.
-        global_mass = self._mpi_allsum(float(my_mass))
+        global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
         return global_mass
 
     def bulk_velocity(self):
@@ -528,7 +528,7 @@
             vy = 0.
             vz = 0.
         bv = na.array([vx,vy,vz,pm])
-        global_bv = self._mpi_allsum(bv)
+        global_bv = self.comm.mpi_allreduce(bv, op='sum')
         return global_bv[:3]/global_bv[3]
 
     def rms_velocity(self):
@@ -558,7 +558,7 @@
             ss = na.array([s, float(size)])
         else:
             ss = na.array([0.,0.])
-        global_ss = self._mpi_allsum(ss)
+        global_ss = self.comm.mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
 
@@ -598,7 +598,7 @@
             
         else:
             my_max = 0.
-        return self._mpi_allmax(my_max)
+        return self.comm.mpi_allreduce(my_max, op='max')
 
     def get_size(self):
         if self.size is not None:
@@ -607,7 +607,7 @@
             my_size = self.indices.size
         else:
             my_size = 0
-        global_size = self._mpi_allsum(my_size)
+        global_size = self.comm.mpi_allreduce(my_size, op='sum')
         return global_size
 
     def __getitem__(self, key):
@@ -736,8 +736,8 @@
             dist_max = 0.0
         # In this parallel case, we're going to find the global dist extrema
         # and built identical bins on all tasks.
-        dist_min = self._mpi_allmin(dist_min)
-        dist_max = self._mpi_allmax(dist_max)
+        dist_min = self.comm.mpi_allreduce(dist_min, op='min')
+        dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
         self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
@@ -752,7 +752,7 @@
             for i in xrange(self.bin_count):
                 self.mass_bins[i+1] += self.mass_bins[i]
         # Sum up the mass_bins globally
-        self.mass_bins = self._mpi_Allsum_double(self.mass_bins)
+        self.mass_bins = self.comm.mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
         (4./3. * math.pi * rho_crit * \
@@ -1028,19 +1028,14 @@
         else: ii = slice(None)
         self.particle_fields = {}
         for field in self._fields:
-            if ytcfg.getboolean("yt","inline") == False:
-                tot_part = self._data_source[field].size
-                if field == "particle_index":
-                    self.particle_fields[field] = self._data_source[field][ii].astype('int64')
-                else:
-                    self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+            tot_part = self._data_source[field].size
+            if field == "particle_index":
+                self.particle_fields[field] = self._data_source[field][ii].astype('int64')
             else:
-                tot_part = self._data_source[field].size
-                if field == "particle_index":
-                    self.particle_fields[field] = self._data_source[field][ii].astype('int64')
-                else:
-                    self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+                self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+            del self._data_source[field]
         self._base_indices = na.arange(tot_part)[ii]
+        gc.collect()
 
     def _get_dm_indices(self):
         if 'creation_time' in self._data_source.hierarchy.field_list:
@@ -1240,11 +1235,11 @@
             if group.tasks is not None:
                 fn = ""
                 for task in group.tasks:
-                    fn += "%s.h5 " % self._get_filename(prefix, rank=task)
+                    fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task)
             elif self._distributed:
-                fn = "%s.h5" % self._get_filename(prefix, rank=group._owner)
+                fn = "%s.h5" % self.comm.get_filename(prefix, rank=group._owner)
             else:
-                fn = "%s.h5" % self._get_filename(prefix)
+                fn = "%s.h5" % self.comm.get_filename(prefix)
             gn = "Halo%08i" % (group.id)
             f.write("%s %s\n" % (gn, fn))
             f.flush()
@@ -1387,6 +1382,7 @@
         *dm_only* is set, only run it on the dark matter particles, otherwise
         on all particles.  Returns an iterable collection of *HopGroup* items.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.num_neighbors = num_neighbors
         self.bounds = bounds
@@ -1411,14 +1407,20 @@
                 self.particle_fields["particle_index"].size:
             mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
             exit = True
-        self._mpi_exit_test(exit)
+
+        self.comm.mpi_exit_test(exit)
+        # Try to do this in a memory conservative way.
+        na.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
+            self.particle_fields['ParticleMassMsun'])
+        na.divide(self.particle_fields["particle_position_x"],
+            self.old_period[0], self.particle_fields["particle_position_x"])
+        na.divide(self.particle_fields["particle_position_y"],
+            self.old_period[1], self.particle_fields["particle_position_y"])
+        na.divide(self.particle_fields["particle_position_z"],
+            self.old_period[2], self.particle_fields["particle_position_z"])
         obj = ParallelHOPHaloFinder(self.period, self.padding,
             self.num_neighbors, self.bounds,
-            self.particle_fields["particle_position_x"] / self.old_period[0],
-            self.particle_fields["particle_position_y"] / self.old_period[1],
-            self.particle_fields["particle_position_z"] / self.old_period[2],
-            self.particle_fields["particle_index"],
-            self.particle_fields["ParticleMassMsun"]/self.total_mass,
+            self.particle_fields,
             self.threshold, rearrange=self.rearrange, premerge=self.premerge)
         self.densities, self.tags = obj.density, obj.chainID
         # I'm going to go ahead and delete self.densities because it's not
@@ -1445,15 +1447,12 @@
         yt_counters("Precomp bulk vel.")
         self.bulk_vel = na.zeros((self.group_count, 3), dtype='float64')
         yt_counters("bulk vel. reading data")
-        pm = self.particle_fields["ParticleMassMsun"]
-        if ytcfg.getboolean("yt","inline") == False:
-            xv = self._data_source["particle_velocity_x"][self._base_indices]
-            yv = self._data_source["particle_velocity_y"][self._base_indices]
-            zv = self._data_source["particle_velocity_z"][self._base_indices]
-        else:
-            xv = self._data_source["particle_velocity_x"][self._base_indices]
-            yv = self._data_source["particle_velocity_y"][self._base_indices]
-            zv = self._data_source["particle_velocity_z"][self._base_indices]
+        pm = obj.mass
+        # Fix this back to un-normalized units.
+        na.multiply(pm, self.total_mass, pm)
+        xv = self._data_source["particle_velocity_x"][self._base_indices]
+        yv = self._data_source["particle_velocity_y"][self._base_indices]
+        zv = self._data_source["particle_velocity_z"][self._base_indices]
         yt_counters("bulk vel. reading data")
         yt_counters("bulk vel. computing")
         select = (self.tags >= 0)
@@ -1479,7 +1478,7 @@
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
-        self.bulk_vel = self._mpi_Allsum_double(self.bulk_vel)
+        self.bulk_vel = self.comm.mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
             self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
@@ -1501,7 +1500,7 @@
                 rms_vel_temp[u][1] = marks[i+1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
-        rms_vel_temp = self._mpi_Allsum_double(rms_vel_temp)
+        rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
         self.rms_vel = na.empty(self.group_count, dtype='float64')
         for groupID in xrange(self.group_count):
             # Here we do the Mean and the Root.
@@ -1513,6 +1512,7 @@
         self.taskID = obj.mine
         self.halo_taskmap = obj.halo_taskmap # A defaultdict.
         del obj
+        gc.collect()
         yt_counters("Precomp bulk vel.")
 
     def _parse_output(self):
@@ -1547,7 +1547,7 @@
                     bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                     rms_vel=self.rms_vel[index])
                 # I don't own this halo
-                self._do_not_claim_object(self._groups[index])
+                self.comm.do_not_claim_object(self._groups[index])
                 self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
                     self.max_dens_point[index][2], self.max_dens_point[index][3]]
                 index += 1
@@ -1560,7 +1560,7 @@
                 bulk_vel=self.bulk_vel[i], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[i])
             # This halo may be owned by many, including this task
-            self._claim_object(self._groups[index])
+            self.comm.claim_object(self._groups[index])
             self._max_dens[index] = [self.max_dens_point[i][0], self.max_dens_point[i][1], \
                 self.max_dens_point[i][2], self.max_dens_point[i][3]]
             cp += counts[i+1]
@@ -1573,7 +1573,7 @@
                 group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[index])
-            self._do_not_claim_object(self._groups[index])
+            self.comm.do_not_claim_object(self._groups[index])
             self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
                 self.max_dens_point[index][2], self.max_dens_point[index][3]]
             index += 1
@@ -1606,6 +1606,7 @@
 
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
     def __init__(self, pf, ds, dm_only=True, padding=0.0):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
         self.center = (na.array(ds.right_edge) + na.array(ds.left_edge))/2.0
@@ -1625,7 +1626,7 @@
                 max_dens[hi] = [max_dens_temp] + list(self._max_dens[halo.id])[1:4]
                 groups.append(self._halo_class(self, hi))
                 groups[-1].indices = halo.indices
-                self._claim_object(groups[-1])
+                self.comm.claim_object(groups[-1])
                 hi += 1
         del self._groups, self._max_dens # explicit >> implicit
         self._groups = groups
@@ -1638,7 +1639,7 @@
         # about processors and ownership and so forth.
         # _mpi_info_dict returns a dict of {proc: whatever} where whatever is
         # what is fed in on each proc.
-        mine, halo_info = self._mpi_info_dict(len(self))
+        mine, halo_info = self.comm.mpi_info_dict(len(self))
         nhalos = sum(halo_info.values())
         # Figure out our offset
         my_first_id = sum([v for k,v in halo_info.items() if k < mine])
@@ -1701,7 +1702,7 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        f = self._write_on_root(filename)
+        f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f)
 
     def write_particle_lists_txt(self, prefix):
@@ -1720,7 +1721,7 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
-        f = self._write_on_root("%s.txt" % prefix)
+        f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
     @parallel_blocking_call
@@ -1743,10 +1744,10 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
-        fn = "%s.h5" % self._get_filename(prefix)
+        fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
-            if not self._is_mine(halo): continue
+            if not self.comm.is_mine(halo): continue
             halo.write_particle_list(f)
 
     def dump(self, basename="HopAnalysis"):
@@ -1778,7 +1779,8 @@
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
     def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
-        fancy_padding=True, safety=1.5, premerge=True, sample=0.03):
+        fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
+        total_mass=None, num_particles=None):
         r"""Parallel HOP halo finder.
         
         Halos are built by:
@@ -1827,6 +1829,23 @@
         sample : float
             The fraction of the full dataset on which load-balancing is
             performed. Default = 0.03.
+        total_mass : float
+            If HOP is run on the same dataset mulitple times, the total mass
+            of particles in Msun units in the full volume can be supplied here
+            to save time.
+            This must correspond to the particles being operated on, meaning
+            if stars are included in the halo finding, they must be included
+            in this mass as well, and visa-versa.
+            If halo finding on a subvolume, this still corresponds with the
+            mass in the entire volume.
+            Default = None, which means the total mass is automatically
+            calculated.
+        num_particles : integer
+            The total number of particles in the volume, in the same fashion
+            as `total_mass` is calculated. Specifying this turns off
+            fancy_padding.
+            Default = None, which means the number of particles is
+            automatically calculated.
         
         Examples
         -------
@@ -1847,7 +1866,7 @@
         topbounds = na.array([[0., 0., 0.], period])
         # Cut up the volume evenly initially, with no padding.
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         # also get the total mass of particles
         yt_counters("Reading Data")
@@ -1855,26 +1874,24 @@
         # analyzing a subvolume.
         ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
         if ytcfg.getboolean("yt","inline") == False and \
-            resize and self._mpi_get_size() != 1 and subvolume is None:
-            random.seed(self._mpi_get_rank())
-            cut_list = self._partition_hierarchy_3d_bisection_list()
+            resize and self.comm.size != 1 and subvolume is None:
+            random.seed(self.comm.rank)
+            cut_list = self.partition_hierarchy_3d_bisection_list()
             root_points = self._subsample_points()
             self.bucket_bounds = []
-            if self._mpi_get_rank() == 0:
+            if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
-            self.bucket_bounds = self._mpi_bcast_pickled(self.bucket_bounds)
-            my_bounds = self.bucket_bounds[self._mpi_get_rank()]
+            self.bucket_bounds = self.comm.mpi_bcast_pickled(self.bucket_bounds)
+            my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
-        if self._mpi_get_size() == 1:
+        if self.comm.size == 1:
             self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case, where the full box is what we want.
-        if ytcfg.getboolean("yt","inline") == False:
-            data = self._data_source["particle_position_x"]
-        else:
+        if num_particles is None:
             data = self._data_source["particle_position_x"]
         try:
             l = self._data_source.right_edge - self._data_source.left_edge
@@ -1883,14 +1900,16 @@
         vol = l[0] * l[1] * l[2]
         full_vol = vol
         # We will use symmetric padding when a subvolume is being used.
-        if not fancy_padding or subvolume is not None:
-            avg_spacing = (float(vol) / data.size)**(1./3.)
+        if not fancy_padding or subvolume is not None or num_particles is not None:
+            if num_particles is None:
+                num_particles = data.size
+            avg_spacing = (float(vol) / num_particles)**(1./3.)
             # padding is a function of inter-particle spacing, this is an
             # approximation, but it's OK with the safety factor
             padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
             self.padding = (na.ones(3,dtype='float64')*padding, na.ones(3,dtype='float64')*padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
-                (str(self.padding), avg_spacing, vol, data.size))
+                (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
             LE_padding, RE_padding = na.empty(3,dtype='float64'), na.empty(3,dtype='float64')
@@ -1934,10 +1953,9 @@
                 (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
-        if ytcfg.getboolean("yt","inline") == False:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
-        else:
-            total_mass = self._mpi_allsum((self._data_source["ParticleMassMsun"].astype('float64')).sum())
+        if total_mass is None:
+            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+                                                 op='sum')
         if not self._distributed:
             self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
         # If we're using a subvolume, we now re-divide.
@@ -1945,7 +1963,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
-                self._partition_hierarchy_3d(ds=self._data_source,
+                self.partition_hierarchy_3d(ds=self._data_source,
                 padding=0.)
         self.bounds = (LE, RE)
         (LE_padding, RE_padding) = self.padding
@@ -1959,13 +1977,13 @@
         # Read in a random subset of the points in each domain, and then
         # collect them on the root task.
         xp = self._data_source["particle_position_x"]
-        n_parts = self._mpi_allsum(xp.size)
+        n_parts = self.comm.mpi_allreduce(xp.size, op='sum')
         local_parts = xp.size
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self._mpi_get_size())
-        n_random = int(adjust * float(random_points) / self._mpi_get_size())
+        adjust = float(local_parts) / ( float(n_parts) / self.comm.size)
+        n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')
@@ -1980,7 +1998,7 @@
         self._data_source.clear_data()
         del uni
         # Collect them on the root task.
-        mine, sizes = self._mpi_info_dict(n_random)
+        mine, sizes = self.comm.mpi_info_dict(n_random)
         if mine == 0:
             tot_random = sum(sizes.values())
             root_points = na.empty((tot_random, 3), dtype='float64')
@@ -1988,7 +2006,8 @@
         else:
             root_points = na.empty([])
         my_points.shape = (1, n_random*3)
-        root_points = self._mpi_concatenate_array_on_root_double(my_points[0])
+        root_points = self.comm.par_combine_object(my_points[0],
+                datatype="array", op="cat")
         del my_points
         if mine == 0:
             root_points.shape = (tot_random, 3)
@@ -2056,7 +2075,7 @@
 
 class HOPHaloFinder(GenericHaloFinder, HOPHaloList):
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
-            padding=0.02):
+            padding=0.02, total_mass=None):
         r"""HOP halo finder.
         
         Halos are built by:
@@ -2090,6 +2109,17 @@
             with duplicated particles for halo finidng to work. This number
             must be no smaller than the radius of the largest halo in the box
             in code units. Default = 0.02.
+        total_mass : float
+            If HOP is run on the same dataset mulitple times, the total mass
+            of particles in Msun units in the full volume can be supplied here
+            to save time.
+            This must correspond to the particles being operated on, meaning
+            if stars are included in the halo finding, they must be included
+            in this mass as well, and visa-versa.
+            If halo finding on a subvolume, this still corresponds with the
+            mass in the entire volume.
+            Default = None, which means the total mass is automatically
+            calculated.
         
         Examples
         --------
@@ -2107,14 +2137,15 @@
         # a small part is actually going to be used.
         self.padding = 0.0
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+            self.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
-        if dm_only:
-            select = self._get_dm_indices()
-            total_mass = \
-                self._mpi_allsum((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'))
-        else:
-            total_mass = self._mpi_allsum(self._data_source["ParticleMassMsun"].sum(dtype='float64'))
+        if total_mass is None:
+            if dm_only:
+                select = self._get_dm_indices()
+                total_mass = \
+                    self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+            else:
+                total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2123,7 +2154,7 @@
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         self.padding = padding #* pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds = self._data_source,
+            self.partition_hierarchy_3d(ds = self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary
@@ -2191,10 +2222,10 @@
         self.padding = 0.0 #* pf["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         if link > 0.0:
-            n_parts = self._mpi_allsum(self._data_source["particle_position_x"].size)
+            n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
             #l = pf.domain_right_edge - pf.domain_left_edge
             #vol = l[0] * l[1] * l[2]
@@ -2209,7 +2240,7 @@
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
         padded, LE, RE, self._data_source = \
-            self._partition_hierarchy_3d(ds=self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary


--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py	Thu Oct 20 08:52:24 2011 -0400
@@ -26,6 +26,7 @@
 from collections import defaultdict
 import itertools, sys
 import numpy as na
+import gc
 
 from yt.funcs import *
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -43,8 +44,9 @@
 
 class ParallelHOPHaloFinder(ParallelAnalysisInterface):
     def __init__(self,period, padding, num_neighbors, bounds,
-            xpos, ypos, zpos, index, mass, threshold=160.0, rearrange=True,
+            particle_fields, threshold=160.0, rearrange=True,
             premerge=True):
+        ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.rearrange = rearrange
         self.premerge = premerge
@@ -54,12 +56,12 @@
         self.padding = padding
         self.num_neighbors = num_neighbors
         self.bounds = bounds
-        self.xpos = xpos
-        self.ypos = ypos
-        self.zpos = zpos
+        self.xpos = particle_fields.pop("particle_position_x")
+        self.ypos = particle_fields.pop("particle_position_y")
+        self.zpos = particle_fields.pop("particle_position_z")
         self.real_size = len(self.xpos)
-        self.index = na.array(index, dtype='int64')
-        self.mass = mass
+        self.index = particle_fields.pop("particle_index")
+        self.mass = particle_fields.pop("ParticleMassMsun")
         self.padded_particles = []
         self.nMerge = 4
         yt_counters("chainHOP")
@@ -74,7 +76,7 @@
         tasks are our geometric neighbors.
         """
         self.neighbors = set([])
-        self.mine, global_bounds = self._mpi_info_dict(self.bounds)
+        self.mine, global_bounds = self.comm.mpi_info_dict(self.bounds)
         my_LE, my_RE = self.bounds
         # Put the vertices into a big list, each row is
         # array[x,y,z, taskID]
@@ -198,7 +200,7 @@
         # lists us as their neighbor, we add them as our neighbor. This is 
         # probably not needed because the stuff above should be symmetric,
         # but it isn't a big issue.
-        self.mine, global_neighbors = self._mpi_info_dict(self.neighbors)
+        self.mine, global_neighbors = self.comm.mpi_info_dict(self.neighbors)
         for taskID in global_neighbors:
             if taskID == self.mine: continue
             if self.mine in global_neighbors[taskID]:
@@ -215,7 +217,7 @@
         """
         if round == 'first':
             max_pad = na.max(self.padding)
-            self.mine, self.global_padding = self._mpi_info_dict(max_pad)
+            self.mine, self.global_padding = self.comm.mpi_info_dict(max_pad)
             self.max_padding = max(self.global_padding.itervalues())
         elif round == 'second':
             self.max_padding = 0.
@@ -234,14 +236,14 @@
         temp_LE = LE - LE_padding
         temp_RE = RE + RE_padding
         expanded_bounds = (temp_LE, temp_RE)
-        self.mine, global_exp_bounds = self._mpi_info_dict(expanded_bounds)
+        self.mine, global_exp_bounds = self.comm.mpi_info_dict(expanded_bounds)
         send_real_indices = {}
         send_points = {}
         send_mass = {}
         send_size = {}
         # This will reduce the size of the loop over particles.
         yt_counters("Picking padding data to send.")
-        send_count = len(na.where(self.is_inside_annulus == True)[0])
+        send_count = self.is_inside_annulus.sum()
         points = na.empty((send_count, 3), dtype='float64')
         points[:,0] = self.xpos[self.is_inside_annulus]
         points[:,1] = self.ypos[self.is_inside_annulus]
@@ -261,11 +263,11 @@
             send_real_indices[neighbor] = real_indices[is_inside].copy()
             send_points[neighbor] = shift_points[is_inside].copy()
             send_mass[neighbor] = mass[is_inside].copy()
-            send_size[neighbor] = len(na.where(is_inside == True)[0])
+            send_size[neighbor] = is_inside.sum()
         del points, shift_points, mass, real_indices
         yt_counters("Picking padding data to send.")
         # Communicate the sizes to send.
-        self.mine, global_send_count = self._mpi_info_dict(send_size)
+        self.mine, global_send_count = self.comm.mpi_info_dict(send_size)
         del send_size
         # Initialize the arrays to receive data.
         yt_counters("Initalizing recv arrays.")
@@ -284,19 +286,19 @@
         yt_counters("MPI stuff.")
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_double(recv_points[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_double(recv_mass[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_points[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_mass[opp_neighbor], opp_neighbor))
         # Let's wait here to be absolutely sure that all the receive buffers
         # have been created before any sending happens!
-        self._barrier()
+        self.comm.barrier()
         # Now we send the data.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(send_real_indices[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_points[neighbor], neighbor))
-            hooks.append(self._mpi_Isend_double(send_mass[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_real_indices[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_points[neighbor], neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(send_mass[neighbor], neighbor))
         # Now we use the data, after all the comms are done.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         yt_counters("MPI stuff.")
         yt_counters("Processing padded data.")
         del send_real_indices, send_points, send_mass
@@ -342,13 +344,22 @@
         yt_counters("init kd tree")
         # Yes, we really do need to initialize this many arrays.
         # They're deleted in _parallelHOP.
-        fKD.dens = na.asfortranarray(na.zeros(self.size, dtype='float64'))
+        fKD.dens = na.zeros(self.size, dtype='float64', order='F')
         fKD.mass = na.concatenate((self.mass, self.mass_pad))
-        fKD.pos = na.asfortranarray(na.empty((3, self.size), dtype='float64'))
+        del self.mass
+        fKD.pos = na.empty((3, self.size), dtype='float64', order='F')
         # This actually copies the data into the fortran space.
-        fKD.pos[0, :] = na.concatenate((self.xpos, self.xpos_pad))
-        fKD.pos[1, :] = na.concatenate((self.ypos, self.ypos_pad))
-        fKD.pos[2, :] = na.concatenate((self.zpos, self.zpos_pad))
+        self.psize = self.xpos.size
+        fKD.pos[0, :self.psize] = self.xpos
+        fKD.pos[1, :self.psize] = self.ypos
+        fKD.pos[2, :self.psize] = self.zpos
+        del self.xpos, self.ypos, self.zpos
+        gc.collect()
+        fKD.pos[0, self.psize:] = self.xpos_pad
+        fKD.pos[1, self.psize:] = self.ypos_pad
+        fKD.pos[2, self.psize:] = self.zpos_pad
+        del self.xpos_pad, self.ypos_pad, self.zpos_pad
+        gc.collect()
         fKD.qv = na.asfortranarray(na.empty(3, dtype='float64'))
         fKD.nn = self.num_neighbors
         # Plus 2 because we're looking for that neighbor, but only keeping 
@@ -647,10 +658,8 @@
             if self.chainID[i] != -1:
                 self.chainID[i] = map[self.chainID[i]]
         del map
-        self.densest_in_chain = dic_new.copy()
-        del dic_new
-        self.densest_in_chain_real_index = dicri_new.copy()
-        del dicri_new
+        self.densest_in_chain = dic_new
+        self.densest_in_chain_real_index = dicri_new
         self.__max_memory()
         yt_counters("preconnect pregrouping.")
         mylog.info("Preconnected %d chains." % removed)
@@ -664,7 +673,7 @@
         """
         yt_counters("globally_assign_chainIDs")
         # First find out the number of chains on each processor.
-        self.mine, chain_info = self._mpi_info_dict(chain_count)
+        self.mine, chain_info = self.comm.mpi_info_dict(chain_count)
         self.nchains = sum(chain_info.values())
         # Figure out our offset.
         self.my_first_id = sum([v for k,v in chain_info.iteritems() if k < self.mine])
@@ -683,8 +692,11 @@
         # Shift the values over effectively by concatenating them in the same
         # order as the values have been shifted in _globally_assign_chainIDs()
         yt_counters("global chain MPI stuff.")
-        self.densest_in_chain = self._mpi_concatenate_array_double(self.densest_in_chain)
-        self.densest_in_chain_real_index = self._mpi_concatenate_array_long(self.densest_in_chain_real_index)
+        self.densest_in_chain = self.comm.par_combine_object(self.densest_in_chain,
+                datatype="array", op="cat")
+        self.densest_in_chain_real_index = self.comm.par_combine_object(
+                self.densest_in_chain_real_index,
+                datatype="array", op="cat")
         yt_counters("global chain MPI stuff.")
         # Sort the chains by density here. This is an attempt to make it such
         # that the merging stuff in a few steps happens in the same order
@@ -774,16 +786,16 @@
         # Set up the receives, but don't actually use them.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(temp_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_long(temp_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(temp_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(temp_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure all the receive buffers are set before continuing.
-        self._barrier()
+        self.comm.barrier()
         # Send padded particles to our neighbors.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(self.uphill_real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(self.uphill_chainIDs, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(self.uphill_real_indices, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(self.uphill_chainIDs, neighbor))
         # Now actually use the data once it's good to go.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         self.__max_memory()
         so_far = 0
         for opp_neighbor in self.neighbors:
@@ -837,7 +849,8 @@
         # Now we make a global dict of how many particles each task is
         # sending.
         self.global_padded_count = {self.mine:self.uphill_chainIDs.size}
-        self.global_padded_count = self._mpi_joindict(self.global_padded_count)
+        self.global_padded_count = self.comm.par_combine_object(
+                self.global_padded_count, datatype = "dict", op = "join")
         # Send/receive 'em.
         self._communicate_uphill_info()
         del self.global_padded_count
@@ -878,7 +891,7 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self._mpi_minimum_array_long(chainID_translate_map_local)
+            self.comm.mpi_allreduce(chainID_translate_map_local, op='min')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -932,7 +945,8 @@
         # but there's so many places in this that need to be globally synched
         # that it's not worth the effort right now to make this one spot better.
         global_annulus_count = {self.mine:send_count}
-        global_annulus_count = self._mpi_joindict(global_annulus_count)
+        global_annulus_count = self.comm.par_combine_object(
+                global_annulus_count, datatype = "dict", op = "join")
         # Set up the receiving arrays.
         recv_real_indices = dict.fromkeys(self.neighbors)
         recv_chainIDs = dict.fromkeys(self.neighbors)
@@ -943,16 +957,16 @@
         # Set up the receving hooks.
         hooks = []
         for opp_neighbor in self.neighbors:
-            hooks.append(self._mpi_Irecv_long(recv_real_indices[opp_neighbor], opp_neighbor))
-            hooks.append(self._mpi_Irecv_long(recv_chainIDs[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_real_indices[opp_neighbor], opp_neighbor))
+            hooks.append(self.comm.mpi_nonblocking_recv(recv_chainIDs[opp_neighbor], opp_neighbor))
         # Make sure the recv buffers are set before continuing.
-        self._barrier()
+        self.comm.barrier()
         # Now we send them.
         for neighbor in self.neighbors:
-            hooks.append(self._mpi_Isend_long(real_indices, neighbor))
-            hooks.append(self._mpi_Isend_long(chainIDs, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(real_indices, neighbor))
+            hooks.append(self.comm.mpi_nonblocking_send(chainIDs, neighbor))
         # Now we use them when they're nice and ripe.
-        self._mpi_Request_Waitall(hooks)
+        self.comm.mpi_Request_Waitall(hooks)
         self.__max_memory()
         for opp_neighbor in self.neighbors:
             opp_size = global_annulus_count[opp_neighbor]
@@ -1061,11 +1075,36 @@
         """
         yt_counters("make_global_chain_densest_n")
         (self.top_keys, self.bot_keys, self.vals) = \
-            self._mpi_maxdict_dict(self.chain_densest_n)
+            self.linearize_chain_dict(self.chain_densest_n)
         self.__max_memory()
         del self.chain_densest_n
         yt_counters("make_global_chain_densest_n")
-    
+
+    def linearize_chain_dict(self, data):
+        """
+        Flatten a dict of dicts into parallel (top key, bottom key, value)
+        arrays and concatenate them across all tasks. This is specifically
+        for a part of chainHOP.
+        """
+        top_keys = []
+        bot_keys = []
+        vals = []
+        for top_key in data:
+            for bot_key in data[top_key]:
+                top_keys.append(top_key)
+                bot_keys.append(bot_key)
+                vals.append(data[top_key][bot_key])
+        top_keys = na.array(top_keys, dtype='int64')
+        bot_keys = na.array(bot_keys, dtype='int64')
+        vals = na.array(vals, dtype='float64')
+
+        data.clear()
+
+        top_keys = self.comm.par_combine_object(top_keys, datatype='array', op='cat')
+        bot_keys = self.comm.par_combine_object(bot_keys, datatype='array', op='cat')
+        vals     = self.comm.par_combine_object(vals, datatype='array', op='cat')
+
+        return (top_keys, bot_keys, vals)
+
     def _build_groups(self):
         """
         With the collection of possible chain links, build groups.
@@ -1155,7 +1194,7 @@
         Set_list = []
         # We only want the holes that are modulo mine.
         keys = na.arange(groupID, dtype='int64')
-        size = self._mpi_get_size()
+        size = self.comm.size
         select = (keys % size == self.mine)
         groupIDs = keys[select]
         mine_groupIDs = set([]) # Records only ones modulo mine.
@@ -1202,7 +1241,7 @@
         del Set_list
         # To bring it all together, find the minimum values at each entry
         # globally.
-        lookup = self._mpi_minimum_array_long(lookup)
+        lookup = self.comm.mpi_allreduce(lookup, op='min')
         # Now apply this to reverse_map
         for chainID,groupID in enumerate(self.reverse_map):
             if groupID == -1:
@@ -1330,7 +1369,7 @@
         # Now we broadcast this, effectively, with an allsum. Even though
         # some groups are on multiple tasks, there is only one densest_in_chain
         # and only that task contributed above.
-        self.max_dens_point = self._mpi_Allsum_double(max_dens_point)
+        self.max_dens_point = self.comm.mpi_allreduce(max_dens_point, op='sum')
         del max_dens_point
         yt_counters("max dens point")
         # Now CoM.
@@ -1385,9 +1424,9 @@
                     CoM_M[groupID] += self.max_dens_point[groupID,1:4] - na.array([0.5,0.5,0.5])
                     CoM_M[groupID] *= Tot_M[groupID]
         # Now we find their global values
-        self.group_sizes = self._mpi_Allsum_long(size)
-        CoM_M = self._mpi_Allsum_double(CoM_M)
-        self.Tot_M = self._mpi_Allsum_double(Tot_M)
+        self.group_sizes = self.comm.mpi_allreduce(size, op='sum')
+        CoM_M = self.comm.mpi_allreduce(CoM_M, op='sum')
+        self.Tot_M = self.comm.mpi_allreduce(Tot_M, op='sum')
         self.CoM = na.empty((self.group_count,3), dtype='float64')
         for groupID in xrange(int(self.group_count)):
             self.CoM[groupID] = CoM_M[groupID] / self.Tot_M[groupID]
@@ -1405,7 +1444,7 @@
                 max_radius[u] = na.max(dist[marks[i]:marks[i+1]])
         # Find the maximum across all tasks.
         mylog.info('Fraction of particles in this region in groups: %f' % (float(calc)/self.size))
-        self.max_radius = self._mpi_double_array_max(max_radius)
+        self.max_radius = self.comm.mpi_allreduce(max_radius, op='max')
         self.max_radius = na.sqrt(self.max_radius)
         yt_counters("max radius")
         yt_counters("Precomp.")
@@ -1457,7 +1496,15 @@
         self._communicate_annulus_chainIDs()
         mylog.info('Connecting %d chains into groups...' % self.nchains)
         self._connect_chains()
+        self.mass = fKD.mass[:self.psize]
+        self.mass_pad = fKD.mass[self.psize:]
         del fKD.dens, fKD.mass, fKD.dens
+        self.xpos = fKD.pos[0, :self.psize]
+        self.ypos = fKD.pos[1, :self.psize]
+        self.zpos = fKD.pos[2, :self.psize]
+        self.xpos_pad = fKD.pos[0, self.psize:]
+        self.ypos_pad = fKD.pos[1, self.psize:]
+        self.zpos_pad = fKD.pos[2, self.psize:]
         del fKD.pos, fKD.chunk_tags
         free_tree(0) # Frees the kdtree object.
         del self.densestNN
@@ -1477,13 +1524,13 @@
         self.density = self.density[:self.real_size]
         # We'll make this a global object, which can be used to write a text
         # file giving the names of hdf5 files the particles for each halo.
-        self.mine, self.I_own = self._mpi_info_dict(self.I_own)
+        self.mine, self.I_own = self.comm.mpi_info_dict(self.I_own)
         self.halo_taskmap = defaultdict(set)
         for taskID in self.I_own:
             for groupID in self.I_own[taskID]:
                 self.halo_taskmap[groupID].add(taskID)
         del self.I_own
-        del self.mass, self.xpos, self.ypos, self.zpos
+        del self.xpos, self.ypos, self.zpos
 
     def __add_to_array(self, arr, key, value, type):
         """

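The hunks above are part of a broader migration in parallel HOP from the old mixin-style self._mpi_* helpers to methods on an explicit communicator object. As a rough guide to the translation, here is a minimal sketch (not part of the changeset; the class name and method are made up, and it assumes ParallelAnalysisInterface is importable from its usual yt-2.x location, yt.utilities.parallel_tools.parallel_analysis_interface):

from yt.utilities.parallel_tools.parallel_analysis_interface import \
    ParallelAnalysisInterface

class CommMigrationSketch(ParallelAnalysisInterface):
    def __init__(self):
        # The explicit base-class call is what sets up self.comm now.
        ParallelAnalysisInterface.__init__(self)

    def combine(self, local_array):
        # old: self._mpi_Allsum_double(local_array)
        summed = self.comm.mpi_allreduce(local_array, op='sum')
        # old: self._mpi_concatenate_array_double(local_array)
        gathered = self.comm.par_combine_object(local_array,
                        datatype="array", op="cat")
        # old: self._mpi_get_rank() and self._mpi_get_size()
        return summed, gathered, self.comm.rank, self.comm.size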

--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py	Thu Oct 20 08:52:24 2011 -0400
@@ -79,6 +79,7 @@
         :param mass_column (int): The column of halo_file that contains the
         masses of the haloes. Default=4.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.halo_file = halo_file
         self.omega_matter0 = omega_matter0
@@ -147,7 +148,7 @@
         # First the fit file.
         if fit:
             fitname = prefix + '-fit.dat'
-            fp = self._write_on_root(fitname)
+            fp = self.comm.write_on_root(fitname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
@@ -163,7 +164,7 @@
             fp.close()
         if self.mode == 'haloes' and haloes:
             haloname = prefix + '-haloes.dat'
-            fp = self._write_on_root(haloname)
+            fp = self.comm.write_on_root(haloname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)

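The halo mass function writer now asks the communicator for a root-only file handle. A small sketch of that pattern on its own, with an illustrative function name and file name (the comm.write_on_root call is the one used above; every task runs the write/close lines, but only the root's handle points at the real file):

def dump_fit(comm, prefix):
    # All tasks call write()/close() on whatever write_on_root returns;
    # only the root task's handle is backed by the actual output file,
    # so no explicit rank check is needed around the I/O.
    fp = comm.write_on_root(prefix + '-fit.dat')
    fp.write("#Columns:\n#1. log10 of mass (Msolar, NOT Msolar/h)\n")
    fp.close()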

--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py	Thu Oct 20 08:52:24 2011 -0400
@@ -156,6 +156,7 @@
         >>> MergerTree(rf, database = '/home/user/sim1-halos.db',
         ... halo_finder_function=parallelHF)
         """
+        ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
         self.with_halos = na.ones(len(restart_files), dtype='bool')
         self.database = database # the sqlite database of haloes.
@@ -168,10 +169,10 @@
         if self.sleep <= 0.:
             self.sleep = 5
         # MPI stuff
-        self.mine = self._mpi_get_rank()
+        self.mine = self.comm.rank
         if self.mine is None:
             self.mine = 0
-        self.size = self._mpi_get_size()
+        self.size = self.comm.size
         if self.size is None:
             self.size = 1
         # Get to work.
@@ -180,7 +181,7 @@
                 os.unlink(self.database)
             except:
                 pass
-        self._barrier()
+        self.comm.barrier()
         self._open_create_database()
         self._create_halo_table()
         self._run_halo_finder_add_to_db()
@@ -203,7 +204,7 @@
         # Now update the database with all the writes.
         mylog.info("Updating database with parent-child relationships.")
         self._copy_and_update_db()
-        self._barrier()
+        self.comm.barrier()
         mylog.info("Done!")
         
     def _read_halo_lists(self):
@@ -275,7 +276,7 @@
                     line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
                     self.cursor.execute(line, values)
                 self.conn.commit()
-            self._barrier()
+            self.comm.barrier()
             del hp
     
     def _open_create_database(self):
@@ -283,7 +284,7 @@
         # doesn't already exist. Open it first on root, and then on the others.
         if self.mine == 0:
             self.conn = sql.connect(self.database)
-        self._barrier()
+        self.comm.barrier()
         self._ensure_db_sync()
         if self.mine != 0:
             self.conn = sql.connect(self.database)
@@ -294,7 +295,7 @@
         # parallel file system funniness, things will go bad very quickly.
         # Therefore, just to be very, very careful, we will ensure that the
         # md5 hash of the file is identical across all tasks before proceeding.
-        self._barrier()
+        self.comm.barrier()
         for i in range(5):
             try:
                 file = open(self.database)
@@ -305,7 +306,7 @@
                 file = open(self.database)
             hash = md5.md5(file.read()).hexdigest()
             file.close()
-            ignore, hashes = self._mpi_info_dict(hash)
+            ignore, hashes = self.comm.mpi_info_dict(hash)
             hashes = set(hashes.values())
             if len(hashes) == 1:
                 break
@@ -338,7 +339,7 @@
                 self.conn.commit()
             except sql.OperationalError:
                 pass
-        self._barrier()
+        self.comm.barrier()
     
     def _find_likely_children(self, parentfile, childfile):
         # For each halo in the parent list, identify likely children in the 
@@ -548,11 +549,16 @@
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
         
-        parent_IDs_tosend = self._mpi_concatenate_array_on_root_long(parent_IDs_tosend)
-        parent_masses_tosend = self._mpi_concatenate_array_on_root_double(parent_masses_tosend)
-        parent_halos_tosend = self._mpi_concatenate_array_on_root_int(parent_halos_tosend)
-        child_IDs_tosend = self._mpi_concatenate_array_on_root_long(child_IDs_tosend)
-        child_halos_tosend = self._mpi_concatenate_array_on_root_int(child_halos_tosend)
+        parent_IDs_tosend = self.comm.par_combine_object(parent_IDs_tosend,
+                datatype="array", op="cat")
+        parent_masses_tosend = self.comm.par_combine_object(parent_masses_tosend,
+                datatype="array", op="cat")
+        parent_halos_tosend = self.comm.par_combine_object(parent_halos_tosend,
+                datatype="array", op="cat")
+        child_IDs_tosend = self.comm.par_combine_object(child_IDs_tosend,
+                datatype="array", op="cat")
+        child_halos_tosend = self.comm.par_combine_object(child_halos_tosend,
+                datatype="array", op="cat")
 
         # Resort the received particles.
         Psort = parent_IDs_tosend.argsort()
@@ -599,7 +605,7 @@
             (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
 
         # Now we sum up the contributions globally.
-        self.child_mass_arr = self._mpi_Allsum_double(self.child_mass_arr)
+        self.child_mass_arr = self.comm.mpi_allreduce(self.child_mass_arr, op='sum')
         
         # Turn these Msol masses into percentages of the parent.
         line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
@@ -712,7 +718,7 @@
             temp_cursor.close()
             temp_conn.close()
         self._close_database()
-        self._barrier()
+        self.comm.barrier()
         if self.mine == 0:
             os.rename(temp_name, self.database)
 
@@ -962,6 +968,7 @@
         >>> MergerTreeDotOutput(halos=182842, database='/home/user/sim1-halos.db',
         ... dotfile = 'halo-182842.gv')
         """
+        ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.link_min = link_min
         if halos is None:
@@ -1108,6 +1115,7 @@
         >>> MergerTreeTextOutput(database='/home/user/sim1-halos.db',
         ... outfile='halos-db.txt')
         """
+        ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.outfile = outfile
         result = self._open_database()

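The merger tree's _ensure_db_sync reduces to comparing an md5 digest of the database file across tasks and retrying until they agree. The core check, as a standalone sketch (db_in_sync is a made-up name, and hashlib stands in for the md5 module used above):

import hashlib

def db_in_sync(comm, path):
    # Hash the local copy of the database file.
    with open(path, 'rb') as f:
        digest = hashlib.md5(f.read()).hexdigest()
    # mpi_info_dict gathers one value per task into a {rank: value} dict
    # that every task receives.
    ignore, digests = comm.mpi_info_dict(digest)
    # The database is in sync only if every task computed the same digest.
    return len(set(digests.values())) == 1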

--- a/yt/analysis_modules/halo_profiler/api.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/api.py	Thu Oct 20 08:52:24 2011 -0400
@@ -34,4 +34,5 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    shift_projections
+    shift_projections, \
+    standard_fields


--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Thu Oct 20 08:52:24 2011 -0400
@@ -164,6 +164,7 @@
         >>> hp = HP.halo_profiler("DD0242/DD0242")
         
         """
+        ParallelAnalysisInterface.__init__(self)
 
         self.dataset = dataset
         self.output_dir = output_dir
@@ -494,11 +495,13 @@
             updated_halos.append(halo)
         
         # And here is where we bring it all together.
-        updated_halos = self._mpi_catlist(updated_halos)
+        updated_halos = self.comm.par_combine_object(updated_halos,
+                            datatype="list", op="cat")
         updated_halos.sort(key = lambda a:a['id'])
         self.all_halos = updated_halos
 
-        self.filtered_halos = self._mpi_catlist(self.filtered_halos)
+        self.filtered_halos = self.comm.par_combine_object(self.filtered_halos,
+                            datatype="list", op="cat")
         self.filtered_halos.sort(key = lambda a:a['id'])
 
         if filename is not None:
@@ -582,8 +585,14 @@
             except EmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
                 return None
+            # Figure out which fields to add simultaneously
+            field_groupings = defaultdict(lambda: defaultdict(list))
             for hp in self.profile_fields:
-                profile.add_fields(hp['field'], weight=hp['weight_field'], accumulation=hp['accumulation'])
+                field_groupings[hp['weight_field']][hp['accumulation']].append(hp['field'])
+            for weight_field in field_groupings:
+                for accum, fields in field_groupings[weight_field].items():
+                    profile.add_fields(fields, weight=weight_field,
+                                       accumulation=accum)
 
         if virial_filter:
             self._add_actual_overdensity(profile)
@@ -995,7 +1004,7 @@
     for plot in projections:
         # Get name of data field.
         other_fields = {'px':True, 'py':True, 'pdx':True, 'pdy':True, 'weight_field':True}
-        for pfield in plot.data.keys():
+        for pfield in plot.field_data.keys():
             if not(other_fields.has_key(pfield)):
                 field = pfield
                 break
@@ -1050,12 +1059,12 @@
         add2_y_weight_field = plot['weight_field'][plot['py'] - 0.5 * plot['pdy'] < 0]
 
         # Add the hanging cells back to the projection data.
-        plot.data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
-        plot.data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
-        plot.data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
-        plot.data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
-        plot.data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
-        plot.data['weight_field'] = na.concatenate([plot['weight_field'],
+        plot.field_data['px'] = na.concatenate([plot['px'], add_x_px, add_y_px, add2_x_px, add2_y_px])
+        plot.field_data['py'] = na.concatenate([plot['py'], add_x_py, add_y_py, add2_x_py, add2_y_py])
+        plot.field_data['pdx'] = na.concatenate([plot['pdx'], add_x_pdx, add_y_pdx, add2_x_pdx, add2_y_pdx])
+        plot.field_data['pdy'] = na.concatenate([plot['pdy'], add_x_pdy, add_y_pdy, add2_x_pdy, add2_y_pdy])
+        plot.field_data[field] = na.concatenate([plot[field], add_x_field, add_y_field, add2_x_field, add2_y_field])
+        plot.field_data['weight_field'] = na.concatenate([plot['weight_field'],
                                                     add_x_weight_field, add_y_weight_field, 
                                                     add2_x_weight_field, add2_y_weight_field])
 
@@ -1072,6 +1081,7 @@
     This is used to mimic a profile object when reading profile data from disk.
     """
     def __init__(self, pf):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._data = {}
 
@@ -1080,3 +1090,34 @@
 
     def keys(self):
         return self._data.keys()
+
+standard_fields = [
+    ("Density", "CellMassMsun", False),
+    ("Temperature", "CellMassMsun", False),
+    ("VelocityMagnitude", "CellMassMsun", False),
+    ("Ones", None, False),
+    ("Entropy", "CellMassMsun", False),
+    ("RadialVelocity", "CellMassMsun", False),
+    ("SpecificAngularMomentumX", "CellMassMsun", False),
+    ("SpecificAngularMomentumY", "CellMassMsun", False),
+    ("SpecificAngularMomentumZ", "CellMassMsun", False),
+    ("CoolingTime", "CellMassMsun", False),
+    ("DynamicalTime", "CellMassMsun", False),
+    ("CellMassMsun", None, True),
+    ("TotalMassMsun", None, True),
+    ("Dark_Matter_Density", "CellMassMsun", False),
+    #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
+    #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
+    #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
+    ("OverDensity", "CellMassMsun", False),
+    #("ParticleMassMsun", None),
+    ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+    #("StarParticleMassMsun", None), 
+    ("StarParticleDensity", "StarParticleMassMsun", False), # How do we weight this?
+]
+
+standard_fields += [("%s_Fraction" % (s), "CellMassMsun", False)
+    for s in ["HI","HII","HeI","HeII","HeIII","H2I","H2II",
+    "HM","Electron", "DI","DII","HDI","Metal"]
+]
+

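The profiler change batches add_fields calls by grouping fields that share a weight field and accumulation flag. The grouping on its own looks like this (standalone sketch with made-up example fields; the defaultdict-of-defaultdict pattern is the one from the diff):

from collections import defaultdict

profile_fields = [
    {'field': 'Density', 'weight_field': 'CellMassMsun', 'accumulation': False},
    {'field': 'Temperature', 'weight_field': 'CellMassMsun', 'accumulation': False},
    {'field': 'CellMassMsun', 'weight_field': None, 'accumulation': True},
]

# Group by weight field, then by accumulation flag, so each distinct
# (weight, accumulation) pair results in exactly one add_fields call.
field_groupings = defaultdict(lambda: defaultdict(list))
for hp in profile_fields:
    field_groupings[hp['weight_field']][hp['accumulation']].append(hp['field'])

for weight_field in field_groupings:
    for accum, fields in field_groupings[weight_field].items():
        # In the profiler this becomes:
        # profile.add_fields(fields, weight=weight_field, accumulation=accum)
        print weight_field, accum, fields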

--- a/yt/analysis_modules/halo_profiler/standard_analysis.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py	Thu Oct 20 08:52:24 2011 -0400
@@ -28,37 +28,6 @@
 from yt.data_objects.profiles import BinnedProfile1D
 from yt.funcs import *
 
-analysis_field_list = [
-    "Density",
-    "Temperature",
-    "VelocityMagnitude",
-    ("Ones", None),
-    "Entropy",
-    "RadialVelocity",
-    "SpecificAngularMomnetumX",
-    "SpecificAngularMomnetumY",
-    "SpecificAngularMomnetumZ",
-    "CoolingTime",
-    "DynamicalTime",
-    ("CellMassMsun", None),
-    "Dark_Matter_Density",
-    #("ParticleSpecificAngularMomentumX", "ParticleMassMsun"),
-    #("ParticleSpecificAngularMomentumY", "ParticleMassMsun"),
-    #("ParticleSpecificAngularMomentumZ", "ParticleMassMsun"),
-    ("TotalMass", None),
-    "OverDensity",
-    #("ParticleMassMsun", None),
-    ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-    #("StarParticleMassMsun", None), 
-    ("StarParticleDensity", "StarParticleMassMsun"), # How do we weight this?
-]
-
-analysis_field_list += ["%s_Fraction" % (s) for s in
-    ["HI","HII","HeI","HeII","HeIII","H2I","H2II","HM","Electron",
-    "DI","DII","HDI","Metal"]
-]
-    
-
 class StandardRadialAnalysis(object):
     def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
         self.pf = pf


--- a/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/hierarchy_subset/hierarchy_subset.py	Thu Oct 20 08:52:24 2011 -0400
@@ -27,6 +27,7 @@
 import numpy as na
 
 from yt.funcs import *
+from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.data_objects.static_output import \
@@ -65,7 +66,7 @@
         self.base_grid = base_pf.h.smoothed_covering_grid(level, self.LeftEdge,
                         self.RightEdge, dims=dims)
         self.base_grid.Level = self.base_grid.level
-        self.data = {}
+        self.field_data = YTFieldData()
         #self._calculate_child_masks()
         self.Parent = None
         self.Children = []


--- a/yt/analysis_modules/level_sets/contour_finder.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/level_sets/contour_finder.py	Thu Oct 20 08:52:24 2011 -0400
@@ -129,7 +129,7 @@
     print "Finished joining in %0.2e seconds" % (t2-t1)
     pbar.finish()
     data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
-    del data_source.data["tempContours"] # Force a reload from the grids
+    del data_source.field_data["tempContours"] # Force a reload from the grids
     data_source.get_data("tempContours", in_grids=True)
     contour_ind = {}
     i = 0
@@ -141,6 +141,6 @@
     mylog.info("Identified %s contours between %0.5e and %0.5e",
                len(contour_ind.keys()),min_val,max_val)
     for grid in chain(grid_set):
-        grid.data.pop("tempContours", None)
-    del data_source.data["tempContours"]
+        grid.field_data.pop("tempContours", None)
+    del data_source.field_data["tempContours"]
     return contour_ind


--- a/yt/analysis_modules/light_cone/halo_mask.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/light_cone/halo_mask.py	Thu Oct 20 08:52:24 2011 -0400
@@ -45,14 +45,14 @@
         light_cone_mask.append(_make_slice_mask(slice, halo_list, pixels))
 
     # Write out cube of masks from each slice.
-    if cube_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+    if cube_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         mylog.info("Saving halo mask cube to %s." % cube_file)
         output = h5py.File(cube_file, 'a')
         output.create_dataset('haloMaskCube', data=na.array(light_cone_mask))
         output.close()
 
     # Write out final mask.
-    if mask_file is not None and ytcfg.getint("yt", "__parallel_rank") == 0:
+    if mask_file is not None and ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         # Final mask is simply the product of the mask from each slice.
         mylog.info("Saving halo mask to %s." % mask_file)
         finalMask = na.ones(shape=(pixels, pixels))
@@ -76,7 +76,7 @@
         haloMap.extend(_make_slice_halo_map(slice, halo_list))
 
     # Write out file.
-    if ytcfg.getint("yt", "__parallel_rank") == 0:
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
         mylog.info("Saving halo map to %s." % map_file)
         f = open(map_file, 'w')
         f.write("#z       x         y        M [Msun]  R [Mpc]   R [image]\n")


--- a/yt/analysis_modules/light_cone/light_cone.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Thu Oct 20 08:52:24 2011 -0400
@@ -108,7 +108,7 @@
         self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \
                           self.image_resolution_in_arcseconds)
 
-        if ytcfg.getint("yt", "__parallel_rank") == 0:
+        if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
             # Create output directory.
             if (os.path.exists(self.output_dir)):
                 if not(os.path.isdir(self.output_dir)):
@@ -243,7 +243,7 @@
         else:
             halo_mask_cube = light_cone_halo_mask(self, mask_file=mask_file, **kwargs)
             # Collapse cube into final mask.
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
+            if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
                 self.halo_mask = na.ones(shape=(self.pixels, self.pixels), dtype=bool)
                 for mask in halo_mask_cube:
                     self.halo_mask *= mask
@@ -302,7 +302,7 @@
             output['object'].parameters.update(self.set_parameters)
             frb = _light_cone_projection(output, field, self.pixels, 
                                          weight_field=weight_field, node=node)
-            if ytcfg.getint("yt", "__parallel_rank") == 0:
+            if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
                 if save_slice_images:
                     write_image(na.log10(frb[field]), "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
@@ -342,7 +342,7 @@
             if (q < len(self.light_cone_solution) - 1):
                 del output['object']
 
-        if ytcfg.getint("yt", "__parallel_rank") == 0:
+        if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
             # Add up slices to make light cone projection.
             if (weight_field is None):
                 lightConeProjection = sum(self.projection_stack)
@@ -356,7 +356,7 @@
 
             # Save the last fixed resolution buffer for the plot collection, 
             # but replace the data with the full light cone projection data.
-            frb.data[field] = lightConeProjection
+            frb.field_data[field] = lightConeProjection
 
             # Write image.
             if save_slice_images:
@@ -370,7 +370,7 @@
             if apply_halo_mask:
                 if len(self.halo_mask) > 0:
                     mylog.info("Applying halo mask.")
-                    frb.data[field] *= self.halo_mask
+                    frb.field_data[field] *= self.halo_mask
                 else:
                     mylog.error("No halo mask loaded, call get_halo_mask.")
 


--- a/yt/analysis_modules/light_cone/light_cone_projection.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone_projection.py	Thu Oct 20 08:52:24 2011 -0400
@@ -88,7 +88,7 @@
                                            field_cuts=these_field_cuts, node_name=node_name)
 
     # If parallel: all the processes have the whole projection object, but we only need to do the tiling, shifting, and cutting once.
-    if ytcfg.getint("yt", "__parallel_rank") == 0:
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
 
         # 2. The Tile Problem
         # Tile projection to specified width.


--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py	Thu Oct 20 08:52:24 2011 -0400
@@ -31,6 +31,9 @@
 
 dt_Tolerance = 1e-3
 
+from yt.data_objects.time_series import \
+    TimeSeriesData
+
 from yt.utilities.cosmology import \
     Cosmology, \
     EnzoCosmology
@@ -38,13 +41,15 @@
 from yt.convenience import \
     load
 
-class EnzoSimulation(object):
+class EnzoSimulation(TimeSeriesData):
     r"""Super class for performing the same operation over all data dumps in 
     a simulation from one redshift to another.
     """
-    def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, initial_redshift=None, final_redshift=None,
-                 links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False,
-                 get_data_by_force=False):
+    def __init__(self, enzo_parameter_file, initial_time=None, final_time=None, 
+                 initial_redshift=None, final_redshift=None,
+                 links=False, enzo_parameters=None, 
+                 get_time_outputs=True, get_redshift_outputs=True, 
+                 get_available_data=False, get_data_by_force=False):
         r"""Initialize an Enzo Simulation object.
         
         initial_time : float
@@ -122,6 +127,11 @@
         # Get all the appropriate datasets.
         self._get_all_outputs(brute_force=get_data_by_force)
 
+        # Instantiate a TimeSeriesData object.
+        time_series_outputs = [load(output['filename']) \
+                                   for output in self.allOutputs]
+        TimeSeriesData.__init__(self, outputs=time_series_outputs)
+
     def _calculate_redshift_dump_times(self):
         "Calculates time from redshift of redshift dumps."
 


--- a/yt/analysis_modules/two_point_functions/two_point_functions.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py	Thu Oct 20 08:52:24 2011 -0400
@@ -98,6 +98,7 @@
         ... length_number=10, length_range=[1./128, .5],
         ... length_type="log")
         """
+        ParallelAnalysisInterface.__init__(self)
         try:
             fKD
         except NameError:
@@ -107,8 +108,8 @@
         self.constant_theta = theta
         self.constant_phi = phi
         # MPI stuff.
-        self.size = self._mpi_get_size()
-        self.mine = self._mpi_get_rank()
+        self.size = self.comm.size
+        self.mine = self.comm.rank
         self.vol_ratio = vol_ratio
         if self.vol_ratio == -1:
             self.vol_ratio = self.size
@@ -160,7 +161,7 @@
             ds = pf.h.periodic_region_strict([0.]*3, self.left_edge, 
                 self.right_edge)
             padded, self.LE, self.RE, self.ds = \
-            self._partition_hierarchy_3d(ds = ds, padding=0.,
+            self.partition_hierarchy_3d(ds = ds, padding=0.,
                 rank_ratio = self.vol_ratio)
         else:
             self.left_edge = left_edge
@@ -168,10 +169,10 @@
             # We do this twice, first with no 'buffer' to get the unbuffered
             # self.LE/RE, and then second to get a buffered self.ds.
             padded, self.LE, self.RE, temp = \
-                self._partition_region_3d(left_edge, right_edge,
+                self.partition_region_3d(left_edge, right_edge,
                     rank_ratio=self.vol_ratio)
             padded, temp, temp, self.ds = \
-                self._partition_region_3d(left_edge - self.lengths[-1], \
+                self.partition_region_3d(left_edge - self.lengths[-1], \
                 right_edge + self.lengths[-1], rank_ratio=self.vol_ratio)
         mylog.info("LE %s RE %s %s" % (str(self.LE), str(self.RE), str(self.ds)))
         self.width = self.ds.right_edge - self.ds.left_edge
@@ -273,8 +274,8 @@
                 self._setup_recv_arrays()
                 self._send_arrays()
                 t0 = time.time()
-                self._mpi_Request_Waitall(self.send_hooks)
-                self._mpi_Request_Waitall(self.recv_hooks)
+                self.comm.mpi_Request_Waitall(self.send_hooks)
+                self.comm.mpi_Request_Waitall(self.recv_hooks)
                 t1 = time.time()
                 t_waiting += (t1-t0)
                 if (self.recv_points < -1.).any() or (self.recv_points > 1.).any(): # or \
@@ -363,7 +364,7 @@
         for task in xrange(self.size):
             if task == self.mine: continue
             self.recv_done[task] = na.zeros(1, dtype='int64')
-            self.done_hooks.append(self._mpi_Irecv_long(self.recv_done[task], \
+            self.done_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_done[task], \
                 task, tag=15))
     
     def _send_done_to_root(self):
@@ -376,7 +377,7 @@
             # I send when I *think* things should finish.
             self.send_done = na.ones(1, dtype='int64') * \
                 (self.size / self.vol_ratio -1) + self.comm_cycle_count
-            self.done_hooks.append(self._mpi_Isend_long(self.send_done, \
+            self.done_hooks.append(self.comm.mpi_nonblocking_send(self.send_done, \
                     0, tag=15))
         else:
             # As root, I need to mark myself!
@@ -390,7 +391,7 @@
         """
         if self.mine == 0:
             # If other tasks aren't finished, this will return False.
-            status = self._mpi_Request_Testall(self.done_hooks)
+            status = self.comm.mpi_Request_Testall(self.done_hooks)
             # Convolve this with with root's status.
             status = status * (self.generated_points == self.total_values)
             if status == 1:
@@ -402,7 +403,7 @@
             status = 0
         # Broadcast the status from root - we stop only if root thinks we should
         # stop.
-        status = self._mpi_bcast_pickled(status)
+        status = self.comm.mpi_bcast_pickled(status)
         if status == 0: return True
         if self.comm_cycle_count < status:
             return True
@@ -418,22 +419,22 @@
         self.recv_fields_vals = na.zeros((self.comm_size, len(self.fields)*2), \
             dtype='float64')
         self.recv_gen_array = na.zeros(self.size, dtype='int64')
-        self.recv_hooks.append(self._mpi_Irecv_double(self.recv_points, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_points, \
             (self.mine-1)%self.size, tag=10))
-        self.recv_hooks.append(self._mpi_Irecv_double(self.recv_fields_vals, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_fields_vals, \
             (self.mine-1)%self.size, tag=20))
-        self.recv_hooks.append(self._mpi_Irecv_long(self.recv_gen_array, \
+        self.recv_hooks.append(self.comm.mpi_nonblocking_recv(self.recv_gen_array, \
             (self.mine-1)%self.size, tag=40))
 
     def _send_arrays(self):
         """
         Send the data arrays to the right-hand neighbor.
         """
-        self.send_hooks.append(self._mpi_Isend_double(self.points,\
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.points,\
             (self.mine+1)%self.size, tag=10))
-        self.send_hooks.append(self._mpi_Isend_double(self.fields_vals,\
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.fields_vals,\
             (self.mine+1)%self.size, tag=20))
-        self.send_hooks.append(self._mpi_Isend_long(self.gen_array, \
+        self.send_hooks.append(self.comm.mpi_nonblocking_send(self.gen_array, \
             (self.mine+1)%self.size, tag=40))
 
     def _allsum_bin_hits(self):
@@ -441,8 +442,8 @@
         Add up the hits to all the bins globally for all functions.
         """
         for fset in self._fsets:
-            fset.too_low = self._mpi_allsum(fset.too_low)
-            fset.too_high = self._mpi_allsum(fset.too_high)
+            fset.too_low = self.comm.mpi_allreduce(fset.too_low, op='sum')
+            fset.too_high = self.comm.mpi_allreduce(fset.too_high, op='sum')
             fset.binned = {}
             if self.mine == 0:
                 mylog.info("Function %s had values out of range for these fields:" % \
@@ -452,7 +453,7 @@
                     (field, fset.too_high[i], fset.too_low[i]))
             for length in self.lengths:
                 fset.length_bin_hits[length] = \
-                    self._mpi_Allsum_long(fset.length_bin_hits[length])
+                    self.comm.mpi_allreduce(fset.length_bin_hits[length], op='sum')
                 # Find out how many were successfully binned.
                 fset.binned[length] = fset.length_bin_hits[length].sum()
                 # Normalize the counts.
@@ -621,7 +622,7 @@
         >>> tpf.write_out_means()
         """
         for fset in self._fsets:
-            fp = self._write_on_root(fn % fset.function.__name__)
+            fp = self.comm.write_on_root(fn % fset.function.__name__)
             fset._avg_bin_hits()
             line = "# length".ljust(sep)
             line += "count".ljust(sep)
@@ -689,7 +690,7 @@
         for fset in self._fsets:
             # Only operate on correlation functions.
             if fset.corr_norm == None: continue
-            fp = self._write_on_root("%s_correlation.txt" % fset.function.__name__)
+            fp = self.comm.write_on_root("%s_correlation.txt" % fset.function.__name__)
             line = "# length".ljust(sep)
             line += "\\xi".ljust(sep)
             fp.write(line + "\n")

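The two-point function exchange keeps its ring topology: each task posts nonblocking receives from its left neighbor, nonblocking sends to its right neighbor, and waits on all of the hooks before touching the buffers. Compressed into a standalone sketch (ring_pass is an illustrative name; the comm methods and tag usage mirror the diff):

import numpy as na

def ring_pass(comm, points):
    left = (comm.rank - 1) % comm.size
    right = (comm.rank + 1) % comm.size
    recv_points = na.zeros_like(points)
    hooks = []
    # Post the receive before the matching send, as the code above does.
    hooks.append(comm.mpi_nonblocking_recv(recv_points, left, tag=10))
    hooks.append(comm.mpi_nonblocking_send(points, right, tag=10))
    # The buffers are only safe to read once every request has completed.
    comm.mpi_Request_Waitall(hooks)
    return recv_points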

--- a/yt/config.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/config.py	Thu Oct 20 08:52:24 2011 -0400
@@ -38,8 +38,10 @@
     inline = 'False',
     __withinreason = 'False',
     __parallel = 'False',
-    __parallel_rank = '0',
-    __parallel_size = '1',
+    __global_parallel_rank = '0',
+    __global_parallel_size = '1',
+    __topcomm_parallel_rank = '0',
+    __topcomm_parallel_size = '1',
     storeparameterfiles = 'True',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',

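With the configuration keys split into global and top-communicator variants, root-only work is now guarded on __topcomm_parallel_rank instead of __parallel_rank. The new query looks like this (same ytcfg call as in the light cone hunks above):

from yt.config import ytcfg

# Rank within the top-level communicator; the __global_parallel_* keys track
# the full MPI world instead.
if ytcfg.getint("yt", "__topcomm_parallel_rank") == 0:
    print "only the top-communicator root does this work"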

--- a/yt/data_objects/data_containers.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/data_containers.py	Thu Oct 20 08:52:24 2011 -0400
@@ -70,11 +70,11 @@
     """
     def save_state(self, grid, field=None):
         old_params = grid.field_parameters
-        old_keys = grid.data.keys()
+        old_keys = grid.field_data.keys()
         grid.field_parameters = self.field_parameters
         tr = func(self, grid, field)
         grid.field_parameters = old_params
-        grid.data = dict( [(k, grid.data[k]) for k in old_keys] )
+        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
         return tr
     return save_state
 
@@ -120,6 +120,12 @@
         return self._vc_data[field][grid.id]
     return check_cache
 
+class YTFieldData(dict):
+    """
+    A Container object for field data, instead of just having it be a dict.
+    """
+    pass
+
 class FakeGridForParticles(object):
     """
     Mock up a grid to insert particle positions and radii
@@ -128,20 +134,20 @@
     def __init__(self, grid):
         self._corners = grid._corners
         self.field_parameters = {}
-        self.data = {'x':grid['particle_position_x'],
-                     'y':grid['particle_position_y'],
-                     'z':grid['particle_position_z'],
-                     'dx':grid['dx'],
-                     'dy':grid['dy'],
-                     'dz':grid['dz']}
+        self.field_data = YTFieldData({'x':grid['particle_position_x'],
+                                       'y':grid['particle_position_y'],
+                                       'z':grid['particle_position_z'],
+                                       'dx':grid['dx'],
+                                       'dy':grid['dy'],
+                                       'dz':grid['dz']})
         self.dds = grid.dds.copy()
         self.real_grid = grid
         self.child_mask = 1
-        self.ActiveDimensions = self.data['x'].shape
+        self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
         
     def __getitem__(self, field):
-        if field not in self.data.keys():
+        if field not in self.field_data.keys():
             if field == "RadiusCode":
                 center = self.field_parameters['center']
                 tempx = na.abs(self['x'] - center[0])
@@ -153,7 +159,7 @@
                 tr = na.sqrt( tempx**2.0 + tempy**2.0 + tempz**2.0 )
             else:
                 raise KeyError(field)
-        else: tr = self.data[field]
+        else: tr = self.field_data[field]
         return tr
 
 class AMRData(object):
@@ -187,7 +193,7 @@
         mylog.debug("Appending object to %s (type: %s)", self.pf, type(self))
         if fields == None: fields = []
         self.fields = ensure_list(fields)[:]
-        self.data = {}
+        self.field_data = YTFieldData()
         self.field_parameters = {}
         self.__set_default_field_parameters()
         self._cut_masks = {}
@@ -249,7 +255,7 @@
         """
         Clears out all data from the AMRData instance, freeing memory.
         """
-        self.data.clear()
+        self.field_data.clear()
         if self._grids is not None:
             for grid in self._grids: grid.clear_data()
 
@@ -266,7 +272,7 @@
         """
         Checks if a data field already exists.
         """
-        return self.data.has_key(key)
+        return self.field_data.has_key(key)
 
     def _refresh_data(self):
         """
@@ -276,24 +282,24 @@
         self.get_data()
 
     def keys(self):
-        return self.data.keys()
+        return self.field_data.keys()
 
     def __getitem__(self, key):
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.data.has_key(key):
+        if not self.field_data.has_key(key):
             if key not in self.fields:
                 self.fields.append(key)
             self.get_data(key)
-        return self.data[key]
+        return self.field_data[key]
 
     def __setitem__(self, key, val):
         """
         Sets a field to be some other value.
         """
         if key not in self.fields: self.fields.append(key)
-        self.data[key] = val
+        self.field_data[key] = val
 
     def __delitem__(self, key):
         """
@@ -303,21 +309,21 @@
             del self.fields[self.fields.index(key)]
         except ValueError:
             pass
-        del self.data[key]
+        del self.field_data[key]
 
     def _generate_field_in_grids(self, fieldName):
         pass
 
     _key_fields = None
     def write_out(self, filename, fields=None, format="%0.16e"):
-        if fields is None: fields=sorted(self.data.keys())
+        if fields is None: fields=sorted(self.field_data.keys())
         if self._key_fields is None: raise ValueError
         field_order = self._key_fields[:]
         for field in field_order: self[field]
         field_order += [field for field in fields if field not in field_order]
         fid = open(filename,"w")
         fid.write("\t".join(["#"] + field_order + ["\n"]))
-        field_data = na.array([self.data[field] for field in field_order])
+        field_data = na.array([self.field_data[field] for field in field_order])
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -465,11 +471,11 @@
         else:
             fields_to_get = ensure_list(fields)
         if not self.sort_by in fields_to_get and \
-            self.sort_by not in self.data:
+            self.sort_by not in self.field_data:
             fields_to_get.insert(0, self.sort_by)
         mylog.debug("Going to obtain %s", fields_to_get)
         for field in fields_to_get:
-            if self.data.has_key(field):
+            if self.field_data.has_key(field):
                 continue
             mylog.info("Getting field %s from %s", field, len(self._grids))
             if field not in self.hierarchy.field_list and not in_grids:
@@ -478,7 +484,7 @@
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
-            if not self.data.has_key(field):
+            if not self.field_data.has_key(field):
                 continue
             if self._sortkey is None:
                 self._sortkey = na.argsort(self[self.sort_by])
@@ -764,6 +770,7 @@
         Prepares the AMR2DData, normal to *axis*.  If *axis* is 4, we are not
         aligned with any axis.
         """
+        ParallelAnalysisInterface.__init__(self)
         self.axis = axis
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
@@ -789,7 +796,7 @@
             fields_to_get = ensure_list(fields)
         temp_data = {}
         for field in fields_to_get:
-            if self.data.has_key(field): continue
+            if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
                 if self._generate_field(field):
                     continue # A "True" return means we did it
@@ -804,12 +811,12 @@
             self[field] = temp_data[field] 
         # We finalize
         if temp_data != {}:
-            temp_data = self._mpi_catdict(temp_data)
+            temp_data = self.comm.par_combine_object(temp_data,
+                    datatype='dict', op='cat')
         # And set, for the next group
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator
@@ -992,12 +999,14 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0:
             points = None
-            t = self._mpi_catarray(None)
+            t = self.comm.par_combine_object(None, datatype="array", op="cat")
         else:
             points = na.concatenate(points)
-            # We have to transpose here so that _mpi_catarray works properly, as
-            # it and the alltoall assume the long axis is the last one.
-            t = self._mpi_catarray(points.transpose())
+            # We have to transpose here so that _par_combine_object works
+            # properly, as it and the alltoall assume the long axis is the last
+            # one.
+            t = self.comm.par_combine_object(points.transpose(),
+                        datatype="array", op="cat")
         self['px'] = t[0,:]
         self['py'] = t[1,:]
         self['pz'] = t[2,:]
@@ -1212,7 +1221,7 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
         else: points = na.concatenate(points)
-        t = self._mpi_catarray(points)
+        t = self.comm.par_combine_object(points, datatype="array", op="cat")
         pos = (t[:,0:3] - self.center)
         self['px'] = na.dot(pos, self._x_vec)
         self['py'] = na.dot(pos, self._y_vec)
@@ -1423,7 +1432,7 @@
         temp_data = {}
         _size = self.dims * self.dims
         for field in fields_to_get:
-            if self.data.has_key(field): continue
+            if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
                 if self._generate_field(field):
                     continue # A "True" return means we did it
@@ -1432,8 +1441,8 @@
             self[field] = na.zeros(_size, dtype='float64')
             for grid in self._get_grids():
                 self._get_data_from_grid(grid, field)
-            self[field] = self._mpi_allsum(\
-                self[field]).reshape([self.dims]*2).transpose()
+            self[field] = self.comm.mpi_allreduce(\
+                self[field], op='sum').reshape([self.dims]*2).transpose()
 
     def interpolate_discretize(self, *args, **kwargs):
         pass
@@ -1592,7 +1601,7 @@
         else: fields = ensure_list(fields)
         # We need a new tree for every single set of fields we add
         self._obtain_fields(fields, self._node_name)
-        fields = [f for f in fields if f not in self.data]
+        fields = [f for f in fields if f not in self.field_data]
         if len(fields) == 0: return
         tree = self._get_tree(len(fields))
         coord_data = []
@@ -1604,21 +1613,21 @@
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
-                    self._get_dependencies(fields))
-            self._preload([g for g in self._get_grid_objs()],
-                          self._get_dependencies(fields), self.hierarchy.io)
+                    self.get_dependencies(fields))
+            self.comm.preload([g for g in self._get_grid_objs()],
+                          self.get_dependencies(fields), self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload([g for g in self._get_grid_objs()
+                self.comm.preload([g for g in self._get_grid_objs()
                                  if g.Level == level],
-                              self._get_dependencies(fields), self.hierarchy.io)
+                              self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
-        tree = self.merge_quadtree_buffers(tree)
+        tree = self.comm.merge_quadtree_buffers(tree)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)
@@ -1832,7 +1841,7 @@
 
     def _initialize_source(self, source = None):
         if source is None:
-            check, source = self._partition_hierarchy_2d(self.axis)
+            check, source = self.partition_hierarchy_2d(self.axis)
             self._check_region = check
             #self._okay_to_serialize = (not check)
         else:
@@ -1982,7 +1991,7 @@
         if fields is None: fields = ensure_list(self.fields)[:]
         else: fields = ensure_list(fields)
         self._obtain_fields(fields, self._node_name)
-        fields = [f for f in fields if f not in self.data]
+        fields = [f for f in fields if f not in self.field_data]
         if len(fields) == 0: return
         coord_data = []
         field_data = []
@@ -1993,13 +2002,13 @@
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
             print "Preloading %s grids and getting %s" % (
-                    len(self.source._grids), self._get_dependencies(fields))
-            self._preload(self.source._grids,
-                          self._get_dependencies(fields), self.hierarchy.io)
+                    len(self.source._grids), self.get_dependencies(fields))
+            self.comm.preload(self.source._grids,
+                          self.get_dependencies(fields), self.hierarchy.io)
         for level in range(0, self._max_level+1):
             if self.preload_style == 'level':
-                self._preload(self.source.select_grids(level),
-                              self._get_dependencies(fields), self.hierarchy.io)
+                self.comm.preload(self.source.select_grids(level),
+                              self.get_dependencies(fields), self.hierarchy.io)
             self.__calculate_overlap(level)
             my_coords, my_pdx, my_pdy, my_fields = \
                 self.__project_level(level, fields)
@@ -2035,7 +2044,7 @@
         data['pdy'] *= 0.5
         data['fields'] = field_data
         # Now we run the finalizer, which is ignored if we don't need it
-        data = self._mpi_catdict(data)
+        data = self.comm.par_combine_object(data, datatype='dict', op='cat')
         field_data = na.vsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
             self[field] = field_data[fi].ravel()
@@ -2221,7 +2230,7 @@
             self._get_data_from_grid(grid, fields_to_get, dls)
         mylog.info("IO completed; summing")
         for field in fields_to_get:
-            self[field] = self._mpi_Allsum_double(self[field])
+            self[field] = self.comm.mpi_allreduce(self[field], op='sum')
             conv = self.pf.units[self.pf.field_info[field].projection_conversion]
             self[field] *= conv
 
@@ -2304,7 +2313,7 @@
             fields_to_get = ensure_list(fields)
         mylog.debug("Going to obtain %s", fields_to_get)
         for field in fields_to_get:
-            if self.data.has_key(field):
+            if self.field_data.has_key(field):
                 continue
             if field not in self.hierarchy.field_list and not in_grids:
                 if self._generate_field(field):
@@ -2316,14 +2325,14 @@
                self.pf.field_info[field].particle_type and \
                self.pf.h.io._particle_reader:
                 self.particles.get_data(field)
-                if field not in self.data:
+                if field not in self.field_data:
                     if self._generate_field(field): continue
             mylog.info("Getting field %s from %s", field, len(self._grids))
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
                  for grid in self._grids])
         for field in fields_to_get:
-            if not self.data.has_key(field):
+            if not self.field_data.has_key(field):
                 continue
             self[field] = self[field]
 
@@ -3198,7 +3207,7 @@
             fields = ensure_list(fields)
         obtain_fields = []
         for field in fields:
-            if self.data.has_key(field): continue
+            if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
                 try:
                     #print "Generating", field
@@ -3311,7 +3320,7 @@
             fields_to_get = ensure_list(field)
         for field in fields_to_get:
             grid_count = 0
-            if self.data.has_key(field):
+            if self.field_data.has_key(field):
                 continue
             mylog.debug("Getting field %s from %s possible grids",
                        field, len(self._grids))
@@ -3343,9 +3352,9 @@
 
     def _update_level_state(self, level, field = None):
         dx = self._base_dx / self.pf.refine_by**level
-        self.data['cdx'] = dx[0]
-        self.data['cdy'] = dx[1]
-        self.data['cdz'] = dx[2]
+        self.field_data['cdx'] = dx[0]
+        self.field_data['cdy'] = dx[1]
+        self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
         self.global_startindex = na.rint(LL / dx).astype('int64') - 1
@@ -3354,13 +3363,13 @@
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
-            self.data[field] = na.zeros(idims,dtype='float64')-999
+            self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
-            self.data[field] = na.zeros(idims,dtype='float64')-999
+            self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, field):
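
The old per-object helpers (_mpi_catdict, _mpi_Allsum_double) become methods on a
communicator object, self.comm.  A minimal sketch of what the 'sum' reduction used
for projections amounts to, written against plain mpi4py rather than yt's wrapper
(the wrapper presumably layers conversion and bookkeeping on top, so this shows
only the underlying collective):

    # run with e.g.: mpirun -np 4 python sketch.py
    import numpy as na
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    # Each rank holds a partial projection buffer; after Allreduce every rank
    # ends up with the element-wise sum across all ranks.
    local = na.arange(4, dtype='float64') * (comm.rank + 1)
    total = na.empty_like(local)
    comm.Allreduce(local, total, op=MPI.SUM)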


--- a/yt/data_objects/derived_quantities.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Thu Oct 20 08:52:24 2011 -0400
@@ -66,6 +66,7 @@
                  combine_function, units = "",
                  n_ret = 0, force_unlazy=False):
         # We wrap the function with our object
+        ParallelAnalysisInterface.__init__(self)
         self.__doc__ = function.__doc__
         self.__name__ = name
         self.collection = collection
@@ -85,7 +86,7 @@
             e.NumberOfParticles = 1
             self.func(e, *args, **kwargs)
             mylog.debug("Preloading %s", e.requested)
-            self._preload([g for g in self._get_grid_objs()], e.requested,
+            self.comm.preload([g for g in self._get_grid_objs()], e.requested,
                           self._data_source.pf.h.io)
         if lazy_reader and not self.force_unlazy:
             return self._call_func_lazy(args, kwargs)
@@ -103,13 +104,14 @@
 
     def _finalize_parallel(self):
         # Note that we do some fancy footwork here.
-        # _mpi_catarray and its affiliated alltoall function
+        # _par_combine_object and its affiliated alltoall function
         # assume that the *long* axis is the last one.  However,
         # our long axis is the first one!
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            rv.append(self._mpi_catarray(data).transpose())
+            rv.append(self.comm.par_combine_object(data,
+                        datatype="array", op="cat").transpose())
         self.retvals = rv
         
     def _call_func_unlazy(self, args, kwargs):
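
The transpose-concatenate-transpose footwork the comment describes can be checked
with local arrays alone; the cross-rank concatenation itself happens inside
par_combine_object, so the two arrays below merely stand in for two ranks' retvals:

    import numpy as na
    a = na.zeros((2, 3))      # "rank 0": two return tuples of three quantities
    b = na.ones((5, 3))       # "rank 1": five return tuples
    # The combine step concatenates along the *last* axis, so flip the long
    # axis there and flip it back afterwards.
    combined = na.concatenate([a.transpose(), b.transpose()], axis=-1).transpose()
    print combined.shape      # (7, 3)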


--- a/yt/data_objects/field_info_container.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/field_info_container.py	Thu Oct 20 08:52:24 2011 -0400
@@ -466,5 +466,5 @@
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
-        if data._type_name == 'grid': return True
+        if getattr(data, "_type_name", None) == 'grid': return True
         raise NeedsOriginalGrid()
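
Using getattr with a default means objects that simply lack _type_name fall
through to NeedsOriginalGrid instead of dying with an AttributeError; a tiny
illustration (the class name is made up):

    class NotAGrid(object):
        pass
    # False, and no AttributeError is raised along the way.
    print getattr(NotAGrid(), "_type_name", None) == 'grid'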


--- a/yt/data_objects/grid_patch.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/grid_patch.py	Thu Oct 20 08:52:24 2011 -0400
@@ -30,6 +30,7 @@
 
 from yt.funcs import *
 
+from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.definitions import x_dict, y_dict
 from .field_info_container import \
     NeedsGridType, \
@@ -49,7 +50,7 @@
     _con_args = ('id', 'filename')
     OverlappingSiblings = None
 
-    __slots__ = ['data', 'field_parameters', 'id', 'hierarchy', 'pf',
+    __slots__ = ['field_data', 'field_parameters', 'id', 'hierarchy', 'pf',
                  'ActiveDimensions', 'LeftEdge', 'RightEdge', 'Level',
                  'NumberOfParticles', 'Children', 'Parent',
                  'start_index', 'filename', '__weakref__', 'dds',
@@ -57,7 +58,7 @@
                  '_parent_id', '_children_ids']
 
     def __init__(self, id, filename=None, hierarchy=None):
-        self.data = {}
+        self.field_data = YTFieldData()
         self.field_parameters = {}
         self.id = id
         if hierarchy: self.hierarchy = weakref.proxy(hierarchy)
@@ -140,36 +141,36 @@
             raise exceptions.KeyError, field
 
     def has_key(self, key):
-        return (key in self.data)
+        return (key in self.field_data)
 
     def __getitem__(self, key):
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.data.has_key(key):
+        if not self.field_data.has_key(key):
             self.get_data(key)
-        return self.data[key]
+        return self.field_data[key]
 
     def __setitem__(self, key, val):
         """
         Sets a field to be some other value.
         """
-        self.data[key] = val
+        self.field_data[key] = val
 
     def __delitem__(self, key):
         """
         Deletes a field
         """
-        del self.data[key]
+        del self.field_data[key]
 
     def keys(self):
-        return self.data.keys()
+        return self.field_data.keys()
 
     def get_data(self, field):
         """
         Returns a field or set of fields for a key or set of keys
         """
-        if not self.data.has_key(field):
+        if not self.field_data.has_key(field):
             if field in self.hierarchy.field_list:
                 conv_factor = 1.0
                 if self.pf.field_info.has_key(field):
@@ -178,7 +179,7 @@
                    self.NumberOfParticles == 0:
                     # because this gets upcast to float
                     self[field] = na.array([],dtype='int64')
-                    return self.data[field]
+                    return self.field_data[field]
                 try:
                     temp = self.hierarchy.io.pop(self, field)
                     self[field] = na.multiply(temp, conv_factor, temp)
@@ -191,7 +192,7 @@
                     else: raise
             else:
                 self._generate_field(field)
-        return self.data[field]
+        return self.field_data[field]
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -205,7 +206,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
     def _corners(self):
@@ -247,7 +248,7 @@
         """
         self._del_child_mask()
         self._del_child_indices()
-        self.data.clear()
+        self.field_data.clear()
         self._setup_dx()
 
     def check_child_masks(self):
@@ -304,11 +305,11 @@
         :meth:`clear_derived_quantities`.
         """
         for key in self.keys():
-            del self.data[key]
-        del self.data
+            del self.field_data[key]
+        del self.field_data
         if hasattr(self,"retVal"):
             del self.retVal
-        self.data = {}
+        self.field_data = YTFieldData()
         self.clear_derived_quantities()
 
     def clear_derived_quantities(self):
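
The rename from self.data to self.field_data keeps the same lazy-fill accessor
pattern.  A standalone sketch of that pattern, with FieldCache and _read_field as
illustrative stand-ins for the grid and its I/O, and assuming YTFieldData behaves
like a plain dict keyed by field name:

    class FieldCache(object):
        def __init__(self):
            self.field_data = {}              # stand-in for YTFieldData()
        def _read_field(self, key):
            return [0.0, 1.0, 2.0]            # fake I/O
        def __getitem__(self, key):
            # Fill the cache on first access, as grid_patch's get_data does.
            if key not in self.field_data:
                self.field_data[key] = self._read_field(key)
            return self.field_data[key]

    fc = FieldCache()
    print fc["Density"]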


--- a/yt/data_objects/hierarchy.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/hierarchy.py	Thu Oct 20 08:52:24 2011 -0400
@@ -48,6 +48,7 @@
     float_type = 'float64'
 
     def __init__(self, pf, data_style):
+        ParallelAnalysisInterface.__init__(self)
         self.parameter_file = weakref.proxy(pf)
         self.pf = self.parameter_file
 
@@ -177,7 +178,7 @@
             writeable = os.access(fn, os.W_OK)
         writeable = writeable and not ytcfg.getboolean('yt','onlydeserialize')
         # We now have our conditional stuff
-        self._barrier()
+        self.comm.barrier()
         if not writeable and not exists: return
         if writeable:
             try:
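
Hierarchies (and, above, derived quantities and profiles) now call the base
__init__ explicitly so that self.comm exists before the first collective.  The
pattern, sketched against the import path used elsewhere in this changeset and
assuming a matching yt checkout is importable; MyParallelThing is just an
illustrative name:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ParallelAnalysisInterface

    class MyParallelThing(ParallelAnalysisInterface):
        def __init__(self):
            ParallelAnalysisInterface.__init__(self)   # sets up self.comm
        def sync(self):
            self.comm.barrier()                        # replaces self._barrier()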


--- a/yt/data_objects/particle_io.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/particle_io.py	Thu Oct 20 08:52:24 2011 -0400
@@ -91,8 +91,7 @@
             fields_to_read, rtype, args, grid_list, count_list,
             conv_factors)
         for [n, v] in zip(fields_to_read, rvs):
-            self.source.data[n] = v
-        print self.source.data.keys()
+            self.source.field_data[n] = v
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
     periodic = False


--- a/yt/data_objects/profiles.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/profiles.py	Thu Oct 20 08:52:24 2011 -0400
@@ -30,6 +30,7 @@
 
 from yt.funcs import *
 
+from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.data_point_utilities import \
     Bin1DProfile, Bin2DProfile, Bin3DProfile
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -63,9 +64,10 @@
 # We could, but I think we instead want to deal with the root datasource.
 class BinnedProfile(ParallelAnalysisInterface):
     def __init__(self, data_source, lazy_reader):
+        ParallelAnalysisInterface.__init__(self)
         self._data_source = data_source
         self.pf = data_source.pf
-        self._data = {}
+        self.field_data = YTFieldData()
         self._pdata = {}
         self._lazy_reader = lazy_reader
 
@@ -79,7 +81,7 @@
 
     def _initialize_parallel(self, fields):
         g_objs = [g for g in self._get_grid_objs()]
-        self._preload(g_objs, self._get_dependencies(fields),
+        self.comm.preload(g_objs, self.get_dependencies(fields),
                       self._data_source.hierarchy.io)
 
     def _lazy_add_fields(self, fields, weight, accumulation):
@@ -119,10 +121,10 @@
 
     def _finalize_parallel(self):
         for key in self.__data:
-            self.__data[key] = self._mpi_allsum(self.__data[key])
+            self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
-            self.__weight_data[key] = self._mpi_allsum(self.__weight_data[key])
-        self.__used = self._mpi_allsum(self.__used)
+            self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+        self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:
@@ -148,18 +150,18 @@
             self._unlazy_add_fields(fields, weight, accumulation)
         if fractional:
             for field in fields:
-                self._data[field] /= self._data[field].sum()
+                self.field_data[field] /= self.field_data[field].sum()
 
     def keys(self):
-        return self._data.keys()
+        return self.field_data.keys()
 
     def __getitem__(self, key):
         # This raises a KeyError if it doesn't exist
         # This is because we explicitly want to add all fields
-        return self._data[key]
+        return self.field_data[key]
 
     def __setitem__(self, key, value):
-        self._data[key] = value
+        self.field_data[key] = value
 
     def _get_field(self, source, this_field, check_cut):
         # This is where we will iterate to get all contributions to a field
@@ -288,7 +290,7 @@
         # both: 0...N, left: 0...N-1, right: 1...N 
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
-        x = self._data[self.bin_field]
+        x = self.field_data[self.bin_field]
         if bin_style is 'both': pass
         elif bin_style is 'left': x = x[:-1]
         elif bin_style is 'right': x = x[1:]
@@ -307,15 +309,15 @@
         *bin_style* (left, right, center, both).
         '''
         fid = open(filename,"w")
-        fields = [field for field in sorted(self._data.keys()) if field != "UsedBins"]
+        fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
         field_data = na.array(self.choose_bins(bin_style)) 
         if bin_style is 'both':
-            field_data = na.append([field_data], na.array([self._data[field] for field in fields]), axis=0)
+            field_data = na.append([field_data], na.array([self.field_data[field] for field in fields]), axis=0)
         else: 
-            field_data = na.append([field_data], na.array([self._data[field][:-1] for field in fields]), axis=0)
+            field_data = na.append([field_data], na.array([self.field_data[field][:-1] for field in fields]), axis=0)
         
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
@@ -334,7 +336,7 @@
         *bin_style* (left, right, center, both).
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self._data.keys()) if (field != "UsedBins" and field != self.bin_field)]
+        fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.bin_field)]
         if group_prefix is None:
             name = "%s-1d" % (self.bin_field)
         else:
@@ -346,7 +348,7 @@
         group = fid.create_group(name)
         group.attrs["x-axis-%s" % self.bin_field] = self.choose_bins(bin_style)
         for field in fields:
-            dset = group.create_dataset("%s" % field, data=self._data[field][:-1])
+            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1])
         fid.close()
 
     def _get_bin_fields(self):
@@ -467,8 +469,8 @@
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
 
-        x = self._data[self.x_bin_field]
-        y = self._data[self.y_bin_field]
+        x = self.field_data[self.x_bin_field]
+        y = self.field_data[self.y_bin_field]
         if bin_style is 'both':
             pass
         elif bin_style is 'left':
@@ -498,17 +500,17 @@
         both).
         """
         fid = open(filename,"w")
-        fields = [field for field in sorted(self._data.keys()) if field != "UsedBins"]
+        fields = [field for field in sorted(self.field_data.keys()) if field != "UsedBins"]
         fid.write("\t".join(["#"] + [self.x_bin_field, self.y_bin_field]
                           + fields + ["\n"]))
         x,y = self.choose_bins(bin_style)
         x,y = na.meshgrid(x,y)
         field_data = [x.ravel(), y.ravel()]
         if bin_style is not 'both':
-            field_data += [self._data[field][:-1,:-1].ravel() for field in fields
+            field_data += [self.field_data[field][:-1,:-1].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
         else:
-            field_data += [self._data[field].ravel() for field in fields
+            field_data += [self.field_data[field].ravel() for field in fields
                            if field not in [self.x_bin_field, self.y_bin_field]]
 
         field_data = na.array(field_data)
@@ -529,7 +531,7 @@
         right, center, both).
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self._data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
+        fields = [field for field in sorted(self.field_data.keys()) if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field)]
         if group_prefix is None:
             name = "%s-%s-2d" % (self.y_bin_field, self.x_bin_field)
         else:
@@ -543,7 +545,7 @@
         group.attrs["x-axis-%s" % self.x_bin_field] = xbins
         group.attrs["y-axis-%s" % self.y_bin_field] = ybins
         for field in fields:
-            dset = group.create_dataset("%s" % field, data=self._data[field][:-1,:-1])
+            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1])
         fid.close()
 
     def _get_bin_fields(self):
@@ -727,9 +729,9 @@
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
 
-        x = self._data[self.x_bin_field]
-        y = self._data[self.y_bin_field]
-        z = self._data[self.z_bin_field]
+        x = self.field_data[self.x_bin_field]
+        y = self.field_data[self.y_bin_field]
+        z = self.field_data[self.z_bin_field]
         if bin_style is 'both':
             pass
         elif bin_style is 'left':
@@ -770,7 +772,7 @@
         attributes.
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self._data.keys()) 
+        fields = [field for field in sorted(self.field_data.keys()) 
                   if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field and field != self.z_bin_field)]
         if group_prefix is None:
             name = "%s-%s-%s-3d" % (self.z_bin_field, self.y_bin_field, self.x_bin_field)
@@ -788,7 +790,7 @@
         group.attrs["z-axis-%s" % self.z_bin_field] = zbins
         
         for field in fields:
-            dset = group.create_dataset("%s" % field, data=self._data[field][:-1,:-1,:-1])
+            dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1,:-1])
         fid.close()
 
 
@@ -818,7 +820,7 @@
                               self[self.z_bin_field].size),
                     'field_order':order }
         values = []
-        for field in self._data:
+        for field in self.field_data:
             if field in set_attr.values(): continue
             order.append(field)
             values.append(self[field].ravel())
@@ -832,7 +834,7 @@
         Given a *pf* parameterfile and the *name* of a stored profile, retrieve
         it into a read-only data structure.
         """
-        self._data = {}
+        self.field_data = YTFieldData()
         prof_arr = pf.h.get_data("/Profiles", name)
         if prof_arr is None: raise KeyError("No such array")
         for ax in 'xyz':
@@ -840,11 +842,11 @@
                 setattr(self, base % ax, prof_arr.getAttr(base % ax))
         for ax in 'xyz':
             fn = getattr(self, '%s_bin_field' % ax)
-            self._data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
+            self.field_data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
         shape = prof_arr.getAttr('shape')
         for fn, fd in zip(prof_arr.getAttr('field_order'),
                           prof_arr.read().transpose()):
-            self._data[fn] = fd.reshape(shape)
+            self.field_data[fn] = fd.reshape(shape)
 
     def add_fields(self, *args, **kwargs):
         raise RuntimeError("Sorry, you can't add to a stored profile.")
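
Profile finalization now reduces every binned array with
comm.mpi_allreduce(..., op='sum').  The same idea in plain mpi4py, looping over a
dict of per-rank partial bins (the field name and data here are made up):

    import numpy as na
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    partial_bins = {"CellMassMsun": na.ones(4) * comm.rank}
    for key in partial_bins:
        # Object-mode allreduce; with numpy operands MPI.SUM adds element-wise.
        partial_bins[key] = comm.allreduce(partial_bins[key], op=MPI.SUM)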


--- a/yt/data_objects/static_output.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/data_objects/static_output.py	Thu Oct 20 08:52:24 2011 -0400
@@ -162,7 +162,7 @@
     _instantiated_hierarchy = None
     @property
     def hierarchy(self):
-        if self._instantiated_hierarchy == None:
+        if self._instantiated_hierarchy is None:
             if self._hierarchy_class == None:
                 raise RuntimeError("You should not instantiate StaticOutput.")
             self._instantiated_hierarchy = self._hierarchy_class(
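
Why "is None": equality can be overridden, identity cannot, so the identity test
never invokes a custom __eq__.  A small pathological example (the class is made
up purely to show the difference):

    class AlwaysEqual(object):
        def __eq__(self, other):
            return True                  # pathological __eq__
    print AlwaysEqual() == None          # True, misleadingly
    print AlwaysEqual() is None          # False, which is what we mean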


--- a/yt/frontends/art/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/art/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -85,7 +85,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def get_global_startindex(self):
         """


--- a/yt/frontends/castro/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/castro/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -117,7 +117,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "CastroGrid_%04i" % (self.id)


--- a/yt/frontends/chombo/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/chombo/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -82,7 +82,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class ChomboHierarchy(AMRHierarchy):
 


--- a/yt/frontends/enzo/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -384,7 +384,7 @@
     def _detect_fields(self):
         self.field_list = []
         # Do this only on the root processor to save disk work.
-        if self._mpi_get_rank() == 0 or self._mpi_get_rank() == None:
+        if self.comm.rank == 0 or self.comm.rank == None:
             field_list = self.get_data("/", "DataFields")
             if field_list is None:
                 mylog.info("Gathering a field list (this may take a moment.)")
@@ -401,7 +401,7 @@
                     field_list = field_list.union(gf)
         else:
             field_list = None
-        field_list = self._mpi_bcast_pickled(field_list)
+        field_list = self.comm.mpi_bcast_pickled(field_list)
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 
@@ -589,7 +589,7 @@
             self.derived_field_list = self.__class__._cached_derived_field_list
 
     def _generate_random_grids(self):
-        my_rank = self._mpi_get_rank()
+        my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
         if len(my_grids) > 40:
             starter = na.random.randint(0, 20)
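
Field detection keeps its root-scans-then-broadcasts shape, just expressed through
comm.rank and comm.mpi_bcast_pickled.  The same shape in plain mpi4py, which is
presumably what the pickled broadcast wraps (the field names are placeholders):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        field_list = ["Density", "Temperature"]   # pretend disk scan on root
    else:
        field_list = None
    field_list = comm.bcast(field_list, root=0)   # every rank now has the list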


--- a/yt/frontends/gdf/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/gdf/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -58,7 +58,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class GDFHierarchy(AMRHierarchy):
 


--- a/yt/frontends/maestro/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/maestro/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -110,7 +110,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "MaestroGrid_%04i" % (self.id)


--- a/yt/frontends/nyx/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/nyx/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -109,7 +109,7 @@
 
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "NyxGrid_%04i" % (self.id)


--- a/yt/frontends/orion/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/orion/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -114,7 +114,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "OrionGrid_%04i" % (self.id)


--- a/yt/frontends/ramses/data_structures.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Thu Oct 20 08:52:24 2011 -0400
@@ -80,7 +80,7 @@
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def get_global_startindex(self):
         """


--- a/yt/funcs.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/funcs.py	Thu Oct 20 08:52:24 2011 -0400
@@ -187,12 +187,13 @@
        def some_root_only_function(...):
 
     """
+    from yt.config import ytcfg
     @wraps(func)
-    def donothing(*args, **kwargs):
-        return
-    from yt.config import ytcfg
-    if ytcfg.getint("yt","__parallel_rank") > 0: return donothing
-    return func
+    def check_parallel_rank(*args, **kwargs):
+        if ytcfg.getint("yt","__topcomm_parallel_rank") > 0:
+            return 
+        return func(*args, **kwargs)
+    return check_parallel_rank
 
 def deprecate(func):
     """
@@ -341,9 +342,13 @@
     handed back.
     """
     from yt.config import ytcfg
+    if kwargs.pop("global_rootonly", False):
+        cfg_option = "__global_parallel_rank"
+    else:
+        cfg_option = "__topcomm_parallel_rank"
     if not ytcfg.getboolean("yt","__parallel"):
         return func(*args,**kwargs)
-    if ytcfg.getint("yt","__parallel_rank") > 0: return
+    if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
 #
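
The rewritten rootonly defers the rank check to call time, so decorating a
function at import no longer bakes in whatever the rank happened to be at that
moment.  A self-contained sketch of the new shape, with a module-level variable
standing in for the ytcfg "__topcomm_parallel_rank" lookup:

    from functools import wraps

    PARALLEL_RANK = 0      # stand-in for the ytcfg lookup; illustrative only

    def rootonly(func):
        @wraps(func)
        def check_parallel_rank(*args, **kwargs):
            # Decided on every call, not once at decoration time.
            if PARALLEL_RANK > 0:
                return
            return func(*args, **kwargs)
        return check_parallel_rank

    @rootonly
    def announce(msg):
        print msg

    announce("only rank 0 prints this")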


--- a/yt/gui/setup.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/gui/setup.py	Thu Oct 20 08:52:24 2011 -0400
@@ -6,7 +6,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('gui',parent_package,top_path)
     config.add_subpackage('opengl_widgets')
-    config.add_subpackage('traited_explorer')
     config.add_subpackage('reason')
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()


--- a/yt/gui/traited_explorer/plot_editors.py	Thu Oct 20 08:17:55 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,128 +0,0 @@
-"""
-Figure editors for the Traits GUI
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import sys, matplotlib
-# We want matplotlib to use a wxPython backend
-matplotlib.use('QT4Agg')
-from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
-from matplotlib.figure import Figure
-from matplotlib.axes import Axes
-
-from enthought.traits.api import Any, Instance
-from enthought.traits.ui.qt4.editor import Editor
-from enthought.traits.ui.qt4.basic_editor_factory import BasicEditorFactory
-
-from enthought.pyface.action.api import ActionController
-
-from enthought.traits.ui.menu import \
-    Menu, Action, Separator, OKCancelButtons, OKButton
-
-from matplotlib.backend_bases import Event as MPLEvent
-
-class _MPLFigureEditor(Editor):
-    """ Snagged from Gael's tutorial """
-
-    scrollable  = True
-    mpl_control = Instance(FigureCanvas)
-
-    def init(self, parent):
-        self.control = self._create_canvas(parent)
-        self.set_tooltip()
-
-    def update_editor(self):
-        pass
-
-    def _create_canvas(self, parent):
-        """ Create the MPL canvas. """
-        # The panel lets us add additional controls.
-        panel = wx.Panel(parent, -1)
-        sizer = wx.BoxSizer(wx.VERTICAL)
-        panel.SetSizer(sizer)
-        # matplotlib commands to create a canvas
-        self.mpl_control = FigureCanvas(panel, -1, self.value)
-        sizer.Add(self.mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW | wx.SHAPED)
-        self.value.canvas.SetMinSize((10,8))
-        return panel
-
-class MPLFigureEditor(BasicEditorFactory):
-    klass = _MPLFigureEditor
-
-class MPLAction(Action):
-    event = Instance(MPLEvent)
-
-class _MPLVMPlotEditor(_MPLFigureEditor, ActionController):
-
-    def _create_canvas(self, parent):
-        panel = _MPLFigureEditor._create_canvas(self, parent)
-        self.mpl_control.mpl_connect("button_press_event", self.on_click)
-        return panel
-
-    def on_click(self, event):
-        if not event.inaxes: return
-        if event.button == 3:
-            my_menu = Menu(MPLAction(name="Recenter", action="object.recenter",
-                                     event=event),
-                           MPLAction(name="Yo!", action="object.do_something",
-                                     event=event))
-            wxmenu = my_menu.create_menu(self.mpl_control, self)
-            self.mpl_control.PopupMenuXY(wxmenu)
-
-    def perform ( self, action ):
-        """
-        This is largely taken/modified from the TreeEditor _perform method.
-        """
-        object            = self.object
-        method_name       = action.action
-        info              = self.ui.info
-        handler           = self.ui.handler
-        event             = action.event
-
-        if method_name.find( '.' ) >= 0:
-            if method_name.find( '(' ) < 0:
-                method_name += '(event)'
-            try:
-                eval( method_name, globals(),
-                      { 'object':  object,
-                        'editor':  self,
-                        'info':    info,
-                        'event':   event,
-                        'handler': handler } )
-            except:
-                # fixme: Should the exception be logged somewhere?
-                print sys.exc_info()
-                
-            return
-
-        method = getattr( handler, method_name, None )
-        if method is not None:
-            method( info, object )
-            return
-
-        if action.on_perform is not None:
-            action.on_perform( object )
-
-class MPLVMPlotEditor(BasicEditorFactory):
-    klass = _MPLVMPlotEditor
-


--- a/yt/gui/traited_explorer/setup.py	Thu Oct 20 08:17:55 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os, sys, os.path
-
-def configuration(parent_package='',top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('traited_explorer',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
-    return config


--- a/yt/gui/traited_explorer/traited_explorer.py	Thu Oct 20 08:17:55 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,479 +0,0 @@
-"""
-New version of Reason, using a TraitsUI-based approach
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from yt.mods import *
-from yt.utilities.definitions import \
-    x_dict, \
-    y_dict
-#pf = EnzoStaticOutput("/Users/matthewturk/Research/data/galaxy1200.dir/galaxy1200")
-
-from enthought.traits.api import \
-    HasTraits, List, Instance, Str, Float, Any, Code, PythonValue, Int, CArray, \
-    Property, Enum, cached_property, DelegatesTo, Callable, Array, \
-    Button
-from enthought.traits.ui.api import \
-    Group, VGroup, HGroup, Tabbed, View, Item, ShellEditor, InstanceEditor, ListStrEditor, \
-    ListEditor, VSplit, VFlow, HSplit, VFold, ValueEditor, TreeEditor, TreeNode, RangeEditor, \
-    EnumEditor, Handler, Controller, DNDEditor
-from enthought.traits.ui.menu import \
-    Menu, Action, Separator, OKCancelButtons, OKButton
-from enthought.pyface.action.api import \
-    ActionController
-from enthought.tvtk.pyface.scene_editor import SceneEditor
-from enthought.tvtk.pyface.api import \
-    DecoratedScene
-from enthought.tvtk.pyface.scene_model import SceneModel
-
-from plot_editors import Figure, MPLFigureEditor, MPLVMPlotEditor, Axes
-
-from yt.visualization.plot_types import VMPlot, ProjectionPlot, SlicePlot
-
-import traceback
-from tvtk_interface import \
-    HierarchyImporter, YTScene
-
-class PlotCreationHandler(Controller):
-    main_window = Instance(HasTraits)
-    pnode = Instance(HasTraits)
-
-    format = Str
-    plot_type = Any
-    
-    def close(self, info, is_ok):
-        if not is_ok:
-            super(Controller, self).close(info, True)
-            return
-        spt = self.plot_type(plot_spec=self.model, pf=self.pnode.pf,
-                           name=self.format % (self.model.axis))
-        self.pnode.data_objects.append(spt)
-        self.main_window.plot_frame_tabs.append(spt)
-        spt.plot
-
-class VTKSceneCreationHandler(PlotCreationHandler):
-    importer = Instance(HierarchyImporter)
-
-    def close(self, info, is_ok):
-        if is_ok: 
-            yt_scene = YTScene(importer=self.importer,
-                scene=SceneModel())
-            spt = VTKDataObject(name = "VTK: %s" % self.pnode.pf,
-                    scene=yt_scene.scene,
-                    yt_scene=yt_scene)
-            self.pnode.data_objects.append(spt)
-            self.main_window.plot_frame_tabs.append(spt)
-        super(Controller, self).close(info, True)
-        return True
-
-
-class DataObject(HasTraits):
-    name = Str
-
-class VTKDataObject(DataObject):
-    yt_scene = Instance(YTScene)
-    scene = DelegatesTo("yt_scene")
-    add_contours = Button
-    add_isocontour = Button
-    add_x_plane = Button
-    add_y_plane = Button
-    add_z_plane = Button
-    edit_camera = Button
-    edit_operators = Button
-    edit_pipeline = Button
-    center_on_max = Button
-    operators = DelegatesTo("yt_scene")
-    traits_view = View(
-            Item("scene", editor = 
-        SceneEditor(scene_class=DecoratedScene),
-                    resizable=True, show_label=False),
-            HGroup(Item("add_contours", show_label=False),
-                   Item("add_isocontour", show_label=False),
-                   Item("add_x_plane", show_label=False),
-                   Item("add_y_plane", show_label=False),
-                   Item("add_z_plane", show_label=False),
-                   Item("edit_camera", show_label=False),
-                   Item("edit_operators", show_label=False),
-                   Item("edit_pipeline", show_label=False),
-                   Item("center_on_max", show_label=False),
-                ),
-            )
-
-    operators_edit = View(
-        Item("operators", style='custom', show_label=False,
-             editor=ListEditor(editor=InstanceEditor(),
-                               use_notebook=True),
-              name="Edit Operators"),
-        height=500.0, width=500.0, resizable=True)
-    
-    def _edit_camera_fired(self):
-        self.yt_scene.camera_path.edit_traits()
-
-    def _edit_operators_fired(self):
-        self.edit_traits(view='operators_edit')
-
-    def _edit_pipeline_fired(self):
-        from enthought.tvtk.pipeline.browser import PipelineBrowser
-        pb = PipelineBrowser(self.scene)
-        pb.show()
-
-    def _add_contours_fired(self):
-        self.yt_scene.add_contour()
-
-    def _add_isocontour_fired(self):
-        self.yt_scene.add_isocontour()
-
-    def _add_x_plane_fired(self):
-        self.yt_scene.add_x_plane()
-
-    def _add_y_plane_fired(self):
-        self.yt_scene.add_y_plane()
-
-    def _add_z_plane_fired(self):
-        self.yt_scene.add_z_plane()
-
-    def _center_on_max_fired(self):
-        self.yt_scene.do_center_on_max()
-
-class ParameterFile(HasTraits):
-    pf = Instance(EnzoStaticOutput)
-    data_objects = List(Instance(DataObject))
-    name = Str
-
-    def _name_default(self):
-        return str(self.pf)
-
-    def do_slice(self):
-        cons_view = View(
-                Item('axis'), 
-                Item('center'), 
-                Item('field', editor=EnumEditor(name='field_list')),
-                buttons=OKCancelButtons, title="Slicer: %s" % self.pf)
-        ps = SlicePlotSpec(pf=self.pf)
-        hand = PlotCreationHandler(main_window=mw, pnode=self, model=ps,
-                                   plot_type=SlicePlotTab, format="Slice: %s")
-        ps.edit_traits(cons_view, handler=hand)
-
-    def do_proj(self):
-        cons_view = View(
-                Item('axis'), 
-                Item('field', editor=EnumEditor(name='field_list')),
-                Item('weight_field', editor=EnumEditor(name='none_field_list')),
-                buttons=OKCancelButtons, title="Projector: %s" % self.pf)
-        ps = ProjPlotSpec(pf=self.pf)
-        hand = PlotCreationHandler(main_window=mw, pnode=self, model=ps,
-                                   plot_type=ProjPlotTab, format="Proj: %s")
-        ps.edit_traits(cons_view, handler=hand)
-
-    def do_vtk(self):
-        from tvtk_interface import HierarchyImporter, \
-            HierarchyImportHandler
-        importer = HierarchyImporter(pf=self.pf, max_level=self.pf.h.max_level)
-        importer.edit_traits(handler = VTKSceneCreationHandler(
-            main_window=mw, pnode=self, importer = importer))
-
-class ParameterFileCollection(HasTraits):
-    parameter_files = List(Instance(ParameterFile))
-    name = Str
-    collection = Any
-
-    def _parameter_files_default(self):
-        my_list = []
-        for f in self.collection:
-            try:
-                pf = EnzoStaticOutput(f)
-                my_list.append(
-                    ParameterFile(pf=pf, 
-                            data_objects = []))
-            except IOError: pass
-        return my_list
-
-    def _name_default(self):
-        return str(self.collection)
-
-class ParameterFileCollectionList(HasTraits):
-    parameter_file_collections = List(Instance(ParameterFileCollection))
-
-    def _parameter_file_collections_default(self):
-        return [ParameterFileCollection(collection=c)
-                for c in fido.GrabCollections()]
-
-class DataObjectList(HasTraits):
-    data_objects = List(Str)
-
-    traits_view = View(
-              Item('data_objects', show_label=False,
-                   editor=ListStrEditor())
-               )
-
-    def _data_objects_default(self):
-        return ['a','b','c']
-
-class PlotFrameTab(DataObject):
-    figure = Instance(Figure)
-
-class VMPlotSpec(HasTraits):
-    pf = Instance(EnzoStaticOutput)
-    field = Str('Density')
-    field_list = Property(depends_on = 'pf')
-
-    center = Array(shape=(3,), dtype='float64')
-    axis = Enum(0,1,2)
-
-    @cached_property
-    def _get_field_list(self):
-        fl = self.pf.h.field_list
-        df = self.pf.h.derived_field_list
-        fl.sort(); df.sort()
-        return fl + df
-
-    def _center_default(self):
-        return self.pf.h.find_max("Density")[1]
-
-class SlicePlotSpec(VMPlotSpec):
-    pass
-
-class ProjPlotSpec(VMPlotSpec):
-    weight_field = Str("None")
-    none_field_list = Property(depends_on = 'field_list')
-
-    @cached_property
-    def _get_none_field_list(self):
-        return ["None"] + self.field_list
-
-class VMPlotTab(PlotFrameTab):
-    pf = Instance(EnzoStaticOutput)
-    figure = Instance(Figure, args=())
-    field = DelegatesTo('plot_spec')
-    field_list = DelegatesTo('plot_spec')
-    plot = Instance(VMPlot)
-    axes = Instance(Axes)
-    disp_width = Float(1.0)
-    unit = Str('unitary')
-    min_width = Property(Float, depends_on=['pf','unit'])
-    max_width = Property(Float, depends_on=['pf','unit'])
-    unit_list = Property(depends_on = 'pf')
-    smallest_dx = Property(depends_on = 'pf')
-
-    traits_view = View(VGroup(
-            HGroup(Item('figure', editor=MPLVMPlotEditor(),
-                     show_label=False)),
-            HGroup(Item('disp_width',
-                     editor=RangeEditor(format="%0.2e",
-                        low_name='min_width', high_name='max_width',
-                        mode='logslider', enter_set=True),
-                     show_label=False, width=400.0),
-                   Item('unit',
-                      editor=EnumEditor(name='unit_list')),),
-            HGroup(Item('field',
-                      editor=EnumEditor(name='field_list')),
-                )),
-             resizable=True)
-
-    def __init__(self, **traits):
-        super(VMPlotTab, self).__init__(**traits)
-        self.axes = self.figure.add_subplot(111, aspect='equal')
-
-    def _field_changed(self, old, new):
-        self.plot.switch_z(new)
-        self._redraw()
-
-    @cached_property
-    def _get_min_width(self):
-        return 50.0*self.smallest_dx*self.pf[self.unit]
-
-    @cached_property
-    def _get_max_width(self):
-        return self.pf['unitary']*self.pf[self.unit]
-
-    @cached_property
-    def _get_smallest_dx(self):
-        return self.pf.h.get_smallest_dx()
-
-    @cached_property
-    def _get_unit_list(self):
-        return self.pf.units.keys()
-
-    def _unit_changed(self, old, new):
-        self.disp_width = self.disp_width * self.pf[new]/self.pf[old]
-
-    def _disp_width_changed(self, old, new):
-        self.plot.set_width(new, self.unit)
-        self._redraw()
-
-    def _redraw(self):
-        self.figure.canvas.draw()
-
-    def recenter(self, event):
-        xp, yp = event.xdata, event.ydata
-        dx = abs(self.plot.xlim[0] - self.plot.xlim[1])/self.plot.pix[0]
-        dy = abs(self.plot.ylim[0] - self.plot.ylim[1])/self.plot.pix[1]
-        x = (dx * xp) + self.plot.xlim[0]
-        y = (dy * yp) + self.plot.ylim[0]
-        xi = x_dict[self.axis]
-        yi = y_dict[self.axis]
-        cc = self.center[:]
-        cc[xi] = x; cc[yi] = y
-        self.plot.data.center = cc[:]
-        self.plot.data.set_field_parameter('center', cc.copy())
-        self.center = cc
-
-class SlicePlotTab(VMPlotTab):
-    plot_spec = Instance(SlicePlotSpec)
-
-    axis = DelegatesTo('plot_spec')
-    center = DelegatesTo('plot_spec')
-    
-    plot = Instance(SlicePlot)
-
-    def _plot_default(self):
-        coord = self.center[self.axis]
-        sl = self.pf.h.slice(self.axis, coord, center=self.center[:])
-        sp = SlicePlot(sl, self.field, self.figure, self.axes)
-        self.figure.canvas.draw()
-        return sp
-
-    def _center_changed(self, old, new):
-        #traceback.print_stack()
-        if na.all(na.abs(old - new) == 0.0): return
-        print na.abs(old-new)
-        print "Re-slicing", old, new
-        pp = self.center
-        self.plot.data.reslice(pp[self.axis])
-        self.plot._refresh_display_width()
-        self.figure.canvas.draw()
-
-class ProjPlotTab(VMPlotTab):
-    plot_spec = Instance(ProjPlotSpec)
-
-    axis = DelegatesTo('plot_spec')
-    center = DelegatesTo('plot_spec')
-    weight_field = DelegatesTo('plot_spec')
-
-    plot = Instance(ProjectionPlot)
-
-    def _plot_default(self):
-        self.field = self.field[:]
-        self.weight_field = self.weight_field[:]
-        wf = self.weight_field
-        if str(wf) == "None": wf = None
-        proj = self.pf.h.proj(self.axis, self.field, wf,
-                        center=self.center[:])
-        pp = ProjectionPlot(proj, self.field, self.figure, self.axes)
-        self.figure.canvas.draw()
-        return pp
-
-    def _center_changed(self, old, new):
-        self.plot._refresh_display_width()
-
-class SphereWrapper(DataObject):
-    radius = Float
-    unit = Str
-
-class MainWindow(HasTraits):
-    parameter_file_collections = Instance(ParameterFileCollectionList)
-    parameter_files = Instance(ParameterFileCollection)
-    plot_frame_tabs = List(Instance(DataObject))
-    open_parameterfile = Button
-    shell = PythonValue
-
-    def _shell_default(self):
-        return globals()
-    notebook_editor = ListEditor(editor=InstanceEditor(editable=True),
-                                 use_notebook=True)
-
-    traits_view = View(VSplit(
-                    HSplit(VGroup(
-                       Item('parameter_file_collections', 
-                            width=120.0, height=500.0,
-                            show_label=False,
-                            editor = TreeEditor(editable=False,
-                    nodes=[
-                        TreeNode(node_for=[ParameterFileCollectionList],
-                                 children='parameter_file_collections',
-                                 label="=Data Collections"),
-                        TreeNode(node_for=[ParameterFileCollection],
-                                 children='parameter_files',
-                                 label="name",
-                                 view=View()),
-                        TreeNode(node_for=[ParameterFile],
-                                 children='data_objects',
-                                 label="name",
-                                 menu = Menu(Action(name='Slice',
-                                                    action='object.do_slice'),
-                                             Action(name='Project',
-                                                    action='object.do_proj'),
-                                             Action(name='VTK',
-                                                    action='object.do_vtk')),
-                                 view=View()),
-                        TreeNode(node_for=[DataObject],
-                                 children='',
-                                 label="name"),
-                                ], show_icons=False),),
-                        Item('open_parameterfile', show_label=False)),
-                       Item('plot_frame_tabs', style='custom',
-                            editor = notebook_editor,
-                            show_label=False, height=500.0, width=500.0),
-                    ),
-                    HGroup(
-                       #Item('shell', editor=ShellEditor(share=True),
-                            #show_label=False, height=120.0),
-                    ),
-                ),
-               resizable=True, width=800.0, height=660.0,
-               title="reason v2 [prototype]")
-
-    def _open_parameterfile_fired(self):
-        print "OPENING"
-
-    def _parameter_file_collections_default(self):
-        return ParameterFileCollectionList()
-
-class YTScript(HasTraits):
-    code = Code
-    traits_view = View(Item('code', show_label=False),
-                       height=0.8, width=0.8, resizable=True,
-                       buttons=OKCancelButtons)
-
-class ObjectViewer(HasTraits):
-    to_view=Any
-    traits_view = View(
-            Item('to_view', editor=ValueEditor(), show_label=False),
-                     resizable=True, height=0.8, width=0.8)
-
-def view_object(obj):
-    ObjectViewer(to_view=obj).edit_traits()
-
-def run_script():
-    my_script = YTScript()
-    my_script.edit_traits()
-    return my_script
-
-class event_mock(object):
-    inaxes = True
-    button = 3
-
-dol = DataObjectList()
-mw = MainWindow(plot_frame_tabs = [])
-mw.edit_traits()
-#mw.edit_traits()


--- a/yt/gui/traited_explorer/tvtk_interface.py	Thu Oct 20 08:17:55 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,692 +0,0 @@
-"""
-This is the preliminary interface to VTK.  Note that as of VTK 5.2, it still
-requires a patchset prepared here:
-http://yt-project.org/files/vtk_composite_data.zip
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from enthought.tvtk.tools import ivtk
-from enthought.tvtk.api import tvtk 
-from enthought.traits.api import \
-    Float, HasTraits, Instance, Range, Any, Delegate, Tuple, File, Int, Str, \
-    CArray, List, Button, Bool, Property, cached_property
-from enthought.traits.ui.api import View, Item, HGroup, VGroup, TableEditor, \
-    Handler, Controller, RangeEditor, EnumEditor, InstanceEditor
-from enthought.traits.ui.menu import \
-    Menu, Action, Separator, OKCancelButtons, OKButton
-from enthought.traits.ui.table_column import ObjectColumn
-from enthought.tvtk.pyface.api import DecoratedScene
-
-import enthought.pyface.api as pyface
-
-#from yt.reason import *
-import sys
-import numpy as na
-import time, pickle, os, os.path
-from yt.funcs import *
-from yt.analysis_modules.hierarchy_subset.api import \
-        ExtractedHierarchy, ExtractedParameterFile
-
-#from enthought.tvtk.pyface.ui.wx.wxVTKRenderWindowInteractor \
-     #import wxVTKRenderWindowInteractor
-
-from enthought.mayavi.core.lut_manager import LUTManager
-
-#wxVTKRenderWindowInteractor.USE_STEREO = 1
-
-class TVTKMapperWidget(HasTraits):
-    alpha = Float(1.0)
-    post_call = Any
-    lut_manager = Instance(LUTManager)
-
-    def _alpha_changed(self, old, new):
-        self.lut_manager.lut.alpha_range = (new, new)
-        self.post_call()
-
-class MappingPlane(TVTKMapperWidget):
-    plane = Instance(tvtk.Plane)
-    _coord_redit = editor=RangeEditor(format="%0.2e",
-                              low_name='vmin', high_name='vmax',
-                              auto_set=False, enter_set=True)
-    auto_set = Bool(False)
-    traits_view = View(Item('coord', editor=_coord_redit),
-                       Item('auto_set'),
-                       Item('alpha', editor=RangeEditor(
-                              low=0.0, high=1.0,
-                              enter_set=True, auto_set=False)),
-                       Item('lut_manager', show_label=False,
-                            editor=InstanceEditor(), style='custom'))
-    vmin = Float
-    vmax = Float
-
-    def _auto_set_changed(self, old, new):
-        if new is True:
-            self._coord_redit.auto_set = True
-            self._coord_redit.enter_set = False
-        else:
-            self._coord_redit.auto_set = False
-            self._coord_redit.enter_set = True
-
-    def __init__(self, vmin, vmax, vdefault, **traits):
-        HasTraits.__init__(self, **traits)
-        self.vmin = vmin
-        self.vmax = vmax
-        trait = Range(float(vmin), float(vmax), value=vdefault)
-        self.add_trait("coord", trait)
-        self.coord = vdefault
-
-    def _coord_changed(self, old, new):
-        orig = self.plane.origin[:]
-        orig[self.axis] = new
-        self.plane.origin = orig
-        self.post_call()
-
-class MappingMarchingCubes(TVTKMapperWidget):
-    operator = Instance(tvtk.MarchingCubes)
-    mapper = Instance(tvtk.HierarchicalPolyDataMapper)
-    vmin = Float
-    vmax = Float
-    auto_set = Bool(False)
-    _val_redit = RangeEditor(format="%0.2f",
-                             low_name='vmin', high_name='vmax',
-                             auto_set=False, enter_set=True)
-    traits_view = View(Item('value', editor=_val_redit),
-                       Item('auto_set'),
-                       Item('alpha', editor=RangeEditor(
-                            low=0.0, high=1.0,
-                            enter_set=True, auto_set=False,)),
-                       Item('lut_manager', show_label=False,
-                            editor=InstanceEditor(), style='custom'))
-
-    def __init__(self, vmin, vmax, vdefault, **traits):
-        HasTraits.__init__(self, **traits)
-        self.vmin = vmin
-        self.vmax = vmax
-        trait = Range(float(vmin), float(vmax), value=vdefault)
-        self.add_trait("value", trait)
-        self.value = vdefault
-
-    def _auto_set_changed(self, old, new):
-        if new is True:
-            self._val_redit.auto_set = True
-            self._val_redit.enter_set = False
-        else:
-            self._val_redit.auto_set = False
-            self._val_redit.enter_set = True
-
-    def _value_changed(self, old, new):
-        self.operator.set_value(0, new)
-        self.post_call()
-
-class MappingIsoContour(MappingMarchingCubes):
-    operator = Instance(tvtk.ContourFilter)
-
-class CameraPosition(HasTraits):
-    position = CArray(shape=(3,), dtype='float64')
-    focal_point = CArray(shape=(3,), dtype='float64')
-    view_up = CArray(shape=(3,), dtype='float64')
-    clipping_range = CArray(shape=(2,), dtype='float64')
-    distance = Float
-    num_steps = Int(10)
-    orientation_wxyz = CArray(shape=(4,), dtype='float64')
-
-class CameraControl(HasTraits):
-    # Traits
-    positions = List(CameraPosition)
-    yt_scene = Instance('YTScene')
-    center = Delegate('yt_scene')
-    scene = Delegate('yt_scene')
-    camera = Instance(tvtk.OpenGLCamera)
-    reset_position = Instance(CameraPosition)
-    fps = Float(25.0)
-    export_filename = 'frames'
-    periodic = Bool
-
-    # UI elements
-    snapshot = Button()
-    play = Button()
-    export_frames = Button()
-    reset_path = Button()
-    recenter = Button()
-    save_path = Button()
-    load_path = Button()
-    export_path = Button()
-
-    table_def = TableEditor(
-        columns = [ ObjectColumn(name='position'),
-                    ObjectColumn(name='focal_point'),
-                    ObjectColumn(name='view_up'),
-                    ObjectColumn(name='clipping_range'),
-                    ObjectColumn(name='num_steps') ],
-        reorderable=True, deletable=True,
-        sortable=True, sort_model=True,
-        show_toolbar=True,
-        selection_mode='row',
-        selected = 'reset_position'
-                )
-
-    default_view = View(
-                VGroup(
-                  HGroup(
-                    Item('camera', show_label=False),
-                    Item('recenter', show_label=False),
-                    label='Camera'),
-                  HGroup(
-                    Item('snapshot', show_label=False),
-                    Item('play', show_label=False),
-                    Item('export_frames',show_label=False),
-                    Item('reset_path', show_label=False),
-                    Item('save_path', show_label=False),
-                    Item('load_path', show_label=False),
-                    Item('export_path', show_label=False),
-                    Item('export_filename'),
-                    Item('periodic'),
-                    Item('fps'),
-                    label='Playback'),
-                  VGroup(
-                    Item('positions', show_label=False,
-                        editor=table_def),
-                    label='Camera Path'),
-                 ),
-                resizable=True, title="Camera Path Editor",
-                       )
-
-    def _reset_position_changed(self, old, new):
-        if new is None: return
-        cam = self.scene.camera
-        cam.position = new.position
-        cam.focal_point = new.focal_point
-        cam.view_up = new.view_up
-        cam.clipping_range = new.clipping_range
-        self.scene.render()
-
-    def __init__(self, **traits):
-        HasTraits.__init__(self, **traits)
-
-    def take_snapshot(self):
-        cam = self.scene.camera
-        self.positions.append(CameraPosition(
-                position=cam.position,
-                focal_point=cam.focal_point,
-                view_up=cam.view_up,
-                clipping_range=cam.clipping_range,
-                distance=cam.distance,
-                orientation_wxyz=cam.orientation_wxyz))
-
-    def _export_path_fired(self): 
-        dlg = pyface.FileDialog(
-            action='save as',
-            wildcard="*.cpath",
-        )
-        if dlg.open() == pyface.OK:
-            print "Saving:", dlg.path
-            self.export_camera_path(dlg.path)
-
-    def export_camera_path(self, fn):
-        to_dump = dict(positions=[], focal_points=[],
-                       view_ups=[], clipping_ranges=[],
-                       distances=[], orientation_wxyzs=[])
-        def _write(cam):
-            to_dump['positions'].append(cam.position)
-            to_dump['focal_points'].append(cam.focal_point)
-            to_dump['view_ups'].append(cam.view_up)
-            to_dump['clipping_ranges'].append(cam.clipping_range)
-            to_dump['distances'].append(cam.distance)
-            to_dump['orientation_wxyzs'].append(cam.orientation_wxyz)
-        self.step_through(0.0, callback=_write)
-        pickle.dump(to_dump, open(fn, "wb"))
-
-    def _save_path_fired(self): 
-        dlg = pyface.FileDialog(
-            action='save as',
-            wildcard="*.cpath",
-        )
-        if dlg.open() == pyface.OK:
-            print "Saving:", dlg.path
-            self.dump_camera_path(dlg.path)
-
-    def dump_camera_path(self, fn):
-        to_dump = dict(positions=[], focal_points=[],
-                       view_ups=[], clipping_ranges=[],
-                       distances=[], orientation_wxyzs=[],
-                       num_stepss=[])
-        for p in self.positions:
-            to_dump['positions'].append(p.position)
-            to_dump['focal_points'].append(p.focal_point)
-            to_dump['view_ups'].append(p.view_up)
-            to_dump['clipping_ranges'].append(p.clipping_range)
-            to_dump['distances'].append(p.distance)
-            to_dump['num_stepss'].append(p.num_steps) # stupid s
-            to_dump['orientation_wxyzs'].append(p.orientation_wxyz)
-        pickle.dump(to_dump, open(fn, "wb"))
-
-    def _load_path_fired(self):
-        dlg = pyface.FileDialog(
-            action='open',
-            wildcard="*.cpath",
-        )
-        if dlg.open() == pyface.OK:
-            print "Loading:", dlg.path
-            self.load_camera_path(dlg.path)
-
-    def load_camera_path(self, fn):
-        to_use = pickle.load(open(fn, "rb"))
-        self.positions = []
-        for i in range(len(to_use['positions'])):
-            dd = {}
-            for kw in to_use:
-                # Strip the s
-                dd[kw[:-1]] = to_use[kw][i]
-            self.positions.append(
-                CameraPosition(**dd))
-
-    def _recenter_fired(self):
-        self.camera.focal_point = self.center
-        self.scene.render()
-
-    def _snapshot_fired(self):
-        self.take_snapshot()
-
-    def _play_fired(self):
-        self.step_through()
-
-    def _export_frames_fired(self):
-        self.step_through(save_frames=True)
-
-    def _reset_path_fired(self):
-        self.positions = []
-
-    def step_through(self, pause = 1.0, callback=None, save_frames=False):
-        cam = self.scene.camera
-        frame_counter=0
-        if self.periodic:
-            cyclic_pos = self.positions + [self.positions[0]]
-        else:
-            cyclic_pos = self.positions
-        for i in range(len(cyclic_pos)-1):
-            pos1 = cyclic_pos[i]
-            pos2 = cyclic_pos[i+1]
-            r = pos1.num_steps
-            for p in range(pos1.num_steps):
-                po = _interpolate(pos1.position, pos2.position, p, r)
-                fp = _interpolate(pos1.focal_point, pos2.focal_point, p, r)
-                vu = _interpolate(pos1.view_up, pos2.view_up, p, r)
-                cr = _interpolate(pos1.clipping_range, pos2.clipping_range, p, r)
-                _set_cpos(cam, po, fp, vu, cr)
-                self.scene.render()
-                if callback is not None: callback(cam)
-                if save_frames:
-                    self.scene.save("%s_%0.5d.png" % (self.export_filename,frame_counter))
-                else:
-                    time.sleep(pause * 1.0/self.fps)
-                frame_counter += 1
-
-def _interpolate(q1, q2, p, r):
-    return q1 + p*(q2 - q1)/float(r)
-
-def _set_cpos(cam, po, fp, vu, cr):
-    cam.position = po
-    cam.focal_point = fp
-    cam.view_up = vu
-    cam.clipping_range = cr
-
-class HierarchyImporter(HasTraits):
-    pf = Any
-    min_grid_level = Int(0)
-    max_level = Int(1)
-    number_of_levels = Range(0, 13)
-    max_import_levels = Property(depends_on='min_grid_level')
-    field = Str("Density")
-    field_list = List
-    center_on_max = Bool(True)
-    center = CArray(shape = (3,), dtype = 'float64')
-    cache = Bool(True)
-    smoothed = Bool(True)
-    show_grids = Bool(True)
-
-    def _field_list_default(self):
-        fl = self.pf.h.field_list
-        df = self.pf.h.derived_field_list
-        fl.sort(); df.sort()
-        return fl + df
-    
-    default_view = View(Item('min_grid_level',
-                              editor=RangeEditor(low=0,
-                                                 high_name='max_level')),
-                        Item('number_of_levels', 
-                              editor=RangeEditor(low=1,
-                                                 high_name='max_import_levels')),
-                        Item('field', editor=EnumEditor(name='field_list')),
-                        Item('center_on_max'),
-                        Item('center', enabled_when='not object.center_on_max'),
-                        Item('smoothed'),
-                        Item('cache', label='Pre-load data'),
-                        Item('show_grids'),
-                        buttons=OKCancelButtons)
-
-    def _center_default(self):
-        return [0.5,0.5,0.5]
-
-    @cached_property
-    def _get_max_import_levels(self):
-        return min(13, self.pf.h.max_level - self.min_grid_level + 1)
-
-class HierarchyImportHandler(Controller):
-    importer = Instance(HierarchyImporter)
-    
-
-    def close(self, info, is_ok):
-        if is_ok: 
-            yt_scene = YTScene(
-                importer=self.importer)
-        super(Controller, self).close(info, True)
-        return
-
-
-class YTScene(HasTraits):
-
-    # Traits
-    importer = Instance(HierarchyImporter)
-    pf = Delegate("importer")
-    min_grid_level = Delegate("importer")
-    number_of_levels = Delegate("importer")
-    field = Delegate("importer")
-    center = CArray(shape = (3,), dtype = 'float64')
-    center_on_max = Delegate("importer")
-    smoothed = Delegate("importer")
-    cache = Delegate("importer")
-    show_grids = Delegate("importer")
-
-    camera_path = Instance(CameraControl)
-    #window = Instance(ivtk.IVTKWithCrustAndBrowser)
-    #python_shell = Delegate('window')
-    #scene = Delegate('window')
-    scene = Instance(HasTraits)
-    operators = List(HasTraits)
-
-    # State variables
-    _grid_boundaries_actor = None
-
-    # Views
-    def _window_default(self):
-        # Should experiment with passing in a pipeline browser
-        # that has two root objects -- one for TVTKBases, i.e. the render
-        # window, and one that accepts our objects
-        return ivtk.IVTKWithCrustAndBrowser(size=(800,600), stereo=1)
-
-    def _camera_path_default(self):
-        return CameraControl(yt_scene=self, camera=self.scene.camera)
-
-    def __init__(self, **traits):
-        HasTraits.__init__(self, **traits)
-        max_level = min(self.pf.h.max_level,
-                        self.min_grid_level + self.number_of_levels - 1)
-        self.extracted_pf = ExtractedParameterFile(self.pf,
-                             self.min_grid_level, max_level, offset=None)
-        self.extracted_hierarchy = self.extracted_pf.h
-        self._hdata_set = tvtk.HierarchicalBoxDataSet()
-        self._ugs = []
-        self._grids = []
-        self._min_val = 1e60
-        self._max_val = -1e60
-        gid = 0
-        if self.cache:
-            for grid_set in self.extracted_hierarchy.get_levels():
-                for grid in grid_set:
-                    grid[self.field]
-        for l, grid_set in enumerate(self.extracted_hierarchy.get_levels()):
-            gid = self._add_level(grid_set, l, gid)
-        if self.show_grids:
-            self.toggle_grid_boundaries()
-            
-    def _center_default(self):
-        return self.extracted_hierarchy._convert_coords(
-                [0.5, 0.5, 0.5])
-
-    def do_center_on_max(self):
-        self.center = self.extracted_hierarchy._convert_coords(
-            self.pf.h.find_max("Density")[1])
-        self.scene.camera.focal_point = self.center
-
-    def _add_level(self, grid_set, level, gid):
-        for grid in grid_set:
-            self._hdata_set.set_refinement_ratio(level, 2)
-            gid = self._add_grid(grid, gid, level)
-        return gid
-
-    def _add_grid(self, grid, gid, level=0):
-        mylog.debug("Adding grid %s on level %s (%s)",
-                    grid.id, level, grid.Level)
-        if grid in self._grids: return
-        self._grids.append(grid)
-
-        scalars = grid.get_vertex_centered_data(self.field, smoothed=self.smoothed)
-
-        left_index = grid.get_global_startindex()
-        origin = grid.LeftEdge
-        dds = grid.dds
-        right_index = left_index + scalars.shape - 1
-        ug = tvtk.UniformGrid(origin=origin, spacing=dds,
-                              dimensions=grid.ActiveDimensions+1)
-        if self.field not in self.pf.field_info or \
-            self.pf.field_info[self.field].take_log:
-            scalars = na.log10(scalars)
-        ug.point_data.scalars = scalars.transpose().ravel()
-        ug.point_data.scalars.name = self.field
-        if grid.Level != self.min_grid_level + self.number_of_levels - 1:
-            ug.cell_visibility_array = grid.child_mask.transpose().ravel()
-        else:
-            ug.cell_visibility_array = na.ones(
-                    grid.ActiveDimensions, dtype='int').ravel()
-        self._ugs.append((grid,ug))
-        self._hdata_set.set_data_set(level, gid, left_index, right_index, ug)
-
-        self._min_val = min(self._min_val, scalars.min())
-        self._max_val = max(self._max_val, scalars.max())
-
-        gid += 1
-        return gid
-
-    def _add_data_to_ug(self, field):
-        for g, ug in self._ugs:
-            scalars_temp = grid.get_vertex_centered_data(field, smoothed=self.smoothed)
-            ii = ug.point_data.add_array(scalars_temp.transpose().ravel())
-            ug.point_data.get_array(ii).name = field
-
-    def zoom(self, dist, unit='1'):
-        vec = self.scene.camera.focal_point - \
-              self.scene.camera.position
-        self.scene.camera.position += \
-            vec * dist/self._grids[0].pf[unit]
-        self.scene.render()
-
-    def toggle_grid_boundaries(self):
-        if self._grid_boundaries_actor is None:
-            # We don't need to track this stuff right now.
-            ocf = tvtk.OutlineCornerFilter(
-                    executive=tvtk.CompositeDataPipeline(),
-                    corner_factor = 0.5)
-            ocf.input = self._hdata_set
-            ocm = tvtk.HierarchicalPolyDataMapper(
-                input_connection = ocf.output_port)
-            self._grid_boundaries_actor = tvtk.Actor(mapper = ocm)
-            self.scene.add_actor(self._grid_boundaries_actor)
-        else:
-            self._grid_boundaries_actor.visibility = \
-            (not self._grid_boundaries_actor.visibility)
-
-    def _add_sphere(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
-        sphere = tvtk.Sphere(center=origin, radius=0.25)
-        cutter = tvtk.Cutter(executive = tvtk.CompositeDataPipeline(),
-                             cut_function = sphere)
-        cutter.input = self._hdata_set
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        smap = tvtk.HierarchicalPolyDataMapper(
-                        scalar_range=(self._min_val, self._max_val),
-                        lookup_table=lut_manager.lut,
-                        input_connection = cutter.output_port)
-        sactor = tvtk.Actor(mapper=smap)
-        self.scene.add_actors(sactor)
-        return sphere, lut_manager
-
-    def _add_plane(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
-        plane = tvtk.Plane(origin=origin, normal=normal)
-        cutter = tvtk.Cutter(executive = tvtk.CompositeDataPipeline(),
-                             cut_function = plane)
-        cutter.input = self._hdata_set
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        smap = tvtk.HierarchicalPolyDataMapper(
-                        scalar_range=(self._min_val, self._max_val),
-                        lookup_table=lut_manager.lut,
-                        input_connection = cutter.output_port)
-        sactor = tvtk.Actor(mapper=smap)
-        self.scene.add_actors(sactor)
-        return plane, lut_manager
-
-    def add_plane(self, origin=(0.0,0.0,0.0), normal=(0,1,0)):
-        self.operators.append(self._add_plane(origin, normal))
-        return self.operators[-1]
-
-    def _add_axis_plane(self, axis):
-        normal = [0,0,0]
-        normal[axis] = 1
-        np, lut_manager = self._add_plane(self.center, normal=normal)
-        LE = self.extracted_hierarchy.min_left_edge
-        RE = self.extracted_hierarchy.max_right_edge
-        self.operators.append(MappingPlane(
-                vmin=LE[axis], vmax=RE[axis],
-                vdefault = self.center[axis],
-                post_call = self.scene.render,
-                plane = np, axis=axis, coord=0.0,
-                lut_manager = lut_manager,
-                scene=self.scene))
-
-    def add_x_plane(self):
-        self._add_axis_plane(0)
-        return self.operators[-1]
-
-    def add_y_plane(self):
-        self._add_axis_plane(1)
-        return self.operators[-1]
-
-    def add_z_plane(self):
-        self._add_axis_plane(2)
-        return self.operators[-1]
-
-    def add_contour(self, val=None):
-        if val is None: 
-            if self._min_val != self._min_val:
-                self._min_val = 1.0
-            val = (self._max_val+self._min_val) * 0.5
-        cubes = tvtk.MarchingCubes(
-                    executive = tvtk.CompositeDataPipeline())
-        cubes.input = self._hdata_set
-        cubes.set_value(0, val)
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        cube_mapper = tvtk.HierarchicalPolyDataMapper(
-                                input_connection = cubes.output_port,
-                                lookup_table=lut_manager.lut)
-        cube_mapper.color_mode = 'map_scalars'
-        cube_mapper.scalar_range = (self._min_val, self._max_val)
-        cube_actor = tvtk.Actor(mapper=cube_mapper)
-        self.scene.add_actors(cube_actor)
-        self.operators.append(MappingMarchingCubes(operator=cubes,
-                    vmin=self._min_val, vmax=self._max_val,
-                    vdefault=val,
-                    mapper = cube_mapper,
-                    post_call = self.scene.render,
-                    lut_manager = lut_manager,
-                    scene=self.scene))
-        return self.operators[-1]
-
-    def add_isocontour(self, val=None):
-        if val is None: val = (self._max_val+self._min_val) * 0.5
-        isocontour = tvtk.ContourFilter(
-                    executive = tvtk.CompositeDataPipeline())
-        isocontour.input = self._hdata_set
-        isocontour.generate_values(1, (val, val))
-        lut_manager = LUTManager(data_name=self.field, scene=self.scene)
-        isocontour_normals = tvtk.PolyDataNormals(
-            executive=tvtk.CompositeDataPipeline())
-        isocontour_normals.input_connection = isocontour.output_port
-        iso_mapper = tvtk.HierarchicalPolyDataMapper(
-                                input_connection = isocontour_normals.output_port,
-                                lookup_table=lut_manager.lut)
-        iso_mapper.scalar_range = (self._min_val, self._max_val)
-        iso_actor = tvtk.Actor(mapper=iso_mapper)
-        self.scene.add_actors(iso_actor)
-        self.operators.append(MappingIsoContour(operator=isocontour,
-                    vmin=self._min_val, vmax=self._max_val,
-                    vdefault=val,
-                    mapper = iso_mapper,
-                    post_call = self.scene.render,
-                    lut_manager = lut_manager,
-                    scene=self.scene))
-        return self.operators[-1]
-
-    def display_points(self):
-        dd = self.pf.h.all_data()
-        points = tvtk.Points()
-        good = (dd["creation_time"] > 0.0)
-        points.data = na.array([ dd["particle_position_%s" % ax][good] for ax in 'xyz' ]).transpose()
-        mass = na.log10(dd["ParticleAge"][good])
-        self.conn = tvtk.CellArray()
-        for i in xrange(mass.shape[0]):
-            self.conn.insert_next_cell(1)
-            self.conn.insert_cell_point(i)
-        self.points = points
-        self.pd = tvtk.PolyData(points = self.points, verts = self.conn)
-        self.pd.point_data.scalars = mass
-        lut = tvtk.LookupTable()
-        self.pdm = tvtk.PolyDataMapper(input = self.pd,
-                                       lookup_table = lut)
-        self.pdm.scalar_range = (mass.min(), mass.max())
-        self.pdm.scalar_mode = 'use_point_data'
-        self.point_actor = tvtk.Actor(mapper = self.pdm)
-        self.scene.add_actor(self.point_actor)
-
-def get_all_parents(grid):
-    parents = []
-    if len(grid.Parents) == 0: return grid
-    for parent in grid.Parents: parents.append(get_all_parents(parent))
-    return list(set(parents))
-
-def run_vtk():
-    gui = pyface.GUI()
-    importer = HierarchyImporter()
-    importer.edit_traits(handler = HierarchyImportHandler(
-            importer = importer))
-    #ehds.edit_traits()
-    gui.start_event_loop()
-
-
-if __name__=="__main__":
-    print "This code probably won't work.  But if you want to give it a try,"
-    print "you need:"
-    print
-    print "VTK (CVS)"
-    print "Mayavi2 (from Enthought)"
-    print
-    print "If you have 'em, give it a try!"
-    print
-    run_vtk()
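
The camera-path playback in the file removed above is plain linear
interpolation between stored keyframes: step_through() walks each pair of
CameraPosition entries and calls _interpolate() on the position, focal point,
view-up and clipping range.  A minimal standalone sketch of that scheme, with
made-up endpoint values and no Traits or VTK dependencies:

import numpy as na   # matching the "na" alias used in yt at this point

def _interpolate(q1, q2, p, r):
    # Step p of r between two camera vectors, as in the removed file.
    return q1 + p * (q2 - q1) / float(r)

pos1 = na.array([0.0, 0.0, 2.0])
pos2 = na.array([1.0, 0.0, 2.0])
num_steps = 10
frames = [_interpolate(pos1, pos2, p, num_steps) for p in range(num_steps)]
# frames[0] equals pos1; the last frame stops one step short of pos2, which
# becomes the first frame of the next keyframe pair during playback.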


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Thu Oct 20 08:52:24 2011 -0400
@@ -30,13 +30,12 @@
 from yt.visualization.volume_rendering.grid_partitioner import HomogenizedVolume
 from yt.utilities.amr_utils import PartitionedGrid, kdtree_get_choices
 from yt.utilities.performance_counters import yt_counters, time_function
-import yt.utilities.parallel_tools.parallel_analysis_interface as PT
+from yt.utilities.parallel_tools.parallel_analysis_interface \
+    import ParallelAnalysisInterface 
 from copy import deepcopy
 from yt.config import ytcfg
 from time import time
 import h5py
-my_rank = ytcfg.getint("yt", "__parallel_rank")
-nprocs = ytcfg.getint("yt", "__parallel_size")
 
 def corner_bounds(split_dim, split, current_left = None, current_right = None):
     r"""
@@ -288,12 +287,13 @@
         'split_pos': 0.5}
 
         """
+        ParallelAnalysisInterface.__init__(self)
         self.current_split_dim = 0
 
         self.pf = pf
         self.sdx = self.pf.h.get_smallest_dx()
         self._id_offset = pf.h.grids[0]._id_offset
-        if nprocs > len(pf.h.grids):
+        if self.comm.size > len(pf.h.grids):
             mylog.info('Parallel rendering requires that the number of \n \
             grids in the dataset is greater or equal to the number of \n \
             processors.  Reduce number of processors.')
@@ -379,7 +379,7 @@
 
         # If the full amr kD-tree is requested, merge the results from
         # the parallel build.
-        if merge_trees and nprocs > 1:
+        if merge_trees and self.comm.size > 1:
             self.join_parallel_trees()            
             self.my_l_corner = self.domain_left_edge
             self.my_r_corner = self.domain_right_edge
@@ -752,11 +752,11 @@
         self.rebuild_references()
                 
     def trim_references(self):
-        par_tree_depth = long(na.log2(nprocs))
-        for i in range(2**nprocs):
+        par_tree_depth = long(na.log2(self.comm.size))
+        for i in range(2**self.comm.size):
             if ((i + 1)>>par_tree_depth) == 1:
-                # There are nprocs nodes that meet this criteria
-                if (i+1-nprocs) != my_rank:
+                # There are self.comm.size nodes that meet this criteria
+                if (i+1-self.comm.size) != self.comm.rank:
                     self.tree_dict.pop(i)
                     continue
         for node in self.tree_dict.itervalues():
@@ -770,7 +770,8 @@
         if self.tree_dict[0].split_pos is None:
             self.tree_dict.pop(0)
     def merge_trees(self):
-        self.tree_dict = self._mpi_joindict(self.tree_dict)
+        self.tree_dict = self.comm.par_combine_object(self.tree_dict,
+                            datatype = "dict", op = "join")
 
     def rebuild_references(self):
         self.tree = self.tree_dict[0]
@@ -988,9 +989,9 @@
         current_node.grids = grids
         current_node.l_corner = l_corner
         current_node.r_corner = r_corner
-        # current_node.owner = my_rank
+        # current_node.owner = self.comm.rank
         current_node.id = 0
-        par_tree_depth = int(na.log2(nprocs))
+        par_tree_depth = int(na.log2(self.comm.size))
         anprocs = 2**par_tree_depth
         while current_node is not None:
             # If we don't have any grids, that means we are revisiting
@@ -1003,7 +1004,7 @@
             # This is where all the domain decomposition occurs.  
             if ((current_node.id + 1)>>par_tree_depth) == 1:
                 # There are anprocs nodes that meet this criteria
-                if (current_node.id+1-anprocs) == my_rank:
+                if (current_node.id+1-anprocs) == self.comm.rank:
                     # I own this shared node
                     self.my_l_corner = current_node.l_corner
                     self.my_r_corner = current_node.r_corner
@@ -1137,17 +1138,17 @@
                     yield node.brick
          
         self.reduce_tree_images(self.tree, front_center)
-        self._barrier()
+        self.comm.barrier()
         
     def reduce_tree_images(self, tree, viewpoint, image=None):
         if image is not None:
             self.image = image
-        rounds = int(na.log2(nprocs))
+        rounds = int(na.log2(self.comm.size))
         anprocs = 2**rounds
         my_node = tree
         my_node_id = 0
         my_node.owner = 0
-        path = na.binary_repr(anprocs+my_rank)
+        path = na.binary_repr(anprocs+self.comm.rank)
         for i in range(rounds):
             try:
                 my_node.left_child.owner = my_node.owner
@@ -1161,7 +1162,7 @@
             except:
                 rounds = i-1
         for thisround in range(rounds,0,-1):
-            #print my_rank, 'my node', my_node_id
+            #print self.comm.rank, 'my node', my_node_id
             parent = my_node.parent
             #print parent['split_ax'], parent['split_pos']
             if viewpoint[parent.split_ax] <= parent.split_pos:
@@ -1174,10 +1175,10 @@
             # mylog.debug('front owner %i back owner %i parent owner %i'%( front.owner, back.owner, parent.owner))
                 
             # Send the images around
-            if front.owner == my_rank:
+            if front.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug( '%04i receiving image from %04i'%(my_rank,back.owner))
-                    arr2 = PT._recv_array(back.owner, tag=back.owner).reshape(
+                    mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+                    arr2 = self.comm.recv_array(back.owner, tag=back.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1190,18 +1191,17 @@
                         self.image[:,:,i  ] = self.image[:,:,i  ] + ta * arr2[:,:,i  ]
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i sending my image to %04i'%(my_rank,back.owner))
-                    PT._send_array(self.image.ravel(), back.owner, tag=my_rank)
-
+                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank,back.owner))
+                    self.comm.send_array(self.image.ravel(), back.owner, tag=self.comm.rank)
                 
-            if back.owner == my_rank:
+            if back.owner == self.comm.rank:
                 if front.owner == parent.owner:
-                    mylog.debug('%04i sending my image to %04i'%(my_rank, front.owner))
-                    PT._send_array(self.image.ravel(), front.owner, tag=my_rank)
+                    mylog.debug('%04i sending my image to %04i'%(self.comm.rank, front.owner))
+                    self.comm.send_array(self.image.ravel(), front.owner, tag=self.comm.rank)
                 else:
                     mylog.debug('Reducing image.  You have %i rounds to go in this binary tree' % thisround)
-                    mylog.debug('%04i receiving image from %04i'%(my_rank,front.owner))
-                    arr2 = PT._recv_array(front.owner, tag=front.owner).reshape(
+                    mylog.debug('%04i receiving image from %04i'%(self.comm.rank,front.owner))
+                    arr2 = self.comm.recv_array(front.owner, tag=front.owner).reshape(
                         (self.image.shape[0],self.image.shape[1],self.image.shape[2]))
                     for i in range(3):
                         # This is the new way: alpha corresponds to opacity of a given
@@ -1215,7 +1215,7 @@
                         # image[:,:,i+3] = arr2[:,:,i+3] + ta * image[:,:,i+3]
             # Set parent owner to back owner
             # my_node = (my_node-1)>>1
-            if my_rank == my_node.parent.owner: 
+            if self.comm.rank == my_node.parent.owner: 
                 my_node = my_node.parent
             else:
                 break
@@ -1223,8 +1223,8 @@
     def store_kd_bricks(self, fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5'%self.pf
-        if my_rank != 0:
-            PT._recv_array(my_rank-1, tag=my_rank-1)
+        if self.comm.rank != 0:
+            self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,"a")
         for node in self.depth_traverse():
             i = node.id
@@ -1236,14 +1236,14 @@
                     except:
                         pass
         f.close()
-        if my_rank != (nprocs-1):
-            PT._send_array([0],my_rank+1, tag=my_rank)
+        if self.comm.rank != (nprocs-1):
+            self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
         
     def load_kd_bricks(self,fn=None):
         if fn is None:
             fn = '%s_kd_bricks.h5' % self.pf
-        if my_rank != 0:
-            PT._recv_array(my_rank-1, tag=my_rank-1)
+        if self.comm.rank != 0:
+            self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         try:
             f = h5py.File(fn,"r")
             for node in self.depth_traverse():
@@ -1266,8 +1266,8 @@
             f.close()
         except:
             pass
-        if my_rank != (nprocs-1):
-            PT._send_array([0],my_rank+1, tag=my_rank)
+        if self.comm.rank != (nprocs-1):
+            self.comm.send_array([0],self.comm.rank+1, tag=self.comm.rank)
 
     def load_tree(self,fn):
         raise NotImplementedError()
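
The amr_kdtree changes above follow the pattern used throughout this series:
the module-level my_rank/nprocs globals read from ytcfg are dropped, and
rank/size queries go through the communicator that
ParallelAnalysisInterface.__init__() now attaches to each instance.  A hedged
sketch of that pattern in isolation (the class and method names below are
illustrative, not taken from the diff):

from yt.utilities.parallel_tools.parallel_analysis_interface import \
    ParallelAnalysisInterface

class CommAwareTool(ParallelAnalysisInterface):
    def __init__(self, pf):
        # Calling the base __init__ sets up self.comm, which replaces the
        # old ytcfg-derived my_rank and nprocs module globals.
        ParallelAnalysisInterface.__init__(self)
        self.pf = pf

    def report(self):
        # Rank and size now come from the communicator object.
        if self.comm.rank == 0:
            print "running on", self.comm.size, "processors"
        self.comm.barrier()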


--- a/yt/utilities/answer_testing/api.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/answer_testing/api.py	Thu Oct 20 08:52:24 2011 -0400
@@ -45,3 +45,8 @@
 
 from .xunit import \
     Xunit
+
+from .halo_tests import \
+    TestHaloCompositionHashHOP, \
+    TestHaloCompositionHashFOF, \
+    TestHaloCompositionHashPHOP
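
With the three hash tests exported from the answer_testing api, a test module
can register them through the same create_test() machinery used elsewhere in
this series.  A hedged sketch (the test names are placeholders; the keyword
values simply echo the class defaults shown in halo_tests.py below):

from yt.utilities.answer_testing.api import \
    TestHaloCompositionHashHOP, \
    TestHaloCompositionHashFOF, \
    TestHaloCompositionHashPHOP
from yt.utilities.answer_testing.output_tests import create_test

create_test(TestHaloCompositionHashHOP, "halo_hash_hop", threshold=80.0)
create_test(TestHaloCompositionHashFOF, "halo_hash_fof",
            link=0.2, padding=0.02)
create_test(TestHaloCompositionHashPHOP, "halo_hash_phop", threshold=80.0)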


--- a/yt/utilities/answer_testing/halo_tests.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/answer_testing/halo_tests.py	Thu Oct 20 08:52:24 2011 -0400
@@ -1,16 +1,20 @@
 from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
+from yt.analysis_modules.halo_finding.api import *
+import hashlib
+import numpy as np
 
-class TestHaloCount(YTStaticOutputTest):
+# Tests the number of halos returned by the HOP halo finder on a dataset
+class TestHaloCountHOP(YTStaticOutputTest):
     threshold = 80.0
 
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
-        # We only care about the number of haloes.
-        self.result = len(haloes)
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of halos.
+        self.result = len(halos)
                     
     def compare(self, old_result):
         # The new value should be identical to the old one.
@@ -19,18 +23,53 @@
     def plot(self):
         return []
 
-create_test(TestHaloCount, "halo_count_test", threshold=80.0)
+# Tests the number of halos returned by the FOF halo finder on a dataset
+class TestHaloCountFOF(YTStaticOutputTest):
+    link = 0.2
+    padding = 0.02
+
+    def run(self):
+        # Find the halos using FOF.
+        halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False, 
+                               padding=self.padding)
+        # We only care about the number of halos.
+        self.result = len(halos)
+                    
+    def compare(self, old_result):
+        # The new value should be identical to the old one.
+        self.compare_value_delta(self.result, old_result, 0)
+
+    def plot(self):
+        return []
+
+# Tests the number of halos returned by the Parallel HOP halo finder on a 
+# dataset
+class TestHaloCountPHOP(YTStaticOutputTest):
+    threshold = 80.0
+
+    def run(self):
+        # Find the halos using parallel HOP.
+        halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # We only care about the number of halos.
+        self.result = len(halos)
+                    
+    def compare(self, old_result):
+        # The new value should be identical to the old one.
+        self.compare_value_delta(self.result, old_result, 0)
+
+    def plot(self):
+        return []
 
 class TestHaloComposition(YTStaticOutputTest):
     threshold=80.0
     
     def run(self):
-        # Find the haloes using vanilla HOP.
-        haloes = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
         # The result is a list of the particle IDs, stored
         # as sets for easy comparison.
         IDs = []
-        for halo in haloes:
+        for halo in halos:
             IDs.append(set(halo["particle_index"]))
         self.result = IDs
     
@@ -42,7 +81,85 @@
                 return False
         return True
     
-    def plot(self):
-        return []
+# Tests the content of the halos returned by the HOP halo finder on a dataset 
+# by comparing the hash of the arrays of all the particles contained in each
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
+class TestHaloCompositionHashHOP(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the halos using vanilla HOP.
+        halos = HaloFinder(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a flattened array of the arrays of the particle IDs for
+        # each halo
+        IDs = []
+        for halo in halos:
+            IDs.append(halo["particle_index"])
+        IDs = np.concatenate(IDs)
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
 
-create_test(TestHaloComposition, "halo_composition_test", threshold=80.0)
+# Tests the content of the halos returned by the FOF halo finder on a dataset 
+# by comparing the hash of the arrays of all the particles contained in each
+# halo.  Evidently breaks on parallel runtime.  DO NOT USE.
+class TestHaloCompositionHashFOF(YTStaticOutputTest):
+    link = 0.2
+    padding = 0.02
+    
+    def run(self):
+        # Find the halos using vanilla FOF.
+        halos = FOFHaloFinder(self.pf, link=self.link, dm_only=False, 
+                               padding=self.padding)
+        # The result is a flattened array of the arrays of the particle IDs for
+        # each halo
+        IDs = []
+        for halo in halos:
+            IDs.append(halo["particle_index"])
+        IDs = np.concatenate(IDs)
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
+
+# Tests the content of the halos returned by the Parallel HOP halo finder on a 
+# dataset by comparing the hash of the arrays of all the particles contained 
+# in each halo.  Evidently breaks on parallel runtime.  DO NOT USE.
+class TestHaloCompositionHashPHOP(YTStaticOutputTest):
+    threshold=80.0
+    
+    def run(self):
+        # Find the halos using parallel HOP.
+        halos = parallelHF(self.pf, threshold=self.threshold, dm_only=False)
+        # The result is a flattened array of the arrays of the particle IDs for
+        # each halo
+        IDs = []
+        for halo in halos:
+            IDs.append(halo["particle_index"])
+        IDs = np.concatenate(IDs)
+        self.result = IDs
+    
+    def compare(self, old_result):
+        # All the lists of arrays should be identical.  To check this
+        # faster, we take the 256-bit hash of these lists and compare them
+        result_hash = hashlib.sha256(self.result.tostring()).hexdigest()
+        old_result_hash = hashlib.sha256(old_result.tostring()).hexdigest()
+        if result_hash == old_result_hash:
+            return True
+        else:
+            return False
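
The compare() methods of the three new hash tests all reduce the concatenated
particle-index array to a single SHA-256 digest and compare digests rather
than full arrays.  The core of that pattern, pulled out as a tiny
self-contained example (the index arrays here are fabricated; identical dtype
and ordering are assumed, just as in the tests above):

import hashlib
import numpy as np

def hashes_match(new_ids, old_ids):
    # Hash the raw bytes of each flattened index array; equal digests imply
    # (with overwhelming probability) identical particle membership.
    new_hash = hashlib.sha256(new_ids.tostring()).hexdigest()
    old_hash = hashlib.sha256(old_ids.tostring()).hexdigest()
    return new_hash == old_hash

ids_a = np.concatenate([np.array([1, 5, 9]), np.array([2, 3])])
ids_b = ids_a.copy()
assert hashes_match(ids_a, ids_b)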


--- a/yt/utilities/answer_testing/hydro_tests.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/answer_testing/hydro_tests.py	Thu Oct 20 08:52:24 2011 -0400
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import pylab
 from yt.mods import *
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
@@ -37,14 +37,15 @@
     def run(self):
         # First we get our flattened projection -- this is the
         # Density, px, py, pdx, and pdy
-        proj = self.pf.h.proj(self.axis, self.field)
+        proj = self.pf.h.proj(self.axis, self.field,
+                              weight_field=self.weight_field)
         # Now let's stick it in a buffer
         pixelized_proj = self.pixelize(proj, self.field)
         # We just want the values, so this can be stored
         # independently of the parameter file.
-        # The .data attributes strip out everything other than the actual array
+        # The .field_data attributes strip out everything other than the actual array
         # values.
-        self.result = (proj.data, pixelized_proj.data)
+        self.result = (proj.field_data, pixelized_proj.data)
 
     def compare(self, old_result):
         proj, pixelized_proj = self.result
@@ -60,10 +61,92 @@
         pylab.clf()
         pylab.imshow(self.result[1][self.field],
             interpolation='nearest', origin='lower')
-        fn = "%s_%s_projection.png" % (self.pf, self.field)
+        fn = "%s_%s_%s_projection.png" % (self.pf, self.field,
+                                          self.weight_field)
         pylab.savefig(fn)
         return [fn]
 
+class TestOffAxisProjection(YTStaticOutputTest):
+
+    field = None
+    weight_field = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        proj = off_axis_projection(self.pf, 
+                                   (0.5 * (self.pf.domain_left_edge + 
+                                           self.pf.domain_right_edge)),
+                                   [1., 1., 1.], 1., 400,
+                                   self.field, weight=self.weight_field)
+
+        # values.
+        self.result = proj
+
+    def compare(self, old_result):
+        proj  = self.result
+        oproj = old_result
+
+        self.compare_array_delta(proj, oproj, 1e-7)
+
+    def plot(self):
+        fn = "%s_%s_%s_off-axis_projection.png" % \
+            (self.pf, self.field, self.weight_field)
+        write_image(self.result, fn)
+        return [fn]
+
+class TestRay(YTStaticOutputTest):
+
+    field = None
+
+    def run(self):
+        na.random.seed(4333)
+        start_point = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
+        end_point   = na.random.random(self.pf.dimensionality) * \
+            (self.pf.domain_right_edge - self.pf.domain_left_edge) + \
+            self.pf.domain_left_edge
+
+        # Here proj will just be the data array.
+        ray = self.pf.h.ray(start_point, end_point, field=self.field)
+
+        # values.
+        self.result = ray[self.field]
+
+    def compare(self, old_result):
+        ray  = self.result
+        oray = old_result
+
+        self.compare_array_delta(ray, oray, 1e-7)
+
+    def plot(self):
+        return
+
+class TestSlice(YTStaticOutputTest):
+
+    field = None
+    axis = None
+
+    def run(self):
+        # Here proj will just be the data array.
+        slice = self.pf.h.slice(self.axis, 
+                                (0.5 * (self.pf.domain_left_edge + 
+                                        self.pf.domain_right_edge))[self.axis],
+                                fields=self.field)
+        # values.
+        self.result = slice.field_data
+
+    def compare(self, old_result):
+        slice  = self.result
+        oslice = old_result
+
+        self.compare_data_arrays(slice, oslice)
+
+    def plot(self):
+        fn = "%s_%s_slice.png" % (self.pf, self.field)
+        write_image(self.result[self.field], fn)
+        return [fn]
+
 # Now we create all our tests.  We are using the create_test
 # function, which is a relatively simple function that takes the base class,
 # a name, and any parameters that the test requires.
@@ -88,7 +171,7 @@
             weight=self.weight)
         # The arrays are all stored in a dictionary hanging off the profile
         # object
-        self.result = p.data._data
+        self.result = p.data.field_data
                     
     def compare(self, old_result):
         self.compare_data_arrays(
@@ -102,3 +185,21 @@
 for field in ["Temperature", "x-velocity"]:
     create_test(TestGasDistribution, "profile_density_test_%s" % field,
                 field_x = "Density", field_y = field)
+
+class Test2DGasDistribution(TestGasDistribution):
+    x_bins = 128
+    y_bins = 128
+    field_z = "CellMassMsun"
+    weight = None
+    def run(self):
+        # We're NOT going to use the low-level profiling API here,
+        # because we are avoiding the calculations of min/max,
+        # as those should be tested in another test.
+        pc = PlotCollection(self.pf, center=self.sim_center)
+        p = pc.add_phase_object(self.entire_simulation,
+            [self.field_x, self.field_y, self.field_z], x_bins = self.x_bins, y_bins = self.y_bins,
+            weight=self.weight)
+        # The arrays are all stored in a dictionary hanging off the profile
+        # object
+        self.result = p.data.field_data
+
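
The new TestOffAxisProjection, TestRay and TestSlice classes take the same
kind of class-level parameters as the existing projection tests, so
registering them presumably mirrors the create_test() loop already at the
bottom of this file.  A hedged sketch, intended as a fragment of
hydro_tests.py itself (field and weight choices are illustrative only):

for field in ["Density", "Temperature"]:
    create_test(TestRay, "ray_test_%s" % field, field=field)
    for axis in range(3):
        create_test(TestSlice, "slice_test_%s_%s" % (axis, field),
                    field=field, axis=axis)
    for weight_field in [None, "Density"]:
        create_test(TestOffAxisProjection,
                    "off_axis_projection_test_%s_%s" % (field, weight_field),
                    field=field, weight_field=weight_field)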


--- a/yt/utilities/answer_testing/output_tests.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/answer_testing/output_tests.py	Thu Oct 20 08:52:24 2011 -0400
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import matplotlib
 from yt.mods import *
 
 # We first create our dictionary of tests to run.  This starts out empty, and


--- a/yt/utilities/answer_testing/particle_tests.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/answer_testing/particle_tests.py	Thu Oct 20 08:52:24 2011 -0400
@@ -1,5 +1,5 @@
+import matplotlib
 from yt.mods import *
-import matplotlib; matplotlib.use("Agg")
 import pylab
 from output_tests import SingleOutputTest, YTStaticOutputTest, create_test
 


--- a/yt/utilities/answer_testing/runner.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Thu Oct 20 08:52:24 2011 -0400
@@ -23,11 +23,12 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import matplotlib; matplotlib.use("Agg")
+import matplotlib
 import os, shelve, cPickle, sys, imp, tempfile
 
 from yt.config import ytcfg; ytcfg["yt","serialize"] = "False"
 import yt.utilities.cmdln as cmdln
+from yt.funcs import *
 from .xunit import Xunit
 
 from output_tests import test_registry, MultipleOutputTest, \
@@ -54,12 +55,14 @@
             self._path = os.path.join(path, "results")
         else:
             self._path = os.path.join(path, "results_%s" % self.id)
-        if not os.path.isdir(self._path): os.mkdir(self._path)
+        if not os.path.isdir(self._path): 
+            only_on_root(os.mkdir, self._path)
         if os.path.isfile(self._path): raise RuntimeError
 
     def _fn(self, tn):
         return os.path.join(self._path, tn)
 
+    @rootonly
     def __setitem__(self, test_name, result):
         # We have to close our shelf manually,
         # as the destructor does not necessarily do this.
@@ -79,7 +82,7 @@
 class RegressionTestRunner(object):
     def __init__(self, results_id, compare_id = None,
                  results_path = ".", compare_results_path = ".",
-                 io_log = "OutputLog"):
+                 io_log = "OutputLog", plot_tests = False):
         # This test runner assumes it has been launched with the current
         # working directory that of the test case itself.
         self.io_log = io_log
@@ -92,6 +95,7 @@
         self.results = RegressionTestStorage(results_id, path=results_path)
         self.plot_list = {}
         self.passed_tests = {}
+        self.plot_tests = plot_tests
 
     def run_all_tests(self):
         plot_list = []
@@ -126,7 +130,8 @@
         print self.id, "Running", test.name,
         test.setup()
         test.run()
-        self.plot_list[test.name] = test.plot()
+        if self.plot_tests:
+            self.plot_list[test.name] = test.plot()
         self.results[test.name] = test.result
         success, msg = self._compare(test)
         if self.old_results is None:
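
With plot generation now opt-in, a driver script has to ask for plots
explicitly when constructing the runner.  A minimal usage sketch, assuming an
OutputLog in the working directory (the results ids are placeholders):

from yt.utilities.answer_testing.runner import RegressionTestRunner

# plot_tests defaults to False; pass True to get the per-test plot files.
runner = RegressionTestRunner("current", compare_id="gold_standard",
                              io_log="OutputLog", plot_tests=True)
runner.run_all_tests()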


--- a/yt/utilities/command_line.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/command_line.py	Thu Oct 20 08:52:24 2011 -0400
@@ -573,7 +573,7 @@
         else:
             p = pc.add_slice(opts.field, opts.axis)
         from yt.gui.reason.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.data, opts.field)
+        mapper = PannableMapServer(p.field_data, opts.field)
         import yt.utilities.bottle as bottle
         bottle.debug(True)
         if opts.host is not None:
@@ -643,16 +643,18 @@
                            virial_quantities=['TotalMassMsun','RadiusMpc'])
 
         # Add profile fields.
-        hp.add_profile('CellVolume',weight_field=None,accumulation=True)
-        hp.add_profile('TotalMassMsun',weight_field=None,accumulation=True)
-        hp.add_profile('Density',weight_field=None,accumulation=False)
-        hp.add_profile('Temperature',weight_field='CellMassMsun',accumulation=False)
+        pf = hp.pf
+        all_fields = pf.h.field_list + pf.h.derived_field_list
+        for field, wv, acc in HP.standard_fields:
+            if field not in all_fields: continue
+            hp.add_profile(field, wv, acc)
         hp.make_profiles(filename="FilteredQuantities.out")
 
         # Add projection fields.
         hp.add_projection('Density',weight_field=None)
         hp.add_projection('Temperature',weight_field='Density')
-        hp.add_projection('Metallicity',weight_field='Density')
+        if "Metallicity" in all_fields:
+            hp.add_projection('Metallicity',weight_field='Density')
 
         # Make projections for all three axes using the filtered halo list and
         # save data to hdf5 files.
@@ -675,7 +677,7 @@
         pc_dummy = PlotCollection(pf, center=c)
         pr = pc_dummy.add_profile_object(dd, ["Density", "Temperature"],
                             weight="CellMassMsun")
-        ph.modify["line"](pr.data["Density"], pr.data["Temperature"])
+        ph.modify["line"](pr.field_data["Density"], pr.field_data["Temperature"])
         pc.save()
 
     @cmdln.option("-d", "--desc", action="store",
@@ -1568,7 +1570,7 @@
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
             save_name += '.png'
-        if cam._mpi_get_rank() != -1:
+        if cam._par_rank != -1:
             write_bitmap(image,save_name)
         
 

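The halo profiler hunk above swaps a hard-coded profile list for a loop that
skips any field the dataset does not provide.  The same guard, written as a
hedged standalone helper (the (field, weight, accumulation) triples stand in
for HP.standard_fields, and hp/pf are the HaloProfiler and parameter file
from the command):

def add_available_profiles(hp, pf, field_specs):
    # field_specs: iterable of (field, weight_field, accumulation) triples.
    all_fields = pf.h.field_list + pf.h.derived_field_list
    for field, weight, acc in field_specs:
        if field not in all_fields:
            continue
        hp.add_profile(field, weight_field=weight, accumulation=acc)
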

--- a/yt/utilities/parallel_tools/distributed_object_collection.py	Thu Oct 20 08:17:55 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-"""
-A simple distributed object mechanism, for storing array-heavy objects.
-Meant to be subclassed.
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from itertools import izip
-
-import numpy as na
-
-from yt.funcs import *
-
-from .parallel_analysis_interface import ParallelAnalysisInterface
-
-class DistributedObjectCollection(ParallelAnalysisInterface):
-    valid = True
-
-    def _get_object_info(self):
-        pass
-
-    def _set_object_info(self):
-        pass
-
-    def join_lists(self):
-        info_dict = self._get_object_info()
-        info_dict = self._mpi_catdict(info_dict)
-        self._set_object_info(info_dict)
-
-    def _collect_objects(self, desired_indices):
-        # We figure out which indices belong to which processor,
-        # then we pack them up, and we send a list to each processor.
-        request_count = []
-        owners = self._object_owners[desired_indices]
-        mylog.debug("Owner list: %s", na.unique1d(owners))
-        # Even if we have a million bricks, this should not take long.
-        s = self._mpi_get_size()
-        m = self._mpi_get_rank()
-        requests = dict( ( (i, []) for i in xrange(s) ) )
-        for i, p in izip(desired_indices, owners):
-            requests[p].append(i)
-        for p in sorted(requests):
-            requests[p] = na.array(requests[p], dtype='int64')
-            request_count.append(len(requests[p]))
-        size = len(request_count)
-        mylog.debug("Requesting: %s", request_count)
-        request_count = na.array(request_count, dtype='int64')
-        # Now we distribute our requests to all the processors.
-        # This is two-pass.  One to get the length of the arrays.  The second
-        # pass is to get the actual indices themselves.
-        request_count = self._mpi_joindict({m : request_count})
-        # Now we have our final array of requests, with arrangement
-        # (Nproc,Nproc).  First index corresponds to requesting proc, second to
-        # sending.  So [them,us] = 5 means we owe 5, whereas [us, them] means
-        # we are owed.
-        send_hooks = []
-        dsend_buffers, dsend_hooks = [], []
-        recv_hooks, recv_buffers = [], []
-        drecv_buffers, drecv_hooks = [], []
-        # We post our index-list and data receives from each processor.
-        mylog.debug("Posting data buffer receives")
-        proc_hooks = {}
-        for p, request_from in request_count.items():
-            if p == m: continue
-            size = request_from[m]
-            #if size == 0: continue
-            # We post receives of the grids we *asked* for.
-            # Note that indices into this are not necessarily processor ids.
-            # So we store.  This has to go before the appends or it's an
-            # off-by-one.
-            mylog.debug("Setting up index buffer of size %s for receive from %s",
-                        size, p)
-            proc_hooks[len(drecv_buffers)] = p
-            drecv_buffers.append(self._create_buffer(requests[p]))
-            drecv_hooks.append(self._mpi_Irecv_double(drecv_buffers[-1], p, 1))
-            recv_buffers.append(na.zeros(size, dtype='int64'))
-            # Our index list goes on 0, our buffer goes on 1.  We know how big
-            # the index list will be, now.
-            recv_hooks.append(self._mpi_Irecv_long(recv_buffers[-1], p, 0))
-        # Send our index lists into the waiting buffers
-        mylog.debug("Sending index lists")
-        for p, ind_list in requests.items():
-            if p == m: continue
-            if len(ind_list) == 0: continue
-            # Now, we actually send our index lists.
-            send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
-        # Now we post receives for all of the data buffers.
-        mylog.debug("Sending data")
-        for i in self._mpi_Request_Waititer(recv_hooks):
-            # We get back the index, which here is identical to the processor
-            # number doing the send.  At this point, we can post our receives.
-            p = proc_hooks[i]
-            mylog.debug("Processing from %s", p)
-            ind_list = recv_buffers[i]
-            dsend_buffers.append(self._create_buffer(ind_list))
-            self._pack_buffer(ind_list, dsend_buffers[-1])
-            dsend_hooks.append(self._mpi_Isend_double(
-                dsend_buffers[-1], p, 1))
-        mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
-        for i in self._mpi_Request_Waititer(drecv_hooks):
-            mylog.debug("Unpacking from %s", proc_hooks[i])
-            # Now we have to unpack our buffers
-            # Our key into this is actually the request for the processor
-            # number.
-            p = proc_hooks[i]
-            self._unpack_buffer(requests[p], drecv_buffers[i])
-        mylog.debug("Finalizing sends: %s", len(dsend_hooks))
-        for i in self._mpi_Request_Waititer(dsend_hooks):
-            continue
-
-    def _create_buffer(self, ind_list):
-        pass
-
-    def _pack_buffer(self, ind_list):
-        pass
-
-    def _unpack_buffer(self, ind_list, my_buffer):
-        pass
-
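
The deleted DistributedObjectCollection hand-rolled a two-pass Isend/Irecv
exchange to gather objects across processors; the communicator introduced in
this changeset covers the common cases directly.  A hedged sketch of the
replacement idiom for the dictionary-join case (the class name is
illustrative; par_combine_object is the method shown in the
parallel_analysis_interface diff below):

from yt.utilities.parallel_tools.parallel_analysis_interface import \
    ParallelAnalysisInterface

class InfoJoiner(ParallelAnalysisInterface):
    def join_object_info(self, local_info):
        # local_info: {index: metadata} for the objects this rank owns.
        # "join" merges the per-rank dictionaries, as merge_trees() does
        # in the amr_kdtree diff above.
        return self.comm.par_combine_object(local_info, datatype="dict",
                                            op="join")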


--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Oct 20 08:52:24 2011 -0400
@@ -51,8 +51,8 @@
     if parallel_capable:
         mylog.info("Parallel computation enabled: %s / %s",
                    MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__parallel_size"] = str(MPI.COMM_WORLD.size)
+        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
+        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
         ytcfg["yt","__parallel"] = "True"
         if exe_name == "embed_enzo" or \
             ("_parallel" in dir(sys) and sys._parallel == True):
@@ -82,6 +82,39 @@
 else:
     parallel_capable = False
 
+# Set up translation table
+if parallel_capable:
+    dtype_names = dict(
+            float32 = MPI.FLOAT,
+            float64 = MPI.DOUBLE,
+            int32   = MPI.INT,
+            int64   = MPI.LONG
+    )
+    op_names = dict(
+        sum = MPI.SUM,
+        min = MPI.MIN,
+        max = MPI.MAX
+    )
+
+else:
+    dtype_names = dict(
+            float32 = "MPI.FLOAT",
+            float64 = "MPI.DOUBLE",
+            int32   = "MPI.INT",
+            int64   = "MPI.LONG"
+    )
+    op_names = dict(
+            sum = "MPI.SUM",
+            min = "MPI.MIN",
+            max = "MPI.MAX"
+    )
+
+# Because the dtypes will == correctly but do not hash the same, we need this
+# function for dictionary access.
+def get_mpi_type(dtype):
+    for dt, val in dtype_names.items():
+        if dt == dtype: return val
+
 class ObjectIterator(object):
     """
     This is a generalized class that accepts a list of objects and then
@@ -96,7 +129,7 @@
         if hasattr(gs[0], 'proc_num'):
             # This one sort of knows about MPI, but not quite
             self._objs = [g for g in gs if g.proc_num ==
-                          ytcfg.getint('yt','__parallel_rank')]
+                          ytcfg.getint('yt','__topcomm_parallel_rank')]
             self._use_all = True
         else:
             self._objs = gs
@@ -182,9 +215,9 @@
     output; otherwise, the function gets called.  Used as a decorator.
     """
     @wraps(func)
-    def passage(self, data):
+    def passage(self, data, **kwargs):
         if not self._distributed: return data
-        return func(self, data)
+        return func(self, data, **kwargs)
     return passage
 
 def parallel_blocking_call(func):
@@ -241,13 +274,472 @@
     if parallel_capable: return root_only
     return func
 
-class ParallelAnalysisInterface(object):
+class Workgroup(object):
+    def __init__(self, size, ranks, comm):
+        self.size = size
+        self.ranks = ranks
+        self.comm = comm
+
+class ProcessorPool(object):
+    comm = None
+    size = None
+    ranks = None
+    available_ranks = None
+    tasks = None
+    workgroups = []
+    def __init__(self):
+        self.comm = communication_system.communicators[-1]
+        self.size = self.comm.size
+        self.ranks = range(self.size)
+        self.available_ranks = range(self.size)
+    
+    def add_workgroup(self, size=None, ranks=None):
+        if size is None:
+            size = len(self.available_ranks)
+        if len(self.available_ranks) < size:
+            print 'Not enough resources available'
+            raise RuntimeError
+        if ranks is None:
+            ranks = [self.available_ranks.pop(0) for i in range(size)]
+        
+        group = self.comm.comm.Get_group().Incl(ranks)
+        new_comm = self.comm.comm.Create(group)
+        if self.comm.rank in ranks:
+            communication_system.communicators.append(Communicator(new_comm))
+        self.workgroups.append(Workgroup(len(ranks), ranks, new_comm))
+    
+    def free_workgroup(self, workgroup):
+        for i in workgroup.ranks:
+            if self.comm.rank == i:
+                communication_system.communicators.pop()
+            self.available_ranks.append(i) 
+        del workgroup
+        self.available_ranks.sort()
+
+    def free_all(self):
+        for wg in self.workgroups:
+            self.free_workgroup(wg)
+
+class ResultsStorage(object):
+    slots = ['result', 'result_id']
+    result = None
+    result_id = None
+
+def parallel_objects(objects, njobs, storage = None):
+    if not parallel_capable: raise RuntimeError
+    my_communicator = communication_system.communicators[-1]
+    my_size = my_communicator.size
+    my_rank = my_communicator.rank
+    all_new_comms = na.array_split(na.arange(my_size), njobs)
+    for i,comm_set in enumerate(all_new_comms):
+        if my_rank in comm_set:
+            my_new_id = i
+            break
+    communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+    obj_ids = na.arange(len(objects))
+
+    to_share = {}
+    for result_id, obj in zip(obj_ids, objects)[my_new_id::njobs]:
+        if storage is not None:
+            rstore = ResultsStorage()
+            rstore.result_id = result_id
+            yield rstore, obj
+            to_share[rstore.result_id] = rstore.result
+        else:
+            yield obj
+    communication_system.communicators.pop()
+    if storage is not None:
+        # Now we have to broadcast it
+        new_storage = my_communicator.par_combine_object(
+                to_share, datatype = 'dict', op = 'join')
+        storage.update(new_storage)
+
+class CommunicationSystem(object):
+    communicators = []
+
+    def __init__(self):
+        if parallel_capable:
+            self.communicators.append(Communicator(MPI.COMM_WORLD))
+        else:
+            self.communicators.append(Communicator(None))
+    def push(self, size=None, ranks=None):
+        raise NotImplementedError
+        if size is None:
+            size = len(available_ranks)
+        if len(available_ranks) < size:
+            raise RuntimeError
+        if ranks is None:
+            ranks = [available_ranks.pop() for i in range(size)]
+        
+        group = MPI.COMM_WORLD.Group.Incl(ranks)
+        new_comm = MPI.COMM_WORLD.Create(group)
+        self.communicators.append(Communicator(new_comm))
+        return new_comm
+
+    def push_with_ids(self, ids):
+        group = self.communicators[-1].comm.Get_group().Incl(ids)
+        new_comm = self.communicators[-1].comm.Create(group)
+        from yt.config import ytcfg
+        ytcfg["yt","__topcomm_parallel_size"] = str(new_comm.size)
+        ytcfg["yt","__topcomm_parallel_rank"] = str(new_comm.rank)
+        self.communicators.append(Communicator(new_comm))
+        return new_comm
+
+    def pop(self):
+        self.communicators.pop()
+
+class Communicator(object):
+    comm = None
+    _grids = None
+    _distributed = None
+    __tocast = 'c'
+
+    def __init__(self, comm=None):
+        self.comm = comm
+        self._distributed = comm is not None and self.comm.size > 1
     """
     This is an interface specification providing several useful utility
     functions for analyzing something in parallel.
     """
+
+    def barrier(self):
+        if not self._distributed: return
+        mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
+        self.comm.Barrier()
+
+    def mpi_exit_test(self, data=False):
+        # data==True -> exit. data==False -> no exit
+        mine, statuses = self.mpi_info_dict(data)
+        if True in statuses.values():
+            raise RuntimeError("Fatal error. Exiting.")
+        return None
+
+    @parallel_passthrough
+    def par_combine_object(self, data, op, datatype = None):
+        # op can be chosen from:
+        #   cat
+        #   join
+        # data is selected to be of types:
+        #   na.ndarray
+        #   dict
+        #   data field dict
+        if datatype is not None:
+            pass
+        elif isinstance(data, types.DictType):
+            datatype = "dict"
+        elif isinstance(data, na.ndarray):
+            datatype = "array"
+        elif isinstance(data, types.ListType):
+            datatype = "list"
+        # Now we have our datatype, and we conduct our operation
+        if datatype == "dict" and op == "join":
+            if self.comm.rank == 0:
+                for i in range(1,self.comm.size):
+                    data.update(self.comm.recv(source=i, tag=0))
+            else:
+                self.comm.send(data, dest=0, tag=0)
+            data = self.comm.bcast(data, root=0)
+            return data
+        elif datatype == "dict" and op == "cat":
+            field_keys = data.keys()
+            field_keys.sort()
+            size = data[field_keys[0]].shape[-1]
+            sizes = na.zeros(self.comm.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            self.comm.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = self.comm.allreduce(size, op=MPI.SUM)
+            for key in field_keys:
+                dd = data[key]
+                rv = self.alltoallv_array(dd, arr_size, offsets, sizes)
+                data[key] = rv
+            return data
+        elif datatype == "array" and op == "cat":
+            if data is None:
+                ncols = -1
+                size = 0
+            else:
+                if len(data) == 0:
+                    ncols = -1
+                    size = 0
+                elif len(data.shape) == 1:
+                    ncols = 1
+                    size = data.shape[0]
+                else:
+                    ncols, size = data.shape
+            ncols = self.comm.allreduce(ncols, op=MPI.MAX)
+            if size == 0:
+                data = na.zeros((ncols,0), dtype='float64') # This only works for
+            size = data.shape[-1]
+            sizes = na.zeros(self.comm.size, dtype='int64')
+            outsize = na.array(size, dtype='int64')
+            self.comm.Allgather([outsize, 1, MPI.LONG],
+                                     [sizes, 1, MPI.LONG] )
+            # This nested concatenate is to get the shapes to work out correctly;
+            # if we just add [0] to sizes, it will broadcast a summation, not a
+            # concatenation.
+            offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+            arr_size = self.comm.allreduce(size, op=MPI.SUM)
+            data = self.alltoallv_array(data, arr_size, offsets, sizes)
+            return data
+        elif datatype == "list" and op == "cat":
+            if self.comm.rank == 0:
+                data = self.__mpi_recvlist(data)
+            else:
+                self.comm.send(data, dest=0, tag=0)
+            mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
+            data = self.comm.bcast(data, root=0)
+            return data
+        raise NotImplementedError
+
+    @parallel_passthrough
+    def mpi_bcast_pickled(self, data):
+        data = self.comm.bcast(data, root=0)
+        return data
+
+    def preload(self, grids, fields, io_handler):
+        # This will preload if it detects we are parallel capable and
+        # if so, we load *everything* that we need.  Use with some care.
+        mylog.debug("Preloading %s from %s grids", fields, len(grids))
+        if not self._distributed: return
+        io_handler.preload(grids, fields)
+
+    @parallel_passthrough
+    def mpi_allreduce(self, data, dtype=None, op='sum'):
+        op = op_names[op]
+        if isinstance(data, na.ndarray) and data.dtype != na.bool:
+            if dtype is None:
+                dtype = data.dtype
+            if dtype != data.dtype:
+                data = data.astype(dtype)
+            temp = data.copy()
+            self.comm.Allreduce([temp,get_mpi_type(dtype)], 
+                                     [data,get_mpi_type(dtype)], op)
+            return data
+        else:
+            # We use old-school pickling here on the assumption the arrays are
+            # relatively small ( < 1e7 elements )
+            return self.comm.allreduce(data, op)
+
+    ###
+    # Non-blocking stuff.
+    ###
+
+    def mpi_nonblocking_recv(self, data, source, tag=0, dtype=None):
+        if not self._distributed: return -1
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return self.comm.Irecv([data, mpi_type], source, tag)
+
+    def mpi_nonblocking_send(self, data, dest, tag=0, dtype=None):
+        if not self._distributed: return -1
+        if dtype is None: dtype = data.dtype
+        mpi_type = get_mpi_type(dtype)
+        return self.comm.Isend([data, mpi_type], dest, tag)
+
+    def mpi_Request_Waitall(self, hooks):
+        if not self._distributed: return
+        MPI.Request.Waitall(hooks)
+
+    def mpi_Request_Waititer(self, hooks):
+        for i in xrange(len(hooks)):
+            req = MPI.Request.Waitany(hooks)
+            yield req
+
+    def mpi_Request_Testall(self, hooks):
+        """
+        This returns False if any of the request hooks are un-finished,
+        and True if they are all finished.
+        """
+        if not self._distributed: return True
+        return MPI.Request.Testall(hooks)
+
+    ###
+    # End non-blocking stuff.
+    ###
+
+    ###
+    # Parallel rank and size properties.
+    ###
+
+    @property
+    def size(self):
+        if not self._distributed: return 1
+        return self.comm.size
+
+    @property
+    def rank(self):
+        if not self._distributed: return 0
+        return self.comm.rank
+
+    def mpi_info_dict(self, info):
+        if not self._distributed: return 0, {0:info}
+        data = None
+        if self.comm.rank == 0:
+            data = {0:info}
+            for i in range(1, self.comm.size):
+                data[i] = self.comm.recv(source=i, tag=0)
+        else:
+            self.comm.send(info, dest=0, tag=0)
+        mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
+        data = self.comm.bcast(data, root=0)
+        return self.comm.rank, data
+
+    def claim_object(self, obj):
+        if not self._distributed: return
+        obj._owner = self.comm.rank
+        obj._distributed = True
+
+    def do_not_claim_object(self, obj):
+        if not self._distributed: return
+        obj._owner = -1
+        obj._distributed = True
+
+    def write_on_root(self, fn):
+        if not self._distributed: return open(fn, "w")
+        if self.comm.rank == 0:
+            return open(fn, "w")
+        else:
+            return cStringIO.StringIO()
+
+    def get_filename(self, prefix, rank=None):
+        if not self._distributed: return prefix
+        if rank == None:
+            return "%s_%04i" % (prefix, self.comm.rank)
+        else:
+            return "%s_%04i" % (prefix, rank)
+
+    def is_mine(self, obj):
+        if not obj._distributed: return True
+        return (obj._owner == self.comm.rank)
+
+    def send_quadtree(self, target, buf, tgd, args):
+        sizebuf = na.zeros(1, 'int64')
+        sizebuf[0] = buf[0].size
+        self.comm.Send([sizebuf, MPI.LONG], dest=target)
+        self.comm.Send([buf[0], MPI.INT], dest=target)
+        self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
+        self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
+        
+    def recv_quadtree(self, target, tgd, args):
+        sizebuf = na.zeros(1, 'int64')
+        self.comm.Recv(sizebuf, source=target)
+        buf = [na.empty((sizebuf[0],), 'int32'),
+               na.empty((sizebuf[0], args[2]),'float64'),
+               na.empty((sizebuf[0],),'float64')]
+        self.comm.Recv([buf[0], MPI.INT], source=target)
+        self.comm.Recv([buf[1], MPI.DOUBLE], source=target)
+        self.comm.Recv([buf[2], MPI.DOUBLE], source=target)
+        return buf
+
+    @parallel_passthrough
+    def merge_quadtree_buffers(self, qt):
+        # This is a modified version of pairwise reduction from Lisandro Dalcin,
+        # in the reductions demo of mpi4py
+        size = self.comm.size
+        rank = self.comm.rank
+
+        mask = 1
+
+        args = qt.get_args() # Will always be the same
+        tgd = na.array([args[0], args[1]], dtype='int64')
+        sizebuf = na.zeros(1, 'int64')
+
+        while mask < size:
+            if (mask & rank) != 0:
+                target = (rank & ~mask) % size
+                #print "SENDING FROM %02i to %02i" % (rank, target)
+                buf = qt.tobuffer()
+                self.send_quadtree(target, buf, tgd, args)
+                #qt = self.recv_quadtree(target, tgd, args)
+            else:
+                target = (rank | mask)
+                if target < size:
+                    #print "RECEIVING FROM %02i on %02i" % (target, rank)
+                    buf = self.recv_quadtree(target, tgd, args)
+                    qto = QuadTree(tgd, args[2])
+                    qto.frombuffer(*buf)
+                    merge_quadtrees(qt, qto)
+                    del qto
+                    #self.send_quadtree(target, qt, tgd, args)
+            mask <<= 1
+
+        if rank == 0:
+            buf = qt.tobuffer()
+            sizebuf[0] = buf[0].size
+        self.comm.Bcast([sizebuf, MPI.LONG], root=0)
+        if rank != 0:
+            buf = [na.empty((sizebuf[0],), 'int32'),
+                   na.empty((sizebuf[0], args[2]),'float64'),
+                   na.empty((sizebuf[0],),'float64')]
+        self.comm.Bcast([buf[0], MPI.INT], root=0)
+        self.comm.Bcast([buf[1], MPI.DOUBLE], root=0)
+        self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
+        self.refined = buf[0]
+        if rank != 0:
+            qt = QuadTree(tgd, args[2])
+            qt.frombuffer(*buf)
+        return qt
+
+
+    def send_array(self, arr, dest, tag = 0):
+        if not isinstance(arr, na.ndarray):
+            self.comm.send((None,None), dest=dest, tag=tag)
+            self.comm.send(arr, dest=dest, tag=tag)
+            return
+        tmp = arr.view(self.__tocast) # Cast to CHAR
+        # communicate type and shape
+        self.comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
+        self.comm.Send([arr, MPI.CHAR], dest=dest, tag=tag)
+        del tmp
+
+    def recv_array(self, source, tag = 0):
+        dt, ne = self.comm.recv(source=source, tag=tag)
+        if dt is None and ne is None:
+            return self.comm.recv(source=source, tag=tag)
+        arr = na.empty(ne, dtype=dt)
+        tmp = arr.view(self.__tocast)
+        self.comm.Recv([tmp, MPI.CHAR], source=source, tag=tag)
+        return arr
+
+    def alltoallv_array(self, send, total_size, offsets, sizes):
+        if len(send.shape) > 1:
+            recv = []
+            for i in range(send.shape[0]):
+                recv.append(self.alltoallv_array(send[i,:].copy(), 
+                                                 total_size, offsets, sizes))
+            recv = na.array(recv)
+            return recv
+        offset = offsets[self.comm.rank]
+        tmp_send = send.view(self.__tocast)
+        recv = na.empty(total_size, dtype=send.dtype)
+        recv[offset:offset+send.size] = send[:]
+        dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
+        roff = [off * dtr for off in offsets]
+        rsize = [siz * dtr for siz in sizes]
+        tmp_recv = recv.view(self.__tocast)
+        self.comm.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
+                                  (tmp_recv, (rsize, roff), MPI.CHAR))
+        return recv
+
+communication_system = CommunicationSystem()
+if parallel_capable:
+    ranks = na.arange(MPI.COMM_WORLD.size)
+    communication_system.push_with_ids(ranks)
+
+class ParallelAnalysisInterface(object):
+    comm = None
     _grids = None
-    _distributed = parallel_capable
+    _distributed = None
+
+    def __init__(self):
+        self.comm = communication_system.communicators[-1]
+        self._grids = self.comm._grids
+        self._distributed = self.comm._distributed
 
     def _get_objs(self, attr, *args, **kwargs):
         if self._distributed:
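A hedged usage sketch for the parallel_objects generator added in the hunk above; the dataset list "pfs" and measure_mass() are hypothetical stand-ins. Note that, as written, the generator raises RuntimeError unless yt is running MPI-capable.

    storage = {}
    for sto, pf in parallel_objects(pfs, njobs=4, storage=storage):
        sto.result = measure_mass(pf)   # each job handles every 4th object
    # The per-job results are joined via par_combine_object(datatype='dict',
    # op='join'), so afterwards every rank holds the full {result_id: result} map.
    for result_id in sorted(storage):
        print result_id, storage[result_id]
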
@@ -268,19 +760,28 @@
             return ParallelObjectIterator(self, True, attr='_grids')
         return ObjectIterator(self, True, attr='_grids')
 
+    def get_dependencies(self, fields):
+        deps = []
+        fi = self.pf.field_info
+        for field in fields:
+            deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
+        return list(set(deps))
+
     def _initialize_parallel(self):
         pass
 
     def _finalize_parallel(self):
         pass
 
-    def _partition_hierarchy_2d(self, axis):
+
+    def partition_hierarchy_2d(self, axis):
         if not self._distributed:
-           return False, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
+           return False, self.hierarchy.grid_collection(self.center, 
+                                                        self.hierarchy.grids)
 
         xax, yax = x_dict[axis], y_dict[axis]
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-        mi = MPI.COMM_WORLD.rank
+        cc = MPI.Compute_dims(self.comm.size, 2)
+        mi = self.comm.rank
         cx, cy = na.unravel_index(mi, cc)
         x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]
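For concreteness, the decomposition lines just above work out as follows on a hypothetical 6-rank communicator (MPI_Dims_create is free to pick the factorization, so the numbers are illustrative):

    cc = MPI.Compute_dims(6, 2)              # typically [3, 2]
    cx, cy = na.unravel_index(4, cc)         # rank 4 -> (2, 0)
    x = na.mgrid[0:1:(cc[0]+1)*1j][cx:cx+2]  # [0.6667, 1.0]
    y = na.mgrid[0:1:(cc[1]+1)*1j][cy:cy+2]  # [0.0, 0.5]
    # i.e. rank 4 is handed the patch x in [2/3, 1], y in [0, 1/2] along this axis.
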
@@ -297,36 +798,7 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_2d_inclined(self, unit_vectors, origin, widths,
-                                         box_vectors, resolution = (1.0, 1.0)):
-        if not self._distributed:
-            ib = self.hierarchy.inclined_box(origin, box_vectors)
-            return False, ib, resolution
-        # We presuppose that unit_vectors is already unitary.  If it's not,
-        # caveat emptor.
-        uv = na.array(unit_vectors)
-        inv_mat = na.linalg.pinv(uv)
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-        mi = MPI.COMM_WORLD.rank
-        cx, cy = na.unravel_index(mi, cc)
-        resolution = (1.0/cc[0], 1.0/cc[1])
-        # We are rotating with respect to the *origin*, not the back center,
-        # so we go from 0 .. width.
-        px = na.mgrid[0.0:1.0:(cc[0]+1)*1j][cx] * widths[0]
-        py = na.mgrid[0.0:1.0:(cc[1]+1)*1j][cy] * widths[1]
-        nxo = inv_mat[0,0]*px + inv_mat[0,1]*py + origin[0]
-        nyo = inv_mat[1,0]*px + inv_mat[1,1]*py + origin[1]
-        nzo = inv_mat[2,0]*px + inv_mat[2,1]*py + origin[2]
-        nbox_vectors = na.array(
-                       [unit_vectors[0] * widths[0]/cc[0],
-                        unit_vectors[1] * widths[1]/cc[1],
-                        unit_vectors[2] * widths[2]],
-                        dtype='float64')
-        norigin = na.array([nxo, nyo, nzo])
-        box = self.hierarchy.inclined_box(norigin, nbox_vectors)
-        return True, box, resolution
-        
-    def _partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
+    def partition_hierarchy_3d(self, ds, padding=0.0, rank_ratio = 1):
         LE, RE = na.array(ds.left_edge), na.array(ds.right_edge)
         # We need to establish if we're looking at a subvolume, in which case
         # we *do* want to pad things.
@@ -348,15 +820,15 @@
             # grid that belongs to this processor.
             grids = self.pf.h.select_grids(0)
             root_grids = [g for g in grids
-                          if g.proc_num == MPI.COMM_WORLD.rank]
+                          if g.proc_num == self.comm.rank]
             if len(root_grids) != 1: raise RuntimeError
             #raise KeyError
             LE = root_grids[0].LeftEdge
             RE = root_grids[0].RightEdge
             return True, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
-        mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+        mi = self.comm.rank % (self.comm.size / rank_ratio)
         cx, cy, cz = na.unravel_index(mi, cc)
         x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -372,7 +844,7 @@
 
         return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
 
-    def _partition_region_3d(self, left_edge, right_edge, padding=0.0,
+    def partition_region_3d(self, left_edge, right_edge, padding=0.0,
             rank_ratio = 1):
         """
         Given a region, it subdivides it into smaller regions for parallel
@@ -382,8 +854,8 @@
         if not self._distributed:
             return LE, RE, re
         
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
-        mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+        cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
+        mi = self.comm.rank % (self.comm.size / rank_ratio)
         cx, cy, cz = na.unravel_index(mi, cc)
         x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -399,7 +871,7 @@
 
         return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
-    def _partition_hierarchy_3d_bisection_list(self):
+    def partition_hierarchy_3d_bisection_list(self):
         """
         Returns an array that is used to drive _partition_hierarchy_3d_bisection,
         below.
@@ -417,8 +889,8 @@
                 i += 1
             return [n]
 
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        si = MPI.COMM_WORLD.size
+        cc = MPI.Compute_dims(self.comm.size, 3)
+        si = self.comm.size
         
         factors = factor(si)
         xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
@@ -441,935 +913,4 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-        
-
-    def _partition_hierarchy_3d_bisection(self, axis, bins, counts, top_bounds = None,\
-        old_group = None, old_comm = None, cut=None, old_cc=None):
-        """
-        Partition the volume into evenly weighted subvolumes using the distribution
-        in counts. The bisection happens in the MPI communicator group old_group.
-        You may need to set "MPI_COMM_MAX" and "MPI_GROUP_MAX" environment 
-        variables.
-        """
-        counts = counts.astype('int64')
-        if not self._distributed:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-            return False, LE, RE, self.hierarchy.grid_collection(self.center, self.hierarchy.grids)
-        
-        # First time through the world is the current group.
-        if old_group == None or old_comm == None:
-            old_group = MPI.COMM_WORLD.Get_group()
-            old_comm = MPI.COMM_WORLD
-        
-        # Figure out the gridding based on the deepness of cuts.
-        if old_cc is None:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        else:
-            cc = old_cc
-        cc[cut[0]] /= cut[1]
-        # Set the boundaries of the full bounding box for this group.
-        if top_bounds == None:
-            LE, RE = self.pf.domain_left_edge.copy(), self.pf.domain_right_edge.copy()
-        else:
-            LE, RE = top_bounds
-
-        ra = old_group.Get_rank() # In this group, not WORLD, unless it's the first time.
-        
-        # First find the total number of particles in my group.
-        parts = old_comm.allreduce(int(counts.sum()), op=MPI.SUM)
-        # Now the full sum in the bins along this axis in this group.
-        full_counts = na.empty(counts.size, dtype='int64')
-        old_comm.Allreduce([counts, MPI.LONG], [full_counts, MPI.LONG], op=MPI.SUM)
-        # Find the bin that passes the cut points.
-        midpoints = [LE[axis]]
-        sum = 0
-        bin = 0
-        for step in xrange(1,cut[1]):
-            while sum < ((parts*step)/cut[1]):
-                lastsum = sum
-                sum += full_counts[bin]
-                bin += 1
-            # Bin edges
-            left_edge = bins[bin-1]
-            right_edge = bins[bin]
-            # Find a better approx of the midpoint cut line using a linear approx.
-            a = float(sum - lastsum) / (right_edge - left_edge)
-            midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
-            #midpoint = (left_edge + right_edge) / 2.
-        midpoints.append(RE[axis])
-        # Now we need to split the members of this group into chunks. 
-        # The values that go into the _ranks are the ranks of the tasks
-        # in *this* communicator group, which go zero to size - 1. They are not
-        # the same as the global ranks!
-        groups = {}
-        ranks = {}
-        old_group_size = old_group.Get_size()
-        for step in xrange(cut[1]):
-            groups[step] = na.arange(step*old_group_size/cut[1], (step+1)*old_group_size/cut[1])
-            # [ (start, stop, step), ]
-            ranks[step] = [ (groups[step][0], groups[step][-1], 1), ] 
-        
-        # Based on where we are, adjust our LE or RE, depending on axis. At the
-        # same time assign the new MPI group membership.
-        for step in xrange(cut[1]):
-            if ra in groups[step]:
-                LE[axis] = midpoints[step]
-                RE[axis] = midpoints[step+1]
-                new_group = old_group.Range_incl(ranks[step])
-                new_comm = old_comm.Create(new_group)
-        
-        if old_cc is not None:
-            old_group.Free()
-            old_comm.Free()
-        
-        new_top_bounds = (LE,RE)
-        
-        # Using the new boundaries, regrid.
-        mi = new_comm.rank
-        cx, cy, cz = na.unravel_index(mi, cc)
-        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-
-        my_LE = na.array([x[0], y[0], z[0]], dtype='float64')
-        my_RE = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # Return a new subvolume and associated stuff.
-        return new_group, new_comm, my_LE, my_RE, new_top_bounds, cc,\
-            self.hierarchy.region_strict(self.center, my_LE, my_RE)
-
-    def _mpi_find_neighbor_3d(self, shift):
-        """ Given a shift array, 1x3 long, find the task ID
-        of that neighbor. For example, shift=[1,0,0] finds the neighbor
-        immediately to the right in the positive x direction. Each task
-        has 26 neighbors, of which some may be itself depending on the number
-        and arrangement of tasks.
-        """
-        if not self._distributed: return 0
-        shift = na.array(shift)
-        cc = na.array(MPI.Compute_dims(MPI.COMM_WORLD.size, 3))
-        mi = MPI.COMM_WORLD.rank
-        si = MPI.COMM_WORLD.size
-        # store some facts about myself
-        mi_cx,mi_cy,mi_cz = na.unravel_index(mi,cc)
-        mi_ar = na.array([mi_cx,mi_cy,mi_cz])
-        # these are identical on all tasks
-        # should these be calculated once and stored?
-        #dLE = na.empty((si,3), dtype='float64') # positions not needed yet...
-        #dRE = na.empty((si,3), dtype='float64')
-        tasks = na.empty((cc[0],cc[1],cc[2]), dtype='int64')
-        
-        for i in range(si):
-            cx,cy,cz = na.unravel_index(i,cc)
-            tasks[cx,cy,cz] = i
-            #x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
-            #y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
-            #z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
-            #dLE[i, :] = na.array([x[0], y[0], z[0]], dtype='float64')
-            #dRE[i, :] = na.array([x[1], y[1], z[1]], dtype='float64')
-        
-        # find the neighbor
-        ne = (mi_ar + shift) % cc
-        ne = tasks[ne[0],ne[1],ne[2]]
-        return ne
-        
-        
-    def _barrier(self):
-        if not self._distributed: return
-        mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
-        MPI.COMM_WORLD.Barrier()
-
-    def _mpi_exit_test(self, data=False):
-        # data==True -> exit. data==False -> no exit
-        mine, statuses = self._mpi_info_dict(data)
-        if True in statuses.values():
-            raise RuntimeError("Fatal error. Exiting.")
-        return None
-
-    @parallel_passthrough
-    def _mpi_catrgb(self, data):
-        self._barrier()
-        data, final = data
-        if MPI.COMM_WORLD.rank == 0:
-            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
-            nsize = final[0]/cc[0], final[1]/cc[1]
-            new_image = na.zeros((final[0], final[1], 6), dtype='float64')
-            new_image[0:nsize[0],0:nsize[1],:] = data[:]
-            for i in range(1,MPI.COMM_WORLD.size):
-                cy, cx = na.unravel_index(i, cc)
-                mylog.debug("Receiving image from % into bits %s:%s, %s:%s",
-                    i, nsize[0]*cx,nsize[0]*(cx+1),
-                       nsize[1]*cy,nsize[1]*(cy+1))
-                buf = _recv_array(source=i, tag=0).reshape(
-                    (nsize[0],nsize[1],6))
-                new_image[nsize[0]*cy:nsize[0]*(cy+1),
-                          nsize[1]*cx:nsize[1]*(cx+1),:] = buf[:]
-            data = new_image
-        else:
-            _send_array(data.ravel(), dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data)
-        return (data, final)
-
-    @parallel_passthrough
-    def _mpi_catdict(self, data):
-        field_keys = data.keys()
-        field_keys.sort()
-        size = data[field_keys[0]].shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        for key in field_keys:
-            dd = data[key]
-            rv = _alltoallv_array(dd, arr_size, offsets, sizes)
-            data[key] = rv
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict(self, data):
-        #self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                data.update(MPI.COMM_WORLD.recv(source=i, tag=0))
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        #self._barrier()
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict_unpickled_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.DOUBLE], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='float64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.DOUBLE], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.DOUBLE], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys, root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_joindict_unpickled_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                keys = na.empty(size, dtype='int64')
-                values = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([keys, MPI.LONG], i, 0)
-                MPI.COMM_WORLD.Recv([values, MPI.LONG], i, 0)
-                for i,key in enumerate(keys):
-                    data[key] = values[i]
-            # Now convert root's data to arrays.
-            size = len(data)
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        else:
-            MPI.COMM_WORLD.send(len(data), 0, 0)
-            keys = na.empty(len(data), dtype='int64')
-            values = na.empty(len(data), dtype='int64')
-            count = 0
-            for key in data:
-                keys[count] = key
-                values[count] = data[key]
-                count += 1
-            MPI.COMM_WORLD.Send([keys, MPI.LONG], 0, 0)
-            MPI.COMM_WORLD.Send([values, MPI.LONG], 0, 0)
-        # Now send it back as arrays.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del keys, values
-            root_keys = na.empty(size, dtype='int64')
-            root_values = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        # Convert back to a dict.
-        del data
-        data = dict(itertools.izip(root_keys,root_values))
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.concatenate((data, new_data))
-            size = data.size
-            del new_data
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Now we distribute the full array.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del data
-            data = na.empty(size, dtype='int64')
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
-                data = na.concatenate((data, new_data))
-            size = data.size
-            del new_data
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
-        # Now we distribute the full array.
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            del data
-            data = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_double(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.DOUBLE], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_int(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int32')
-                MPI.COMM_WORLD.Recv([new_data, MPI.INT], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.INT], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_concatenate_array_on_root_long(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1, MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                new_data = na.empty(size, dtype='int64')
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.concatenate((data, new_data))
-        else:
-            MPI.COMM_WORLD.send(data.size, 0, 0)
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_minimum_array_long(self, data):
-        """
-        Specifically for parallelHOP. For the identical array on each task,
-        it merges the arrays together, taking the lower value at each index.
-        """
-        self._barrier()
-        size = data.size # They're all the same size, of course
-        if MPI.COMM_WORLD.rank == 0:
-            new_data = na.empty(size, dtype='int64')
-            for i in range(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([new_data, MPI.LONG], i, 0)
-                data = na.minimum(data, new_data)
-            del new_data
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.LONG], 0, 0)
-        # Redistribute from root
-        MPI.COMM_WORLD.Bcast([data, MPI.LONG], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_bcast_long_dict_unpickled(self, data):
-        self._barrier()
-        size = 0
-        if MPI.COMM_WORLD.rank == 0:
-            size = len(data)
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        root_keys = na.empty(size, dtype='int64')
-        root_values = na.empty(size, dtype='int64')
-        if MPI.COMM_WORLD.rank == 0:
-            count = 0
-            for key in data:
-                root_keys[count] = key
-                root_values[count] = data[key]
-                count += 1
-        MPI.COMM_WORLD.Bcast([root_keys, MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([root_values, MPI.LONG], root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            data = {}
-            for i,key in enumerate(root_keys):
-                data[key] = root_values[i]
-        return data
-
-    @parallel_passthrough
-    def _mpi_maxdict(self, data):
-        """
-        For each key in data, find the maximum value across all tasks, and
-        then broadcast it back.
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                temp_data = MPI.COMM_WORLD.recv(source=i, tag=0)
-                for key in temp_data:
-                    try:
-                        old_value = data[key]
-                    except KeyError:
-                        # This guarantees the new value gets added.
-                        old_value = None
-                    if old_value < temp_data[key]:
-                        data[key] = temp_data[key]
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
-    def _mpi_maxdict_dict(self, data):
-        """
-        Similar to above, but finds maximums for dicts of dicts. This is
-        specificaly for a part of chainHOP.
-        """
-        if not self._distributed:
-            top_keys = []
-            bot_keys = []
-            vals = []
-            for top_key in data:
-                for bot_key in data[top_key]:
-                    top_keys.append(top_key)
-                    bot_keys.append(bot_key)
-                    vals.append(data[top_key][bot_key])
-            top_keys = na.array(top_keys, dtype='int64')
-            bot_keys = na.array(bot_keys, dtype='int64')
-            vals = na.array(vals, dtype='float64')
-            return (top_keys, bot_keys, vals)
-        self._barrier()
-        size = 0
-        top_keys = []
-        bot_keys = []
-        vals = []
-        for top_key in data:
-            for bot_key in data[top_key]:
-                top_keys.append(top_key)
-                bot_keys.append(bot_key)
-                vals.append(data[top_key][bot_key])
-        top_keys = na.array(top_keys, dtype='int64')
-        bot_keys = na.array(bot_keys, dtype='int64')
-        vals = na.array(vals, dtype='float64')
-        del data
-        if MPI.COMM_WORLD.rank == 0:
-            for i in range(1,MPI.COMM_WORLD.size):
-                size = MPI.COMM_WORLD.recv(source=i, tag=0)
-                mylog.info('Global Hash Table Merge %d of %d size %d' % \
-                    (i,MPI.COMM_WORLD.size, size))
-                recv_top_keys = na.empty(size, dtype='int64')
-                recv_bot_keys = na.empty(size, dtype='int64')
-                recv_vals = na.empty(size, dtype='float64')
-                MPI.COMM_WORLD.Recv([recv_top_keys, MPI.LONG], source=i, tag=0)
-                MPI.COMM_WORLD.Recv([recv_bot_keys, MPI.LONG], source=i, tag=0)
-                MPI.COMM_WORLD.Recv([recv_vals, MPI.DOUBLE], source=i, tag=0)
-                top_keys = na.concatenate([top_keys, recv_top_keys])
-                bot_keys = na.concatenate([bot_keys, recv_bot_keys])
-                vals = na.concatenate([vals, recv_vals])
-#                 for j, top_key in enumerate(top_keys):
-#                     if j%1000 == 0: mylog.info(j)
-#                     # Make sure there's an entry for top_key in data
-#                     try:
-#                         test = data[top_key]
-#                     except KeyError:
-#                         data[top_key] = {}
-#                     try:
-#                         old_value = data[top_key][bot_keys[j]]
-#                     except KeyError:
-#                         # This guarantees the new value gets added.
-#                         old_value = None
-#                     if old_value < vals[j]:
-#                         data[top_key][bot_keys[j]] = vals[j]
-        else:
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
-            size = top_keys.size
-            MPI.COMM_WORLD.send(size, dest=0, tag=0)
-            MPI.COMM_WORLD.Send([top_keys, MPI.LONG], dest=0, tag=0)
-            MPI.COMM_WORLD.Send([bot_keys, MPI.LONG], dest=0, tag=0)
-            MPI.COMM_WORLD.Send([vals, MPI.DOUBLE], dest=0, tag=0)
-        # Getting ghetto here, we're going to decompose the dict into arrays,
-        # send that, and then reconstruct it. When data is too big the pickling
-        # of the dict fails.
-        if MPI.COMM_WORLD.rank == 0:
-#             data = defaultdict(dict)
-#             for i,top_key in enumerate(top_keys):
-#                 try:
-#                     old = data[top_key][bot_keys[i]]
-#                 except KeyError:
-#                     old = None
-#                 if old < vals[i]:
-#                     data[top_key][bot_keys[i]] = vals[i]
-#             top_keys = []
-#             bot_keys = []
-#             vals = []
-#             for top_key in data:
-#                 for bot_key in data[top_key]:
-#                     top_keys.append(top_key)
-#                     bot_keys.append(bot_key)
-#                     vals.append(data[top_key][bot_key])
-#             del data
-#             top_keys = na.array(top_keys, dtype='int64')
-#             bot_keys = na.array(bot_keys, dtype='int64')
-#             vals = na.array(vals, dtype='float64')
-            size = top_keys.size
-        # Broadcast them using array methods
-        size = MPI.COMM_WORLD.bcast(size, root=0)
-        if MPI.COMM_WORLD.rank != 0:
-            top_keys = na.empty(size, dtype='int64')
-            bot_keys = na.empty(size, dtype='int64')
-            vals = na.empty(size, dtype='float64')
-        MPI.COMM_WORLD.Bcast([top_keys,MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([bot_keys,MPI.LONG], root=0)
-        MPI.COMM_WORLD.Bcast([vals, MPI.DOUBLE], root=0)
-        return (top_keys, bot_keys, vals)
-
-    @parallel_passthrough
-    def __mpi_recvlist(self, data):
-        # First we receive, then we make a new list.
-        data = ensure_list(data)
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = ensure_list(MPI.COMM_WORLD.recv(source=i, tag=0))
-            data += buf
-        return data
-
-    @parallel_passthrough
-    def _mpi_catlist(self, data):
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvlist(data)
-        else:
-            MPI.COMM_WORLD.send(data, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return data
-
-    @parallel_passthrough
-    def __mpi_recvarrays(self, data):
-        # First we receive, then we make a new list.
-        for i in range(1,MPI.COMM_WORLD.size):
-            buf = _recv_array(source=i, tag=0)
-            if buf is not None:
-                if data is None: data = buf
-                else: data = na.concatenate([data, buf])
-        return data
-
-    @parallel_passthrough
-    def _mpi_cat_na_array(self,data):
-        self._barrier()
-        comm = MPI.COMM_WORLD
-        if comm.rank == 0:
-            for i in range(1,comm.size):
-                buf = comm.recv(source=i, tag=0)
-                data = na.concatenate([data,buf])
-        else:
-            comm.send(data, 0, tag = 0)
-        data = comm.bcast(data, root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_catarray(self, data):
-        if data is None:
-            ncols = -1
-            size = 0
-        else:
-            if len(data) == 0:
-                ncols = -1
-                size = 0
-            elif len(data.shape) == 1:
-                ncols = 1
-                size = data.shape[0]
-            else:
-                ncols, size = data.shape
-        ncols = MPI.COMM_WORLD.allreduce(ncols, op=MPI.MAX)
-        if size == 0:
-            data = na.zeros((ncols,0), dtype='float64') # This only works for
-        size = data.shape[-1]
-        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
-        outsize = na.array(size, dtype='int64')
-        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
-                                 [sizes, 1, MPI.LONG] )
-        # This nested concatenate is to get the shapes to work out correctly;
-        # if we just add [0] to sizes, it will broadcast a summation, not a
-        # concatenation.
-        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
-        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
-        data = _alltoallv_array(data, arr_size, offsets, sizes)
-        return data
-
-    @parallel_passthrough
-    def _mpi_bcast_pickled(self, data):
-        #self._barrier()
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        return data
-
-    def _should_i_write(self):
-        if not self._distributed: return True
-        return (MPI.COMM_WORLD == 0)
-
-    def _preload(self, grids, fields, io_handler):
-        # This will preload if it detects we are parallel capable and
-        # if so, we load *everything* that we need.  Use with some care.
-        mylog.debug("Preloading %s from %s grids", fields, len(grids))
-        if not self._distributed: return
-        io_handler.preload(grids, fields)
-
-    @parallel_passthrough
-    def _mpi_double_array_max(self,data):
-        """
-        Finds the na.maximum of a distributed array and returns the result
-        back to all. The array should be the same length on all tasks!
-        """
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            recv_data = na.empty(data.size, dtype='float64')
-            for i in xrange(1, MPI.COMM_WORLD.size):
-                MPI.COMM_WORLD.Recv([recv_data, MPI.DOUBLE], source=i, tag=0)
-                data = na.maximum(data, recv_data)
-        else:
-            MPI.COMM_WORLD.Send([data, MPI.DOUBLE], dest=0, tag=0)
-        MPI.COMM_WORLD.Bcast([data, MPI.DOUBLE], root=0)
-        return data
-
-    @parallel_passthrough
-    def _mpi_allsum(self, data):
-        #self._barrier()
-        # We use old-school pickling here on the assumption the arrays are
-        # relatively small ( < 1e7 elements )
-        if isinstance(data, na.ndarray) and data.dtype != na.bool:
-            tr = na.zeros_like(data)
-            if not data.flags.c_contiguous: data = data.copy()
-            MPI.COMM_WORLD.Allreduce(data, tr, op=MPI.SUM)
-            return tr
-        else:
-            return MPI.COMM_WORLD.allreduce(data, op=MPI.SUM)
-
-    @parallel_passthrough
-    def _mpi_Allsum_double(self, data):
-        self._barrier()
-        # Non-pickling float allsum of a float array, data.
-        temp = data.copy()
-        MPI.COMM_WORLD.Allreduce([temp, MPI.DOUBLE], [data, MPI.DOUBLE], op=MPI.SUM)
-        del temp
-        return data
-
-    @parallel_passthrough
-    def _mpi_Allsum_long(self, data):
-        self._barrier()
-        # Non-pickling float allsum of an int array, data.
-        temp = data.copy()
-        MPI.COMM_WORLD.Allreduce([temp, MPI.LONG], [data, MPI.LONG], op=MPI.SUM)
-        del temp
-        return data
-
-    @parallel_passthrough
-    def _mpi_allmax(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MAX)
-
-    @parallel_passthrough
-    def _mpi_allmin(self, data):
-        self._barrier()
-        return MPI.COMM_WORLD.allreduce(data, op=MPI.MIN)
-
-    ###
-    # Non-blocking stuff.
-    ###
-
-    def _mpi_Irecv_long(self, data, source, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.LONG], source, tag)
-
-    def _mpi_Irecv_double(self, data, source, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.DOUBLE], source, tag)
-
-    def _mpi_Isend_long(self, data, dest, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
-
-    def _mpi_Isend_double(self, data, dest, tag=0):
-        if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.DOUBLE], dest, tag)
-
-    def _mpi_Request_Waitall(self, hooks):
-        if not self._distributed: return
-        MPI.Request.Waitall(hooks)
-
-    def _mpi_Request_Waititer(self, hooks):
-        for i in xrange(len(hooks)):
-            req = MPI.Request.Waitany(hooks)
-            yield req
-
-    def _mpi_Request_Testall(self, hooks):
-        """
-        This returns False if any of the request hooks are un-finished,
-        and True if they are all finished.
-        """
-        if not self._distributed: return True
-        return MPI.Request.Testall(hooks)
-
-    ###
-    # End non-blocking stuff.
-    ###
-
-    def _mpi_get_size(self):
-        if not self._distributed: return 1
-        return MPI.COMM_WORLD.size
-
-    def _mpi_get_rank(self):
-        if not self._distributed: return 0
-        return MPI.COMM_WORLD.rank
-
-    def _mpi_info_dict(self, info):
-        if not self._distributed: return 0, {0:info}
-        self._barrier()
-        data = None
-        if MPI.COMM_WORLD.rank == 0:
-            data = {0:info}
-            for i in range(1, MPI.COMM_WORLD.size):
-                data[i] = MPI.COMM_WORLD.recv(source=i, tag=0)
-        else:
-            MPI.COMM_WORLD.send(info, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = MPI.COMM_WORLD.bcast(data, root=0)
-        self._barrier()
-        return MPI.COMM_WORLD.rank, data
-
-    def _get_dependencies(self, fields):
-        deps = []
-        fi = self.pf.field_info
-        for field in fields:
-            deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
-        return list(set(deps))
-
-    def _claim_object(self, obj):
-        if not self._distributed: return
-        obj._owner = MPI.COMM_WORLD.rank
-        obj._distributed = True
-
-    def _do_not_claim_object(self, obj):
-        if not self._distributed: return
-        obj._owner = -1
-        obj._distributed = True
-
-    def _write_on_root(self, fn):
-        if not self._distributed: return open(fn, "w")
-        if MPI.COMM_WORLD.rank == 0:
-            return open(fn, "w")
-        else:
-            return cStringIO.StringIO()
-
-    def _get_filename(self, prefix, rank=None):
-        if not self._distributed: return prefix
-        if rank == None:
-            return "%s_%04i" % (prefix, MPI.COMM_WORLD.rank)
-        else:
-            return "%s_%04i" % (prefix, rank)
-
-    def _is_mine(self, obj):
-        if not obj._distributed: return True
-        return (obj._owner == MPI.COMM_WORLD.rank)
-
-    def _send_quadtree(self, target, buf, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
-        sizebuf[0] = buf[0].size
-        MPI.COMM_WORLD.Send([sizebuf, MPI.LONG], dest=target)
-        MPI.COMM_WORLD.Send([buf[0], MPI.INT], dest=target)
-        MPI.COMM_WORLD.Send([buf[1], MPI.DOUBLE], dest=target)
-        MPI.COMM_WORLD.Send([buf[2], MPI.DOUBLE], dest=target)
-        
-    def _recv_quadtree(self, target, tgd, args):
-        sizebuf = na.zeros(1, 'int64')
-        MPI.COMM_WORLD.Recv(sizebuf, source=target)
-        buf = [na.empty((sizebuf[0],), 'int32'),
-               na.empty((sizebuf[0], args[2]),'float64'),
-               na.empty((sizebuf[0],),'float64')]
-        MPI.COMM_WORLD.Recv([buf[0], MPI.INT], source=target)
-        MPI.COMM_WORLD.Recv([buf[1], MPI.DOUBLE], source=target)
-        MPI.COMM_WORLD.Recv([buf[2], MPI.DOUBLE], source=target)
-        return buf
-
-    @parallel_passthrough
-    def merge_quadtree_buffers(self, qt):
-        # This is a modified version of pairwise reduction from Lisandro Dalcin,
-        # in the reductions demo of mpi4py
-        size = MPI.COMM_WORLD.size
-        rank = MPI.COMM_WORLD.rank
-
-        mask = 1
-
-        args = qt.get_args() # Will always be the same
-        tgd = na.array([args[0], args[1]], dtype='int64')
-        sizebuf = na.zeros(1, 'int64')
-
-        while mask < size:
-            if (mask & rank) != 0:
-                target = (rank & ~mask) % size
-                #print "SENDING FROM %02i to %02i" % (rank, target)
-                buf = qt.tobuffer()
-                self._send_quadtree(target, buf, tgd, args)
-                #qt = self._recv_quadtree(target, tgd, args)
-            else:
-                target = (rank | mask)
-                if target < size:
-                    #print "RECEIVING FROM %02i on %02i" % (target, rank)
-                    buf = self._recv_quadtree(target, tgd, args)
-                    qto = QuadTree(tgd, args[2])
-                    qto.frombuffer(*buf)
-                    merge_quadtrees(qt, qto)
-                    del qto
-                    #self._send_quadtree(target, qt, tgd, args)
-            mask <<= 1
-
-        if rank == 0:
-            buf = qt.tobuffer()
-            sizebuf[0] = buf[0].size
-        MPI.COMM_WORLD.Bcast([sizebuf, MPI.LONG], root=0)
-        if rank != 0:
-            buf = [na.empty((sizebuf[0],), 'int32'),
-                   na.empty((sizebuf[0], args[2]),'float64'),
-                   na.empty((sizebuf[0],),'float64')]
-        MPI.COMM_WORLD.Bcast([buf[0], MPI.INT], root=0)
-        MPI.COMM_WORLD.Bcast([buf[1], MPI.DOUBLE], root=0)
-        MPI.COMM_WORLD.Bcast([buf[2], MPI.DOUBLE], root=0)
-        self.refined = buf[0]
-        if rank != 0:
-            qt = QuadTree(tgd, args[2])
-            qt.frombuffer(*buf)
-        return qt
-
-__tocast = 'c'
-
-def _send_array(arr, dest, tag = 0):
-    if not isinstance(arr, na.ndarray):
-        MPI.COMM_WORLD.send((None,None), dest=dest, tag=tag)
-        MPI.COMM_WORLD.send(arr, dest=dest, tag=tag)
-        return
-    tmp = arr.view(__tocast) # Cast to CHAR
-    # communicate type and shape
-    MPI.COMM_WORLD.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)
-    MPI.COMM_WORLD.Send([arr, MPI.CHAR], dest=dest, tag=tag)
-    del tmp
-
-def _recv_array(source, tag = 0):
-    dt, ne = MPI.COMM_WORLD.recv(source=source, tag=tag)
-    if dt is None and ne is None:
-        return MPI.COMM_WORLD.recv(source=source, tag=tag)
-    arr = na.empty(ne, dtype=dt)
-    tmp = arr.view(__tocast)
-    MPI.COMM_WORLD.Recv([tmp, MPI.CHAR], source=source, tag=tag)
-    return arr
-
-def _bcast_array(arr, root = 0):
-    if MPI.COMM_WORLD.rank == root:
-        tmp = arr.view(__tocast) # Cast to CHAR
-        MPI.COMM_WORLD.bcast((arr.dtype.str, arr.shape), root=root)
-    else:
-        dt, ne = MPI.COMM_WORLD.bcast(None, root=root)
-        arr = na.empty(ne, dtype=dt)
-        tmp = arr.view(__tocast)
-    MPI.COMM_WORLD.Bcast([tmp, MPI.CHAR], root=root)
-    return arr
-
-def _alltoallv_array(send, total_size, offsets, sizes):
-    if len(send.shape) > 1:
-        recv = []
-        for i in range(send.shape[0]):
-            recv.append(_alltoallv_array(send[i,:].copy(), total_size, offsets, sizes))
-        recv = na.array(recv)
-        return recv
-    offset = offsets[MPI.COMM_WORLD.rank]
-    tmp_send = send.view(__tocast)
-    recv = na.empty(total_size, dtype=send.dtype)
-    recv[offset:offset+send.size] = send[:]
-    dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
-    roff = [off * dtr for off in offsets]
-    rsize = [siz * dtr for siz in sizes]
-    tmp_recv = recv.view(__tocast)
-    MPI.COMM_WORLD.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
-                              (tmp_recv, (rsize, roff), MPI.CHAR))
-    return recv
     
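For reference, the removed _mpi_info_dict above is a hand-rolled gather-to-root followed by a broadcast. A minimal standalone sketch of the same pattern with plain mpi4py (the function name and return shape here are illustrative, not yt API):

    from mpi4py import MPI

    def share_info(info):
        # Collect one picklable object per rank on the root ...
        comm = MPI.COMM_WORLD
        gathered = comm.gather(info, root=0)
        data = dict(enumerate(gathered)) if comm.rank == 0 else None
        # ... then hand the complete {rank: info} dict back to every rank.
        data = comm.bcast(data, root=0)
        return comm.rank, data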

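merge_quadtree_buffers above walks a bit mask to do a pairwise (tree) reduction onto rank 0 before broadcasting the result. The textbook form of that loop, with the quadtree merge abstracted into an arbitrary associative merge function (a sketch assuming mpi4py, not the yt implementation):

    from mpi4py import MPI

    def tree_reduce(value, merge, comm=MPI.COMM_WORLD):
        rank, size, mask = comm.rank, comm.size, 1
        while mask < size:
            if rank & mask:
                # The lower partner absorbs this rank's partial result; we are done.
                comm.send(value, dest=rank & ~mask, tag=mask)
                break
            partner = rank | mask
            if partner < size:
                value = merge(value, comm.recv(source=partner, tag=mask))
            mask <<= 1
        # Only rank 0 now holds the fully merged value; share it with everyone.
        return comm.bcast(value, root=0)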

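_send_array and _recv_array above ship the dtype and shape over the slow pickled path and the payload over the fast buffer path by viewing the array as raw bytes. The same trick in isolation (a sketch assuming mpi4py and numpy; a modern version would view as uint8/MPI.BYTE rather than 'c'/MPI.CHAR):

    from mpi4py import MPI
    import numpy as np

    def send_array(arr, dest, tag=0, comm=MPI.COMM_WORLD):
        comm.send((arr.dtype.str, arr.shape), dest=dest, tag=tag)   # metadata, pickled
        payload = np.ascontiguousarray(arr).view(np.uint8)          # raw bytes
        comm.Send([payload, MPI.BYTE], dest=dest, tag=tag)

    def recv_array(source, tag=0, comm=MPI.COMM_WORLD):
        dtype, shape = comm.recv(source=source, tag=tag)
        arr = np.empty(shape, dtype=dtype)
        comm.Recv([arr.view(np.uint8), MPI.BYTE], source=source, tag=tag)
        return arr
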
--- a/yt/utilities/performance_counters.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/performance_counters.py	Thu Oct 20 08:52:24 2011 -0400
@@ -125,8 +125,8 @@
     def write_out(self, filename_prefix):
         if ytcfg.getboolean("yt","__parallel"):
             pfn = "%s_%03i_%03i" % (filename_prefix,
-                     ytcfg.getint("yt", "__parallel_rank"),
-                    ytcfg.getint("yt", "__parallel_size"))
+                     ytcfg.getint("yt", "__global_parallel_rank"),
+                    ytcfg.getint("yt", "__global_parallel_size"))
         else:
             pfn = "%s" % (filename_prefix)
         for n, p in sorted(self.profilers.items()):
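
The rename above only switches which config keys feed the per-rank profile filename; the scheme itself is one file per (rank, size) pair, e.g.:

    # Illustrative values; the real ones come from
    # ytcfg.getint("yt", "__global_parallel_rank") / "__global_parallel_size".
    prefix, rank, size = "yt_profile", 3, 16
    print("%s_%03i_%03i" % (prefix, rank, size))   # -> yt_profile_003_016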


--- a/yt/utilities/physical_constants.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/physical_constants.py	Thu Oct 20 08:52:24 2011 -0400
@@ -13,7 +13,7 @@
 cross_section_thompson_cgs = 6.65e-25 # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-28 # emu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1
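
The corrected constant is easy to check: the old value had the right mantissa but the wrong exponent and unit label. The elementary charge is 1.602e-19 C, and one coulomb is about 2.998e9 statcoulombs (esu), so

    elementary_charge_C = 1.602176e-19        # coulombs
    statcoulomb_per_coulomb = 2.99792458e9    # = c in cm/s divided by 10
    print(elementary_charge_C * statcoulomb_per_coulomb)   # ~4.803e-10 esu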


--- a/yt/utilities/rpdb.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/utilities/rpdb.py	Thu Oct 20 08:52:24 2011 -0400
@@ -53,8 +53,8 @@
 
 def rpdb_excepthook(exc_type, exc, tb):
     traceback.print_exception(exc_type, exc, tb)
-    task = ytcfg.getint("yt", "__parallel_rank")
-    size = ytcfg.getint("yt", "__parallel_size")
+    task = ytcfg.getint("yt", "__global_parallel_rank")
+    size = ytcfg.getint("yt", "__global_parallel_size")
     print "Starting RPDB server on task %s ; connect with 'yt rpdb %s'" \
             % (task,task)
     handler = pdb_handler(tb)


--- a/yt/visualization/streamlines.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/visualization/streamlines.py	Thu Oct 20 08:52:24 2011 -0400
@@ -99,6 +99,7 @@
                  zfield='x-velocity', volume=None,
                  dx=None, length=None, direction=1,
                  get_magnitude=False):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.start_positions = na.array(positions)
         self.N = self.start_positions.shape[0]
@@ -124,8 +125,8 @@
             self.magnitudes = na.zeros((self.N,self.steps), dtype='float64')
         
     def integrate_through_volume(self):
-        nprocs = self._mpi_get_size()
-        my_rank = self._mpi_get_rank()
+        nprocs = self.comm.size
+        my_rank = self.comm.rank
         self.streamlines[my_rank::nprocs,0,:] = self.start_positions[my_rank::nprocs]
 
         pbar = get_pbar("Streamlining", self.N)
@@ -144,8 +145,8 @@
        
     @parallel_passthrough
     def _finalize_parallel(self,data):
-        self.streamlines = self._mpi_allsum(self.streamlines)
-        self.magnitudes = self._mpi_allsum(self.magnitudes)
+        self.streamlines = self.comm.mpi_allreduce(self.streamlines, op='sum')
+        self.magnitudes = self.comm.mpi_allreduce(self.magnitudes, op='sum')
         
     def _integrate_through_brick(self, node, stream, step,
                                  periodic=False, mag=None):
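
The streamlines change keeps the same parallel strategy and just routes it through the new communicator object: each rank integrates every nprocs-th streamline, leaves the others zeroed, and a sum-allreduce stitches the full arrays back together. A self-contained sketch of that pattern (mpi4py; the shapes and the "integration" step are placeholders):

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    n_streams, n_steps = 64, 100
    streamlines = np.zeros((n_streams, n_steps, 3), dtype="float64")

    for i in range(comm.rank, n_streams, comm.size):   # round-robin ownership
        streamlines[i] = i                             # placeholder for real work

    # Zeros everywhere a rank did not work make the elementwise sum a concatenation.
    comm.Allreduce(MPI.IN_PLACE, streamlines, op=MPI.SUM)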


--- a/yt/visualization/volume_rendering/api.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/visualization/volume_rendering/api.py	Thu Oct 20 08:52:24 2011 -0400
@@ -35,11 +35,9 @@
 from yt.utilities.amr_utils import PartitionedGrid, VectorPlane, \
     TransferFunctionProxy
 from grid_partitioner import HomogenizedVolume, \
-                             HomogenizedBrickCollection, \
                              export_partitioned_grids, \
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
-from software_sampler import VolumeRendering
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection


--- a/yt/visualization/volume_rendering/camera.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/visualization/volume_rendering/camera.py	Thu Oct 20 08:52:24 2011 -0400
@@ -180,6 +180,7 @@
         >>> image = cam.snapshot(fn='my_rendering.png')
 
         """
+        ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
         if not iterable(resolution):
             resolution = (resolution, resolution)
@@ -356,7 +357,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self.comm.rank is 0 and fn is not None:
             if clip_ratio is not None:
                 write_bitmap(image, fn, clip_ratio*image.std())
             else:
@@ -623,7 +624,7 @@
             pbar.update(total_cells)
         pbar.finish()
 
-        if self._mpi_get_rank() is 0 and fn is not None:
+        if self.comm.rank is 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
             import matplotlib.figure
             import matplotlib.backends.backend_agg
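
snapshot() above is the usual root-only-output pattern: every rank helps build the image, but only rank 0 touches the filesystem. One style note on the new line: comparing small integers with "is" happens to work in CPython, but "self.comm.rank == 0" is the robust spelling. A minimal sketch, with writer standing in for whatever actually saves the image:

    from mpi4py import MPI

    def save_on_root(image, fn, writer):
        if MPI.COMM_WORLD.rank == 0 and fn is not None:
            writer(image, fn)        # e.g. write_bitmap(image, fn) in yt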


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Thu Oct 20 08:17:55 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Thu Oct 20 08:52:24 2011 -0400
@@ -29,8 +29,6 @@
 
 from yt.utilities.amr_utils import PartitionedGrid, ProtoPrism, GridFace, \
     grid_points_in_volume, find_grids_in_inclined_box
-from yt.utilities.parallel_tools.distributed_object_collection import \
-    DistributedObjectCollection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_root_only
 
@@ -45,6 +43,7 @@
     def __init__(self, fields = "Density", source = None, pf = None,
                  log_fields = None, no_ghost = False):
         # Typically, initialized as hanging off a hierarchy.  But, not always.
+        ParallelAnalysisInterface.__init__(self)
         self.no_ghost = no_ghost
         if pf is not None: self.pf = pf
         if source is None: source = self.pf.h.all_data()
@@ -101,7 +100,7 @@
                                       " not yet supported")
         if self.bricks is not None and source is None: return
         bricks = []
-        self._preload(self.source._grids, self.fields, self.pf.h.io)
+        self.comm.preload(self.source._grids, self.fields, self.pf.h.io)
         pbar = get_pbar("Partitioning ", len(self.source._grids))
         for i, g in enumerate(self.source._grids):
             pbar.update(i)
@@ -201,176 +200,6 @@
     def reset_cast(self):
         pass
 
-class HomogenizedBrickCollection(DistributedObjectCollection):
-    def __init__(self, source):
-        # The idea here is that we have two sources -- the global_domain
-        # source, which would be a decomposition of the 3D domain, and a
-        # local_domain source, which is the set of bricks we want at the end.
-        self.source = source
-        self.pf = source.pf
-
-    @classmethod
-    def load_bricks(self, base_filename):
-        pass
-
-    def write_my_bricks(self, base_filename):
-        pass
-
-    def store_bricks(self, base_filename):
-        pass
-    
-    @parallel_root_only
-    def write_hierarchy(self, base_filename):
-        pass
-    
-    def _partition_grid(self, grid, fields, log_field = None):
-        fields = ensure_list(fields)
-        if log_field is None: log_field = [True] * len(fields)
-
-        # This is not super efficient, as it re-fills the regions once for each
-        # field.
-        vcds = []
-        for i,field in enumerate(fields):
-            vcd = grid.get_vertex_centered_data(field).astype('float64')
-            if log_field[i]: vcd = na.log10(vcd)
-            vcds.append(vcd)
-
-        GF = GridFaces(grid.Children + [grid])
-        PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
-
-        pgs = []
-        for P in PP.sweep(0):
-            sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
-            if len(sl) == 0: continue
-            dd = [d[sl[0][0]:sl[0][1]+1,
-                    sl[1][0]:sl[1][1]+1,
-                    sl[2][0]:sl[2][1]+1].copy() for d in vcds]
-            pgs.append(PartitionedGrid(grid.id, len(fields), dd,
-                        P.LeftEdge, P.RightEdge, sl[-1]))
-        return pgs
-
-    def _partition_local_grids(self, fields = "Density", log_field = None):
-        fields = ensure_list(fields)
-        bricks = []
-        # We preload.
-        # UNCOMMENT FOR PARALLELISM
-        #grid_list = list(self._get_grid_objs())
-        grid_list = list(self.source._grids)
-        self._preload(grid_list, fields, self.pf.h.io)
-        pbar = get_pbar("Partitioning ", len(grid_list))
-        # UNCOMMENT FOR PARALLELISM
-        #for i, g in enumerate(self._get_grids()):
-        print "THIS MANY GRIDS!", len(grid_list)
-        for i, g in enumerate(self.source._grids):
-            pbar.update(i)
-            bricks += self._partition_grid(g, fields, log_field)
-        pbar.finish()
-        bricks = na.array(bricks, dtype='object')
-        NB = len(bricks)
-        # Now we set up our (local for now) hierarchy.  Note that to calculate
-        # intersection, we only need to do the left edge & right edge.
-        #
-        # We're going to double up a little bit here in memory.
-        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
-        self.brick_parents = na.zeros( NB, dtype='int64')
-        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
-        self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
-        self._object_owners = self.brick_owners
-        for i,b in enumerate(bricks):
-            self.brick_left_edges[i,:] = b.LeftEdge
-            self.brick_right_edges[i,:] = b.RightEdge
-            self.brick_parents[i] = b.parent_grid_id
-            self.brick_dimensions[i,:] = b.my_data[0].shape
-        # Vertex-centered means we subtract one from the shape
-        self.brick_dimensions -= 1
-        self.bricks = na.array(bricks, dtype='object')
-        # UNCOMMENT FOR PARALLELISM
-        #self.join_lists()
-
-    def _get_object_info(self):
-        # We transpose here for the catdict operation
-        info_dict = dict(left_edges = self.brick_left_edges.transpose(),
-                         right_edges = self.brick_right_edges.transpose(),
-                         parents = self.brick_parents,
-                         owners = self.brick_owners,
-                         dimensions = self.brick_dimensions.transpose(),)
-        return info_dict
-
-    def _set_object_info(self, info_dict):
-        self.brick_left_edges = info_dict.pop("left_edges").transpose()
-        self.brick_right_edges = info_dict.pop("right_edges").transpose()
-        self.brick_parents = info_dict.pop("parents")
-        self.brick_dimensions = info_dict.pop("dimensions").transpose()
-        self.brick_owners = info_dict.pop("owners")
-        self._object_owners = self.brick_owners
-        bricks = self.bricks
-        self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
-        # Copy our bricks back in
-        self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
-
-    def _create_buffer(self, ind_list):
-        # Note that we have vertex-centered data, so we add one before taking
-        # the prod and the sum
-        total_size = (self.brick_dimensions[ind_list,:] + 1).prod(axis=1).sum()
-        mylog.debug("Creating buffer for %s bricks (%s)",
-                    len(ind_list), total_size)
-        my_buffer = na.zeros(total_size, dtype='float64')
-        return my_buffer
-
-    def _pack_buffer(self, ind_list, my_buffer):
-        si = 0
-        for index in ind_list:
-            d = self.bricks[index].my_data.ravel()
-            my_buffer[si:si+d.size] = d[:]
-            si += d.size
-
-    def _unpack_buffer(self, ind_list, my_buffer):
-        si = 0
-        for index in ind_list:
-            pgi = self.brick_parents[index]
-            LE = self.brick_left_edges[index,:].copy()
-            RE = self.brick_right_edges[index,:].copy()
-            dims = self.brick_dimensions[index,:].copy()
-            size = (dims + 1).prod()
-            data = my_buffer[si:si+size].reshape(dims + 1)
-            self.bricks[index] = PartitionedGrid(
-                    pgi, data, LE, RE, dims)
-            si += size
-
-    def _wipe_objects(self, indices):
-        self.bricks[indices] = None
-
-    def _collect_bricks(self, intersection_source):
-        if not self._distributed: return
-        # This entire routine should instead be set up to do:
-        #   alltoall broadcast of the *number* of requested bricks
-        #   non-blocking receives posted for int arrays
-        #   sizes of data calculated
-        #   concatenated data receives posted
-        #   send all data
-        #   get bricks back
-        # This presupposes that we are using the AMRInclinedBox as a data
-        # source.  If we're not, we ought to be.
-        needed_brick_i = find_grids_in_inclined_box(
-            intersection_source.box_vectors, intersection_source.center,
-            self.brick_left_edges, self.brick_right_edges)
-        needed_brick_i = na.where(needed_brick_i)[0]
-        self._collect_objects(needed_brick_i)
-
-    def _initialize_parallel(self):
-        pass
-
-    def _finalize_parallel(self):
-        pass
-
-    def get_brick(self, brick_id):
-        pass
-
-    @property
-    def _grids(self):
-        return self.source._grids
-
 class GridFaces(object):
     def __init__(self, grids):
         self.faces = [ [], [], [] ]
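
The bookkeeping arrays in the removed collection (brick_left_edges, brick_right_edges, brick_dimensions) exist so that intersection queries become vectorized edge comparisons. For an axis-aligned box the test reduces to the sketch below; the removed _collect_bricks handled the harder inclined-box case via find_grids_in_inclined_box.

    import numpy as np

    def bricks_overlapping_box(brick_LE, brick_RE, box_LE, box_RE):
        """Indices of bricks whose (N, 3) bounding boxes overlap the given box."""
        overlap = np.all((brick_LE < box_RE) & (brick_RE > box_LE), axis=1)
        return np.where(overlap)[0]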


--- a/yt/visualization/volume_rendering/software_sampler.py	Thu Oct 20 08:17:55 2011 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-"""
-Import the components of the volume rendering extension
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2009 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import h5py
-import numpy as na
-
-from yt.funcs import *
-
-from yt.data_objects.data_containers import data_object_registry
-from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-from yt.visualization.volume_rendering.grid_partitioner import \
-    HomogenizedBrickCollection
-
-# We're going to register this class, but it does not directly inherit from
-# AMRData.
-class VolumeRendering(ParallelAnalysisInterface):
-    bricks = None
-    def __init__(self, normal_vector, width, center,
-                 resolution, transfer_function,
-                 fields = None, whole_box = False,
-                 sub_samples = 5, north_vector = None,
-                 pf = None):
-        # Now we replicate some of the 'cutting plane' logic
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        self.resolution = resolution
-        self.sub_samples = sub_samples
-        if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
-        self.width = width
-        self.center = center
-        if fields is None: fields = ["Density"]
-        self.fields = fields
-        self.transfer_function = transfer_function
-
-        # Now we set up our  various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.unit_vectors = [north_vector, east_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = center - 0.5*width[0]*self.unit_vectors[0] \
-                             - 0.5*width[1]*self.unit_vectors[1] \
-                             - 0.5*width[2]*self.unit_vectors[2]
-        self.back_center = center - 0.5*width[0]*self.unit_vectors[2]
-        self.front_center = center + 0.5*width[0]*self.unit_vectors[2]
-
-        self._initialize_source()
-        self._construct_vector_array()
-
-    def _initialize_source(self):
-        check, source, rf = self._partition_hierarchy_2d_inclined(
-                self.unit_vectors, self.origin, self.width, self.box_vectors)
-        if check:
-            self._base_source = self.pf.h.inclined_box(
-                self.origin, self.box_vectors)
-        else:
-            # To avoid doubling-up
-            self._base_source = source
-        self.source = source
-        self.res_fac = rf
-        # Note that if we want to do this in parallel, with 3D domain decomp
-        # for the grid/bricks, we can supply self._base_source here.  But,
-        # _distributed can't be overridden in that case.
-        self._brick_collection = HomogenizedBrickCollection(self.source)
-
-    def ray_cast(self, finalize=True):
-        if self.bricks is None: self.partition_grids()
-        # Now we order our bricks
-        total_cells, LE, RE = 0, [], []
-        for b in self.bricks:
-            LE.append(b.LeftEdge)
-            RE.append(b.RightEdge)
-            total_cells += na.prod(b.my_data[0].shape)
-        LE = na.array(LE) - self.back_center
-        RE = na.array(RE) - self.back_center
-        LE = na.sum(LE * self.unit_vectors[2], axis=1)
-        RE = na.sum(RE * self.unit_vectors[2], axis=1)
-        dist = na.minimum(LE, RE)
-        ind = na.argsort(dist)
-        pbar = get_pbar("Ray casting ", total_cells)
-        total_cells = 0
-        tfp = TransferFunctionProxy(self.transfer_function)
-        tfp.ns = self.sub_samples
-        for i, b in enumerate(self.bricks[ind]):
-            pos = b.cast_plane(tfp, self.vector_plane)
-            total_cells += na.prod(b.my_data[0].shape)
-            pbar.update(total_cells)
-        pbar.finish()
-        if finalize: self._finalize()
-
-    def _finalize(self):
-        #im = self._mpi_catdict(dict(image=self.image)).pop('image')
-        im, f = self._mpi_catrgb((self.image, self.resolution))
-        self.image = im
-
-    def dump_image(self, prefix):
-        fn = "%s.h5" % (self._get_filename(prefix))
-        mylog.info("Saving to %s", fn)
-        f = h5py.File(fn, "w")
-        f.create_dataset("/image", data=self.image)
-
-    def load_bricks(self, fn):
-        self.bricks = import_partitioned_grids(fn)
-
-    def save_bricks(self, fn):
-        # This will need to be modified for parallel
-        export_partitioned_grids(self.bricks, fn)
-
-    def save_image(self, prefix = None, norm = 1.0):
-        if norm is not None:
-            mi, ma = self.image.min(), norm*self.image.max()
-            print "Normalizing with ", mi, ma
-            image = (na.clip(self.image, mi, ma) - mi)/(ma - mi)
-        else:
-            image = self.image
-        if prefix is None: prefix = "%s_volume_rendering" % (self.pf)
-        plot_rgb(image, prefix)
-
-    def partition_grids(self):
-        log_field = []
-        for field in self.fields:
-            log_field.append(field in self.pf.field_info and 
-                             self.pf.field_info[field].take_log)
-        self._brick_collection._partition_local_grids(self.fields, log_field)
-        # UNCOMMENT FOR PARALLELISM
-        #self._brick_collection._collect_bricks(self.source)
-        self.bricks = self._brick_collection.bricks
-
-    def _construct_vector_array(self):
-        rx = self.resolution[0] * self.res_fac[0]
-        ry = self.resolution[1] * self.res_fac[1]
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        self.image = na.zeros((rx,ry,3), dtype='float64', order='C')
-        # We might have a different width and back_center
-        bl = self.source.box_lengths
-        px = na.linspace(-bl[0]/2.0, bl[0]/2.0, rx)[:,None]
-        py = na.linspace(-bl[1]/2.0, bl[1]/2.0, ry)[None,:]
-        inv_mat = self.source._inv_mat
-        bc = self.source.origin + 0.5*self.source.box_vectors[0] \
-                                + 0.5*self.source.box_vectors[1]
-        vectors = na.zeros((rx, ry, 3),
-                            dtype='float64', order='C')
-        vectors[:,:,0] = inv_mat[0,0]*px + inv_mat[0,1]*py + bc[0]
-        vectors[:,:,1] = inv_mat[1,0]*px + inv_mat[1,1]*py + bc[1]
-        vectors[:,:,2] = inv_mat[2,0]*px + inv_mat[2,1]*py + bc[2]
-        bounds = (px.min(), px.max(), py.min(), py.max())
-        self.vector_plane = VectorPlane(vectors, self.box_vectors[2],
-                                    bc, bounds, self.image,
-                                    self.source._x_vec, self.source._y_vec)
-        self.vp_bounds = bounds
-        self.vectors = vectors
-
-data_object_registry["volume_rendering"] = VolumeRendering
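
The deleted VolumeRendering.__init__ builds an orthonormal (north, east, normal) viewing basis from the view normal with a pair of cross products. The same construction as a standalone helper (illustrative only, with a slightly different but standard choice of seed axis when no north vector is supplied):

    import numpy as np

    def camera_basis(normal, north=None):
        normal = np.asarray(normal, dtype="float64")
        normal = normal / np.linalg.norm(normal)
        if north is None:
            # Seed with the coordinate axis least aligned with the normal.
            axes = np.identity(3)
            seed = axes[np.argmin(np.abs(np.dot(axes, normal)))]
            north = np.cross(seed, normal)
        north = np.asarray(north, dtype="float64")
        north = north / np.linalg.norm(north)
        east = -np.cross(north, normal)
        east = east / np.linalg.norm(east)
        return north, east, normal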

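ray_cast() in the deleted file orders the bricks by projecting their corner positions, measured from the back of the viewing box, onto the view axis and compositing in increasing distance. That ordering, isolated (the inputs here are made up; in the original they come from the partitioned bricks and unit_vectors[2]):

    import numpy as np

    def brick_order(left_edges, right_edges, back_center, view_axis):
        d_le = np.sum((left_edges - back_center) * view_axis, axis=1)
        d_re = np.sum((right_edges - back_center) * view_axis, axis=1)
        return np.argsort(np.minimum(d_le, d_re))   # nearest-to-back first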

http://bitbucket.org/yt_analysis/yt/changeset/cbc39cb56384/
changeset:   cbc39cb56384
branch:      yt
user:        brittonsmith
date:        2011-10-20 15:19:44
summary:     Merged.
affected #:  0 files (-1 bytes)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


