[Yt-svn] yt-commit r686 - in branches/parallel_profiles/yt: . lagos

mturk at wrangler.dreamhost.com
Sat Jul 19 00:11:22 PDT 2008


Author: mturk
Date: Sat Jul 19 00:11:21 2008
New Revision: 686
URL: http://yt.spacepope.org/changeset/686

Log:
Profiles in parallel "work," but I have not verified correctness.  Additionally,
due to IO issues, the speedup is very small; we'll need #76 before this is a
substantial improvement.
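
For context, the combine step this commit adds follows the usual mpi4py buffer
pattern: each processor bins its own grids into a local histogram, then the
histograms are summed across MPI.COMM_WORLD.  A minimal sketch, assuming numpy
float64 arrays of identical shape on every rank (the names below are
illustrative, not the actual Profiles.py attributes):

    import numpy as np
    from mpi4py import MPI

    local_hist = np.zeros(64, dtype='float64')   # filled by the local binning loop
    global_hist = np.zeros_like(local_hist)      # receive buffer for the reduced sum

    # The uppercase Allreduce fills the receive buffer in place and returns
    # None, which is why the "temp = ..." assignments are dropped in the diff.
    MPI.COMM_WORLD.Allreduce(local_hist, global_hist, op=MPI.SUM)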



Modified:
   branches/parallel_profiles/yt/lagos/Profiles.py
   branches/parallel_profiles/yt/parallel_tools.py

Modified: branches/parallel_profiles/yt/lagos/Profiles.py
==============================================================================
--- branches/parallel_profiles/yt/lagos/Profiles.py	(original)
+++ branches/parallel_profiles/yt/lagos/Profiles.py	Sat Jul 19 00:11:21 2008
@@ -56,6 +56,7 @@
         self._lazy_reader = lazy_reader
 
     def _lazy_add_fields(self, fields, weight, accumulation):
+        self._ngrids = 0
         self.__data = {}         # final results will go here
         self.__weight_data = {}  # we need to track the weights as we go
         for field in fields:
@@ -64,6 +65,7 @@
         self.__used = self._get_empty_field().astype('bool')
         pbar = get_pbar('Binning grids', len(self._data_source._grids))
         for gi,grid in enumerate(self._get_grids()):
+            self._ngrids += 1
             pbar.update(gi)
             args = self._get_bins(grid, check_cut=True)
             if not args: # No bins returned for this grid, so forget it!
@@ -90,13 +92,14 @@
         from mpi4py import MPI # I am displeased by importing here
         temp = self._get_empty_field()
         for key in self.__data:
-            temp = MPI.COMM_WORLD.Allreduce(self.__data[key], temp, MPI.SUM)
+            MPI.COMM_WORLD.Allreduce(self.__data[key], temp, MPI.SUM)
             self.__data[key] += temp
         for key in self.__weight_data:
-            temp = MPI.COMM_WORLD.Allreduce(self.__weight_data[key], temp, MPI.SUM)
+            MPI.COMM_WORLD.Allreduce(self.__weight_data[key], temp, MPI.SUM)
             self.__weight_data[key] += temp
-        temp = self.__used.copy()
-        self.__used = MPI.COMM_WORLD.Allreduce(self.__used, temp, MPI.LOR)
+        temp = self.__used.copy().astype('int32')
+        MPI.COMM_WORLD.Allreduce(self.__used.astype('int32'), temp, MPI.SUM)
+        self.__used = (temp > 0)
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:
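
The change to the __used mask replaces the MPI.LOR reduction with an integer
sum followed by a threshold, which amounts to a logical OR across processors.
A minimal sketch of the same trick, assuming a small numpy bool mask (names
are illustrative):

    import numpy as np
    from mpi4py import MPI

    used = np.array([True, False, False, True])   # local "bin was touched" mask
    counts = np.zeros(used.shape, dtype='int32')  # receive buffer

    # Summing int32 flags across ranks and thresholding is equivalent to a
    # logical OR, without reducing bool buffers directly.
    MPI.COMM_WORLD.Allreduce(used.astype('int32'), counts, op=MPI.SUM)
    used = counts > 0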

Modified: branches/parallel_profiles/yt/parallel_tools.py
==============================================================================
--- branches/parallel_profiles/yt/parallel_tools.py	(original)
+++ branches/parallel_profiles/yt/parallel_tools.py	Sat Jul 19 00:11:21 2008
@@ -24,21 +24,26 @@
 """
 
 import itertools
+from yt.config import ytcfg
 from yt.arraytypes import *
 
 try:
     from mpi4py import MPI
     parallel_capable = True
+    print "MPI %s / %s" % (MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
 except ImportError:
     parallel_capable = False
 
+print "PARALLEL COMPATIBLE:", parallel_capable
+
 class GridIterator(object):
     def __init__(self, pobj):
         self.pobj = pobj
         if hasattr(pobj, '_grids') and pobj._grids is not None:
-            self._grids = pobj._grids
+            gs = pobj._grids
         else:
-            self._grids = pobj._data_source._grids
+            gs = pobj._data_source._grids
+        self._grids = sorted(gs, key = lambda g: g.filename)
         self.ng = len(self._grids)
 
     def __iter__(self):
@@ -73,7 +78,7 @@
 
     def next(self):
         if self.pos < len(self.my_grid_ids):
-            gid = self.my_grids_ids[self.pos]
+            gid = self.my_grid_ids[self.pos]
             self.pos += 1
             return self._grids[gid]
         self.pobj._finalize_parallel()
@@ -84,7 +89,7 @@
 
     def _get_grids(self):
         if parallel_capable and \
-           ytcfg.get_boolean("yt","parallel"):
+           ytcfg.getboolean("yt","parallel"):
             return ParallelGridIterator(self)
         return GridIterator(self)
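
The ParallelGridIterator walks only the grid indices in my_grid_ids; this diff
does not show how those indices are assigned, so the round-robin split below
is only an illustration of one way the filename-sorted grid list could be
divided among ranks (an assumed scheme, not necessarily what the branch does):

    from mpi4py import MPI

    def assign_grid_ids(n_grids, rank=None, size=None):
        # Each rank takes every size-th index from the sorted grid list.
        if rank is None: rank = MPI.COMM_WORLD.rank
        if size is None: size = MPI.COMM_WORLD.size
        return range(rank, n_grids, size)

    # With 10 grids on 4 processors, rank 1 would iterate over grids 1, 5 and 9.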
 


