[Yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Oct 25 09:53:16 PDT 2011


2 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/e69392231b08/
changeset:   e69392231b08
branch:      yt
user:        sskory
date:        2011-10-25 17:41:51
summary:     Fixes to parallelHF after the parallel tools refactoring. The bugs
only revealed themselves in situations where not all tasks have meaningful
data in several of the communication steps.
affected #:  2 files

diff -r 8c2c87d59bdf8d0c4b0c9302ee63883fd7f1d871 -r e69392231b08b34023c6489ad7d9a589f26113e3 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -840,7 +840,7 @@
         """
         yt_counters("connect_chains_across_tasks")
         # Remote (lower dens) chain -> local (higher) chain.
-        chainID_translate_map_local = na.arange(self.nchains)
+        chainID_translate_map_local = na.arange(self.nchains, dtype='int64')
         # Build the stuff to send.
         self.uphill_real_indices = na.concatenate((
             self.index, self.index_pad))[self.padded_particles]
@@ -891,7 +891,8 @@
         # it. Therefore each key (a chain) in this dict is unique, but the items
         # the keys point to are not necessarily unique.
         chainID_translate_map_global = \
-            self.comm.mpi_allreduce(chainID_translate_map_local, op='min')
+            self.comm.mpi_allreduce(chainID_translate_map_local, op='min',
+                                    dtype='int64')
         # Loop over chains, smallest to largest density, recursively until
         # we reach a self-assigned chain. Then we assign that final chainID to
         # the *current* one only.
@@ -1102,7 +1103,6 @@
         top_keys = self.comm.par_combine_object(top_keys, datatype='array', op='cat')
         bot_keys = self.comm.par_combine_object(bot_keys, datatype='array', op='cat')
         vals     = self.comm.par_combine_object(vals, datatype='array', op='cat')
-
         return (top_keys, bot_keys, vals)
 
     def _build_groups(self):
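
A minimal standalone sketch of the constraint behind the dtype changes
above, assuming mpi4py semantics (yt's comm wrapper is bypassed here, and
the variable names are illustrative): every task must hand the reduction a
buffer of the same, agreed-upon dtype, and na.arange without an explicit
dtype defaults to the platform integer, which can vary across builds.

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    nchains = 8  # assume every task agrees on the map length

    # Without an explicit dtype, np.arange may yield int32 on some
    # platforms, disagreeing with the int64 buffers used elsewhere.
    local_map = np.arange(nchains, dtype='int64')
    global_map = np.empty_like(local_map)
    comm.Allreduce(local_map, global_map, op=MPI.MIN)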


diff -r 8c2c87d59bdf8d0c4b0c9302ee63883fd7f1d871 -r e69392231b08b34023c6489ad7d9a589f26113e3 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -462,7 +462,10 @@
             if data is None:
                 ncols = -1
                 size = 0
+                dtype = 'float64'
+                mylog.warning('Array passed to par_combine_object was None. Setting dtype to float64. This may break things!')
             else:
+                dtype = data.dtype
                 if len(data) == 0:
                     ncols = -1
                     size = 0
@@ -472,8 +475,8 @@
                 else:
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
-            if size == 0:
-                data = na.zeros((ncols,0), dtype='float64') # This only works for
+            if ncols == 0:
+                data = na.zeros(0, dtype=dtype) # This only works for
             size = data.shape[-1]
             sizes = na.zeros(self.comm.size, dtype='int64')
             outsize = na.array(size, dtype='int64')
@@ -485,6 +488,8 @@
             offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
             arr_size = self.comm.allreduce(size, op=MPI.SUM)
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
+            # We want this to behave like an actual concatenate so we'll
+            # eliminate extra zeros that get added.
             return data
         elif datatype == "list" and op == "cat":
             if self.comm.rank == 0:



https://bitbucket.org/yt_analysis/yt/changeset/a747f285f152/
changeset:   a747f285f152
branch:      yt
user:        sskory
date:        2011-10-25 17:42:10
summary:     Merge.
affected #:  2 files

diff -r e69392231b08b34023c6489ad7d9a589f26113e3 -r a747f285f152e8394d332fbd72ebe15b88fe1d10 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -77,8 +77,11 @@
         pbar.update(gi+1)
         cm = data_source._get_cut_mask(grid)
         if cm is True: cm = na.ones(grid.ActiveDimensions, dtype='bool')
+        old_field_parameters = grid.field_parameters
+        grid.field_parameters = data_source.field_parameters
         local_ind = na.where( (grid[field] > min_val)
                             & (grid[field] < max_val) & cm )
+        grid.field_parameters = old_field_parameters
         if local_ind[0].size == 0: continue
         kk = na.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
         grid["tempContours"] = na.ones(grid.ActiveDimensions, dtype='int64') * -1
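
The swap above temporarily gives the grid the data source's field
parameters so the cut fields evaluate in the right context, then restores
the originals. A hypothetical, more defensive version of the same pattern
as a context manager (a sketch, not what the commit does):

    from contextlib import contextmanager

    @contextmanager
    def borrowed_field_parameters(grid, source):
        # Swap in the source's field parameters, restoring the originals
        # on exit even if the body raises.
        old = grid.field_parameters
        grid.field_parameters = source.field_parameters
        try:
            yield grid
        finally:
            grid.field_parameters = old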


diff -r e69392231b08b34023c6489ad7d9a589f26113e3 -r a747f285f152e8394d332fbd72ebe15b88fe1d10 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -275,10 +275,11 @@
     return func
 
 class Workgroup(object):
-    def __init__(self, size, ranks, comm):
+    def __init__(self, size, ranks, comm, name):
         self.size = size
         self.ranks = ranks
         self.comm = comm
+        self.name = name
 
 class ProcessorPool(object):
     comm = None
@@ -293,7 +294,7 @@
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
     
-    def add_workgroup(self, size=None, ranks=None):
+    def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
             size = len(self.available_ranks)
         if len(self.available_ranks) < size:
@@ -301,12 +302,16 @@
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
-        
+
+        # Default the name to the workgroup number.
+        if name is None:
+            name = str(len(self.workgroups))
+
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
             communication_system.communicators.append(Communicator(new_comm))
-        self.workgroups.append(Workgroup(len(ranks), ranks, new_comm))
+        self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
         for i in workgroup.ranks:
@@ -741,8 +746,11 @@
     _grids = None
     _distributed = None
 
-    def __init__(self):
-        self.comm = communication_system.communicators[-1]
+    def __init__(self, comm=None):
+        if comm is None:
+            self.comm = communication_system.communicators[-1]
+        else:
+            self.comm = comm
         self._grids = self.comm._grids
         self._distributed = self.comm._distributed
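
With the name keyword in place, workgroups can be labeled instead of
tracked by position. A hypothetical usage sketch (assumes an MPI-enabled
yt session; the workgroup names are illustrative):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    pool = ProcessorPool()
    pool.add_workgroup(size=2, name="halo_finding")
    pool.add_workgroup(name="analysis")  # takes the remaining ranks
    for wg in pool.workgroups:
        print wg.name, wg.ranks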

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving it
because the commit notification service is enabled for this repository
and you are the addressed recipient.


