[Yt-svn] yt: 2 new changesets

hg at spacepope.org
Mon Sep 27 11:42:37 PDT 2010


hg Repository: yt
details:   yt/rev/12c7358da86a
changeset: 3417:12c7358da86a
user:      Britton Smith <brittonsmith at gmail.com>
date:      Mon Sep 27 14:41:52 2010 -0400
description:
Added 1D and 2D field container imports.

hg Repository: yt
details:   yt/rev/4da7902d96a1
changeset: 3418:4da7902d96a1
user:      Britton Smith <brittonsmith at gmail.com>
date:      Mon Sep 27 14:42:31 2010 -0400
description:
Merged.

diffstat:

 yt/analysis_modules/list_modules.py                        |  24 ++++++++++++
 yt/data_objects/data_containers.py                         |  24 ++++++-----
 yt/data_objects/derived_quantities.py                      |  30 ++++++++++++--
 yt/data_objects/field_info_container.py                    |   1 +
 yt/frontends/enzo/data_structures.py                       |   3 +-
 yt/mods.py                                                 |   2 +-
 yt/utilities/parallel_tools/parallel_analysis_interface.py |  36 ++++++++++-------
 7 files changed, 87 insertions(+), 33 deletions(-)

diffs (240 lines):

diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/analysis_modules/list_modules.py
--- a/yt/analysis_modules/list_modules.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/analysis_modules/list_modules.py	Mon Sep 27 14:42:31 2010 -0400
@@ -24,6 +24,7 @@
 """
 
 import os
+import sys
 
 def get_available_modules():
     modpath = os.path.abspath(os.path.dirname(__file__))
@@ -32,3 +33,26 @@
         if os.path.isdir(d) and os.path.isfile(os.path.join(d, "api.py")):
             available_modules.append(os.path.basename(d))
     return available_modules
+
+class AnalysisModuleLoader(object):
+
+    @property
+    def available_modules(self):
+        return get_available_modules()
+
+    def __getattr__(self, attr):
+        try:
+            name = "yt.analysis_modules.%s.api" % (attr)
+            nm = __import__(name, level=-1)
+            setattr(self, attr, sys.modules[name])
+        except ImportError:
+            raise AttributeError(attr)
+        return getattr(self, attr)
+
+    def __dir__(self):
+        # This is a badly behaving object.  I was unable to get this line:
+        #return super(AnalysisModuleLoader, self).__dir__() + self.available_modules
+        # to work, so we simply return only the methods we know about.
+        return ["available_modules"] + self.available_modules
+
+amods = AnalysisModuleLoader()
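
The AnalysisModuleLoader above defers importing each analysis module's api until the attribute is first accessed, then caches the imported module on the instance so later lookups bypass __getattr__ entirely. A minimal standalone sketch of the same lazy-import pattern (the base package name and the halo_finding attribute below are placeholders for illustration, not part of this changeset):

    import sys

    class LazyLoader(object):
        """Import '<base>.<attr>.api' on first access and cache the module."""

        def __init__(self, base):
            self.base = base

        def __getattr__(self, attr):
            # Only reached when 'attr' is not yet an instance attribute, so
            # after the first successful import this method is skipped.
            name = "%s.%s.api" % (self.base, attr)
            try:
                __import__(name)
            except ImportError:
                raise AttributeError(attr)
            module = sys.modules[name]
            setattr(self, attr, module)  # cache for subsequent lookups
            return module

    # Hypothetical usage, mirroring 'amods' above:
    #   amods = LazyLoader("yt.analysis_modules")
    #   amods.halo_finding  # triggers the import on first use only
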
diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/data_objects/data_containers.py	Mon Sep 27 14:42:31 2010 -0400
@@ -764,20 +764,22 @@
             points.append(self._generate_grid_coords(grid))
         if len(points) == 0: points = None
         else: points = na.concatenate(points)
-        t = self._mpi_catarray(points)
-        self['px'] = t[:,0]
-        self['py'] = t[:,1]
-        self['pz'] = t[:,2]
-        self['pdx'] = t[:,3]
-        self['pdy'] = t[:,4]
-        self['pdz'] = t[:,3] # Does not matter!
+        # We have to transpose here so that _mpi_catarray works properly, as
+        # it and the alltoall assume the long axis is the last one.
+        t = self._mpi_catarray(points.transpose())
+        self['px'] = t[0,:]
+        self['py'] = t[1,:]
+        self['pz'] = t[2,:]
+        self['pdx'] = t[3,:]
+        self['pdy'] = t[4,:]
+        self['pdz'] = t[3,:] # Does not matter!
 
         # Now we set the *actual* coordinates
-        self[axis_names[x_dict[self.axis]]] = t[:,0]
-        self[axis_names[y_dict[self.axis]]] = t[:,1]
-        self[axis_names[self.axis]] = t[:,2]
+        self[axis_names[x_dict[self.axis]]] = t[0,:]
+        self[axis_names[y_dict[self.axis]]] = t[1,:]
+        self[axis_names[self.axis]] = t[2,:]
 
-        self.ActiveDimensions = (t.shape[0], 1, 1)
+        self.ActiveDimensions = (t.shape[1], 1, 1)
 
     def _get_list_of_grids(self):
         goodI = ((self.pf.h.grid_right_edge[:,self.axis] > self.coord)
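
The slice indexing above changes because the per-grid coordinate blocks, each of shape (number of points, number of fields), are now transposed before the gather so that the variable-length axis ends up last, which is the layout _mpi_catarray and the underlying alltoall expect. A small single-process numpy illustration of that layout (the shapes and the five-field layout are illustrative only):

    import numpy as np

    # Two grids contributing 2 and 4 points; columns stand in for
    # px, py, pz, pdx, pdy.
    grid_a = np.arange(10.0).reshape(2, 5)
    grid_b = np.arange(20.0).reshape(4, 5)

    points = np.concatenate([grid_a, grid_b])  # shape (6, 5): long axis first
    t = points.transpose()                     # shape (5, 6): fields first,
                                               # points along the *last* axis
    px, py, pz = t[0, :], t[1, :], t[2, :]     # one row per field
    assert px.shape == (6,)
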
diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/data_objects/derived_quantities.py	Mon Sep 27 14:42:31 2010 -0400
@@ -29,11 +29,12 @@
 
 from yt.funcs import *
 
+from yt.config import ytcfg
+from yt.data_objects.field_info_container import \
+    FieldDetector
 from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
-from yt.funcs import \
-    get_pbar, wraps
 
 __CUDA_BLOCK_SIZE = 256
 
@@ -43,10 +44,21 @@
     def __init__(self, grid, data_source):
         self.grid = grid
         self.data_source = data_source
+        # We have a local cache so that *within* a call to the DerivedQuantity
+        # function, we only read each field once.  Otherwise, when preloading
+        # the field will be popped and removed and lost if the underlying data
+        # source's _get_data_from_grid method is wrapped by restore_state.
+        # This is common.  So, if data[something] is accessed multiple times by
+        # a single quantity, the second time will re-read the data the slow
+        # way.
+        self.local_cache = {}
     def __getattr__(self, attr):
         return getattr(self.grid, attr)
     def __getitem__(self, item):
-        return self.data_source._get_data_from_grid(self.grid, item)
+        if item not in self.local_cache:
+            data = self.data_source._get_data_from_grid(self.grid, item)
+            self.local_cache[item] = data
+        return self.local_cache[item]
 
 class DerivedQuantity(ParallelAnalysisInterface):
     def __init__(self, collection, name, function,
@@ -64,7 +76,7 @@
 
     def __call__(self, *args, **kwargs):
         lazy_reader = kwargs.pop('lazy_reader', True)
-        preload = kwargs.pop('preload', False)
+        preload = kwargs.pop('preload', ytcfg.getboolean("yt","__parallel"))
         if preload:
             if not lazy_reader: mylog.debug("Turning on lazy_reader because of preload")
             lazy_reader = True
@@ -89,7 +101,15 @@
         return self.c_func(self._data_source, *self.retvals)
 
     def _finalize_parallel(self):
-        self.retvals = [na.array(self._mpi_catlist(my_list)) for my_list in self.retvals]
+        # Note that we do some fancy footwork here.
+        # _mpi_catarray and its affiliated alltoall function
+        # assume that the *long* axis is the last one.  However,
+        # our long axis is the first one!
+        rv = []
+        for my_list in self.retvals:
+            data = na.array(my_list).transpose()
+            rv.append(self._mpi_catarray(data).transpose())
+        self.retvals = rv
         
     def _call_func_unlazy(self, args, kwargs):
         retval = self.func(self._data_source, *args, **kwargs)
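
The _finalize_parallel change applies the same long-axis-last convention: the per-processor return values are naturally long-axis-first, so each array is transposed before the gather and transposed back afterwards. A single-process sketch of that round trip, with a plain concatenation standing in for the MPI gather (the fake_mpi_catarray helper below is invented for illustration):

    import numpy as np

    def fake_mpi_catarray(chunks):
        # Stand-in for _mpi_catarray: like the real gather, it concatenates
        # along the *last* axis.
        return np.concatenate(chunks, axis=-1)

    # Partial results from two "processors", long axis (values) first.
    proc0 = np.arange(6.0).reshape(3, 2)   # 3 values x 2 return fields
    proc1 = np.arange(2.0).reshape(1, 2)   # 1 value  x 2 return fields

    gathered = fake_mpi_catarray([proc0.transpose(), proc1.transpose()])
    combined = gathered.transpose()        # back to values-first
    assert combined.shape == (4, 2)
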
diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/data_objects/field_info_container.py	Mon Sep 27 14:42:31 2010 -0400
@@ -142,6 +142,7 @@
     Level = 1
     NumberOfParticles = 1
     _read_exception = None
+    _id_offset = 0
     def __init__(self, nd = 16, pf = None):
         self.nd = nd
         self.ActiveDimensions = [nd,nd,nd]
diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/frontends/enzo/data_structures.py	Mon Sep 27 14:42:31 2010 -0400
@@ -45,7 +45,8 @@
 from yt.utilities.logger import ytLogger as mylog
 
 from .definitions import parameterDict
-from .fields import EnzoFieldContainer, add_enzo_field
+from .fields import EnzoFieldContainer, Enzo1DFieldContainer, \
+    Enzo2DFieldContainer, add_enzo_field
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/mods.py
--- a/yt/mods.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/mods.py	Mon Sep 27 14:42:31 2010 -0400
@@ -65,7 +65,7 @@
     ChomboStaticOutput, ChomboFieldInfo, add_chombo_field
 
 from yt.analysis_modules.list_modules import \
-    get_available_modules
+    get_available_modules, amods
 available_analysis_modules = get_available_modules()
 
 # Import our analysis modules
diff -r 2f3e0a0d802f -r 4da7902d96a1 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Sep 23 22:35:15 2010 -0700
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Mon Sep 27 14:42:31 2010 -0400
@@ -881,9 +881,14 @@
         field_keys = data.keys()
         field_keys.sort()
         size = data[field_keys[0]].shape[-1]
-        # MPI_Scan is an inclusive scan
-        sizes = MPI.COMM_WORLD.alltoall( [size]*MPI.COMM_WORLD.size )
-        offsets = na.add.accumulate([0] + sizes)[:-1]
+        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+        outsize = na.array(size, dtype='int64')
+        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                 [sizes, 1, MPI.LONG] )
+        # This nested concatenate is to get the shapes to work out correctly;
+        # if we just add [0] to sizes, it will broadcast a summation, not a
+        # concatenation.
+        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
         arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
         for key in field_keys:
             dd = data[key]
@@ -1299,14 +1304,17 @@
 
     @parallel_passthrough
     def _mpi_catarray(self, data):
-        self._barrier()
-        if MPI.COMM_WORLD.rank == 0:
-            data = self.__mpi_recvarrays(data)
-        else:
-            _send_array(data, dest=0, tag=0)
-        mylog.debug("Opening MPI Broadcast on %s", MPI.COMM_WORLD.rank)
-        data = _bcast_array(data, root=0)
-        self._barrier()
+        size = data.shape[-1]
+        sizes = na.zeros(MPI.COMM_WORLD.size, dtype='int64')
+        outsize = na.array(size, dtype='int64')
+        MPI.COMM_WORLD.Allgather([outsize, 1, MPI.LONG],
+                                 [sizes, 1, MPI.LONG] )
+        # This nested concatenate is to get the shapes to work out correctly;
+        # if we just add [0] to sizes, it will broadcast a summation, not a
+        # concatenation.
+        offsets = na.add.accumulate(na.concatenate([[0], sizes]))[:-1]
+        arr_size = MPI.COMM_WORLD.allreduce(size, op=MPI.SUM)
+        data = _alltoallv_array(data, arr_size, offsets, sizes)
         return data
 
     @parallel_passthrough
@@ -1522,12 +1530,10 @@
     recv = na.empty(total_size, dtype=send.dtype)
     recv[offset:offset+send.size] = send[:]
     dtr = send.dtype.itemsize / tmp_send.dtype.itemsize # > 1
-    soff = [0] * MPI.COMM_WORLD.size
-    ssize = [tmp_send.size] * MPI.COMM_WORLD.size
     roff = [off * dtr for off in offsets]
     rsize = [siz * dtr for siz in sizes]
     tmp_recv = recv.view(__tocast)
-    MPI.COMM_WORLD.Alltoallv((tmp_send, (ssize, soff), MPI.CHAR),
-                             (tmp_recv, (rsize, roff), MPI.CHAR))
+    MPI.COMM_WORLD.Allgatherv((tmp_send, tmp_send.size, MPI.CHAR),
+                              (tmp_recv, (rsize, roff), MPI.CHAR))
     return recv
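
Both hunks above replace the rank-0 gather/broadcast (and, in _alltoallv_array, the Alltoallv) with a symmetric pattern: every rank Allgathers its local length, computes its offset as the running sum of the preceding lengths, and an Allgatherv then assembles the full array on every rank; the nested concatenate keeps numpy from broadcasting [0] + sizes into an element-wise addition. A minimal mpi4py sketch of that bookkeeping, to be run under mpiexec (variable names are illustrative, not yt API; MPI.LONG is paired with int64 as in the code above, which assumes a 64-bit C long):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # Each rank contributes a 1D chunk of a different length.
    local = np.arange(comm.rank + 2, dtype='float64')
    size = local.shape[-1]

    # Every rank learns every other rank's length.
    outsize = np.array([size], dtype='int64')
    sizes = np.zeros(comm.size, dtype='int64')
    comm.Allgather([outsize, MPI.LONG], [sizes, MPI.LONG])

    # Prepend a zero and take the running sum to get displacements.
    # np.concatenate([[0], sizes]) rather than [0] + sizes, because adding
    # a list to an ndarray broadcasts an addition instead of prepending.
    offsets = np.add.accumulate(np.concatenate([[0], sizes]))[:-1]
    total = int(sizes.sum())

    # Assemble the concatenated array on every rank.
    recv = np.empty(total, dtype='float64')
    comm.Allgatherv([local, MPI.DOUBLE],
                    [recv, (sizes.tolist(), offsets.tolist()), MPI.DOUBLE])
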
     


