[Yt-svn] yt-commit r1188 - in trunk/yt/lagos: . hop

mturk at wrangler.dreamhost.com
Wed Feb 25 09:23:42 PST 2009


Author: mturk
Date: Wed Feb 25 09:23:41 2009
New Revision: 1188
URL: http://yt.spacepope.org/changeset/1188

Log:
Initial pass at writing out halo particle lists.  Particle indices are now
int64 in my tests.  Writing out the particle lists still needs more testing,
and attributes from derived quantities need to be added.
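
A minimal usage sketch of the new interface (the dataset path is a
placeholder, and the convenience import is an assumption):

    from yt.mods import *   # assumption: HaloFinder and EnzoStaticOutput are
                            # exposed here; otherwise import from yt.lagos

    pf = EnzoStaticOutput("DD0010/DD0010")   # hypothetical dataset
    halos = HaloFinder(pf, threshold=160.0)
    halos.write_out("HopAnalysis.out")       # text summary
    halos.write_particle_lists("halos")      # HDF5 particle lists; in parallel,
                                             # one file per processor
                                             # (halos_000.h5, halos_001.h5, ...)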



Modified:
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/ParallelTools.py
   trunk/yt/lagos/UniversalFields.py
   trunk/yt/lagos/hop/SS_HopOutput.py

Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Wed Feb 25 09:23:41 2009
@@ -1271,7 +1271,8 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         if field in self.pf.field_info and self.pf.field_info[field].particle_type:
-            if grid.NumberOfParticles == 0: return na.array([])
+            # empty as int64; it is upcast to float64 once the first real set of data arrives
+            if grid.NumberOfParticles == 0: return na.array([], dtype='int64')
             pointI = self._get_particle_indices(grid)
             if self.pf.field_info[field].vector_field:
                 f = grid[field]
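
The reason the empty array's dtype matters: numpy's concatenation keeps the
result int64 only if the empty placeholder is already int64.  A hedged,
self-contained illustration (index values assumed):

    import numpy as na                          # yt's usual numpy alias
    ids = na.array([10, 42, 7], dtype='int64')  # assumed particle indices
    # An empty float64 array (the old default) upcasts everything to float64:
    print na.concatenate([na.array([]), ids]).dtype                  # float64
    # An empty int64 array (this change) keeps the indices integral:
    print na.concatenate([na.array([], dtype='int64'), ids]).dtype   # int64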

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Wed Feb 25 09:23:41 2009
@@ -83,6 +83,7 @@
                 except self._read_exception, exc:
                     if field in self.pf.field_info:
                         if self.pf.field_info[field].particle_type:
+                            # int64 here, since it gets upcast to float where needed
                             self[field] = na.array([],dtype='int64')
                         elif self.pf.field_info[field].not_in_all:
                             self[field] = na.zeros(self.ActiveDimensions, dtype='float64')

Modified: trunk/yt/lagos/ParallelTools.py
==============================================================================
--- trunk/yt/lagos/ParallelTools.py	(original)
+++ trunk/yt/lagos/ParallelTools.py	Wed Feb 25 09:23:41 2009
@@ -137,7 +137,6 @@
         for attrname in d:
             if attrname.startswith("_") or attrname in skip:
                 if attrname not in extra: continue
-            print "Wrapping", attrname
             attr = getattr(cls, attrname)
             if type(attr) == types.MethodType:
                 setattr(cls, attrname, parallel_simple_proxy(attr))
@@ -165,6 +164,7 @@
 
 class ParallelAnalysisInterface(object):
     _grids = None
+    _distributed = parallel_capable
 
     def _get_grids(self, *args, **kwargs):
         if parallel_capable:
@@ -343,6 +343,14 @@
         else:
             return cStringIO.StringIO()
 
+    def _get_filename(self, prefix):
+        if not parallel_capable: return prefix
+        return "%s_%03i" % (prefix, MPI.COMM_WORLD.rank)
+
+    def _is_mine(self, obj):
+        if not obj._distributed: return True
+        return (obj._owner == MPI.COMM_WORLD.rank)
+
 __tocast = 'c'
 
 def _send_array(arr, dest, tag = 0):
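
The two helpers added above keep parallel output per-processor: _get_filename
appends the zero-padded MPI rank in parallel runs, and _is_mine is true only
on the processor that owns a distributed object.  A hedged illustration of the
naming scheme (the rank value is assumed):

    prefix, rank = "halos", 3            # rank 3 is an assumed example
    print "%s_%03i" % (prefix, rank)     # prints halos_003; serial runs keep
                                         # the bare prefix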

Modified: trunk/yt/lagos/UniversalFields.py
==============================================================================
--- trunk/yt/lagos/UniversalFields.py	(original)
+++ trunk/yt/lagos/UniversalFields.py	Wed Feb 25 09:23:41 2009
@@ -127,23 +127,30 @@
 add_field("SoundSpeed", function=_SoundSpeed,
           units=r"\rm{cm}/\rm{s}")
 
-def particle_func(p_field):
+def particle_func(p_field, dtype='float64'):
     def _Particles(field, data):
         if not data.NumberOfParticles > 0:
-            return na.array([], dtype='float64')
+            return na.array([], dtype=dtype)
         try:
-            return data._read_data(p_field).astype('float64')
+            return data._read_data(p_field).astype(dtype)
         except data._read_exception:
             pass
         # This is bad.  But it's the best idea I have right now.
-        return data._read_data(p_field.replace("_"," ")).astype('float64')
+        return data._read_data(p_field.replace("_"," ")).astype(dtype)
     return _Particles
-for pf in ["index", "type", "mass"] + \
+for pf in ["type", "mass"] + \
           ["position_%s" % ax for ax in 'xyz']:
     pfunc = particle_func("particle_%s" % (pf))
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _convRetainInt(data):
+    return 1
+add_field("particle_index", function=particle_func("particle_index", "int64"),
+          validators = [ValidateSpatial(0)], particle_type=True,
+          convert_function=_convRetainInt)
+
 def _get_vel_convert(ax):
     def _convert_p_vel(data):
         return data.convert("%s-velocity" % ax)
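
The dtype argument to particle_func plus the identity convert_function are
what keep particle_index integral.  Other integer particle fields could follow
the same pattern inside this module; a hedged sketch (the field name below is
hypothetical, not part of this commit):

    add_field("particle_dummy_id",
              function=particle_func("particle_dummy_id", "int64"),
              validators=[ValidateSpatial(0)], particle_type=True,
              convert_function=_convRetainInt)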

Modified: trunk/yt/lagos/hop/SS_HopOutput.py
==============================================================================
--- trunk/yt/lagos/hop/SS_HopOutput.py	(original)
+++ trunk/yt/lagos/hop/SS_HopOutput.py	Wed Feb 25 09:23:41 2009
@@ -156,7 +156,7 @@
     _processing = False
     _owner = 0
     indices = None
-    dont_wrap = ["get_sphere"]
+    dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
     def __init__(self, hop_output, id, indices = None):
@@ -245,15 +245,19 @@
     def get_size(self):
         return self.indices.size
 
-    def get_particle_indices(self):
-        return self["particle_index"]
-    
-    def get_particle_positions(self,f):
-        return self["particle_position_%s" % f]
-    
-    def get_particle_velocities(self,f):
-        return self["particle_velocity_%s" % f]
-    
+    @parallel_blocking_call
+    def write_particle_list(self, handle):
+        self._processing = True
+        gn = "Halo%08i" % (self.id)
+        handle.createGroup("/", gn)
+        for field in ["particle_position_%s" % ax for ax in 'xyz'] \
+                   + ["particle_velocity_%s" % ax for ax in 'xyz'] \
+                   + ["particle_index"]:
+            handle.createArray("/%s" % gn, field, self[field])
+        n = handle.getNode("/", gn)
+        # TODO: set attributes from derived quantities on n
+        self._processing = False
+
 class HaloFinder(HopList, ParallelAnalysisInterface):
     def __init__(self, pf, threshold=160.0, dm_only=True, padding=0.2):
         self.pf = pf
@@ -326,14 +330,11 @@
         self._groups = [HopGroup(self, i) for i in range(my_first_id)] + \
                        self._groups + \
                        [HopGroup(self, i) for i in range(after, nhalos)]
-        # MJT: Sorting doesn't work yet.  They need to be sorted.
-        #haloes.sort(lambda x, y: cmp(len(x.indices),len(y.indices)))
-        # Unfortunately, we can't sort *just yet*.
         id = 0
         for proc in sorted(halo_info.keys()):
             for halo in self._groups[id:id+halo_info[proc]]:
                 halo.id = id
-                halo._distributed = True
+                halo._distributed = self._distributed
                 halo._owner = proc
                 id += 1
         self._groups.sort(key = lambda h: -1 * h.get_size())
@@ -357,3 +358,10 @@
     def write_out(self, filename):
         f = self._write_on_root(filename)
         HopList.write_out(self, f)
+
+    def write_particle_lists(self, prefix):
+        fn = "%s.h5" % self._get_filename(prefix)
+        f = tables.openFile(fn, "w")
+        for halo in self._groups:
+            if not self._is_mine(halo): continue
+            halo.write_particle_list(f)
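
A hedged sketch of reading one halo's particle list back out of the resulting
file with PyTables (the file name assumes a parallel run, and halo 0 is an
arbitrary choice):

    import tables
    f = tables.openFile("halos_000.h5", "r")   # serial runs write halos.h5
    grp = f.getNode("/", "Halo00000000")       # groups are named Halo%08i
    x = grp.particle_position_x.read()
    indices = grp.particle_index.read()
    f.close()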


